using System;
using System.Runtime.InteropServices;
namespace LLama.Native;
using llama_token = Int32;
/// <summary>
/// Input data for llama_decode.
/// A llama_batch object can contain input about one or many sequences.
/// The provided arrays (i.e. token, embd, pos, etc.) must have size of n_tokens.
/// </summary>
[StructLayout(LayoutKind.Sequential)]
public unsafe struct LLamaNativeBatch
{
/// <summary>
/// The number of items pointed at by pos, seq_id and logits.
/// </summary>
public int n_tokens;
/// <summary>
/// Either `n_tokens` of `llama_token`, or `NULL`, depending on how this batch was created.
/// </summary>
public readonly llama_token* token;
/// <summary>
/// Either `n_tokens * embd * sizeof(float)` or `NULL`, depending on how this batch was created.
/// </summary>
public readonly float* embd;
/// <summary>
/// The positions of the respective tokens in the sequence.
/// </summary>
public readonly LLamaPos* pos;
/// <summary>
/// The number of sequence ids for each token, i.e. how many entries of the corresponding seq_id array apply to it.
/// See https://github.com/ggerganov/llama.cpp/blob/master/llama.h#L139
/// </summary>
public readonly int* n_seq_id;
/// <summary>
/// The sequences to which the respective token belongs; seq_id[i] points at n_seq_id[i] sequence ids.
/// </summary>
public readonly LLamaSeqId** seq_id;
/// <summary>
/// If zero, the logits for the respective token will not be output.
/// </summary>
public readonly byte* logits;
// Note from llama.cpp:
// > helpers for smooth API transition - can be deprecated in the future
// > for future-proof code, use the above fields instead and ignore everything below
private LLamaPos _all_pos_0;
private LLamaPos _all_pos_1;
private LLamaSeqId _all_seq_id;
}
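
// A minimal usage sketch, not part of LLamaSharp itself: it assumes a
// LLamaNativeBatch that has already been populated by native code, and
// only demonstrates how the parallel arrays relate. For token i, pos[i]
// is its position, seq_id[i] points at n_seq_id[i] sequence ids, and
// logits[i] flags whether logits will be produced for it. The class and
// method names below are hypothetical.
public static unsafe class LLamaNativeBatchExample
{
    /// <summary>
    /// Hypothetical helper: dump a batch in human-readable form.
    /// </summary>
    public static void Dump(in LLamaNativeBatch batch)
    {
        for (var i = 0; i < batch.n_tokens; i++)
        {
            // token is NULL when the batch carries embeddings (embd) instead
            var token = batch.token != null ? batch.token[i].ToString() : "<embd>";

            // Each token can belong to several sequences; n_seq_id[i] gives
            // the number of valid entries in seq_id[i]
            var seqIds = new LLamaSeqId[batch.n_seq_id[i]];
            for (var j = 0; j < seqIds.Length; j++)
                seqIds[j] = batch.seq_id[i][j];

            Console.WriteLine(
                $"token={token} pos={batch.pos[i]} " +
                $"seqs=[{string.Join(", ", seqIds)}] " +
                $"logits={(batch.logits[i] != 0 ? "yes" : "no")}");
        }
    }
}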