using System;
using LLama.Native;

namespace LLama.Sampling.Tokens;

using llama_token = Int32;

/// <summary>
/// Processes token logits before sampling, applying penalties to certain tokens
/// </summary>
public interface ITokenDataProcessor
    : IDisposable
{
    /// <summary>
    /// Process token logits in a LLamaTokenDataArray
    /// </summary>
    /// <param name="ctx">The context this is operating in</param>
    /// <param name="tokens">The token data array to process, created from the raw logits</param>
    /// <param name="lastTokens">The most recent tokens output by the model</param>
    void ProcessTokens(SafeLLamaContextHandle ctx, LLamaTokenDataArray tokens, ReadOnlySpan<llama_token> lastTokens);

    /// <summary>
    /// Inform this processor when a token is accepted by the model
    /// </summary>
    /// <param name="ctx">The context this is operating in</param>
    /// <param name="token">The token which was accepted</param>
    void AcceptToken(SafeLLamaContextHandle ctx, int token);

    /// <summary>
    /// Reset all internal sampling state
    /// </summary>
    void Reset();
}
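
// A minimal sketch of one possible implementation, shown purely as an example and not
// part of the library. It applies the classic llama.cpp-style repetition penalty
// (divide positive logits, multiply negative ones) to any candidate token that appears
// in lastTokens. It assumes LLamaTokenDataArray exposes its candidates as a
// Memory<LLamaTokenData> field named `data`, and that LLamaTokenData has mutable
// `id` and `logit` fields matching the native llama_token_data layout; adjust those
// names to whatever the real types expose. A sampling pipeline would call ProcessTokens
// before picking a token each step, AcceptToken with the chosen token, and Reset when
// the context is cleared.
public sealed class ExampleRepetitionPenaltyProcessor
    : ITokenDataProcessor
{
    private readonly float _penalty;

    public ExampleRepetitionPenaltyProcessor(float penalty = 1.1f)
    {
        _penalty = penalty;
    }

    public void ProcessTokens(SafeLLamaContextHandle ctx, LLamaTokenDataArray tokens, ReadOnlySpan<llama_token> lastTokens)
    {
        // `tokens.data` is assumed to be a Memory<LLamaTokenData> over the candidate list
        var candidates = tokens.data.Span;

        for (var i = 0; i < candidates.Length; i++)
        {
            // Only penalise candidates that were recently output
            if (lastTokens.IndexOf(candidates[i].id) < 0)
                continue;

            // Shrink positive logits and grow negative ones, making repeats less likely
            if (candidates[i].logit > 0)
                candidates[i].logit /= _penalty;
            else
                candidates[i].logit *= _penalty;
        }
    }

    public void AcceptToken(SafeLLamaContextHandle ctx, int token)
    {
        // This sketch is stateless; a stateful processor (e.g. a frequency penalty)
        // would record the accepted token here
    }

    public void Reset()
    {
        // Nothing to reset in this stateless sketch
    }

    public void Dispose()
    {
        // No unmanaged resources are held by this sketch
    }
}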