using System;
using System.Collections.Generic;
using Microsoft.SemanticKernel.AI;

namespace LLamaSharp.SemanticKernel.ChatCompletion;

public class ChatRequestSettings : AIRequestSettings
{
    /// <summary>
    /// Temperature controls the randomness of the completion.
    /// The higher the temperature, the more random the completion.
    /// </summary>
    public double Temperature { get; set; } = 0;

    /// <summary>
    /// TopP controls the diversity of the completion.
    /// The higher the TopP, the more diverse the completion.
    /// </summary>
    public double TopP { get; set; } = 0;

    /// <summary>
    /// Number between -2.0 and 2.0. Positive values penalize new tokens
    /// based on whether they appear in the text so far, increasing the
    /// model's likelihood to talk about new topics.
    /// </summary>
    public double PresencePenalty { get; set; } = 0;

    /// <summary>
    /// Number between -2.0 and 2.0. Positive values penalize new tokens
    /// based on their existing frequency in the text so far, decreasing
    /// the model's likelihood to repeat the same line verbatim.
    /// </summary>
    public double FrequencyPenalty { get; set; } = 0;

    /// <summary>
    /// Sequences where the completion will stop generating further tokens.
    /// </summary>
    public IList<string> StopSequences { get; set; } = Array.Empty<string>();

    /// <summary>
    /// How many completions to generate for each prompt. Default is 1.
    /// Note: Because this parameter generates many completions, it can quickly consume your token quota.
    /// Use carefully and ensure that you have reasonable settings for max_tokens and stop.
    /// </summary>
    public int ResultsPerPrompt { get; set; } = 1;

    /// <summary>
    /// The maximum number of tokens to generate in the completion.
    /// </summary>
    public int? MaxTokens { get; set; }

    /// <summary>
    /// Modify the likelihood of specified tokens appearing in the completion.
    /// </summary>
    public IDictionary<int, int> TokenSelectionBiases { get; set; } = new Dictionary<int, int>();
}
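
// Usage sketch (illustrative, not part of the original file): building a
// ChatRequestSettings instance tuned for a more varied completion before
// passing it to a chat completion call. The values and the helper class name
// are example choices made here; only members declared above are referenced.
public static class ChatRequestSettingsExample
{
    public static ChatRequestSettings CreateCreativeSettings() => new()
    {
        Temperature = 0.7,                            // raise randomness above the deterministic default of 0
        TopP = 0.95,                                  // sample from the top 95% of probability mass
        FrequencyPenalty = 0.5,                       // discourage verbatim repetition
        StopSequences = new List<string> { "User:" }, // stop when the model starts a new user turn
        MaxTokens = 256,                              // cap the completion length
    };
}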