
Merge pull request #300 from martindevans/minor_demos_cleanup

Minor cleanup on example code
tags/v0.8.1
Martin Evans, 2 years ago
commit 9905baa7c4
2 changed files with 24 additions and 26 deletions
  1. LLama.Examples/Examples/BatchedDecoding.cs (+4, -2)
  2. LLama.Examples/Examples/Runner.cs (+20, -24)

LLama.Examples/Examples/BatchedDecoding.cs (+4, -2)

@@ -1,5 +1,4 @@
 using System.Diagnostics;
-using System.Security.Cryptography;
 using System.Text;
 using LLama.Common;
 using LLama.Native;
@@ -17,7 +16,7 @@ public class BatchedDecoding

     private const int top_k = 80;
     private const float top_p = 0.8f;
-    private const float temp = 0.5f;
+    private const float temp = 0.75f;

     public static async Task Run()
     {
@@ -173,5 +172,8 @@ public class BatchedDecoding
             Console.ForegroundColor = ConsoleColor.Red;
             Console.WriteLine(text);
         }
+
+        Console.WriteLine("Press any key to exit demo");
+        Console.ReadKey(true);
     }
 }
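For context on what the three constants above control: the snippet below is a minimal, self-contained sketch of temperature, top-k, and top-p sampling over a plain logits array. It is not LLamaSharp's implementation (the example delegates sampling to the native llama.cpp samplers); the names SamplingSketch and Sample are illustrative only.

// A minimal sketch (not LLamaSharp's own sampler, which lives in native
// llama.cpp) of what top_k, top_p and temp control when picking the next
// token from a float[] of logits. All names here are illustrative.
using System;
using System.Collections.Generic;
using System.Linq;

static class SamplingSketch
{
    public static int Sample(float[] logits, int topK, float topP, float temp, Random rng)
    {
        // Temperature: scale logits before softmax. temp < 1 sharpens the
        // distribution (more deterministic), temp > 1 flattens it.
        var scaled = logits.Select(l => l / temp).ToArray();

        // Softmax, subtracting the max logit for numerical stability.
        var max = scaled.Max();
        var exps = scaled.Select(l => MathF.Exp(l - max)).ToArray();
        var sum = exps.Sum();

        // Top-k: keep only the k most probable tokens.
        var candidates = exps
            .Select((e, i) => (p: e / sum, i))
            .OrderByDescending(t => t.p)
            .Take(topK);

        // Top-p (nucleus): truncate to the smallest prefix of candidates
        // whose cumulative probability reaches topP.
        var kept = new List<(float p, int i)>();
        var mass = 0f;
        foreach (var c in candidates)
        {
            kept.Add(c);
            mass += c.p;
            if (mass >= topP)
                break;
        }

        // Draw from the surviving tokens, renormalized to their total mass.
        var r = rng.NextSingle() * mass;
        var acc = 0f;
        foreach (var (p, i) in kept)
        {
            acc += p;
            if (r <= acc)
                return i;
        }
        return kept[^1].i;
    }
}

Against that backdrop, raising temp from 0.5 to 0.75 flattens the distribution, so the demo produces more varied output.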

LLama.Examples/Examples/Runner.cs (+20, -24)

@@ -4,27 +4,28 @@ namespace LLama.Examples.Examples;

 public class Runner
 {
-    static Dictionary<string, Func<Task>> Examples = new()
+    private static readonly Dictionary<string, Func<Task>> Examples = new()
     {
-        {"Run a chat session without stripping the role names.", () => ChatSessionWithRoleName.Run()},
-        {"Run a chat session with the role names stripped.",()=> ChatSessionStripRoleName.Run()},
-        {"Interactive mode chat by using executor.",()=> InteractiveModeExecute.Run()},
-        {"Instruct mode chat by using executor.",()=> InstructModeExecute.Run()},
-        {"Stateless mode chat by using executor.",()=> StatelessModeExecute.Run()},
-        {"Load and save chat session.",()=> SaveAndLoadSession.Run()},
-        {"Load and save state of model and executor.",()=> LoadAndSaveState.Run()},
-        {"Get embeddings from LLama model.",()=> Task.Run(GetEmbeddings.Run)},
-        {"Quantize the model.",()=> Task.Run(QuantizeModel.Run)},
-        {"Automatic conversation.",()=> TalkToYourself.Run()},
-        {"Constrain response to json format using grammar.",()=> GrammarJsonResponse.Run()},
-        {"Semantic Kernel Prompt.",()=> SemanticKernelPrompt.Run()},
-        {"Semantic Kernel Chat.",()=> SemanticKernelChat.Run()},
-        {"Semantic Kernel Memory.",()=> SemanticKernelMemory.Run()},
-        {"Coding Assistant.",()=> CodingAssistant.Run()},
-        {"Batch Decoding.",()=> BatchedDecoding.Run()},
-        {"SK Kernel Memory.",()=> KernelMemory.Run()},
-        {"Exit", ()=> Task.CompletedTask}
+        { "Run a chat session without stripping the role names.", ChatSessionWithRoleName.Run },
+        { "Run a chat session with the role names stripped.", ChatSessionStripRoleName.Run },
+        { "Interactive mode chat by using executor.", InteractiveModeExecute.Run },
+        { "Instruct mode chat by using executor.", InstructModeExecute.Run },
+        { "Stateless mode chat by using executor.", StatelessModeExecute.Run },
+        { "Load and save chat session.", SaveAndLoadSession.Run },
+        { "Load and save state of model and executor.", LoadAndSaveState.Run },
+        { "Get embeddings from LLama model.", () => Task.Run(GetEmbeddings.Run) },
+        { "Quantize the model.", () => Task.Run(QuantizeModel.Run) },
+        { "Automatic conversation.", TalkToYourself.Run },
+        { "Constrain response to json format using grammar.", GrammarJsonResponse.Run },
+        { "Semantic Kernel Prompt.", SemanticKernelPrompt.Run },
+        { "Semantic Kernel Chat.", SemanticKernelChat.Run },
+        { "Semantic Kernel Memory.", SemanticKernelMemory.Run },
+        { "Coding Assistant.", CodingAssistant.Run },
+        { "Batch Decoding.", BatchedDecoding.Run },
+        { "SK Kernel Memory.", KernelMemory.Run },
+        { "Exit", async () => Environment.Exit(0) }
     };

     public static async Task Run()
     {
         AnsiConsole.Write(new Rule("LLamaSharp Examples"));
@@ -36,13 +37,8 @@ public class Runner
                 .Title("Please choose[green] an example[/] to run: ")
                 .AddChoices(Examples.Keys));

-
             if (Examples.TryGetValue(choice, out var example))
             {
-                if (choice == "Exit")
-                {
-                    break;
-                }
                 AnsiConsole.Write(new Rule(choice));
                 await example();
             }
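The Runner refactor replaces lambdas such as () => ChatSessionWithRoleName.Run() with method groups, which C# converts to Func<Task> directly, and swaps the special-cased break on "Exit" for an entry whose delegate calls Environment.Exit(0) itself. Below is a stripped-down sketch of the same pattern; DemoA and DemoB are placeholders, not the real LLamaSharp examples.

// Stripped-down sketch of the dictionary-driven menu pattern the diff
// adopts. DemoA/DemoB stand in for the real example classes.
using System;
using System.Collections.Generic;
using System.Threading.Tasks;

static class DemoA { public static Task Run() { Console.WriteLine("A ran"); return Task.CompletedTask; } }
static class DemoB { public static Task Run() { Console.WriteLine("B ran"); return Task.CompletedTask; } }

static class MenuSketch
{
    // Method groups (DemoA.Run) convert to Func<Task> without a lambda;
    // synchronous work could still be wrapped via () => Task.Run(...).
    private static readonly Dictionary<string, Func<Task>> Examples = new()
    {
        { "Demo A", DemoA.Run },
        { "Demo B", DemoB.Run },
        // Exiting is just another entry, so the loop needs no special case.
        { "Exit", async () => Environment.Exit(0) },
    };

    public static async Task Run()
    {
        while (true)
        {
            Console.WriteLine("Choose: " + string.Join(" | ", Examples.Keys));
            var choice = Console.ReadLine() ?? "";
            if (Examples.TryGetValue(choice, out var example))
                await example();
        }
    }
}

Because Environment.Exit(0) ends the process from inside the delegate, the old if (choice == "Exit") break; check in the loop becomes dead weight and can be deleted, which is exactly what the second hunk does.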

