diff --git a/LLama.Examples/LLama.Examples.csproj b/LLama.Examples/LLama.Examples.csproj
index 3ecacdfe..dcd40c19 100644
--- a/LLama.Examples/LLama.Examples.csproj
+++ b/LLama.Examples/LLama.Examples.csproj
@@ -30,6 +30,7 @@
+
diff --git a/LLama.Examples/NewVersion/GetEmbeddings.cs b/LLama.Examples/NewVersion/GetEmbeddings.cs
index fe9e3ea8..1e5b19be 100644
--- a/LLama.Examples/NewVersion/GetEmbeddings.cs
+++ b/LLama.Examples/NewVersion/GetEmbeddings.cs
@@ -4,7 +4,7 @@ namespace LLama.Examples.NewVersion
{
public class GetEmbeddings
{
- public static void Run()
+ public static Task Run()
{
Console.Write("Please input your model path: ");
var modelPath = Console.ReadLine();
@@ -23,6 +23,7 @@ namespace LLama.Examples.NewVersion
Console.WriteLine(string.Join(", ", embedder.GetEmbeddings(text)));
Console.WriteLine();
}
+ return Task.CompletedTask;
}
}
}
diff --git a/LLama.Examples/NewVersion/QuantizeModel.cs b/LLama.Examples/NewVersion/QuantizeModel.cs
index 71966af8..456d8929 100644
--- a/LLama.Examples/NewVersion/QuantizeModel.cs
+++ b/LLama.Examples/NewVersion/QuantizeModel.cs
@@ -2,7 +2,7 @@
{
public class QuantizeModel
{
- public static void Run()
+ public static Task Run()
{
Console.Write("Please input your original model path: ");
var inputPath = Console.ReadLine();
@@ -21,6 +21,8 @@
{
Console.WriteLine("Quantization failed!");
}
+
+ return Task.CompletedTask;
}
}
}
diff --git a/LLama.Examples/NewVersion/TestRunner.cs b/LLama.Examples/NewVersion/TestRunner.cs
index a21a2eed..c89cba30 100644
--- a/LLama.Examples/NewVersion/TestRunner.cs
+++ b/LLama.Examples/NewVersion/TestRunner.cs
@@ -1,109 +1,54 @@
-namespace LLama.Examples.NewVersion
+using System.Linq.Expressions;
+using Spectre.Console;
+
+namespace LLama.Examples.NewVersion
{
public class NewVersionTestRunner
{
+ static Dictionary<string, Func<Task>> Examples = new Dictionary<string, Func<Task>>
+ {
+ {"Run a chat session without stripping the role names.", () => ChatSessionWithRoleName.Run()},
+ {"Run a chat session with the role names stripped.",()=> ChatSessionStripRoleName.Run()},
+ {"Interactive mode chat by using executor.",()=> InteractiveModeExecute.Run()},
+ {"Instruct mode chat by using executor.",()=> InstructModeExecute.Run()},
+ {"Stateless mode chat by using executor.",()=> StatelessModeExecute.Run()},
+ {"Load and save chat session.",()=> SaveAndLoadSession.Run()},
+ {"Load and save state of model and executor.",()=> LoadAndSaveState.Run()},
+ {"Get embeddings from LLama model.",()=> GetEmbeddings.Run()},
+ {"Quantize the model.",()=> QuantizeModel.Run()},
+ {"Automatic conversation.",()=> TalkToYourself.Run()},
+ {"Constrain response to json format using grammar.",()=> GrammarJsonResponse.Run()},
+ {"Semantic Kernel Prompt.",()=> SemanticKernelPrompt.Run()},
+ {"Semantic Kernel Chat.",()=> SemanticKernelChat.Run()},
+ {"Semantic Kernel Memory.",()=> SemanticKernelMemory.Run()},
+ {"Coding Assistant.",()=> CodingAssistant.Run()},
+ {"Batch Decoding.",()=> BatchedDecoding.Run()},
+ {"SK Kernel Memory.",()=> KernelMemory.Run()},
+ {"Exit", ()=> Task.CompletedTask}
+ };
public static async Task Run()
{
- Console.WriteLine("================LLamaSharp Examples (New Version)==================\n");
-
- Console.WriteLine("Please input a number to choose an example to run:");
- Console.WriteLine("0: Run a chat session without stripping the role names.");
- Console.WriteLine("1: Run a chat session with the role names stripped.");
- Console.WriteLine("2: Interactive mode chat by using executor.");
- Console.WriteLine("3: Instruct mode chat by using executor.");
- Console.WriteLine("4: Stateless mode chat by using executor.");
- Console.WriteLine("5: Load and save chat session.");
- Console.WriteLine("6: Load and save state of model and executor.");
- Console.WriteLine("7: Get embeddings from LLama model.");
- Console.WriteLine("8: Quantize the model.");
- Console.WriteLine("9: Automatic conversation.");
- Console.WriteLine("10: Constrain response to json format using grammar.");
- Console.WriteLine("11: Semantic Kernel Prompt.");
- Console.WriteLine("12: Semantic Kernel Chat.");
- Console.WriteLine("13: Semantic Kernel Memory.");
- Console.WriteLine("14: Coding Assistant.");
- Console.WriteLine("15: Batch Decoding.");
- Console.WriteLine("16: SK Kernel Memory.");
+ AnsiConsole.Write(new Rule("LLamaSharp Examples"));
while (true)
{
- Console.Write("\nYour choice: ");
- int choice = int.Parse(Console.ReadLine());
+ var choice = AnsiConsole.Prompt(
+ new SelectionPrompt<string>()
+ .Title("Please choose[green] an example[/] to run: ")
+ .AddChoices(Examples.Keys));
- if (choice == 0)
- {
- await ChatSessionWithRoleName.Run();
- }
- else if (choice == 1)
- {
- await ChatSessionStripRoleName.Run();
- }
- else if (choice == 2)
- {
- await InteractiveModeExecute.Run();
- }
- else if (choice == 3)
- {
- await InstructModeExecute.Run();
- }
- else if (choice == 4)
- {
- await StatelessModeExecute.Run();
- }
- else if (choice == 5)
- {
- await SaveAndLoadSession.Run();
- }
- else if (choice == 6)
- {
- await LoadAndSaveState.Run();
- }
- else if (choice == 7)
- {
- GetEmbeddings.Run();
- }
- else if (choice == 8)
- {
- QuantizeModel.Run();
- }
- else if (choice == 9)
- {
- await TalkToYourself.Run();
- }
- else if (choice == 10)
- {
- await GrammarJsonResponse.Run();
- }
- else if (choice == 11)
- {
- await SemanticKernelPrompt.Run();
- }
- else if (choice == 12)
- {
- await SemanticKernelChat.Run();
- }
- else if (choice == 13)
- {
- await SemanticKernelMemory.Run();
- }
- else if (choice == 14)
- {
- await CodingAssistant.Run();
- }
- else if (choice == 15)
- {
- await BatchedDecoding.Run();
- }
- else if (choice == 16)
- {
- await KernelMemory.Run();
- }
- else
+
+ if (Examples.TryGetValue(choice, out var example))
{
- Console.WriteLine("Cannot parse your choice. Please select again.");
- continue;
+ if (choice == "Exit")
+ {
+ break;
+ }
+ AnsiConsole.Write(new Rule(choice));
+ await example();
}
- break;
+
+ AnsiConsole.Clear();
}
}
}