
🔀 Remove unused code and update examples

- Moved the example files from the NewVersion folder to the Examples folder and updated their namespaces to LLama.Examples.Examples.
- Removed TestRunner.cs (the NewVersionTestRunner class).
- Added Runner.cs, a Spectre.Console menu that registers all of the examples.
- Updated Program.cs: the using directive now imports LLama.Examples.Examples, and await NewVersionTestRunner.Run() is replaced with await Runner.Run().
tags/v0.8.1
xbotter 2 years ago
parent commit 521e36903c
20 changed files with 72 additions and 131 deletions
1. LLama.Examples/Examples/BatchedDecoding.cs (+1, -1)
2. LLama.Examples/Examples/ChatSessionStripRoleName.cs (+1, -1)
3. LLama.Examples/Examples/ChatSessionWithRoleName.cs (+1, -1)
4. LLama.Examples/Examples/CodingAssistant.cs (+1, -1)
5. LLama.Examples/Examples/GetEmbeddings.cs (+1, -1)
6. LLama.Examples/Examples/GrammarJsonResponse.cs (+1, -1)
7. LLama.Examples/Examples/InstructModeExecute.cs (+1, -1)
8. LLama.Examples/Examples/InteractiveModeExecute.cs (+1, -1)
9. LLama.Examples/Examples/KernelMemory.cs (+1, -1)
10. LLama.Examples/Examples/LoadAndSaveSession.cs (+1, -1)
11. LLama.Examples/Examples/LoadAndSaveState.cs (+1, -1)
12. LLama.Examples/Examples/QuantizeModel.cs (+1, -1)
13. LLama.Examples/Examples/Runner.cs (+53, -0)
14. LLama.Examples/Examples/SemanticKernelChat.cs (+1, -1)
15. LLama.Examples/Examples/SemanticKernelMemory.cs (+1, -1)
16. LLama.Examples/Examples/SemanticKernelPrompt.cs (+1, -1)
17. LLama.Examples/Examples/StatelessModeExecute.cs (+1, -1)
18. LLama.Examples/Examples/TalkToYourself.cs (+1, -1)
19. LLama.Examples/NewVersion/TestRunner.cs (+0, -112)
20. LLama.Examples/Program.cs (+2, -2)

LLama.Examples/NewVersion/BatchedDecoding.cs → LLama.Examples/Examples/BatchedDecoding.cs

@@ -4,7 +4,7 @@ using System.Text;
using LLama.Common;
using LLama.Native;

-namespace LLama.Examples.NewVersion;
+namespace LLama.Examples.Examples;

/// <summary>
/// This demonstrates generating multiple replies to the same prompt, with a shared cache

LLama.Examples/NewVersion/ChatSessionStripRoleName.cs → LLama.Examples/Examples/ChatSessionStripRoleName.cs

@@ -1,6 +1,6 @@
using LLama.Common;

-namespace LLama.Examples.NewVersion
+namespace LLama.Examples.Examples
{
public class ChatSessionStripRoleName
{

LLama.Examples/NewVersion/ChatSessionWithRoleName.cs → LLama.Examples/Examples/ChatSessionWithRoleName.cs

@@ -1,6 +1,6 @@
using LLama.Common;

-namespace LLama.Examples.NewVersion
+namespace LLama.Examples.Examples
{
public class ChatSessionWithRoleName
{

LLama.Examples/NewVersion/CodingAssistant.cs → LLama.Examples/Examples/CodingAssistant.cs

@@ -1,4 +1,4 @@
-namespace LLama.Examples.NewVersion
+namespace LLama.Examples.Examples
{
using LLama.Common;
using System;

LLama.Examples/NewVersion/GetEmbeddings.cs → LLama.Examples/Examples/GetEmbeddings.cs

@@ -1,6 +1,6 @@
using LLama.Common;

-namespace LLama.Examples.NewVersion
+namespace LLama.Examples.Examples
{
public class GetEmbeddings
{

LLama.Examples/NewVersion/GrammarJsonResponse.cs → LLama.Examples/Examples/GrammarJsonResponse.cs

@@ -1,7 +1,7 @@
using LLama.Common;
using LLama.Grammars;

-namespace LLama.Examples.NewVersion
+namespace LLama.Examples.Examples
{
public class GrammarJsonResponse
{

LLama.Examples/NewVersion/InstructModeExecute.cs → LLama.Examples/Examples/InstructModeExecute.cs

@@ -1,6 +1,6 @@
using LLama.Common;

-namespace LLama.Examples.NewVersion
+namespace LLama.Examples.Examples
{
public class InstructModeExecute
{

LLama.Examples/NewVersion/InteractiveModeExecute.cs → LLama.Examples/Examples/InteractiveModeExecute.cs

@@ -1,6 +1,6 @@
using LLama.Common;

-namespace LLama.Examples.NewVersion
+namespace LLama.Examples.Examples
{
public class InteractiveModeExecute
{

LLama.Examples/NewVersion/KernelMemory.cs → LLama.Examples/Examples/KernelMemory.cs

@@ -7,7 +7,7 @@ using LLamaSharp.KernelMemory;
using Microsoft.KernelMemory;
using Microsoft.KernelMemory.Handlers;

-namespace LLama.Examples.NewVersion
+namespace LLama.Examples.Examples
{
public class KernelMemory
{

LLama.Examples/NewVersion/LoadAndSaveSession.cs → LLama.Examples/Examples/LoadAndSaveSession.cs

@@ -1,6 +1,6 @@
using LLama.Common;

-namespace LLama.Examples.NewVersion
+namespace LLama.Examples.Examples
{
public class SaveAndLoadSession
{

LLama.Examples/NewVersion/LoadAndSaveState.cs → LLama.Examples/Examples/LoadAndSaveState.cs

@@ -1,6 +1,6 @@
using LLama.Common;

-namespace LLama.Examples.NewVersion
+namespace LLama.Examples.Examples
{
public class LoadAndSaveState
{

LLama.Examples/NewVersion/QuantizeModel.cs → LLama.Examples/Examples/QuantizeModel.cs

@@ -1,4 +1,4 @@
-namespace LLama.Examples.NewVersion
+namespace LLama.Examples.Examples
{
public class QuantizeModel
{

LLama.Examples/Examples/Runner.cs (new file, +53, -0)

@@ -0,0 +1,53 @@
using Spectre.Console;

namespace LLama.Examples.Examples;

public class Runner
{
static Dictionary<string, Func<Task>> Examples = new()
{
{ "Run a chat session without stripping the role names.", () => ChatSessionWithRoleName.Run() },
{ "Run a chat session with the role names stripped.", () => ChatSessionStripRoleName.Run() },
{ "Interactive mode chat by using executor.", () => InteractiveModeExecute.Run() },
{ "Instruct mode chat by using executor.", () => InstructModeExecute.Run() },
{ "Stateless mode chat by using executor.", () => StatelessModeExecute.Run() },
{ "Load and save chat session.", () => SaveAndLoadSession.Run() },
{ "Load and save state of model and executor.", () => LoadAndSaveState.Run() },
{ "Get embeddings from LLama model.", () => Task.Run(GetEmbeddings.Run) },
{ "Quantize the model.", () => Task.Run(QuantizeModel.Run) },
{ "Automatic conversation.", () => TalkToYourself.Run() },
{ "Constrain response to json format using grammar.", () => GrammarJsonResponse.Run() },
{ "Semantic Kernel Prompt.", () => SemanticKernelPrompt.Run() },
{ "Semantic Kernel Chat.", () => SemanticKernelChat.Run() },
{ "Semantic Kernel Memory.", () => SemanticKernelMemory.Run() },
{ "Coding Assistant.", () => CodingAssistant.Run() },
{ "Batch Decoding.", () => BatchedDecoding.Run() },
{ "SK Kernel Memory.", () => KernelMemory.Run() },
{ "Exit", () => Task.CompletedTask }
};
public static async Task Run()
{
AnsiConsole.Write(new Rule("LLamaSharp Examples"));

while (true)
{
var choice = AnsiConsole.Prompt(
new SelectionPrompt<string>()
.Title("Please choose[green] an example[/] to run: ")
.AddChoices(Examples.Keys));


if (Examples.TryGetValue(choice, out var example))
{
if (choice == "Exit")
{
break;
}
AnsiConsole.Write(new Rule(choice));
await example();
}

AnsiConsole.Clear();
}
}
}
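
The new Runner maps each menu label to a Func<Task> delegate and builds the Spectre.Console SelectionPrompt from the dictionary keys, so wiring up another example is a one-line change. A minimal sketch of such an entry (MyNewExample is a hypothetical class, not part of this commit):

// Hypothetical entry for the Examples dictionary above; the key becomes
// a menu item, and the delegate is awaited when the item is selected.
{ "Run my new example.", () => MyNewExample.Run() },
// Synchronous examples are wrapped in Task.Run, as GetEmbeddings and
// QuantizeModel are above:
{ "Run my synchronous example.", () => Task.Run(MyNewExample.RunSync) },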

LLama.Examples/NewVersion/SemanticKernelChat.cs → LLama.Examples/Examples/SemanticKernelChat.cs

@@ -3,7 +3,7 @@ using LLama.Common;
using Microsoft.SemanticKernel.AI.ChatCompletion;
using LLamaSharp.SemanticKernel.ChatCompletion;

-namespace LLama.Examples.NewVersion
+namespace LLama.Examples.Examples
{
public class SemanticKernelChat
{

LLama.Examples/NewVersion/SemanticKernelMemory.cs → LLama.Examples/Examples/SemanticKernelMemory.cs

@@ -5,7 +5,7 @@ using LLamaSharp.SemanticKernel.TextEmbedding;
using Microsoft.SemanticKernel.AI.Embeddings;
using Microsoft.SemanticKernel.Plugins.Memory;

-namespace LLama.Examples.NewVersion
+namespace LLama.Examples.Examples
{
public class SemanticKernelMemory
{

LLama.Examples/NewVersion/SemanticKernelPrompt.cs → LLama.Examples/Examples/SemanticKernelPrompt.cs

@@ -5,7 +5,7 @@ using Microsoft.SemanticKernel;
using Microsoft.SemanticKernel.AI.TextCompletion;
using LLamaSharp.SemanticKernel.TextCompletion;

-namespace LLama.Examples.NewVersion
+namespace LLama.Examples.Examples
{
public class SemanticKernelPrompt
{

LLama.Examples/NewVersion/StatelessModeExecute.cs → LLama.Examples/Examples/StatelessModeExecute.cs

@@ -1,7 +1,7 @@
using LLama.Common;
using LLama.Examples.Extensions;

-namespace LLama.Examples.NewVersion
+namespace LLama.Examples.Examples
{
public class StatelessModeExecute
{

LLama.Examples/NewVersion/TalkToYourself.cs → LLama.Examples/Examples/TalkToYourself.cs

@@ -3,7 +3,7 @@ using System.Text;
using LLama.Abstractions;
using LLama.Common;

-namespace LLama.Examples.NewVersion
+namespace LLama.Examples.Examples
{
public class TalkToYourself
{

LLama.Examples/NewVersion/TestRunner.cs (deleted, +0, -112)

@@ -1,112 +0,0 @@
namespace LLama.Examples.NewVersion
{
public class NewVersionTestRunner
{
public static async Task Run()
{
Console.WriteLine("================LLamaSharp Examples (New Version)==================\n");

Console.WriteLine("Please input a number to choose an example to run:");
Console.WriteLine("0: Run a chat session without stripping the role names.");
Console.WriteLine("1: Run a chat session with the role names stripped.");
Console.WriteLine("2: Interactive mode chat by using executor.");
Console.WriteLine("3: Instruct mode chat by using executor.");
Console.WriteLine("4: Stateless mode chat by using executor.");
Console.WriteLine("5: Load and save chat session.");
Console.WriteLine("6: Load and save state of model and executor.");
Console.WriteLine("7: Get embeddings from LLama model.");
Console.WriteLine("8: Quantize the model.");
Console.WriteLine("9: Automatic conversation.");
Console.WriteLine("10: Constrain response to json format using grammar.");
Console.WriteLine("11: Semantic Kernel Prompt.");
Console.WriteLine("12: Semantic Kernel Chat.");
Console.WriteLine("13: Semantic Kernel Memory.");
Console.WriteLine("14: Coding Assistant.");
Console.WriteLine("15: Batch Decoding.");
Console.WriteLine("16: SK Kernel Memory.");

while (true)
{
Console.Write("\nYour choice: ");
int choice = int.Parse(Console.ReadLine());

if (choice == 0)
{
await ChatSessionWithRoleName.Run();
}
else if (choice == 1)
{
await ChatSessionStripRoleName.Run();
}
else if (choice == 2)
{
await InteractiveModeExecute.Run();
}
else if (choice == 3)
{
await InstructModeExecute.Run();
}
else if (choice == 4)
{
await StatelessModeExecute.Run();
}
else if (choice == 5)
{
await SaveAndLoadSession.Run();
}
else if (choice == 6)
{
await LoadAndSaveState.Run();
}
else if (choice == 7)
{
GetEmbeddings.Run();
}
else if (choice == 8)
{
QuantizeModel.Run();
}
else if (choice == 9)
{
await TalkToYourself.Run();
}
else if (choice == 10)
{
await GrammarJsonResponse.Run();
}
else if (choice == 11)
{
await SemanticKernelPrompt.Run();
}
else if (choice == 12)
{
await SemanticKernelChat.Run();
}
else if (choice == 13)
{
await SemanticKernelMemory.Run();
}
else if (choice == 14)
{
await CodingAssistant.Run();
}
else if (choice == 15)
{
await BatchedDecoding.Run();
}
else if (choice == 16)
{
await KernelMemory.Run();
}
else
{
Console.WriteLine("Cannot parse your choice. Please select again.");
continue;
}
break;
}
}
}


}
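
For context, the removed runner read the menu choice with int.Parse(Console.ReadLine()), which throws a FormatException on non-numeric input (and an ArgumentNullException on a null read) rather than printing the retry message, which only handled out-of-range integers; the new SelectionPrompt avoids parsing altogether. Had the numeric menu been kept, a safer sketch (not part of this commit) would be:

// int.TryParse returns false on bad or null input instead of throwing,
// which makes the retry branch reachable for any malformed choice.
if (!int.TryParse(Console.ReadLine(), out var choice))
{
    Console.WriteLine("Cannot parse your choice. Please select again.");
    continue;
}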

LLama.Examples/Program.cs (+2, -2)

@@ -1,4 +1,4 @@
-using LLama.Examples.NewVersion;
+using LLama.Examples.Examples;
using LLama.Native;

Console.WriteLine("======================================================================================================");
@@ -12,4 +12,4 @@ NativeLibraryConfig.Instance.WithCuda().WithLogs();
NativeApi.llama_empty_call();
Console.WriteLine();

-await NewVersionTestRunner.Run();
+await Runner.Run();
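
With this change Program.cs reduces to a handful of top-level statements. Piecing together the context lines visible in the two hunks above (the banner text between them is elided from the diff, so this is a sketch of the resulting file rather than an exact copy):

using LLama.Examples.Examples;
using LLama.Native;

// Configure the native backend before any other LLamaSharp call, then
// force the native library to load with an empty call.
NativeLibraryConfig.Instance.WithCuda().WithLogs();
NativeApi.llama_empty_call();
Console.WriteLine();

// Hand control over to the Spectre.Console example menu.
await Runner.Run();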
