You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-') and can be up to 35 characters long.

LLavaWeightsTests.cs 1.8 kB

April 2024 Binary Update (#662) * Updated binaries, using [this build](https://github.com/SciSharp/LLamaSharp/actions/runs/8654672719/job/23733195669) for llama.cpp commit `f7001ccc5aa359fcf41bba19d1c99c3d25c9bcc7`. - Added all new functions. - Moved some functions (e.g. `SafeLlamaModelHandle` specific functions) into `SafeLlamaModelHandle.cs` - Exposed tokens on `SafeLlamaModelHandle` and `LLamaWeights` through a `Tokens` property. As new special tokens are added in the future they can be added here. - Changed all token properties to return nullable tokens, to handle some models not having some tokens. - Fixed `DefaultSamplingPipeline` to handle no newline token in some models. * Moved native methods to more specific locations. - Context specific things have been moved into `SafeLLamaContextHandle.cs` and made private - they're exposed through C# properties and methods already. - Checking that GPU layer count is zero if GPU offload is not supported. - Moved methods for creating default structs (`llama_model_quantize_default_params` and `llama_context_default_params`) into relevant structs. * Removed exception if `GpuLayerCount > 0` when GPU is not supported. * - Added low level wrapper methods for new per-sequence state load/save in `SafeLLamaContextHandle` - Added high level wrapper methods (save/load with `State` object or memory mapped file) in `LLamaContext` - Moved native methods for per-sequence state load/save into `SafeLLamaContextHandle` * Added update and defrag methods for KV cache in `SafeLLamaContextHandle` * Updated submodule to `f7001ccc5aa359fcf41bba19d1c99c3d25c9bcc7` * Passing the sequence ID when saving a single sequence state
1 year ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354
  1. using LLama.Common;
  2. using LLama.Native;
  3. namespace LLama.Unittest
  4. {
  5. // Test the same things as llama model + image embedings
  6. //
  7. public sealed class LLavaWeightTests
  8. : IDisposable
  9. {
  10. private readonly LLamaWeights _llamaWeights;
  11. private readonly LLavaWeights _lLavaWeights;
  12. private readonly LLamaContext _context;
  13. public LLavaWeightTests()
  14. {
  15. var @params = new ModelParams(Constants.GenerativeModelPath)
  16. {
  17. // Llava models requires big context
  18. ContextSize = 4096,
  19. GpuLayerCount = Constants.CIGpuLayerCount,
  20. };
  21. _llamaWeights = LLamaWeights.LoadFromFile(@params);
  22. _lLavaWeights = LLavaWeights.LoadFromFile(Constants.LLavaMmpPath);
  23. _context = _llamaWeights.CreateContext(@params);
  24. }
  25. public void Dispose()
  26. {
  27. _llamaWeights.Dispose();
  28. _lLavaWeights.Dispose();
  29. }
  30. [Fact,Trait("Category", "NoCI")]
  31. public void EmbedImageAsFileName()
  32. {
  33. int n_past = 0;
  34. SafeLlavaImageEmbedHandle emb = _lLavaWeights.CreateImageEmbeddings(_context, Constants.LLavaImage);
  35. Assert.True( _lLavaWeights.EvalImageEmbed( _context, emb, ref n_past ) );
  36. }
  37. [Fact,Trait("Category", "NoCI")]
  38. public void EmbedImageAsBinary()
  39. {
  40. int n_past = 0;
  41. byte[] image = System.IO.File.ReadAllBytes(Constants.LLavaImage);
  42. SafeLlavaImageEmbedHandle emb = _lLavaWeights.CreateImageEmbeddings(_context, image);
  43. Assert.True( _lLavaWeights.EvalImageEmbed( _context, emb, ref n_past ) );
  44. }
  45. }
  46. }