From 6c400e64c2bc6ab724ccb6ff573a29028f2c70f0 Mon Sep 17 00:00:00 2001
From: Yaohui Liu
Date: Tue, 20 Jun 2023 02:38:57 +0800
Subject: [PATCH] docs: publish documentation 0.4.

---
 LLama/ChatSession.cs | 44 +
 LLama/Common/ChatHistory.cs | 3 +
 LLama/Common/SessionParams.cs | 27 -
 LLama/LLamaEmbedder.cs | 3 +
 LLama/LLamaExecutorBase.cs | 127 +
 LLama/LLamaInstructExecutor.cs | 29 +
 LLama/LLamaInteractExecutor.cs | 23 +-
 LLama/LLamaModel.cs | 3 +
 LLama/LLamaQuantizer.cs | 5 +-
 LLama/LLamaStatelessExecutor.cs | 11 +-
 LLama/LLamaTransforms.cs | 24 +
 LLama/ResettableLLamaModel.cs | 11 +
 docs/sciprts/map_xml_files_to_yml.py | 16 +
 docs/xmldocs/index.md | 121 +
 .../llama.abstractions.ihistorytransform.md | 49 +
 .../llama.abstractions.illamaexecutor.md | 66 +
 ...llama.abstractions.itextstreamtransform.md | 43 +
 .../llama.abstractions.itexttransform.md | 33 +
 docs/xmldocs/llama.chatsession.md | 243 +
 docs/xmldocs/llama.common.authorrole.md | 15 +
 docs/xmldocs/llama.common.chathistory.md | 53 +
 docs/xmldocs/llama.common.fixedsizequeue-1.md | 111 +
 docs/xmldocs/llama.common.illamalogger.md | 28 +
 docs/xmldocs/llama.common.inferenceparams.md | 264 +
 .../llama.common.llamadefaultlogger.md | 121 +
 docs/xmldocs/llama.common.mirostatetype.md | 15 +
 docs/xmldocs/llama.common.modelparams.md | 234 +
 docs/xmldocs/llama.exceptions.runtimeerror.md | 110 +
 .../llama.extensions.dictionaryextension.md | 73 +
 docs/xmldocs/llama.instructexecutor.md | 142 +
 docs/xmldocs/llama.interactiveexecutor.md | 142 +
 docs/xmldocs/llama.llamaembedder.md | 64 +
 docs/xmldocs/llama.llamamodel.md | 282 +
 docs/xmldocs/llama.llamaquantizer.md | 75 +
 docs/xmldocs/llama.llamatransforms.md | 19 +
 .../llama.native.llamacontextparams.md | 99 +
 docs/xmldocs/llama.native.llamaftype.md | 15 +
 docs/xmldocs/llama.native.llamatokendata.md | 51 +
 .../llama.native.llamatokendataarray.md | 45 +
 .../llama.native.llamatokendataarraynative.md | 29 +
 docs/xmldocs/llama.native.nativeapi.md | 786 ++
 .../llama.native.safellamacontexthandle.md | 56 +
 .../llama.native.safellamahandlebase.md | 44 +
 .../llama.oldversion.chatcompletion.md | 188 +
 .../llama.oldversion.chatcompletionchoice.md | 146 +
 .../llama.oldversion.chatcompletionchunk.md | 174 +
 ...ma.oldversion.chatcompletionchunkchoice.md | 146 +
 ...ama.oldversion.chatcompletionchunkdelta.md | 132 +
 .../llama.oldversion.chatcompletionmessage.md | 146 +
 .../llama.oldversion.chatmessagerecord.md | 132 +
 docs/xmldocs/llama.oldversion.chatrole.md | 15 +
 .../xmldocs/llama.oldversion.chatsession-1.md | 93 +
 docs/xmldocs/llama.oldversion.completion.md | 188 +
 .../llama.oldversion.completionchoice.md | 160 +
 .../llama.oldversion.completionchunk.md | 174 +
 .../llama.oldversion.completionlogprobs.md | 160 +
 .../llama.oldversion.completionusage.md | 146 +
 docs/xmldocs/llama.oldversion.embedding.md | 160 +
 .../xmldocs/llama.oldversion.embeddingdata.md | 146 +
 .../llama.oldversion.embeddingusage.md | 132 +
 docs/xmldocs/llama.oldversion.ichatmodel.md | 63 +
 .../xmldocs/llama.oldversion.llamaembedder.md | 50 +
 docs/xmldocs/llama.oldversion.llamamodel.md | 362 +
 docs/xmldocs/llama.oldversion.llamaparams.md | 357 +
 docs/xmldocs/llama.resettablellamamodel.md | 101 +
 docs/xmldocs/llama.statefulexecutorbase.md | 234 +
 docs/xmldocs/llama.statelessexecutor.md | 80 +
 mkdocs.yml | 63 +-
 site/404.html | 1526 ++++
 site/Architecher/index.html | 1629 ++++
 site/ChatSession/basic-usages/index.html | 1653 ++++
 site/ChatSession/save-load-session/index.html | 1565 ++++
 site/ChatSession/transforms/index.html | 1845 +++++
site/ContributingGuide/index.html | 1711 +++++ site/GetStarted/index.html | 1748 +++++ site/HighLevelApps/bot-sharp/index.html | 1558 ++++ site/LLamaExecutors/differences/index.html | 1667 ++++ site/LLamaExecutors/parameters/index.html | 1689 +++++ .../LLamaExecutors/save-load-state/index.html | 1576 ++++ .../text-to-text-apis/index.html | 1569 ++++ site/LLamaModel/embeddings/index.html | 1564 ++++ site/LLamaModel/parameters/index.html | 1668 ++++ site/LLamaModel/quantization/index.html | 1574 ++++ site/LLamaModel/save-load-state/index.html | 1572 ++++ site/LLamaModel/tokenization/index.html | 1631 ++++ site/NonEnglishUsage/Chinese/index.html | 1558 ++++ site/Tricks/index.html | 1681 +++++ site/assets/images/favicon.png | Bin 0 -> 1870 bytes .../assets/javascripts/bundle.a51614de.min.js | 29 + .../javascripts/bundle.a51614de.min.js.map | 8 + .../javascripts/lunr/min/lunr.ar.min.js | 1 + .../javascripts/lunr/min/lunr.da.min.js | 18 + .../javascripts/lunr/min/lunr.de.min.js | 18 + .../javascripts/lunr/min/lunr.du.min.js | 18 + .../javascripts/lunr/min/lunr.es.min.js | 18 + .../javascripts/lunr/min/lunr.fi.min.js | 18 + .../javascripts/lunr/min/lunr.fr.min.js | 18 + .../javascripts/lunr/min/lunr.hi.min.js | 1 + .../javascripts/lunr/min/lunr.hu.min.js | 18 + .../javascripts/lunr/min/lunr.hy.min.js | 1 + .../javascripts/lunr/min/lunr.it.min.js | 18 + .../javascripts/lunr/min/lunr.ja.min.js | 1 + .../javascripts/lunr/min/lunr.jp.min.js | 1 + .../javascripts/lunr/min/lunr.kn.min.js | 1 + .../javascripts/lunr/min/lunr.ko.min.js | 1 + .../javascripts/lunr/min/lunr.multi.min.js | 1 + .../javascripts/lunr/min/lunr.nl.min.js | 18 + .../javascripts/lunr/min/lunr.no.min.js | 18 + .../javascripts/lunr/min/lunr.pt.min.js | 18 + .../javascripts/lunr/min/lunr.ro.min.js | 18 + .../javascripts/lunr/min/lunr.ru.min.js | 18 + .../javascripts/lunr/min/lunr.sa.min.js | 1 + .../lunr/min/lunr.stemmer.support.min.js | 1 + .../javascripts/lunr/min/lunr.sv.min.js | 18 + .../javascripts/lunr/min/lunr.ta.min.js | 1 + .../javascripts/lunr/min/lunr.te.min.js | 1 + .../javascripts/lunr/min/lunr.th.min.js | 1 + .../javascripts/lunr/min/lunr.tr.min.js | 18 + .../javascripts/lunr/min/lunr.vi.min.js | 1 + .../javascripts/lunr/min/lunr.zh.min.js | 1 + site/assets/javascripts/lunr/tinyseg.js | 206 + site/assets/javascripts/lunr/wordcut.js | 6708 +++++++++++++++++ .../workers/search.208ed371.min.js | 42 + .../workers/search.208ed371.min.js.map | 8 + site/assets/stylesheets/main.26e3688c.min.css | 1 + .../stylesheets/main.26e3688c.min.css.map | 1 + .../stylesheets/palette.ecc896b0.min.css | 1 + .../stylesheets/palette.ecc896b0.min.css.map | 1 + site/index.html | 1657 ++++ site/media/LLamaSharpLogo.png | Bin 0 -> 41408 bytes site/media/structure.jpg | Bin 0 -> 164821 bytes site/media/structure.vsdx | Bin 0 -> 33293 bytes site/sciprts/map_xml_files_to_yml.py | 16 + site/search/search_index.json | 1 + site/sitemap.xml.gz | Bin 0 -> 127 bytes site/xmldocs/index.html | 1747 +++++ .../index.html | 1749 +++++ .../index.html | 1823 +++++ .../index.html | 1744 +++++ .../index.html | 1688 +++++ site/xmldocs/llama.chatsession/index.html | 2485 ++++++ .../llama.common.authorrole/index.html | 1625 ++++ .../llama.common.chathistory/index.html | 1788 +++++ .../llama.common.fixedsizequeue-1/index.html | 2071 +++++ .../llama.common.illamalogger/index.html | 1670 ++++ .../llama.common.inferenceparams/index.html | 2589 +++++++ .../index.html | 2090 +++++ .../llama.common.mirostatetype/index.html | 1625 ++++ .../llama.common.modelparams/index.html | 
2364 ++++++ .../llama.exceptions.runtimeerror/index.html | 2070 +++++ .../index.html | 1827 +++++ .../xmldocs/llama.instructexecutor/index.html | 2165 ++++++ .../llama.interactiveexecutor/index.html | 2165 ++++++ site/xmldocs/llama.llamaembedder/index.html | 1794 +++++ site/xmldocs/llama.llamamodel/index.html | 2548 +++++++ site/xmldocs/llama.llamaquantizer/index.html | 1793 +++++ site/xmldocs/llama.llamatransforms/index.html | 1638 ++++ .../index.html | 1818 +++++ .../llama.native.llamaftype/index.html | 1625 ++++ .../llama.native.llamatokendata/index.html | 1748 +++++ .../index.html | 1745 +++++ .../index.html | 1671 ++++ .../xmldocs/llama.native.nativeapi/index.html | 4125 ++++++++++ .../index.html | 1855 +++++ .../index.html | 1783 +++++ .../index.html | 2363 ++++++ .../index.html | 2222 ++++++ .../index.html | 2316 ++++++ .../index.html | 2222 ++++++ .../index.html | 2175 ++++++ .../index.html | 2222 ++++++ .../index.html | 2175 ++++++ .../llama.oldversion.chatrole/index.html | 1625 ++++ .../llama.oldversion.chatsession-1/index.html | 1957 +++++ .../llama.oldversion.completion/index.html | 2363 ++++++ .../index.html | 2269 ++++++ .../index.html | 2316 ++++++ .../index.html | 2269 ++++++ .../index.html | 2222 ++++++ .../llama.oldversion.embedding/index.html | 2269 ++++++ .../llama.oldversion.embeddingdata/index.html | 2222 ++++++ .../index.html | 2175 ++++++ .../llama.oldversion.ichatmodel/index.html | 1846 +++++ .../llama.oldversion.llamaembedder/index.html | 1774 +++++ .../llama.oldversion.llamamodel/index.html | 2637 +++++++ .../llama.oldversion.llamaparams/index.html | 2447 ++++++ .../llama.resettablellamamodel/index.html | 1971 +++++ .../llama.statefulexecutorbase/index.html | 2416 ++++++ .../llama.statelessexecutor/index.html | 1893 +++++ 189 files changed, 159213 insertions(+), 31 deletions(-) delete mode 100644 LLama/Common/SessionParams.cs create mode 100644 docs/sciprts/map_xml_files_to_yml.py create mode 100644 docs/xmldocs/index.md create mode 100644 docs/xmldocs/llama.abstractions.ihistorytransform.md create mode 100644 docs/xmldocs/llama.abstractions.illamaexecutor.md create mode 100644 docs/xmldocs/llama.abstractions.itextstreamtransform.md create mode 100644 docs/xmldocs/llama.abstractions.itexttransform.md create mode 100644 docs/xmldocs/llama.chatsession.md create mode 100644 docs/xmldocs/llama.common.authorrole.md create mode 100644 docs/xmldocs/llama.common.chathistory.md create mode 100644 docs/xmldocs/llama.common.fixedsizequeue-1.md create mode 100644 docs/xmldocs/llama.common.illamalogger.md create mode 100644 docs/xmldocs/llama.common.inferenceparams.md create mode 100644 docs/xmldocs/llama.common.llamadefaultlogger.md create mode 100644 docs/xmldocs/llama.common.mirostatetype.md create mode 100644 docs/xmldocs/llama.common.modelparams.md create mode 100644 docs/xmldocs/llama.exceptions.runtimeerror.md create mode 100644 docs/xmldocs/llama.extensions.dictionaryextension.md create mode 100644 docs/xmldocs/llama.instructexecutor.md create mode 100644 docs/xmldocs/llama.interactiveexecutor.md create mode 100644 docs/xmldocs/llama.llamaembedder.md create mode 100644 docs/xmldocs/llama.llamamodel.md create mode 100644 docs/xmldocs/llama.llamaquantizer.md create mode 100644 docs/xmldocs/llama.llamatransforms.md create mode 100644 docs/xmldocs/llama.native.llamacontextparams.md create mode 100644 docs/xmldocs/llama.native.llamaftype.md create mode 100644 docs/xmldocs/llama.native.llamatokendata.md create mode 100644 docs/xmldocs/llama.native.llamatokendataarray.md 
create mode 100644 docs/xmldocs/llama.native.llamatokendataarraynative.md create mode 100644 docs/xmldocs/llama.native.nativeapi.md create mode 100644 docs/xmldocs/llama.native.safellamacontexthandle.md create mode 100644 docs/xmldocs/llama.native.safellamahandlebase.md create mode 100644 docs/xmldocs/llama.oldversion.chatcompletion.md create mode 100644 docs/xmldocs/llama.oldversion.chatcompletionchoice.md create mode 100644 docs/xmldocs/llama.oldversion.chatcompletionchunk.md create mode 100644 docs/xmldocs/llama.oldversion.chatcompletionchunkchoice.md create mode 100644 docs/xmldocs/llama.oldversion.chatcompletionchunkdelta.md create mode 100644 docs/xmldocs/llama.oldversion.chatcompletionmessage.md create mode 100644 docs/xmldocs/llama.oldversion.chatmessagerecord.md create mode 100644 docs/xmldocs/llama.oldversion.chatrole.md create mode 100644 docs/xmldocs/llama.oldversion.chatsession-1.md create mode 100644 docs/xmldocs/llama.oldversion.completion.md create mode 100644 docs/xmldocs/llama.oldversion.completionchoice.md create mode 100644 docs/xmldocs/llama.oldversion.completionchunk.md create mode 100644 docs/xmldocs/llama.oldversion.completionlogprobs.md create mode 100644 docs/xmldocs/llama.oldversion.completionusage.md create mode 100644 docs/xmldocs/llama.oldversion.embedding.md create mode 100644 docs/xmldocs/llama.oldversion.embeddingdata.md create mode 100644 docs/xmldocs/llama.oldversion.embeddingusage.md create mode 100644 docs/xmldocs/llama.oldversion.ichatmodel.md create mode 100644 docs/xmldocs/llama.oldversion.llamaembedder.md create mode 100644 docs/xmldocs/llama.oldversion.llamamodel.md create mode 100644 docs/xmldocs/llama.oldversion.llamaparams.md create mode 100644 docs/xmldocs/llama.resettablellamamodel.md create mode 100644 docs/xmldocs/llama.statefulexecutorbase.md create mode 100644 docs/xmldocs/llama.statelessexecutor.md create mode 100644 site/404.html create mode 100644 site/Architecher/index.html create mode 100644 site/ChatSession/basic-usages/index.html create mode 100644 site/ChatSession/save-load-session/index.html create mode 100644 site/ChatSession/transforms/index.html create mode 100644 site/ContributingGuide/index.html create mode 100644 site/GetStarted/index.html create mode 100644 site/HighLevelApps/bot-sharp/index.html create mode 100644 site/LLamaExecutors/differences/index.html create mode 100644 site/LLamaExecutors/parameters/index.html create mode 100644 site/LLamaExecutors/save-load-state/index.html create mode 100644 site/LLamaExecutors/text-to-text-apis/index.html create mode 100644 site/LLamaModel/embeddings/index.html create mode 100644 site/LLamaModel/parameters/index.html create mode 100644 site/LLamaModel/quantization/index.html create mode 100644 site/LLamaModel/save-load-state/index.html create mode 100644 site/LLamaModel/tokenization/index.html create mode 100644 site/NonEnglishUsage/Chinese/index.html create mode 100644 site/Tricks/index.html create mode 100644 site/assets/images/favicon.png create mode 100644 site/assets/javascripts/bundle.a51614de.min.js create mode 100644 site/assets/javascripts/bundle.a51614de.min.js.map create mode 100644 site/assets/javascripts/lunr/min/lunr.ar.min.js create mode 100644 site/assets/javascripts/lunr/min/lunr.da.min.js create mode 100644 site/assets/javascripts/lunr/min/lunr.de.min.js create mode 100644 site/assets/javascripts/lunr/min/lunr.du.min.js create mode 100644 site/assets/javascripts/lunr/min/lunr.es.min.js create mode 100644 site/assets/javascripts/lunr/min/lunr.fi.min.js create mode 
100644 site/assets/javascripts/lunr/min/lunr.fr.min.js create mode 100644 site/assets/javascripts/lunr/min/lunr.hi.min.js create mode 100644 site/assets/javascripts/lunr/min/lunr.hu.min.js create mode 100644 site/assets/javascripts/lunr/min/lunr.hy.min.js create mode 100644 site/assets/javascripts/lunr/min/lunr.it.min.js create mode 100644 site/assets/javascripts/lunr/min/lunr.ja.min.js create mode 100644 site/assets/javascripts/lunr/min/lunr.jp.min.js create mode 100644 site/assets/javascripts/lunr/min/lunr.kn.min.js create mode 100644 site/assets/javascripts/lunr/min/lunr.ko.min.js create mode 100644 site/assets/javascripts/lunr/min/lunr.multi.min.js create mode 100644 site/assets/javascripts/lunr/min/lunr.nl.min.js create mode 100644 site/assets/javascripts/lunr/min/lunr.no.min.js create mode 100644 site/assets/javascripts/lunr/min/lunr.pt.min.js create mode 100644 site/assets/javascripts/lunr/min/lunr.ro.min.js create mode 100644 site/assets/javascripts/lunr/min/lunr.ru.min.js create mode 100644 site/assets/javascripts/lunr/min/lunr.sa.min.js create mode 100644 site/assets/javascripts/lunr/min/lunr.stemmer.support.min.js create mode 100644 site/assets/javascripts/lunr/min/lunr.sv.min.js create mode 100644 site/assets/javascripts/lunr/min/lunr.ta.min.js create mode 100644 site/assets/javascripts/lunr/min/lunr.te.min.js create mode 100644 site/assets/javascripts/lunr/min/lunr.th.min.js create mode 100644 site/assets/javascripts/lunr/min/lunr.tr.min.js create mode 100644 site/assets/javascripts/lunr/min/lunr.vi.min.js create mode 100644 site/assets/javascripts/lunr/min/lunr.zh.min.js create mode 100644 site/assets/javascripts/lunr/tinyseg.js create mode 100644 site/assets/javascripts/lunr/wordcut.js create mode 100644 site/assets/javascripts/workers/search.208ed371.min.js create mode 100644 site/assets/javascripts/workers/search.208ed371.min.js.map create mode 100644 site/assets/stylesheets/main.26e3688c.min.css create mode 100644 site/assets/stylesheets/main.26e3688c.min.css.map create mode 100644 site/assets/stylesheets/palette.ecc896b0.min.css create mode 100644 site/assets/stylesheets/palette.ecc896b0.min.css.map create mode 100644 site/index.html create mode 100644 site/media/LLamaSharpLogo.png create mode 100644 site/media/structure.jpg create mode 100644 site/media/structure.vsdx create mode 100644 site/sciprts/map_xml_files_to_yml.py create mode 100644 site/search/search_index.json create mode 100644 site/sitemap.xml.gz create mode 100644 site/xmldocs/index.html create mode 100644 site/xmldocs/llama.abstractions.ihistorytransform/index.html create mode 100644 site/xmldocs/llama.abstractions.illamaexecutor/index.html create mode 100644 site/xmldocs/llama.abstractions.itextstreamtransform/index.html create mode 100644 site/xmldocs/llama.abstractions.itexttransform/index.html create mode 100644 site/xmldocs/llama.chatsession/index.html create mode 100644 site/xmldocs/llama.common.authorrole/index.html create mode 100644 site/xmldocs/llama.common.chathistory/index.html create mode 100644 site/xmldocs/llama.common.fixedsizequeue-1/index.html create mode 100644 site/xmldocs/llama.common.illamalogger/index.html create mode 100644 site/xmldocs/llama.common.inferenceparams/index.html create mode 100644 site/xmldocs/llama.common.llamadefaultlogger/index.html create mode 100644 site/xmldocs/llama.common.mirostatetype/index.html create mode 100644 site/xmldocs/llama.common.modelparams/index.html create mode 100644 site/xmldocs/llama.exceptions.runtimeerror/index.html create mode 100644 
site/xmldocs/llama.extensions.dictionaryextension/index.html create mode 100644 site/xmldocs/llama.instructexecutor/index.html create mode 100644 site/xmldocs/llama.interactiveexecutor/index.html create mode 100644 site/xmldocs/llama.llamaembedder/index.html create mode 100644 site/xmldocs/llama.llamamodel/index.html create mode 100644 site/xmldocs/llama.llamaquantizer/index.html create mode 100644 site/xmldocs/llama.llamatransforms/index.html create mode 100644 site/xmldocs/llama.native.llamacontextparams/index.html create mode 100644 site/xmldocs/llama.native.llamaftype/index.html create mode 100644 site/xmldocs/llama.native.llamatokendata/index.html create mode 100644 site/xmldocs/llama.native.llamatokendataarray/index.html create mode 100644 site/xmldocs/llama.native.llamatokendataarraynative/index.html create mode 100644 site/xmldocs/llama.native.nativeapi/index.html create mode 100644 site/xmldocs/llama.native.safellamacontexthandle/index.html create mode 100644 site/xmldocs/llama.native.safellamahandlebase/index.html create mode 100644 site/xmldocs/llama.oldversion.chatcompletion/index.html create mode 100644 site/xmldocs/llama.oldversion.chatcompletionchoice/index.html create mode 100644 site/xmldocs/llama.oldversion.chatcompletionchunk/index.html create mode 100644 site/xmldocs/llama.oldversion.chatcompletionchunkchoice/index.html create mode 100644 site/xmldocs/llama.oldversion.chatcompletionchunkdelta/index.html create mode 100644 site/xmldocs/llama.oldversion.chatcompletionmessage/index.html create mode 100644 site/xmldocs/llama.oldversion.chatmessagerecord/index.html create mode 100644 site/xmldocs/llama.oldversion.chatrole/index.html create mode 100644 site/xmldocs/llama.oldversion.chatsession-1/index.html create mode 100644 site/xmldocs/llama.oldversion.completion/index.html create mode 100644 site/xmldocs/llama.oldversion.completionchoice/index.html create mode 100644 site/xmldocs/llama.oldversion.completionchunk/index.html create mode 100644 site/xmldocs/llama.oldversion.completionlogprobs/index.html create mode 100644 site/xmldocs/llama.oldversion.completionusage/index.html create mode 100644 site/xmldocs/llama.oldversion.embedding/index.html create mode 100644 site/xmldocs/llama.oldversion.embeddingdata/index.html create mode 100644 site/xmldocs/llama.oldversion.embeddingusage/index.html create mode 100644 site/xmldocs/llama.oldversion.ichatmodel/index.html create mode 100644 site/xmldocs/llama.oldversion.llamaembedder/index.html create mode 100644 site/xmldocs/llama.oldversion.llamamodel/index.html create mode 100644 site/xmldocs/llama.oldversion.llamaparams/index.html create mode 100644 site/xmldocs/llama.resettablellamamodel/index.html create mode 100644 site/xmldocs/llama.statefulexecutorbase/index.html create mode 100644 site/xmldocs/llama.statelessexecutor/index.html diff --git a/LLama/ChatSession.cs b/LLama/ChatSession.cs index 961f6b6e..26fad9d3 100644 --- a/LLama/ChatSession.cs +++ b/LLama/ChatSession.cs @@ -8,36 +8,73 @@ using System.Threading; namespace LLama { + /// + /// The main chat session class. + /// public class ChatSession { private ILLamaExecutor _executor; private ChatHistory _history; private static readonly string _executorStateFilename = "ExecutorState.json"; private static readonly string _modelStateFilename = "ModelState.st"; + /// + /// The executor for this session. + /// public ILLamaExecutor Executor => _executor; + /// + /// The chat history for this session. 
+ /// public ChatHistory History => _history; + /// + /// The history transform used in this session. + /// public IHistoryTransform HistoryTransform { get; set; } = new LLamaTransforms.DefaultHistoryTransform(); + /// + /// The input transform pipeline used in this session. + /// public List InputTransformPipeline { get; set; } = new(); + /// + /// The output transform used in this session. + /// public ITextStreamTransform OutputTransform = new LLamaTransforms.EmptyTextOutputStreamTransform(); + /// + /// + /// + /// The executor for this session public ChatSession(ILLamaExecutor executor) { _executor = executor; _history = new ChatHistory(); } + /// + /// Use a custom history transform. + /// + /// + /// public ChatSession WithHistoryTransform(IHistoryTransform transform) { HistoryTransform = transform; return this; } + /// + /// Add a text transform to the input transform pipeline. + /// + /// + /// public ChatSession AddInputTransform(ITextTransform transform) { InputTransformPipeline.Add(transform); return this; } + /// + /// Use a custom output transform. + /// + /// + /// public ChatSession WithOutputTransform(ITextStreamTransform transform) { OutputTransform = transform; @@ -155,6 +192,13 @@ namespace LLama History.Messages.AddRange(HistoryTransform.TextToHistory(AuthorRole.Assistant, sb.ToString()).Messages); } + /// + /// Get the response from the LLama model with chat histories asynchronously. + /// + /// + /// + /// + /// public async IAsyncEnumerable ChatAsync(string prompt, InferenceParams? inferenceParams = null, [EnumeratorCancellation] CancellationToken cancellationToken = default) { foreach (var inputTransform in InputTransformPipeline) diff --git a/LLama/Common/ChatHistory.cs b/LLama/Common/ChatHistory.cs index d62dac69..fecf9004 100644 --- a/LLama/Common/ChatHistory.cs +++ b/LLama/Common/ChatHistory.cs @@ -12,6 +12,9 @@ namespace LLama.Common Assistant = 2, } // copy from semantic-kernel + /// + /// The chat history class + /// public class ChatHistory { diff --git a/LLama/Common/SessionParams.cs b/LLama/Common/SessionParams.cs deleted file mode 100644 index c0d20308..00000000 --- a/LLama/Common/SessionParams.cs +++ /dev/null @@ -1,27 +0,0 @@ -using System; -using System.Collections.Generic; -using System.Text; - -namespace LLama.Common -{ - public class SessionParams - { - public string? UserName { get; set; } - public string? AssistantName { get; set; } - public string? SystemName { get; set; } - /// - /// The prefix of input text. Note that this only works when you - /// use the API with text as input. - /// - public string? InputPrefix { get; set; } - /// - /// The suffix of input text. Note that this only works when you - /// use the API with text as input. - /// - public string? InputSuffix { get; set; } - /// - /// Whether to trim the names from the text output at the start and end. - /// - public bool TrimNamesFromOutput { get; set; } = false; - } -} diff --git a/LLama/LLamaEmbedder.cs b/LLama/LLamaEmbedder.cs index a9983ce0..c1733eae 100644 --- a/LLama/LLamaEmbedder.cs +++ b/LLama/LLamaEmbedder.cs @@ -8,6 +8,9 @@ using LLama.Common; namespace LLama { + /// + /// The embedder for LLama, which supports getting embeddings from text. 
+ /// public class LLamaEmbedder : IDisposable { SafeLLamaContextHandle _ctx; diff --git a/LLama/LLamaExecutorBase.cs b/LLama/LLamaExecutorBase.cs index 4756cef2..9ea2ca85 100644 --- a/LLama/LLamaExecutorBase.cs +++ b/LLama/LLamaExecutorBase.cs @@ -13,20 +13,64 @@ using System.Threading; namespace LLama { using llama_token = Int32; + /// + /// The base class for stateful LLama executors. + /// public abstract class StatefulExecutorBase : ILLamaExecutor { + /// + /// The loaded model for this executor. + /// protected readonly LLamaModel _model; + /// + /// The logger used by this executor. + /// protected ILLamaLogger? _logger; + /// + /// The tokens that were already processed by the model. + /// protected int _pastTokensCount; // n_past + /// + /// The tokens that were consumed by the model during the current inference. + /// protected int _consumedTokensCount; // n_consume + /// + /// + /// protected int _n_session_consumed; + /// + /// + /// protected int _n_matching_session_tokens; + /// + /// The path of the session file. + /// protected string? _pathSession; + /// + /// A container for the tokens to be processed and those already processed. + /// protected List<llama_token> _embeds = new(); // embd + /// + /// A container for the tokens of input. + /// protected List<llama_token> _embed_inps = new(); + /// + /// + /// protected List<llama_token> _session_tokens = new(); + /// + /// The last tokens generated by the model. + /// protected FixedSizeQueue<llama_token> _last_n_tokens; + /// + /// The model used by the executor. + /// public LLamaModel Model => _model; + /// + /// + /// + /// + /// protected StatefulExecutorBase(LLamaModel model, ILLamaLogger? logger = null) { _model = model; @@ -39,6 +83,13 @@ namespace LLama _last_n_tokens = new FixedSizeQueue<llama_token>(_model.ContextSize).FillWith(0); } + /// + /// This API is currently not verified. + /// + /// + /// + /// + /// public unsafe StatefulExecutorBase WithSessionFile(string filename) { _pathSession = filename; @@ -94,12 +145,20 @@ namespace LLama return this; } + /// + /// This API has not been verified currently. + /// + /// public void SaveSessionFile(string filename) { var session_token_array = _session_tokens.ToArray(); NativeApi.llama_save_session_file(_model.NativeHandle, filename, session_token_array, (ulong)session_token_array.Length); } + /// + /// After running out of the context, take some tokens from the original prompt and recompute the logits in batches. + /// + /// protected virtual void HandleRunOutOfContext(int tokensToKeep) { // if we run out of context: @@ -116,6 +175,9 @@ namespace LLama _pathSession = string.Empty; } + /// + /// Try to reuse the matching prefix from the session file. + /// protected virtual void TryReuseMathingPrefix() { if (_n_session_consumed < _session_tokens.Count) @@ -146,16 +208,61 @@ namespace LLama } } + /// + /// Decide whether to continue the loop. + /// + /// + /// protected abstract bool GetLoopCondition(InferStateArgs args); + /// + /// Preprocess the inputs before the inference. + /// + /// + /// protected abstract void PreprocessInputs(string text, InferStateArgs args); + /// + /// Do some post processing after the inference. + /// + /// + /// + /// + /// protected abstract bool PostProcess(InferenceParams inferenceParams, InferStateArgs args, out IEnumerable<string>? extraOutputs); + /// + /// The core inference logic. + /// + /// + /// protected abstract void InferInternal(InferenceParams inferenceParams, InferStateArgs args); + /// + /// Save the current state to a file. 
+ /// + /// public abstract void SaveState(string filename); + /// + /// Get the current state data. + /// + /// public abstract ExecutorBaseState GetStateData(); + /// + /// Load the state from data. + /// + /// public abstract void LoadState(ExecutorBaseState data); + /// + /// Load the state from a file. + /// + /// public abstract void LoadState(string filename); + /// + /// Execute the inference. + /// + /// + /// + /// + /// public virtual IEnumerable Infer(string text, InferenceParams? inferenceParams = null, CancellationToken cancellationToken = default) { cancellationToken.ThrowIfCancellationRequested(); @@ -205,6 +312,14 @@ namespace LLama } } } + + /// + /// Execute the inference asynchronously. + /// + /// + /// + /// + /// public virtual async IAsyncEnumerable InferAsync(string text, InferenceParams? inferenceParams = null, [EnumeratorCancellation] CancellationToken cancellationToken = default) { foreach (var result in Infer(text, inferenceParams, cancellationToken)) @@ -218,13 +333,25 @@ namespace LLama /// protected class InferStateArgs { + /// + /// + /// public IList? Antiprompts { get; set; } /// /// Tokens count remained to be used. (n_remain) /// public int RemainedTokens { get; set; } + /// + /// + /// public bool ReturnValue { get; set; } + /// + /// + /// public bool WaitForInput { get; set; } + /// + /// + /// public bool NeedToSaveSession { get; set; } } diff --git a/LLama/LLamaInstructExecutor.cs b/LLama/LLamaInstructExecutor.cs index ba68168c..a2bc9c74 100644 --- a/LLama/LLamaInstructExecutor.cs +++ b/LLama/LLamaInstructExecutor.cs @@ -11,11 +11,20 @@ using System.Text.Json.Serialization; namespace LLama { using llama_token = Int32; + /// + /// The LLama executor for instruct mode. + /// public class InstructExecutor : StatefulExecutorBase { bool _is_prompt_run = true; llama_token[] _inp_pfx; llama_token[] _inp_sfx; + /// + /// + /// + /// + /// + /// public InstructExecutor(LLamaModel model, string instructionPrefix = "\n\n### Instruction:\n\n", string instructionSuffix = "\n\n### Response:\n\n") : base(model) { @@ -23,6 +32,7 @@ namespace LLama _inp_sfx = _model.Tokenize(instructionSuffix, false).ToArray(); } + /// public override ExecutorBaseState GetStateData() { InstructExecutorState state = new() @@ -43,6 +53,7 @@ namespace LLama }; return state; } + /// public override void LoadState(ExecutorBaseState data) { if(data is InstructExecutorState state) @@ -66,6 +77,7 @@ namespace LLama } } + /// public override void SaveState(string filename) { InstructExecutorState state = GetStateData() as InstructExecutorState; @@ -74,6 +86,7 @@ namespace LLama JsonSerializer.Serialize(fs, state); } } + /// public override void LoadState(string filename) { using (FileStream fs = new FileStream(filename, FileMode.Open, FileAccess.Read)) @@ -83,10 +96,12 @@ namespace LLama } } + /// protected override bool GetLoopCondition(InferStateArgs args) { return args.RemainedTokens != 0 || _is_prompt_run; } + /// protected override void PreprocessInputs(string text, InferStateArgs args) { if (_is_prompt_run) @@ -112,6 +127,7 @@ namespace LLama args.RemainedTokens -= line_inp.Count(); } } + /// protected override bool PostProcess(InferenceParams inferenceParams, InferStateArgs args, out IEnumerable? 
extraOutputs) { extraOutputs = null; @@ -154,6 +170,7 @@ namespace LLama } return false; } + /// protected override void InferInternal(InferenceParams inferenceParams, InferStateArgs args) { if (_embeds.Count > 0) @@ -214,12 +231,24 @@ namespace LLama } } } + /// + /// The descriptor of the state of the instruct executor. + /// public class InstructExecutorState : ExecutorBaseState { + /// + /// Whether the executor is running for the first time (running the prompt). + /// [JsonPropertyName("is_prompt_run")] public bool IsPromptRun { get; set; } + /// + /// Instruction prefix tokens. + /// [JsonPropertyName("inp_pfx")] public llama_token[] InputPrefixTokens { get; set; } + /// + /// Instruction suffix tokens. + /// [JsonPropertyName("inp_sfx")] public llama_token[] InputSuffixTokens { get; set; } } diff --git a/LLama/LLamaInteractExecutor.cs b/LLama/LLamaInteractExecutor.cs index 4964e326..44448aeb 100644 --- a/LLama/LLamaInteractExecutor.cs +++ b/LLama/LLamaInteractExecutor.cs @@ -14,15 +14,23 @@ using System.Threading.Tasks; namespace LLama { using llama_token = Int32; + /// + /// The LLama executor for interactive mode. + /// public class InteractiveExecutor : StatefulExecutorBase { bool _is_prompt_run = true; llama_token[] _llama_token_newline; + /// + /// + /// + /// public InteractiveExecutor(LLamaModel model) : base(model) { _llama_token_newline = Utils.Tokenize(_model.NativeHandle, "\n", false, _model.Encoding).ToArray(); } + /// public override ExecutorBaseState GetStateData() { InteractiveExecutorState state = new() @@ -42,6 +50,7 @@ namespace LLama }; return state; } + /// public override void LoadState(ExecutorBaseState data) { if (data is InteractiveExecutorState state) @@ -61,7 +70,7 @@ namespace LLama else throw new ArgumentException("Invalid state data type."); } - + /// public override void SaveState(string filename) { InteractiveExecutorState state = GetStateData() as InteractiveExecutorState; @@ -70,6 +79,7 @@ namespace LLama JsonSerializer.Serialize(fs, state); } } + /// public override void LoadState(string filename) { using (FileStream fs = new FileStream(filename, FileMode.Open, FileAccess.Read)) @@ -88,6 +98,7 @@ namespace LLama return args.RemainedTokens != 0 && !args.WaitForInput || _is_prompt_run; } + /// protected override void PreprocessInputs(string text, InferStateArgs args) { if (_is_prompt_run) @@ -156,6 +167,7 @@ namespace LLama return false; } + /// protected override void InferInternal(InferenceParams inferenceParams, InferStateArgs args) { if (_embeds.Count > 0) @@ -227,10 +239,19 @@ namespace LLama } } + /// + /// The descriptor of the state of the interactive executor. + /// public class InteractiveExecutorState : ExecutorBaseState { + /// + /// Whether the executor is running for the first time (running the prompt). + /// [JsonPropertyName("is_prompt_run")] public bool IsPromptRun { get; set; } + /// + /// Tokens that represent a new line with the current model. + /// [JsonPropertyName("llama_token_newline")] public llama_token[] LLamaNewlineTokens { get; set; } } diff --git a/LLama/LLamaModel.cs b/LLama/LLamaModel.cs index ec642795..a4e3d278 100644 --- a/LLama/LLamaModel.cs +++ b/LLama/LLamaModel.cs @@ -13,6 +13,9 @@ using LLama.Common; namespace LLama { using llama_token = Int32; + /// + /// The abstraction of a LLama model, which holds the context in the native library. + /// public class LLamaModel: IDisposable { // TODO: expose more properties. 
diff --git a/LLama/LLamaQuantizer.cs b/LLama/LLamaQuantizer.cs index ef417524..232d9f28 100644 --- a/LLama/LLamaQuantizer.cs +++ b/LLama/LLamaQuantizer.cs @@ -6,7 +6,10 @@ using System.Text; namespace LLama { - public class LLamaQuantizer + /// + /// The quantizer to quantize the model. + /// + public static class LLamaQuantizer { /// /// Quantize the model. diff --git a/LLama/LLamaStatelessExecutor.cs b/LLama/LLamaStatelessExecutor.cs index 2008bf9c..dfa70edd 100644 --- a/LLama/LLamaStatelessExecutor.cs +++ b/LLama/LLamaStatelessExecutor.cs @@ -20,7 +20,14 @@ namespace LLama { private LLamaModel _model; private byte[] _originalState; + /// + /// The model used by the executor when running the inference. + /// public LLamaModel Model => _model; + /// + /// + /// + /// The LLama model. public StatelessExecutor(LLamaModel model) { _model = model; @@ -28,6 +35,8 @@ namespace LLama Utils.Eval(_model.NativeHandle, tokens.ToArray(), 0, tokens.Count(), 0, _model.Params.Threads); _originalState = model.GetStateData(); } + + /// public IEnumerable<string> Infer(string text, InferenceParams? inferenceParams = null, CancellationToken cancellationToken = default) { cancellationToken.ThrowIfCancellationRequested(); @@ -113,7 +122,7 @@ namespace LLama _model.LoadState(_originalState); } - + /// public async IAsyncEnumerable<string> InferAsync(string text, InferenceParams? inferenceParams = null, [EnumeratorCancellation] CancellationToken token = default) { yield return ""; diff --git a/LLama/LLamaTransforms.cs b/LLama/LLamaTransforms.cs index 59e8e2f7..14e9ccb9 100644 --- a/LLama/LLamaTransforms.cs +++ b/LLama/LLamaTransforms.cs @@ -11,6 +11,9 @@ using System.Text; namespace LLama { + /// + /// A class that contains all the transforms provided internally by LLama. + /// public class LLamaTransforms { /// @@ -30,6 +33,14 @@ namespace LLama string _systemName; string _unknownName; bool _isInstructMode; + /// + /// + /// + /// + /// + /// + /// + /// public DefaultHistoryTransform(string? userName = null, string? assistantName = null, string? systemName = null, string? unknownName = null, bool isInstructMode = false) { @@ -40,6 +51,7 @@ namespace LLama _isInstructMode = isInstructMode; } + /// public virtual string HistoryToText(ChatHistory history) { StringBuilder sb = new(); @@ -65,6 +77,7 @@ namespace LLama return sb.ToString(); } + /// public virtual ChatHistory TextToHistory(AuthorRole role, string text) { ChatHistory history = new ChatHistory(); @@ -72,6 +85,12 @@ namespace LLama return history; } + /// + /// Drop the name at the beginning and the end of the text. 
+ /// + /// + /// + /// public virtual string TrimNamesFromText(string text, AuthorRole role) { if (role == AuthorRole.User && text.StartsWith($"{_userName}:")) @@ -95,6 +114,9 @@ namespace LLama /// public class NaiveTextInputTransform : ITextTransform { + /// + /// + /// public NaiveTextInputTransform() { @@ -110,11 +132,13 @@ namespace LLama /// public class EmptyTextOutputStreamTransform : ITextStreamTransform { + /// public IEnumerable Transform(IEnumerable tokens) { return tokens; } + /// public IAsyncEnumerable TransformAsync(IAsyncEnumerable tokens) { return tokens; diff --git a/LLama/ResettableLLamaModel.cs b/LLama/ResettableLLamaModel.cs index 1ca9e746..2ba4ecc6 100644 --- a/LLama/ResettableLLamaModel.cs +++ b/LLama/ResettableLLamaModel.cs @@ -10,12 +10,23 @@ namespace LLama /// public class ResettableLLamaModel : LLamaModel { + /// + /// The initial state of the model + /// public byte[] OriginalState { get; set; } + /// + /// + /// + /// + /// public ResettableLLamaModel(ModelParams Params, string encoding = "UTF-8") : base(Params, encoding) { OriginalState = GetStateData(); } + /// + /// Reset the state to the initial state. + /// public void Reset() { LoadState(OriginalState); diff --git a/docs/sciprts/map_xml_files_to_yml.py b/docs/sciprts/map_xml_files_to_yml.py new file mode 100644 index 00000000..a2cf98ca --- /dev/null +++ b/docs/sciprts/map_xml_files_to_yml.py @@ -0,0 +1,16 @@ +import os + +def generate_string_list(folder_path, prefix): + file_names = os.listdir(folder_path) + string_list = [] + for file_name in file_names: + new_string = f"- {'.'.join(file_name.split('.')[:-1])}: {prefix}{file_name}" + string_list.append(new_string) + return string_list + +folder_path = "./docs/xmldocs" +prefix = "./xmldocs/" + +string_list = generate_string_list(folder_path, prefix) +result = '\n'.join(string_list) +print(result) diff --git a/docs/xmldocs/index.md b/docs/xmldocs/index.md new file mode 100644 index 00000000..7bc5a746 --- /dev/null +++ b/docs/xmldocs/index.md @@ -0,0 +1,121 @@ +# LLamaSharp + +## LLama + +[ChatSession](./llama.chatsession.md) + +[InstructExecutor](./llama.instructexecutor.md) + +[InteractiveExecutor](./llama.interactiveexecutor.md) + +[LLamaEmbedder](./llama.llamaembedder.md) + +[LLamaModel](./llama.llamamodel.md) + +[LLamaQuantizer](./llama.llamaquantizer.md) + +[LLamaTransforms](./llama.llamatransforms.md) + +[ResettableLLamaModel](./llama.resettablellamamodel.md) + +[StatefulExecutorBase](./llama.statefulexecutorbase.md) + +[StatelessExecutor](./llama.statelessexecutor.md) + +## LLama.Abstractions + +[IHistoryTransform](./llama.abstractions.ihistorytransform.md) + +[ILLamaExecutor](./llama.abstractions.illamaexecutor.md) + +[ITextStreamTransform](./llama.abstractions.itextstreamtransform.md) + +[ITextTransform](./llama.abstractions.itexttransform.md) + +## LLama.Common + +[AuthorRole](./llama.common.authorrole.md) + +[ChatHistory](./llama.common.chathistory.md) + +[FixedSizeQueue<T>](./llama.common.fixedsizequeue-1.md) + +[ILLamaLogger](./llama.common.illamalogger.md) + +[InferenceParams](./llama.common.inferenceparams.md) + +[LLamaDefaultLogger](./llama.common.llamadefaultlogger.md) + +[MiroStateType](./llama.common.mirostatetype.md) + +[ModelParams](./llama.common.modelparams.md) + +## LLama.Exceptions + +[RuntimeError](./llama.exceptions.runtimeerror.md) + +## LLama.Extensions + +[DictionaryExtension](./llama.extensions.dictionaryextension.md) + +## LLama.Native + +[LLamaContextParams](./llama.native.llamacontextparams.md) + 
+[LLamaFtype](./llama.native.llamaftype.md) + +[LLamaTokenData](./llama.native.llamatokendata.md) + +[LLamaTokenDataArray](./llama.native.llamatokendataarray.md) + +[LLamaTokenDataArrayNative](./llama.native.llamatokendataarraynative.md) + +[NativeApi](./llama.native.nativeapi.md) + +[SafeLLamaContextHandle](./llama.native.safellamacontexthandle.md) + +[SafeLLamaHandleBase](./llama.native.safellamahandlebase.md) + +## LLama.OldVersion + +[ChatCompletion](./llama.oldversion.chatcompletion.md) + +[ChatCompletionChoice](./llama.oldversion.chatcompletionchoice.md) + +[ChatCompletionChunk](./llama.oldversion.chatcompletionchunk.md) + +[ChatCompletionChunkChoice](./llama.oldversion.chatcompletionchunkchoice.md) + +[ChatCompletionChunkDelta](./llama.oldversion.chatcompletionchunkdelta.md) + +[ChatCompletionMessage](./llama.oldversion.chatcompletionmessage.md) + +[ChatMessageRecord](./llama.oldversion.chatmessagerecord.md) + +[ChatRole](./llama.oldversion.chatrole.md) + +[ChatSession<T>](./llama.oldversion.chatsession-1.md) + +[Completion](./llama.oldversion.completion.md) + +[CompletionChoice](./llama.oldversion.completionchoice.md) + +[CompletionChunk](./llama.oldversion.completionchunk.md) + +[CompletionLogprobs](./llama.oldversion.completionlogprobs.md) + +[CompletionUsage](./llama.oldversion.completionusage.md) + +[Embedding](./llama.oldversion.embedding.md) + +[EmbeddingData](./llama.oldversion.embeddingdata.md) + +[EmbeddingUsage](./llama.oldversion.embeddingusage.md) + +[IChatModel](./llama.oldversion.ichatmodel.md) + +[LLamaEmbedder](./llama.oldversion.llamaembedder.md) + +[LLamaModel](./llama.oldversion.llamamodel.md) + +[LLamaParams](./llama.oldversion.llamaparams.md) diff --git a/docs/xmldocs/llama.abstractions.ihistorytransform.md b/docs/xmldocs/llama.abstractions.ihistorytransform.md new file mode 100644 index 00000000..729e457e --- /dev/null +++ b/docs/xmldocs/llama.abstractions.ihistorytransform.md @@ -0,0 +1,49 @@ +# IHistoryTransform + +Namespace: LLama.Abstractions + +Transform history to plain text and vice versa. + +```csharp +public interface IHistoryTransform +``` + +## Methods + +### **HistoryToText(ChatHistory)** + +Convert a ChatHistory instance to plain text. + +```csharp +string HistoryToText(ChatHistory history) +``` + +#### Parameters + +`history` [ChatHistory](./llama.common.chathistory.md)
+The ChatHistory instance + +#### Returns + +[String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
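+#### Example
+
+An illustrative usage sketch, assuming the built-in [DefaultHistoryTransform](./llama.llamatransforms.md) as the implementation:
+
+```csharp
+IHistoryTransform transform = new LLamaTransforms.DefaultHistoryTransform();
+var history = new ChatHistory();
+history.AddMessage(AuthorRole.User, "Hello!");
+// Render the structured history as a single prompt string.
+string text = transform.HistoryToText(history);
+```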
+ +### **TextToHistory(AuthorRole, String)** + +Converts plain text to a ChatHistory instance. + +```csharp +ChatHistory TextToHistory(AuthorRole role, string text) +``` + +#### Parameters + +`role` [AuthorRole](./llama.common.authorrole.md)
+The role for the author. + +`text` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+The chat history as plain text. + +#### Returns + +[ChatHistory](./llama.common.chathistory.md)
+The updated history. diff --git a/docs/xmldocs/llama.abstractions.illamaexecutor.md b/docs/xmldocs/llama.abstractions.illamaexecutor.md new file mode 100644 index 00000000..9ddaaa45 --- /dev/null +++ b/docs/xmldocs/llama.abstractions.illamaexecutor.md @@ -0,0 +1,66 @@ +# ILLamaExecutor + +Namespace: LLama.Abstractions + +A high level interface for LLama models. + +```csharp +public interface ILLamaExecutor +``` + +## Properties + +### **Model** + +The loaded model for this executor. + +```csharp +public abstract LLamaModel Model { get; } +``` + +#### Property Value + +[LLamaModel](./llama.llamamodel.md)
+ +## Methods + +### **Infer(String, InferenceParams, CancellationToken)** + +Infers a response from the model. + +```csharp +IEnumerable Infer(string text, InferenceParams inferenceParams, CancellationToken token) +``` + +#### Parameters + +`text` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+Your prompt + +`inferenceParams` [InferenceParams](./llama.common.inferenceparams.md)
+Any additional parameters + +`token` [CancellationToken](https://docs.microsoft.com/en-us/dotnet/api/system.threading.cancellationtoken)
+A cancellation token. + +#### Returns + +[IEnumerable<String>](https://docs.microsoft.com/en-us/dotnet/api/system.collections.generic.ienumerable-1)
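+#### Example
+
+An illustrative sketch of consuming `Infer` through an [InteractiveExecutor](./llama.interactiveexecutor.md); the model path below is a placeholder for a local GGML model file:
+
+```csharp
+// "path/to/model.bin" is a placeholder, not a real path.
+var model = new LLamaModel(new ModelParams("path/to/model.bin"));
+var executor = new InteractiveExecutor(model);
+foreach (var output in executor.Infer("Q: What is an apple? A:"))
+{
+    Console.Write(output);
+}
+```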
+ +### **InferAsync(String, InferenceParams, CancellationToken)** + +```csharp +IAsyncEnumerable InferAsync(string text, InferenceParams inferenceParams, CancellationToken token) +``` + +#### Parameters + +`text` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +`inferenceParams` [InferenceParams](./llama.common.inferenceparams.md)
+ +`token` [CancellationToken](https://docs.microsoft.com/en-us/dotnet/api/system.threading.cancellationtoken)
+ +#### Returns + +[IAsyncEnumerable<String>](https://docs.microsoft.com/en-us/dotnet/api/system.collections.generic.iasyncenumerable-1)
diff --git a/docs/xmldocs/llama.abstractions.itextstreamtransform.md b/docs/xmldocs/llama.abstractions.itextstreamtransform.md new file mode 100644 index 00000000..caa50ac5 --- /dev/null +++ b/docs/xmldocs/llama.abstractions.itextstreamtransform.md @@ -0,0 +1,43 @@ +# ITextStreamTransform + +Namespace: LLama.Abstractions + +Takes a stream of tokens and transforms them. + +```csharp +public interface ITextStreamTransform +``` + +## Methods + +### **Transform(IEnumerable<String>)** + +Takes a stream of tokens and transforms them, returning a new stream of tokens. + +```csharp +IEnumerable Transform(IEnumerable tokens) +``` + +#### Parameters + +`tokens` [IEnumerable<String>](https://docs.microsoft.com/en-us/dotnet/api/system.collections.generic.ienumerable-1)
+ +#### Returns + +[IEnumerable<String>](https://docs.microsoft.com/en-us/dotnet/api/system.collections.generic.ienumerable-1)
+ +### **TransformAsync(IAsyncEnumerable<String>)** + +Takes a stream of tokens and transforms them, returning a new stream of tokens asynchronously. + +```csharp +IAsyncEnumerable TransformAsync(IAsyncEnumerable tokens) +``` + +#### Parameters + +`tokens` [IAsyncEnumerable<String>](https://docs.microsoft.com/en-us/dotnet/api/system.collections.generic.iasyncenumerable-1)
+ +#### Returns + +[IAsyncEnumerable<String>](https://docs.microsoft.com/en-us/dotnet/api/system.collections.generic.iasyncenumerable-1)
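+#### Example
+
+An illustrative implementation sketch (the class below is hypothetical, not part of the library) that strips carriage returns from a token stream:
+
+```csharp
+public class StripCarriageReturnTransform : ITextStreamTransform
+{
+    // Synchronous stream: filter each token as it is produced.
+    public IEnumerable<string> Transform(IEnumerable<string> tokens)
+    {
+        foreach (var token in tokens)
+            yield return token.Replace("\r", "");
+    }
+
+    // Asynchronous stream: same behavior over IAsyncEnumerable.
+    public async IAsyncEnumerable<string> TransformAsync(IAsyncEnumerable<string> tokens)
+    {
+        await foreach (var token in tokens)
+            yield return token.Replace("\r", "");
+    }
+}
+```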
diff --git a/docs/xmldocs/llama.abstractions.itexttransform.md b/docs/xmldocs/llama.abstractions.itexttransform.md new file mode 100644 index 00000000..df026ae5 --- /dev/null +++ b/docs/xmldocs/llama.abstractions.itexttransform.md @@ -0,0 +1,33 @@ +# ITextTransform + +Namespace: LLama.Abstractions + +An interface for text transformations. + These can be used to compose a pipeline of text transformations, such as: + - Tokenization + - Lowercasing + - Punctuation removal + - Trimming + - etc. + +```csharp +public interface ITextTransform +``` + +## Methods + +### **Transform(String)** + +Takes a string and transforms it. + +```csharp +string Transform(string text) +``` + +#### Parameters + +`text` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +#### Returns + +[String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
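+#### Example
+
+An illustrative implementation sketch (the class below is hypothetical, not part of the library):
+
+```csharp
+public class LowercaseTransform : ITextTransform
+{
+    // Normalize user input before it reaches the executor.
+    public string Transform(string text)
+    {
+        return text.ToLowerInvariant();
+    }
+}
+```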
diff --git a/docs/xmldocs/llama.chatsession.md b/docs/xmldocs/llama.chatsession.md new file mode 100644 index 00000000..f81e17f2 --- /dev/null +++ b/docs/xmldocs/llama.chatsession.md @@ -0,0 +1,243 @@ +# ChatSession + +Namespace: LLama + +The main chat session class. + +```csharp +public class ChatSession +``` + +Inheritance [Object](https://docs.microsoft.com/en-us/dotnet/api/system.object) → [ChatSession](./llama.chatsession.md) + +## Fields + +### **OutputTransform** + +The output transform used in this session. + +```csharp +public ITextStreamTransform OutputTransform; +``` + +## Properties + +### **Executor** + +The executor for this session. + +```csharp +public ILLamaExecutor Executor { get; } +``` + +#### Property Value + +[ILLamaExecutor](./llama.abstractions.illamaexecutor.md)
+ +### **History** + +The chat history for this session. + +```csharp +public ChatHistory History { get; } +``` + +#### Property Value + +[ChatHistory](./llama.common.chathistory.md)
+ +### **HistoryTransform** + +The history transform used in this session. + +```csharp +public IHistoryTransform HistoryTransform { get; set; } +``` + +#### Property Value + +[IHistoryTransform](./llama.abstractions.ihistorytransform.md)
+ +### **InputTransformPipeline** + +The input transform pipeline used in this session. + +```csharp +public List InputTransformPipeline { get; set; } +``` + +#### Property Value + +[List<ITextTransform>](https://docs.microsoft.com/en-us/dotnet/api/system.collections.generic.list-1)
+ +## Constructors + +### **ChatSession(ILLamaExecutor)** + + + +```csharp +public ChatSession(ILLamaExecutor executor) +``` + +#### Parameters + +`executor` [ILLamaExecutor](./llama.abstractions.illamaexecutor.md)
+The executor for this session + +## Methods + +### **WithHistoryTransform(IHistoryTransform)** + +Use a custom history transform. + +```csharp +public ChatSession WithHistoryTransform(IHistoryTransform transform) +``` + +#### Parameters + +`transform` [IHistoryTransform](./llama.abstractions.ihistorytransform.md)
+ +#### Returns + +[ChatSession](./llama.chatsession.md)
+ +### **AddInputTransform(ITextTransform)** + +Add a text transform to the input transform pipeline. + +```csharp +public ChatSession AddInputTransform(ITextTransform transform) +``` + +#### Parameters + +`transform` [ITextTransform](./llama.abstractions.itexttransform.md)
+ +#### Returns + +[ChatSession](./llama.chatsession.md)
+ +### **WithOutputTransform(ITextStreamTransform)** + +Use a custom output transform. + +```csharp +public ChatSession WithOutputTransform(ITextStreamTransform transform) +``` + +#### Parameters + +`transform` [ITextStreamTransform](./llama.abstractions.itextstreamtransform.md)
+ +#### Returns + +[ChatSession](./llama.chatsession.md)
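+#### Example
+
+The `With.../Add...` members above return the session itself, so they can be chained. A configuration sketch, assuming `executor` is an existing [ILLamaExecutor](./llama.abstractions.illamaexecutor.md):
+
+```csharp
+var session = new ChatSession(executor)
+    .WithHistoryTransform(new LLamaTransforms.DefaultHistoryTransform("User", "Assistant"))
+    .WithOutputTransform(new LLamaTransforms.EmptyTextOutputStreamTransform());
+```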
+ +### **SaveSession(String)** + + + +```csharp +public void SaveSession(string path) +``` + +#### Parameters + +`path` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+The directory name to save the session. If the directory does not exist, a new directory will be created. + +### **LoadSession(String)** + + + +```csharp +public void LoadSession(string path) +``` + +#### Parameters + +`path` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+The directory name to load the session. + +### **Chat(ChatHistory, InferenceParams, CancellationToken)** + +Get the response from the LLama model with chat histories. + +```csharp +public IEnumerable Chat(ChatHistory history, InferenceParams inferenceParams, CancellationToken cancellationToken) +``` + +#### Parameters + +`history` [ChatHistory](./llama.common.chathistory.md)
+ +`inferenceParams` [InferenceParams](./llama.common.inferenceparams.md)
+ +`cancellationToken` [CancellationToken](https://docs.microsoft.com/en-us/dotnet/api/system.threading.cancellationtoken)
+ +#### Returns + +[IEnumerable<String>](https://docs.microsoft.com/en-us/dotnet/api/system.collections.generic.ienumerable-1)
+ +### **Chat(String, InferenceParams, CancellationToken)** + +Get the response from the LLama model. Note that the prompt can be not only the preset words, + but also the question you want to ask. + +```csharp +public IEnumerable<string> Chat(string prompt, InferenceParams inferenceParams, CancellationToken cancellationToken) +``` + +#### Parameters + +`prompt` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)<br>
+ +`inferenceParams` [InferenceParams](./llama.common.inferenceparams.md)
+ +`cancellationToken` [CancellationToken](https://docs.microsoft.com/en-us/dotnet/api/system.threading.cancellationtoken)
+ +#### Returns + +[IEnumerable<String>](https://docs.microsoft.com/en-us/dotnet/api/system.collections.generic.ienumerable-1)
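+#### Example
+
+A minimal chat-loop sketch, assuming `session` was configured as in the earlier example; the anti-prompt value is illustrative:
+
+```csharp
+var inferenceParams = new InferenceParams() { AntiPrompts = new[] { "User:" } };
+while (true)
+{
+    var input = Console.ReadLine();
+    foreach (var output in session.Chat($"User: {input}\nAssistant: ", inferenceParams))
+    {
+        Console.Write(output);
+    }
+}
+```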
+ +### **ChatAsync(ChatHistory, InferenceParams, CancellationToken)** + +Get the response from the LLama model with chat histories. + +```csharp +public IAsyncEnumerable ChatAsync(ChatHistory history, InferenceParams inferenceParams, CancellationToken cancellationToken) +``` + +#### Parameters + +`history` [ChatHistory](./llama.common.chathistory.md)
+ +`inferenceParams` [InferenceParams](./llama.common.inferenceparams.md)
+ +`cancellationToken` [CancellationToken](https://docs.microsoft.com/en-us/dotnet/api/system.threading.cancellationtoken)
+ +#### Returns + +[IAsyncEnumerable<String>](https://docs.microsoft.com/en-us/dotnet/api/system.collections.generic.iasyncenumerable-1)
+ +### **ChatAsync(String, InferenceParams, CancellationToken)** + +Get the response from the LLama model with chat histories asynchronously. + +```csharp +public IAsyncEnumerable ChatAsync(string prompt, InferenceParams inferenceParams, CancellationToken cancellationToken) +``` + +#### Parameters + +`prompt` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +`inferenceParams` [InferenceParams](./llama.common.inferenceparams.md)
+ +`cancellationToken` [CancellationToken](https://docs.microsoft.com/en-us/dotnet/api/system.threading.cancellationtoken)
+ +#### Returns + +[IAsyncEnumerable<String>](https://docs.microsoft.com/en-us/dotnet/api/system.collections.generic.iasyncenumerable-1)
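+#### Example
+
+An asynchronous counterpart of the `Chat` loop, again assuming an existing `session`:
+
+```csharp
+await foreach (var output in session.ChatAsync("What is an apple?"))
+{
+    Console.Write(output);
+}
+```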
diff --git a/docs/xmldocs/llama.common.authorrole.md b/docs/xmldocs/llama.common.authorrole.md new file mode 100644 index 00000000..10fc2b6d --- /dev/null +++ b/docs/xmldocs/llama.common.authorrole.md @@ -0,0 +1,15 @@ +# AuthorRole + +Namespace: LLama.Common + +```csharp +public enum AuthorRole +``` + +Inheritance [Object](https://docs.microsoft.com/en-us/dotnet/api/system.object) → [ValueType](https://docs.microsoft.com/en-us/dotnet/api/system.valuetype) → [Enum](https://docs.microsoft.com/en-us/dotnet/api/system.enum) → [AuthorRole](./llama.common.authorrole.md)
+Implements [IComparable](https://docs.microsoft.com/en-us/dotnet/api/system.icomparable), [IFormattable](https://docs.microsoft.com/en-us/dotnet/api/system.iformattable), [IConvertible](https://docs.microsoft.com/en-us/dotnet/api/system.iconvertible) + +## Fields + +| Name | Value | Description | +| --- | --: | --- | diff --git a/docs/xmldocs/llama.common.chathistory.md b/docs/xmldocs/llama.common.chathistory.md new file mode 100644 index 00000000..ec2b4af0 --- /dev/null +++ b/docs/xmldocs/llama.common.chathistory.md @@ -0,0 +1,53 @@ +# ChatHistory + +Namespace: LLama.Common + +The chat history class + +```csharp +public class ChatHistory +``` + +Inheritance [Object](https://docs.microsoft.com/en-us/dotnet/api/system.object) → [ChatHistory](./llama.common.chathistory.md) + +## Properties + +### **Messages** + +List of messages in the chat + +```csharp +public List Messages { get; } +``` + +#### Property Value + +[List<Message>](https://docs.microsoft.com/en-us/dotnet/api/system.collections.generic.list-1)
+ +## Constructors + +### **ChatHistory()** + +Create a new instance of the chat content class + +```csharp +public ChatHistory() +``` + +## Methods + +### **AddMessage(AuthorRole, String)** + +Add a message to the chat history + +```csharp +public void AddMessage(AuthorRole authorRole, string content) +``` + +#### Parameters + +`authorRole` [AuthorRole](./llama.common.authorrole.md)
+Role of the message author + +`content` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+Message content diff --git a/docs/xmldocs/llama.common.fixedsizequeue-1.md b/docs/xmldocs/llama.common.fixedsizequeue-1.md new file mode 100644 index 00000000..c3d1a354 --- /dev/null +++ b/docs/xmldocs/llama.common.fixedsizequeue-1.md @@ -0,0 +1,111 @@ +# FixedSizeQueue<T> + +Namespace: LLama.Common + +A queue with fixed storage size. + Currently it's only a naive implementation and needs to be further optimized in the future. + +```csharp +public class FixedSizeQueue<T> : IEnumerable<T>, System.Collections.IEnumerable +``` + +#### Type Parameters + +`T`<br>
+ +Inheritance [Object](https://docs.microsoft.com/en-us/dotnet/api/system.object) → [FixedSizeQueue<T>](./llama.common.fixedsizequeue-1.md)
+Implements IEnumerable<T>, [IEnumerable](https://docs.microsoft.com/en-us/dotnet/api/system.collections.ienumerable) + +## Properties + +### **Count** + +```csharp +public int Count { get; } +``` + +#### Property Value + +[Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +### **Capacity** + +```csharp +public int Capacity { get; } +``` + +#### Property Value + +[Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +## Constructors + +### **FixedSizeQueue(Int32)** + +```csharp +public FixedSizeQueue(int size) +``` + +#### Parameters + +`size` [Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +### **FixedSizeQueue(Int32, IEnumerable<T>)** + +```csharp +public FixedSizeQueue(int size, IEnumerable data) +``` + +#### Parameters + +`size` [Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +`data` IEnumerable<T>
+ +## Methods + +### **FillWith(T)** + +```csharp +public FixedSizeQueue FillWith(T value) +``` + +#### Parameters + +`value` T
+ +#### Returns + +[FixedSizeQueue<T>](./llama.common.fixedsizequeue-1.md)
+ +### **Enqueue(T)** + +Enqueue an element. + +```csharp +public void Enqueue(T item) +``` + +#### Parameters + +`item` T
+ +### **ToArray()** + +```csharp +public T[] ToArray() +``` + +#### Returns + +T[]
+ +### **GetEnumerator()** + +```csharp +public IEnumerator GetEnumerator() +``` + +#### Returns + +IEnumerator<T>
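+
+#### Example
+
+A short sketch of the queue's intended behavior. That the oldest elements are discarded once `Capacity` is exceeded is an assumption based on the class description, not a documented guarantee.
+
+```csharp
+var queue = new FixedSizeQueue<int>(3).FillWith(0); // capacity 3, prefilled with zeros
+queue.Enqueue(1);
+queue.Enqueue(2);
+queue.Enqueue(3);
+queue.Enqueue(4); // presumably evicts the oldest items to keep Count <= Capacity
+Console.WriteLine(string.Join(", ", queue.ToArray()));
+```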
diff --git a/docs/xmldocs/llama.common.illamalogger.md b/docs/xmldocs/llama.common.illamalogger.md new file mode 100644 index 00000000..4ede2f5a --- /dev/null +++ b/docs/xmldocs/llama.common.illamalogger.md @@ -0,0 +1,28 @@ +# ILLamaLogger + +Namespace: LLama.Common + +```csharp +public interface ILLamaLogger +``` + +## Methods + +### **Log(String, String, LogLevel)** + +Write the log in a customized way + +```csharp +void Log(string source, string message, LogLevel level) +``` + +#### Parameters + +`source` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+The source of the log. It may be a method name or class name. + +`message` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+The message. + +`level` [LogLevel](./llama.common.illamalogger.loglevel.md)
+The log level. diff --git a/docs/xmldocs/llama.common.inferenceparams.md b/docs/xmldocs/llama.common.inferenceparams.md new file mode 100644 index 00000000..ac9d7bf2 --- /dev/null +++ b/docs/xmldocs/llama.common.inferenceparams.md @@ -0,0 +1,264 @@ +# InferenceParams + +Namespace: LLama.Common + +```csharp +public class InferenceParams +``` + +Inheritance [Object](https://docs.microsoft.com/en-us/dotnet/api/system.object) → [InferenceParams](./llama.common.inferenceparams.md) + +## Properties + +### **TokensKeep** + +number of tokens to keep from initial prompt + +```csharp +public int TokensKeep { get; set; } +``` + +#### Property Value + +[Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +### **MaxTokens** + +how many new tokens to predict (n_predict); set to -1 to generate responses infinitely until generation completes. + +```csharp +public int MaxTokens { get; set; } +``` + +#### Property Value + +[Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +### **LogitBias** + +logit bias for specific tokens + +```csharp +public Dictionary LogitBias { get; set; } +``` + +#### Property Value + +[Dictionary<Int32, Single>](https://docs.microsoft.com/en-us/dotnet/api/system.collections.generic.dictionary-2)
+ +### **AntiPrompts** + +Sequences where the model will stop generating further tokens. + +```csharp +public IEnumerable AntiPrompts { get; set; } +``` + +#### Property Value + +[IEnumerable<String>](https://docs.microsoft.com/en-us/dotnet/api/system.collections.generic.ienumerable-1)
+ +### **PathSession** + +path to file for saving/loading model eval state + +```csharp +public string PathSession { get; set; } +``` + +#### Property Value + +[String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +### **InputSuffix** + +string to suffix user inputs with + +```csharp +public string InputSuffix { get; set; } +``` + +#### Property Value + +[String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +### **InputPrefix** + +string to prefix user inputs with + +```csharp +public string InputPrefix { get; set; } +``` + +#### Property Value + +[String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +### **TopK** + +0 or lower to use vocab size + +```csharp +public int TopK { get; set; } +``` + +#### Property Value + +[Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +### **TopP** + +1.0 = disabled + +```csharp +public float TopP { get; set; } +``` + +#### Property Value + +[Single](https://docs.microsoft.com/en-us/dotnet/api/system.single)
+ +### **TfsZ** + +1.0 = disabled + +```csharp +public float TfsZ { get; set; } +``` + +#### Property Value + +[Single](https://docs.microsoft.com/en-us/dotnet/api/system.single)
+ +### **TypicalP** + +1.0 = disabled + +```csharp +public float TypicalP { get; set; } +``` + +#### Property Value + +[Single](https://docs.microsoft.com/en-us/dotnet/api/system.single)
+ +### **Temperature** + +1.0 = disabled + +```csharp +public float Temperature { get; set; } +``` + +#### Property Value + +[Single](https://docs.microsoft.com/en-us/dotnet/api/system.single)
+ +### **RepeatPenalty** + +1.0 = disabled + +```csharp +public float RepeatPenalty { get; set; } +``` + +#### Property Value + +[Single](https://docs.microsoft.com/en-us/dotnet/api/system.single)
+ +### **RepeatLastTokensCount** + +last n tokens to penalize (0 = disable penalty, -1 = context size) (repeat_last_n) + +```csharp +public int RepeatLastTokensCount { get; set; } +``` + +#### Property Value + +[Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +### **FrequencyPenalty** + +frequency penalty coefficient + 0.0 = disabled + +```csharp +public float FrequencyPenalty { get; set; } +``` + +#### Property Value + +[Single](https://docs.microsoft.com/en-us/dotnet/api/system.single)
+ +### **PresencePenalty** + +presence penalty coefficient + 0.0 = disabled + +```csharp +public float PresencePenalty { get; set; } +``` + +#### Property Value + +[Single](https://docs.microsoft.com/en-us/dotnet/api/system.single)
+ +### **Mirostat** + +Mirostat sampling, which uses tokens instead of words; the algorithm is described in the paper https://arxiv.org/abs/2007.14966. + 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0 + +```csharp +public MiroStateType Mirostat { get; set; } +``` + +#### Property Value + +[MiroStateType](./llama.common.mirostatetype.md)
+ +### **MirostatTau** + +target entropy + +```csharp +public float MirostatTau { get; set; } +``` + +#### Property Value + +[Single](https://docs.microsoft.com/en-us/dotnet/api/system.single)
+ +### **MirostatEta** + +learning rate + +```csharp +public float MirostatEta { get; set; } +``` + +#### Property Value + +[Single](https://docs.microsoft.com/en-us/dotnet/api/system.single)
+ +### **PenalizeNL** + +consider newlines as a repeatable token (penalize_nl) + +```csharp +public bool PenalizeNL { get; set; } +``` + +#### Property Value + +[Boolean](https://docs.microsoft.com/en-us/dotnet/api/system.boolean)
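+
+#### Example
+
+A typical configuration sketch; the values shown are common starting points, not recommended defaults.
+
+```csharp
+var inferenceParams = new InferenceParams
+{
+    MaxTokens = 256,
+    Temperature = 0.8f,
+    TopK = 40,
+    TopP = 0.95f,
+    RepeatPenalty = 1.1f,
+    AntiPrompts = new List<string> { "User:" }
+};
+```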
+ +## Constructors + +### **InferenceParams()** + +```csharp +public InferenceParams() +``` diff --git a/docs/xmldocs/llama.common.llamadefaultlogger.md b/docs/xmldocs/llama.common.llamadefaultlogger.md new file mode 100644 index 00000000..2159852f --- /dev/null +++ b/docs/xmldocs/llama.common.llamadefaultlogger.md @@ -0,0 +1,121 @@ +# LLamaDefaultLogger + +Namespace: LLama.Common + +The default logger of LLamaSharp. By default it writes to the console. Use the methods of `LLamaLogger.Default` to change the behavior. + It's recommended to inherit `ILLamaLogger` to customize the behavior. + +```csharp +public sealed class LLamaDefaultLogger : ILLamaLogger +``` + +Inheritance [Object](https://docs.microsoft.com/en-us/dotnet/api/system.object) → [LLamaDefaultLogger](./llama.common.llamadefaultlogger.md)
+Implements [ILLamaLogger](./llama.common.illamalogger.md) + +## Properties + +### **Default** + +```csharp +public static LLamaDefaultLogger Default { get; } +``` + +#### Property Value + +[LLamaDefaultLogger](./llama.common.llamadefaultlogger.md)
+ +## Methods + +### **EnableConsole()** + +```csharp +public LLamaDefaultLogger EnableConsole() +``` + +#### Returns + +[LLamaDefaultLogger](./llama.common.llamadefaultlogger.md)
+ +### **DisableConsole()** + +```csharp +public LLamaDefaultLogger DisableConsole() +``` + +#### Returns + +[LLamaDefaultLogger](./llama.common.llamadefaultlogger.md)
+ +### **EnableFile(String, FileMode)** + +```csharp +public LLamaDefaultLogger EnableFile(string filename, FileMode mode) +``` + +#### Parameters + +`filename` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +`mode` [FileMode](https://docs.microsoft.com/en-us/dotnet/api/system.io.filemode)
+ +#### Returns + +[LLamaDefaultLogger](./llama.common.llamadefaultlogger.md)
+ +### **DisableFile(String)** + +```csharp +public LLamaDefaultLogger DisableFile(string filename) +``` + +#### Parameters + +`filename` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +#### Returns + +[LLamaDefaultLogger](./llama.common.llamadefaultlogger.md)
+ +### **Log(String, String, LogLevel)** + +```csharp +public void Log(string source, string message, LogLevel level) +``` + +#### Parameters + +`source` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +`message` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +`level` [LogLevel](./llama.common.illamalogger.loglevel.md)
+ +### **Info(String)** + +```csharp +public void Info(string message) +``` + +#### Parameters + +`message` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +### **Warn(String)** + +```csharp +public void Warn(string message) +``` + +#### Parameters + +`message` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +### **Error(String)** + +```csharp +public void Error(string message) +``` + +#### Parameters + +`message` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
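+
+#### Example
+
+A configuration sketch; the log file name is a placeholder.
+
+```csharp
+using System.IO;
+
+LLamaDefaultLogger.Default
+    .EnableConsole()
+    .EnableFile("llamasharp.log", FileMode.Append);
+LLamaDefaultLogger.Default.Info("Model loading started.");
+```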
diff --git a/docs/xmldocs/llama.common.mirostatetype.md b/docs/xmldocs/llama.common.mirostatetype.md new file mode 100644 index 00000000..b72aafc3 --- /dev/null +++ b/docs/xmldocs/llama.common.mirostatetype.md @@ -0,0 +1,15 @@ +# MiroStateType + +Namespace: LLama.Common + +```csharp +public enum MiroStateType +``` + +Inheritance [Object](https://docs.microsoft.com/en-us/dotnet/api/system.object) → [ValueType](https://docs.microsoft.com/en-us/dotnet/api/system.valuetype) → [Enum](https://docs.microsoft.com/en-us/dotnet/api/system.enum) → [MiroStateType](./llama.common.mirostatetype.md)
+Implements [IComparable](https://docs.microsoft.com/en-us/dotnet/api/system.icomparable), [IFormattable](https://docs.microsoft.com/en-us/dotnet/api/system.iformattable), [IConvertible](https://docs.microsoft.com/en-us/dotnet/api/system.iconvertible) + +## Fields + +| Name | Value | Description | +| --- | --: | --- | diff --git a/docs/xmldocs/llama.common.modelparams.md b/docs/xmldocs/llama.common.modelparams.md new file mode 100644 index 00000000..d041faf2 --- /dev/null +++ b/docs/xmldocs/llama.common.modelparams.md @@ -0,0 +1,234 @@ +# ModelParams + +Namespace: LLama.Common + +```csharp +public class ModelParams +``` + +Inheritance [Object](https://docs.microsoft.com/en-us/dotnet/api/system.object) → [ModelParams](./llama.common.modelparams.md) + +## Properties + +### **ContextSize** + +Model context size (n_ctx) + +```csharp +public int ContextSize { get; set; } +``` + +#### Property Value + +[Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +### **GpuLayerCount** + +Number of layers to run in VRAM / GPU memory (n_gpu_layers) + +```csharp +public int GpuLayerCount { get; set; } +``` + +#### Property Value + +[Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +### **Seed** + +Seed for the random number generator (seed) + +```csharp +public int Seed { get; set; } +``` + +#### Property Value + +[Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +### **UseFp16Memory** + +Use f16 instead of f32 for memory kv (memory_f16) + +```csharp +public bool UseFp16Memory { get; set; } +``` + +#### Property Value + +[Boolean](https://docs.microsoft.com/en-us/dotnet/api/system.boolean)
+ +### **UseMemorymap** + +Use mmap for faster loads (use_mmap) + +```csharp +public bool UseMemorymap { get; set; } +``` + +#### Property Value + +[Boolean](https://docs.microsoft.com/en-us/dotnet/api/system.boolean)
+ +### **UseMemoryLock** + +Use mlock to keep model in memory (use_mlock) + +```csharp +public bool UseMemoryLock { get; set; } +``` + +#### Property Value + +[Boolean](https://docs.microsoft.com/en-us/dotnet/api/system.boolean)
+ +### **Perplexity** + +Compute perplexity over the prompt (perplexity) + +```csharp +public bool Perplexity { get; set; } +``` + +#### Property Value + +[Boolean](https://docs.microsoft.com/en-us/dotnet/api/system.boolean)
+ +### **ModelPath** + +Model path (model) + +```csharp +public string ModelPath { get; set; } +``` + +#### Property Value + +[String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +### **LoraAdapter** + +lora adapter path (lora_adapter) + +```csharp +public string LoraAdapter { get; set; } +``` + +#### Property Value + +[String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +### **LoraBase** + +base model path for the lora adapter (lora_base) + +```csharp +public string LoraBase { get; set; } +``` + +#### Property Value + +[String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +### **Threads** + +Number of threads (-1 = autodetect) (n_threads) + +```csharp +public int Threads { get; set; } +``` + +#### Property Value + +[Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +### **BatchSize** + +batch size for prompt processing (must be >=32 to use BLAS) (n_batch) + +```csharp +public int BatchSize { get; set; } +``` + +#### Property Value + +[Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +### **ConvertEosToNewLine** + +Whether to convert eos to newline during the inference. + +```csharp +public bool ConvertEosToNewLine { get; set; } +``` + +#### Property Value + +[Boolean](https://docs.microsoft.com/en-us/dotnet/api/system.boolean)
+ +### **EmbeddingMode** + +Whether to use embedding mode (embedding). Note that if this is set to true, + the LLamaModel won't produce text responses anymore. + +```csharp +public bool EmbeddingMode { get; set; } +``` + +#### Property Value + +[Boolean](https://docs.microsoft.com/en-us/dotnet/api/system.boolean)
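+
+#### Example
+
+A construction sketch. The model path is a placeholder, and it assumes the constructor arguments other than `modelPath` have default values; if they don't in this version, pass them explicitly.
+
+```csharp
+var modelParams = new ModelParams("path/to/model.bin") // placeholder path
+{
+    ContextSize = 1024,
+    GpuLayerCount = 5,
+    Seed = 1337,
+    UseMemorymap = true
+};
+```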
+ +## Constructors + +### **ModelParams(String, Int32, Int32, Int32, Boolean, Boolean, Boolean, Boolean, String, String, Int32, Int32, Boolean, Boolean)** + + + +```csharp +public ModelParams(string modelPath, int contextSize, int gpuLayerCount, int seed, bool useFp16Memory, bool useMemorymap, bool useMemoryLock, bool perplexity, string loraAdapter, string loraBase, int threads, int batchSize, bool convertEosToNewLine, bool embeddingMode) +``` + +#### Parameters + +`modelPath` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+The model path. + +`contextSize` [Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+Model context size (n_ctx) + +`gpuLayerCount` [Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+Number of layers to run in VRAM / GPU memory (n_gpu_layers) + +`seed` [Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+Seed for the random number generator (seed) + +`useFp16Memory` [Boolean](https://docs.microsoft.com/en-us/dotnet/api/system.boolean)
+Whether to use f16 instead of f32 for memory kv (memory_f16) + +`useMemorymap` [Boolean](https://docs.microsoft.com/en-us/dotnet/api/system.boolean)
+Whether to use mmap for faster loads (use_mmap) + +`useMemoryLock` [Boolean](https://docs.microsoft.com/en-us/dotnet/api/system.boolean)
+Whether to use mlock to keep model in memory (use_mlock) + +`perplexity` [Boolean](https://docs.microsoft.com/en-us/dotnet/api/system.boolean)
+Whether to compute perplexity over the prompt (perplexity) + +`loraAdapter` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+Lora adapter path (lora_adapter) + +`loraBase` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+Base model path for the lora adapter (lora_base) + +`threads` [Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+Number of threads (-1 = autodetect) (n_threads) + +`batchSize` [Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+Batch size for prompt processing (must be >=32 to use BLAS) (n_batch) + +`convertEosToNewLine` [Boolean](https://docs.microsoft.com/en-us/dotnet/api/system.boolean)
+Whether to convert eos to newline during the inference. + +`embeddingMode` [Boolean](https://docs.microsoft.com/en-us/dotnet/api/system.boolean)
+Whether to use embedding mode (embedding). Note that if this is set to true, the LLamaModel won't produce text responses anymore. diff --git a/docs/xmldocs/llama.exceptions.runtimeerror.md b/docs/xmldocs/llama.exceptions.runtimeerror.md new file mode 100644 index 00000000..7116015f --- /dev/null +++ b/docs/xmldocs/llama.exceptions.runtimeerror.md @@ -0,0 +1,110 @@ +# RuntimeError + +Namespace: LLama.Exceptions + +```csharp +public class RuntimeError : System.Exception, System.Runtime.Serialization.ISerializable +``` + +Inheritance [Object](https://docs.microsoft.com/en-us/dotnet/api/system.object) → [Exception](https://docs.microsoft.com/en-us/dotnet/api/system.exception) → [RuntimeError](./llama.exceptions.runtimeerror.md)
+Implements [ISerializable](https://docs.microsoft.com/en-us/dotnet/api/system.runtime.serialization.iserializable) + +## Properties + +### **TargetSite** + +```csharp +public MethodBase TargetSite { get; } +``` + +#### Property Value + +[MethodBase](https://docs.microsoft.com/en-us/dotnet/api/system.reflection.methodbase)
+ +### **Message** + +```csharp +public string Message { get; } +``` + +#### Property Value + +[String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +### **Data** + +```csharp +public IDictionary Data { get; } +``` + +#### Property Value + +[IDictionary](https://docs.microsoft.com/en-us/dotnet/api/system.collections.idictionary)
+ +### **InnerException** + +```csharp +public Exception InnerException { get; } +``` + +#### Property Value + +[Exception](https://docs.microsoft.com/en-us/dotnet/api/system.exception)
+ +### **HelpLink** + +```csharp +public string HelpLink { get; set; } +``` + +#### Property Value + +[String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +### **Source** + +```csharp +public string Source { get; set; } +``` + +#### Property Value + +[String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +### **HResult** + +```csharp +public int HResult { get; set; } +``` + +#### Property Value + +[Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +### **StackTrace** + +```csharp +public string StackTrace { get; } +``` + +#### Property Value + +[String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +## Constructors + +### **RuntimeError()** + +```csharp +public RuntimeError() +``` + +### **RuntimeError(String)** + +```csharp +public RuntimeError(string message) +``` + +#### Parameters + +`message` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
diff --git a/docs/xmldocs/llama.extensions.dictionaryextension.md b/docs/xmldocs/llama.extensions.dictionaryextension.md new file mode 100644 index 00000000..5c013c46 --- /dev/null +++ b/docs/xmldocs/llama.extensions.dictionaryextension.md @@ -0,0 +1,73 @@ +# DictionaryExtension + +Namespace: LLama.Extensions + +```csharp +public static class DictionaryExtension +``` + +Inheritance [Object](https://docs.microsoft.com/en-us/dotnet/api/system.object) → [DictionaryExtension](./llama.extensions.dictionaryextension.md) + +## Methods + +### **Deconstruct<T1, T2>(KeyValuePair<T1, T2>, T1&, T2&)** + +```csharp +public static void Deconstruct(KeyValuePair pair, T1& first, T2& second) +``` + +#### Type Parameters + +`T1`
+ +`T2`
+ +#### Parameters + +`pair` KeyValuePair<T1, T2>
+ +`first` T1&
+ +`second` T2&
+ +### **Update<T1, T2>(Dictionary<T1, T2>, IDictionary<T1, T2>)** + +```csharp +public static void Update(Dictionary dic, IDictionary other) +``` + +#### Type Parameters + +`T1`
+ +`T2`
+ +#### Parameters + +`dic` Dictionary<T1, T2>
+ +`other` IDictionary<T1, T2>
+ +### **GetOrDefault<T1, T2>(Dictionary<T1, T2>, T1, T2)** + +```csharp +public static T2 GetOrDefault(Dictionary dic, T1 key, T2 defaultValue) +``` + +#### Type Parameters + +`T1`
+ +`T2`
+ +#### Parameters + +`dic` Dictionary<T1, T2>
+ +`key` T1
+ +`defaultValue` T2
+ +#### Returns + +T2
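+
+#### Example
+
+A sketch of the three helpers. The overwrite-on-conflict behavior of `Update` is an assumption.
+
+```csharp
+var dic = new Dictionary<string, int> { ["a"] = 1 };
+dic.Update(new Dictionary<string, int> { ["b"] = 2 }); // merge entries from another dictionary
+int missing = dic.GetOrDefault("c", 0);                // 0, since "c" is absent
+foreach (var (key, value) in dic)                      // Deconstruct enables tuple-style iteration
+    Console.WriteLine($"{key} = {value}");
+```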
diff --git a/docs/xmldocs/llama.instructexecutor.md b/docs/xmldocs/llama.instructexecutor.md new file mode 100644 index 00000000..3d10bbd6 --- /dev/null +++ b/docs/xmldocs/llama.instructexecutor.md @@ -0,0 +1,142 @@ +# InstructExecutor + +Namespace: LLama + +The LLama executor for instruct mode. + +```csharp +public class InstructExecutor : StatefulExecutorBase, LLama.Abstractions.ILLamaExecutor +``` + +Inheritance [Object](https://docs.microsoft.com/en-us/dotnet/api/system.object) → [StatefulExecutorBase](./llama.statefulexecutorbase.md) → [InstructExecutor](./llama.instructexecutor.md)
+Implements [ILLamaExecutor](./llama.abstractions.illamaexecutor.md) + +## Properties + +### **Model** + +The model used by the executor. + +```csharp +public LLamaModel Model { get; } +``` + +#### Property Value + +[LLamaModel](./llama.llamamodel.md)
+ +## Constructors + +### **InstructExecutor(LLamaModel, String, String)** + + + +```csharp +public InstructExecutor(LLamaModel model, string instructionPrefix, string instructionSuffix) +``` + +#### Parameters + +`model` [LLamaModel](./llama.llamamodel.md)
+ +`instructionPrefix` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +`instructionSuffix` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
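+
+#### Example
+
+A construction sketch. The model path is a placeholder, the Alpaca-style instruction prefix/suffix are assumptions (use the markers your model was trained with), and the single-argument `ModelParams` construction assumes the other constructor arguments are defaulted.
+
+```csharp
+var model = new LLamaModel(new ModelParams("path/to/model.bin"), "UTF-8", null); // null logger: default behavior assumed
+var executor = new InstructExecutor(model, "\n\n### Instruction:\n\n", "\n\n### Response:\n\n");
+```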
+ +## Methods + +### **GetStateData()** + +```csharp +public ExecutorBaseState GetStateData() +``` + +#### Returns + +[ExecutorBaseState](./llama.statefulexecutorbase.executorbasestate.md)
+ +### **LoadState(ExecutorBaseState)** + +```csharp +public void LoadState(ExecutorBaseState data) +``` + +#### Parameters + +`data` [ExecutorBaseState](./llama.statefulexecutorbase.executorbasestate.md)
+ +### **SaveState(String)** + +```csharp +public void SaveState(string filename) +``` + +#### Parameters + +`filename` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +### **LoadState(String)** + +```csharp +public void LoadState(string filename) +``` + +#### Parameters + +`filename` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +### **GetLoopCondition(InferStateArgs)** + +```csharp +protected bool GetLoopCondition(InferStateArgs args) +``` + +#### Parameters + +`args` [InferStateArgs](./llama.statefulexecutorbase.inferstateargs.md)
+ +#### Returns + +[Boolean](https://docs.microsoft.com/en-us/dotnet/api/system.boolean)
+ +### **PreprocessInputs(String, InferStateArgs)** + +```csharp +protected void PreprocessInputs(string text, InferStateArgs args) +``` + +#### Parameters + +`text` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +`args` [InferStateArgs](./llama.statefulexecutorbase.inferstateargs.md)
+ +### **PostProcess(InferenceParams, InferStateArgs, IEnumerable`1&)** + +```csharp +protected bool PostProcess(InferenceParams inferenceParams, InferStateArgs args, IEnumerable`1& extraOutputs) +``` + +#### Parameters + +`inferenceParams` [InferenceParams](./llama.common.inferenceparams.md)
+ +`args` [InferStateArgs](./llama.statefulexecutorbase.inferstateargs.md)
+ +`extraOutputs` [IEnumerable`1&](https://docs.microsoft.com/en-us/dotnet/api/system.collections.generic.ienumerable-1&)
+ +#### Returns + +[Boolean](https://docs.microsoft.com/en-us/dotnet/api/system.boolean)
+ +### **InferInternal(InferenceParams, InferStateArgs)** + +```csharp +protected void InferInternal(InferenceParams inferenceParams, InferStateArgs args) +``` + +#### Parameters + +`inferenceParams` [InferenceParams](./llama.common.inferenceparams.md)
+ +`args` [InferStateArgs](./llama.statefulexecutorbase.inferstateargs.md)
diff --git a/docs/xmldocs/llama.interactiveexecutor.md b/docs/xmldocs/llama.interactiveexecutor.md new file mode 100644 index 00000000..b8953138 --- /dev/null +++ b/docs/xmldocs/llama.interactiveexecutor.md @@ -0,0 +1,142 @@ +# InteractiveExecutor + +Namespace: LLama + +The LLama executor for interactive mode. + +```csharp +public class InteractiveExecutor : StatefulExecutorBase, LLama.Abstractions.ILLamaExecutor +``` + +Inheritance [Object](https://docs.microsoft.com/en-us/dotnet/api/system.object) → [StatefulExecutorBase](./llama.statefulexecutorbase.md) → [InteractiveExecutor](./llama.interactiveexecutor.md)
+Implements [ILLamaExecutor](./llama.abstractions.illamaexecutor.md) + +## Properties + +### **Model** + +The model used by the executor. + +```csharp +public LLamaModel Model { get; } +``` + +#### Property Value + +[LLamaModel](./llama.llamamodel.md)
+ +## Constructors + +### **InteractiveExecutor(LLamaModel)** + + + +```csharp +public InteractiveExecutor(LLamaModel model) +``` + +#### Parameters + +`model` [LLamaModel](./llama.llamamodel.md)
+ +## Methods + +### **GetStateData()** + +```csharp +public ExecutorBaseState GetStateData() +``` + +#### Returns + +[ExecutorBaseState](./llama.statefulexecutorbase.executorbasestate.md)
+ +### **LoadState(ExecutorBaseState)** + +```csharp +public void LoadState(ExecutorBaseState data) +``` + +#### Parameters + +`data` [ExecutorBaseState](./llama.statefulexecutorbase.executorbasestate.md)
+ +### **SaveState(String)** + +```csharp +public void SaveState(string filename) +``` + +#### Parameters + +`filename` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +### **LoadState(String)** + +```csharp +public void LoadState(string filename) +``` + +#### Parameters + +`filename` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +### **GetLoopCondition(InferStateArgs)** + +Define whether to continue the loop to generate responses. + +```csharp +protected bool GetLoopCondition(InferStateArgs args) +``` + +#### Parameters + +`args` [InferStateArgs](./llama.statefulexecutorbase.inferstateargs.md)
+ +#### Returns + +[Boolean](https://docs.microsoft.com/en-us/dotnet/api/system.boolean)
+ +### **PreprocessInputs(String, InferStateArgs)** + +```csharp +protected void PreprocessInputs(string text, InferStateArgs args) +``` + +#### Parameters + +`text` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +`args` [InferStateArgs](./llama.statefulexecutorbase.inferstateargs.md)
+ +### **PostProcess(InferenceParams, InferStateArgs, IEnumerable`1&)** + +Return whether to break the generation. + +```csharp +protected bool PostProcess(InferenceParams inferenceParams, InferStateArgs args, IEnumerable`1& extraOutputs) +``` + +#### Parameters + +`inferenceParams` [InferenceParams](./llama.common.inferenceparams.md)
+ +`args` [InferStateArgs](./llama.statefulexecutorbase.inferstateargs.md)
+ +`extraOutputs` [IEnumerable`1&](https://docs.microsoft.com/en-us/dotnet/api/system.collections.generic.ienumerable-1&)
+ +#### Returns + +[Boolean](https://docs.microsoft.com/en-us/dotnet/api/system.boolean)
+ +### **InferInternal(InferenceParams, InferStateArgs)** + +```csharp +protected void InferInternal(InferenceParams inferenceParams, InferStateArgs args) +``` + +#### Parameters + +`inferenceParams` [InferenceParams](./llama.common.inferenceparams.md)
+ +`args` [InferStateArgs](./llama.statefulexecutorbase.inferstateargs.md)
diff --git a/docs/xmldocs/llama.llamaembedder.md b/docs/xmldocs/llama.llamaembedder.md new file mode 100644 index 00000000..60c36b63 --- /dev/null +++ b/docs/xmldocs/llama.llamaembedder.md @@ -0,0 +1,64 @@ +# LLamaEmbedder + +Namespace: LLama + +The embedder for LLama, which supports getting embeddings from text. + +```csharp +public class LLamaEmbedder : System.IDisposable +``` + +Inheritance [Object](https://docs.microsoft.com/en-us/dotnet/api/system.object) → [LLamaEmbedder](./llama.llamaembedder.md)
+Implements [IDisposable](https://docs.microsoft.com/en-us/dotnet/api/system.idisposable) + +## Constructors + +### **LLamaEmbedder(ModelParams)** + + + +```csharp +public LLamaEmbedder(ModelParams params) +``` + +#### Parameters + +`params` [ModelParams](./llama.common.modelparams.md)
+ +## Methods + +### **GetEmbeddings(String, Int32, Boolean, String)** + +Get the embeddings of the text. + +```csharp +public Single[] GetEmbeddings(string text, int threads, bool addBos, string encoding) +``` + +#### Parameters + +`text` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +`threads` [Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+Threads used for inference. + +`addBos` [Boolean](https://docs.microsoft.com/en-us/dotnet/api/system.boolean)
+Whether to add a bos to the text. + +`encoding` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +#### Returns + +[Single[]](https://docs.microsoft.com/en-us/dotnet/api/system.single)
+ +#### Exceptions + +[RuntimeError](./llama.exceptions.runtimeerror.md)
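+
+#### Example
+
+A usage sketch; the model path is a placeholder, the thread count is illustrative, and the single-argument `ModelParams` construction assumes the other constructor arguments are defaulted.
+
+```csharp
+using var embedder = new LLamaEmbedder(new ModelParams("path/to/model.bin"));
+float[] embeddings = embedder.GetEmbeddings("Hello LLamaSharp!", 4, true, "UTF-8");
+Console.WriteLine($"Embedding dimension: {embeddings.Length}");
+```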
+ +### **Dispose()** + + + +```csharp +public void Dispose() +``` diff --git a/docs/xmldocs/llama.llamamodel.md b/docs/xmldocs/llama.llamamodel.md new file mode 100644 index 00000000..4e54b371 --- /dev/null +++ b/docs/xmldocs/llama.llamamodel.md @@ -0,0 +1,282 @@ +# LLamaModel + +Namespace: LLama + +The abstraction of a LLama model, which holds the context in the native library. + +```csharp +public class LLamaModel : System.IDisposable +``` + +Inheritance [Object](https://docs.microsoft.com/en-us/dotnet/api/system.object) → [LLamaModel](./llama.llamamodel.md)
+Implements [IDisposable](https://docs.microsoft.com/en-us/dotnet/api/system.idisposable) + +## Properties + +### **ContextSize** + +The context size. + +```csharp +public int ContextSize { get; } +``` + +#### Property Value + +[Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +### **Params** + +The model params set for this model. + +```csharp +public ModelParams Params { get; set; } +``` + +#### Property Value + +[ModelParams](./llama.common.modelparams.md)
+ +### **NativeHandle** + +The native handle, which is passed to the native APIs. Please avoid using it + unless you know the usage of the native API. + +```csharp +public SafeLLamaContextHandle NativeHandle { get; } +``` + +#### Property Value + +[SafeLLamaContextHandle](./llama.native.safellamacontexthandle.md)
+ +### **Encoding** + +The encoding set for this model to deal with text input. + +```csharp +public Encoding Encoding { get; } +``` + +#### Property Value + +[Encoding](https://docs.microsoft.com/en-us/dotnet/api/system.text.encoding)
+ +## Constructors + +### **LLamaModel(ModelParams, String, ILLamaLogger)** + + + +```csharp +public LLamaModel(ModelParams Params, string encoding, ILLamaLogger logger) +``` + +#### Parameters + +`Params` [ModelParams](./llama.common.modelparams.md)
+Model params. + +`encoding` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+Encoding to deal with text input. + +`logger` [ILLamaLogger](./llama.common.illamalogger.md)
+The logger. + +## Methods + +### **Tokenize(String, Boolean)** + +Tokenize a string. + +```csharp +public IEnumerable Tokenize(string text, bool addBos) +``` + +#### Parameters + +`text` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +`addBos` [Boolean](https://docs.microsoft.com/en-us/dotnet/api/system.boolean)
+Whether to add a bos to the text. + +#### Returns + +[IEnumerable<Int32>](https://docs.microsoft.com/en-us/dotnet/api/system.collections.generic.ienumerable-1)
+ +### **DeTokenize(IEnumerable<Int32>)** + +Detokenize the tokens to text. + +```csharp +public string DeTokenize(IEnumerable tokens) +``` + +#### Parameters + +`tokens` [IEnumerable<Int32>](https://docs.microsoft.com/en-us/dotnet/api/system.collections.generic.ienumerable-1)
+ +#### Returns + +[String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
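+
+#### Example
+
+A round-trip sketch, assuming `model` is an already-constructed LLamaModel.
+
+```csharp
+IEnumerable<int> tokens = model.Tokenize("Hello, world!", true); // addBos: true
+string text = model.DeTokenize(tokens);
+Console.WriteLine(text);
+```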
+ +### **SaveState(String)** + +Save the state to specified path. + +```csharp +public void SaveState(string filename) +``` + +#### Parameters + +`filename` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +### **GetStateData()** + +Get the state data as a byte array. + +```csharp +public Byte[] GetStateData() +``` + +#### Returns + +[Byte[]](https://docs.microsoft.com/en-us/dotnet/api/system.byte)
+ +### **LoadState(String)** + +Load the state from specified path. + +```csharp +public void LoadState(string filename) +``` + +#### Parameters + +`filename` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +#### Exceptions + +[RuntimeError](./llama.exceptions.runtimeerror.md)
+ +### **LoadState(Byte[])** + +Load the state from memory. + +```csharp +public void LoadState(Byte[] stateData) +``` + +#### Parameters + +`stateData` [Byte[]](https://docs.microsoft.com/en-us/dotnet/api/system.byte)
+ +#### Exceptions + +[RuntimeError](./llama.exceptions.runtimeerror.md)
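+
+#### Example
+
+A sketch of persisting and restoring the context state, assuming `model` is an already-constructed LLamaModel. Restoring into a model created with different parameters is assumed to fail.
+
+```csharp
+model.SaveState("model_state.bin");  // to disk
+byte[] state = model.GetStateData(); // or in memory
+// ... later ...
+model.LoadState("model_state.bin");
+model.LoadState(state);
+```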
+ +### **Sample(LLamaTokenDataArray, Single, MiroStateType, Single, Single, Int32, Single, Single, Single)** + +Perform the sampling. Please don't use it unless you fully know what it does. + +```csharp +public int Sample(LLamaTokenDataArray candidates, float temperature, MiroStateType mirostat, float mirostatTau, float mirostatEta, int topK, float topP, float tfsZ, float typicalP) +``` + +#### Parameters + +`candidates` [LLamaTokenDataArray](./llama.native.llamatokendataarray.md)
+ +`temperature` [Single](https://docs.microsoft.com/en-us/dotnet/api/system.single)
+ +`mirostat` [MiroStateType](./llama.common.mirostatetype.md)
+ +`mirostatTau` [Single](https://docs.microsoft.com/en-us/dotnet/api/system.single)
+ +`mirostatEta` [Single](https://docs.microsoft.com/en-us/dotnet/api/system.single)
+ +`topK` [Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +`topP` [Single](https://docs.microsoft.com/en-us/dotnet/api/system.single)
+ +`tfsZ` [Single](https://docs.microsoft.com/en-us/dotnet/api/system.single)
+ +`typicalP` [Single](https://docs.microsoft.com/en-us/dotnet/api/system.single)
+ +#### Returns + +[Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +### **ApplyPenalty(IEnumerable<Int32>, Dictionary<Int32, Single>, Int32, Single, Single, Single, Boolean)** + +Apply the penalty for the tokens. Please don't use it unless you fully know what it does. + +```csharp +public LLamaTokenDataArray ApplyPenalty(IEnumerable lastTokens, Dictionary logitBias, int repeatLastTokensCount, float repeatPenalty, float alphaFrequency, float alphaPresence, bool penalizeNL) +``` + +#### Parameters + +`lastTokens` [IEnumerable<Int32>](https://docs.microsoft.com/en-us/dotnet/api/system.collections.generic.ienumerable-1)
+ +`logitBias` [Dictionary<Int32, Single>](https://docs.microsoft.com/en-us/dotnet/api/system.collections.generic.dictionary-2)
+ +`repeatLastTokensCount` [Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +`repeatPenalty` [Single](https://docs.microsoft.com/en-us/dotnet/api/system.single)
+ +`alphaFrequency` [Single](https://docs.microsoft.com/en-us/dotnet/api/system.single)
+ +`alphaPresence` [Single](https://docs.microsoft.com/en-us/dotnet/api/system.single)
+ +`penalizeNL` [Boolean](https://docs.microsoft.com/en-us/dotnet/api/system.boolean)
+ +#### Returns + +[LLamaTokenDataArray](./llama.native.llamatokendataarray.md)
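+
+#### Example
+
+A low-level sampling sketch, assuming `model` holds fresh logits from a previous `Eval` call. The numeric values are illustrative, and `(MiroStateType)0` means "disabled" per the [InferenceParams](./llama.common.inferenceparams.md) docs.
+
+```csharp
+var lastTokens = new List<int>(); // fill with the recent token ids from generation
+var candidates = model.ApplyPenalty(lastTokens, new Dictionary<int, float>(), 64, 1.1f, 0.0f, 0.0f, true);
+int tokenId = model.Sample(candidates, 0.8f, (MiroStateType)0, 5.0f, 0.1f, 40, 0.95f, 1.0f, 1.0f);
+```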
+ +### **Eval(Int32[], Int32)** + + + +```csharp +public int Eval(Int32[] tokens, int pastTokensCount) +``` + +#### Parameters + +`tokens` [Int32[]](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +`pastTokensCount` [Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +#### Returns + +[Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+The updated `pastTokensCount`. + +#### Exceptions + +[RuntimeError](./llama.exceptions.runtimeerror.md)
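+
+#### Example
+
+A feed-forward sketch, assuming `model` is an already-constructed LLamaModel (`System.Linq` is needed for `ToArray`).
+
+```csharp
+int[] tokens = model.Tokenize("Hello", true).ToArray(); // addBos: true
+int pastTokensCount = 0;
+pastTokensCount = model.Eval(tokens, pastTokensCount);  // returns the updated count
+```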
+ +### **GenerateResult(IEnumerable<Int32>)** + +```csharp +internal IEnumerable GenerateResult(IEnumerable ids) +``` + +#### Parameters + +`ids` [IEnumerable<Int32>](https://docs.microsoft.com/en-us/dotnet/api/system.collections.generic.ienumerable-1)
+ +#### Returns + +[IEnumerable<String>](https://docs.microsoft.com/en-us/dotnet/api/system.collections.generic.ienumerable-1)
+ +### **Dispose()** + + + +```csharp +public void Dispose() +``` diff --git a/docs/xmldocs/llama.llamaquantizer.md b/docs/xmldocs/llama.llamaquantizer.md new file mode 100644 index 00000000..ce0349bb --- /dev/null +++ b/docs/xmldocs/llama.llamaquantizer.md @@ -0,0 +1,75 @@ +# LLamaQuantizer + +Namespace: LLama + +The quantizer to quantize the model. + +```csharp +public static class LLamaQuantizer +``` + +Inheritance [Object](https://docs.microsoft.com/en-us/dotnet/api/system.object) → [LLamaQuantizer](./llama.llamaquantizer.md) + +## Methods + +### **Quantize(String, String, LLamaFtype, Int32)** + +Quantize the model. + +```csharp +public static bool Quantize(string srcFileName, string dstFilename, LLamaFtype ftype, int nthread) +``` + +#### Parameters + +`srcFileName` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+The model file to be quantized. + +`dstFilename` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+The path to save the quantized model. + +`ftype` [LLamaFtype](./llama.native.llamaftype.md)
+The type of quantization. + +`nthread` [Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+Number of threads to use during quantization. By default it's the number of physical cores. + +#### Returns + +[Boolean](https://docs.microsoft.com/en-us/dotnet/api/system.boolean)
+Whether the quantization is successful. + +#### Exceptions + +[ArgumentException](https://docs.microsoft.com/en-us/dotnet/api/system.argumentexception)
+ +### **Quantize(String, String, String, Int32)** + +Quantize the model. + +```csharp +public static bool Quantize(string srcFileName, string dstFilename, string ftype, int nthread) +``` + +#### Parameters + +`srcFileName` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+The model file to be quantized. + +`dstFilename` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+The path to save the quantized model. + +`ftype` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+The type of quantization. + +`nthread` [Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+Number of threads to use during quantization. By default it's the number of physical cores. + +#### Returns + +[Boolean](https://docs.microsoft.com/en-us/dotnet/api/system.boolean)
+Whether the quantization is successful. + +#### Exceptions + +[ArgumentException](https://docs.microsoft.com/en-us/dotnet/api/system.argumentexception)
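+
+#### Example
+
+A quantization sketch; the file names are placeholders, and the `"q4_0"` ftype string assumes the usual llama.cpp quantization names are accepted.
+
+```csharp
+bool ok = LLamaQuantizer.Quantize("ggml-model-f16.bin", "ggml-model-q4_0.bin", "q4_0", 4);
+Console.WriteLine(ok ? "Quantization succeeded." : "Quantization failed.");
+```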
diff --git a/docs/xmldocs/llama.llamatransforms.md b/docs/xmldocs/llama.llamatransforms.md new file mode 100644 index 00000000..5b23a419 --- /dev/null +++ b/docs/xmldocs/llama.llamatransforms.md @@ -0,0 +1,19 @@ +# LLamaTransforms + +Namespace: LLama + +A class that contains all the transforms provided internally by LLama. + +```csharp +public class LLamaTransforms +``` + +Inheritance [Object](https://docs.microsoft.com/en-us/dotnet/api/system.object) → [LLamaTransforms](./llama.llamatransforms.md) + +## Constructors + +### **LLamaTransforms()** + +```csharp +public LLamaTransforms() +``` diff --git a/docs/xmldocs/llama.native.llamacontextparams.md b/docs/xmldocs/llama.native.llamacontextparams.md new file mode 100644 index 00000000..e47b2bb6 --- /dev/null +++ b/docs/xmldocs/llama.native.llamacontextparams.md @@ -0,0 +1,99 @@ +# LLamaContextParams + +Namespace: LLama.Native + +```csharp +public struct LLamaContextParams +``` + +Inheritance [Object](https://docs.microsoft.com/en-us/dotnet/api/system.object) → [ValueType](https://docs.microsoft.com/en-us/dotnet/api/system.valuetype) → [LLamaContextParams](./llama.native.llamacontextparams.md) + +## Fields + +### **n_ctx** + +text context + +```csharp +public int n_ctx; +``` + +### **n_gpu_layers** + +number of layers to store in VRAM + +```csharp +public int n_gpu_layers; +``` + +### **seed** + +RNG seed, -1 for random + +```csharp +public int seed; +``` + +### **f16_kv** + +use fp16 for KV cache + +```csharp +public bool f16_kv; +``` + +### **logits_all** + +the llama_eval() call computes all logits, not just the last one + +```csharp +public bool logits_all; +``` + +### **vocab_only** + +only load the vocabulary, no weights + +```csharp +public bool vocab_only; +``` + +### **use_mmap** + +use mmap if possible + +```csharp +public bool use_mmap; +``` + +### **use_mlock** + +force system to keep model in RAM + +```csharp +public bool use_mlock; +``` + +### **embedding** + +embedding mode only + +```csharp +public bool embedding; +``` + +### **progress_callback** + +called with a progress value between 0 and 1, pass NULL to disable + +```csharp +public IntPtr progress_callback; +``` + +### **progress_callback_user_data** + +context pointer passed to the progress callback + +```csharp +public IntPtr progress_callback_user_data; +``` diff --git a/docs/xmldocs/llama.native.llamaftype.md b/docs/xmldocs/llama.native.llamaftype.md new file mode 100644 index 00000000..2c76c9e1 --- /dev/null +++ b/docs/xmldocs/llama.native.llamaftype.md @@ -0,0 +1,15 @@ +# LLamaFtype + +Namespace: LLama.Native + +```csharp +public enum LLamaFtype +``` + +Inheritance [Object](https://docs.microsoft.com/en-us/dotnet/api/system.object) → [ValueType](https://docs.microsoft.com/en-us/dotnet/api/system.valuetype) → [Enum](https://docs.microsoft.com/en-us/dotnet/api/system.enum) → [LLamaFtype](./llama.native.llamaftype.md)
+Implements [IComparable](https://docs.microsoft.com/en-us/dotnet/api/system.icomparable), [IFormattable](https://docs.microsoft.com/en-us/dotnet/api/system.iformattable), [IConvertible](https://docs.microsoft.com/en-us/dotnet/api/system.iconvertible) + +## Fields + +| Name | Value | Description | +| --- | --: | --- | diff --git a/docs/xmldocs/llama.native.llamatokendata.md b/docs/xmldocs/llama.native.llamatokendata.md new file mode 100644 index 00000000..8632f702 --- /dev/null +++ b/docs/xmldocs/llama.native.llamatokendata.md @@ -0,0 +1,51 @@ +# LLamaTokenData + +Namespace: LLama.Native + +```csharp +public struct LLamaTokenData +``` + +Inheritance [Object](https://docs.microsoft.com/en-us/dotnet/api/system.object) → [ValueType](https://docs.microsoft.com/en-us/dotnet/api/system.valuetype) → [LLamaTokenData](./llama.native.llamatokendata.md) + +## Fields + +### **id** + +token id + +```csharp +public int id; +``` + +### **logit** + +log-odds of the token + +```csharp +public float logit; +``` + +### **p** + +probability of the token + +```csharp +public float p; +``` + +## Constructors + +### **LLamaTokenData(Int32, Single, Single)** + +```csharp +LLamaTokenData(int id, float logit, float p) +``` + +#### Parameters + +`id` [Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +`logit` [Single](https://docs.microsoft.com/en-us/dotnet/api/system.single)
+ +`p` [Single](https://docs.microsoft.com/en-us/dotnet/api/system.single)
diff --git a/docs/xmldocs/llama.native.llamatokendataarray.md b/docs/xmldocs/llama.native.llamatokendataarray.md new file mode 100644 index 00000000..e9a05e53 --- /dev/null +++ b/docs/xmldocs/llama.native.llamatokendataarray.md @@ -0,0 +1,45 @@ +# LLamaTokenDataArray + +Namespace: LLama.Native + +```csharp +public struct LLamaTokenDataArray +``` + +Inheritance [Object](https://docs.microsoft.com/en-us/dotnet/api/system.object) → [ValueType](https://docs.microsoft.com/en-us/dotnet/api/system.valuetype) → [LLamaTokenDataArray](./llama.native.llamatokendataarray.md) + +## Fields + +### **data** + +```csharp +public Memory data; +``` + +### **size** + +```csharp +public ulong size; +``` + +### **sorted** + +```csharp +public bool sorted; +``` + +## Constructors + +### **LLamaTokenDataArray(LLamaTokenData[], UInt64, Boolean)** + +```csharp +LLamaTokenDataArray(LLamaTokenData[] data, ulong size, bool sorted) +``` + +#### Parameters + +`data` [LLamaTokenData[]](./llama.native.llamatokendata.md)
+ +`size` [UInt64](https://docs.microsoft.com/en-us/dotnet/api/system.uint64)
+ +`sorted` [Boolean](https://docs.microsoft.com/en-us/dotnet/api/system.boolean)
diff --git a/docs/xmldocs/llama.native.llamatokendataarraynative.md b/docs/xmldocs/llama.native.llamatokendataarraynative.md new file mode 100644 index 00000000..1838d3a5 --- /dev/null +++ b/docs/xmldocs/llama.native.llamatokendataarraynative.md @@ -0,0 +1,29 @@ +# LLamaTokenDataArrayNative + +Namespace: LLama.Native + +```csharp +public struct LLamaTokenDataArrayNative +``` + +Inheritance [Object](https://docs.microsoft.com/en-us/dotnet/api/system.object) → [ValueType](https://docs.microsoft.com/en-us/dotnet/api/system.valuetype) → [LLamaTokenDataArrayNative](./llama.native.llamatokendataarraynative.md) + +## Fields + +### **data** + +```csharp +public IntPtr data; +``` + +### **size** + +```csharp +public ulong size; +``` + +### **sorted** + +```csharp +public bool sorted; +``` diff --git a/docs/xmldocs/llama.native.nativeapi.md b/docs/xmldocs/llama.native.nativeapi.md new file mode 100644 index 00000000..787529da --- /dev/null +++ b/docs/xmldocs/llama.native.nativeapi.md @@ -0,0 +1,786 @@ +# NativeApi + +Namespace: LLama.Native + +```csharp +public class NativeApi +``` + +Inheritance [Object](https://docs.microsoft.com/en-us/dotnet/api/system.object) → [NativeApi](./llama.native.nativeapi.md) + +## Constructors + +### **NativeApi()** + +```csharp +public NativeApi() +``` + +## Methods + +### **llama_print_timings(SafeLLamaContextHandle)** + +```csharp +public static void llama_print_timings(SafeLLamaContextHandle ctx) +``` + +#### Parameters + +`ctx` [SafeLLamaContextHandle](./llama.native.safellamacontexthandle.md)
+ +### **llama_reset_timings(SafeLLamaContextHandle)** + +```csharp +public static void llama_reset_timings(SafeLLamaContextHandle ctx) +``` + +#### Parameters + +`ctx` [SafeLLamaContextHandle](./llama.native.safellamacontexthandle.md)
+ +### **llama_print_system_info()** + +Print system information + +```csharp +public static IntPtr llama_print_system_info() +``` + +#### Returns + +[IntPtr](https://docs.microsoft.com/en-us/dotnet/api/system.intptr)
+ +### **llama_model_quantize(String, String, LLamaFtype, Int32)** + +```csharp +public static int llama_model_quantize(string fname_inp, string fname_out, LLamaFtype ftype, int nthread) +``` + +#### Parameters + +`fname_inp` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +`fname_out` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +`ftype` [LLamaFtype](./llama.native.llamaftype.md)
+ +`nthread` [Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +#### Returns + +[Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +### **llama_sample_repetition_penalty(SafeLLamaContextHandle, IntPtr, Int32[], UInt64, Single)** + +Repetition penalty described in CTRL academic paper https://arxiv.org/abs/1909.05858, with negative logit fix. + +```csharp +public static void llama_sample_repetition_penalty(SafeLLamaContextHandle ctx, IntPtr candidates, Int32[] last_tokens, ulong last_tokens_size, float penalty) +``` + +#### Parameters + +`ctx` [SafeLLamaContextHandle](./llama.native.safellamacontexthandle.md)
+ +`candidates` [IntPtr](https://docs.microsoft.com/en-us/dotnet/api/system.intptr)
+Pointer to LLamaTokenDataArray + +`last_tokens` [Int32[]](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +`last_tokens_size` [UInt64](https://docs.microsoft.com/en-us/dotnet/api/system.uint64)
+ +`penalty` [Single](https://docs.microsoft.com/en-us/dotnet/api/system.single)
+ +### **llama_sample_frequency_and_presence_penalties(SafeLLamaContextHandle, IntPtr, Int32[], UInt64, Single, Single)** + +Frequency and presence penalties described in OpenAI API https://platform.openai.com/docs/api-reference/parameter-details. + +```csharp +public static void llama_sample_frequency_and_presence_penalties(SafeLLamaContextHandle ctx, IntPtr candidates, Int32[] last_tokens, ulong last_tokens_size, float alpha_frequency, float alpha_presence) +``` + +#### Parameters + +`ctx` [SafeLLamaContextHandle](./llama.native.safellamacontexthandle.md)
+ +`candidates` [IntPtr](https://docs.microsoft.com/en-us/dotnet/api/system.intptr)
+Pointer to LLamaTokenDataArray + +`last_tokens` [Int32[]](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +`last_tokens_size` [UInt64](https://docs.microsoft.com/en-us/dotnet/api/system.uint64)
+ +`alpha_frequency` [Single](https://docs.microsoft.com/en-us/dotnet/api/system.single)
+ +`alpha_presence` [Single](https://docs.microsoft.com/en-us/dotnet/api/system.single)
+ +### **llama_sample_softmax(SafeLLamaContextHandle, IntPtr)** + +Sorts candidate tokens by their logits in descending order and calculates probabilities based on logits. + +```csharp +public static void llama_sample_softmax(SafeLLamaContextHandle ctx, IntPtr candidates) +``` + +#### Parameters + +`ctx` [SafeLLamaContextHandle](./llama.native.safellamacontexthandle.md)
+ +`candidates` [IntPtr](https://docs.microsoft.com/en-us/dotnet/api/system.intptr)
+Pointer to LLamaTokenDataArray + +### **llama_sample_top_k(SafeLLamaContextHandle, IntPtr, Int32, UInt64)** + +Top-K sampling described in academic paper "The Curious Case of Neural Text Degeneration" https://arxiv.org/abs/1904.09751 + +```csharp +public static void llama_sample_top_k(SafeLLamaContextHandle ctx, IntPtr candidates, int k, ulong min_keep) +``` + +#### Parameters + +`ctx` [SafeLLamaContextHandle](./llama.native.safellamacontexthandle.md)
+ +`candidates` [IntPtr](https://docs.microsoft.com/en-us/dotnet/api/system.intptr)
+Pointer to LLamaTokenDataArray + +`k` [Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +`min_keep` [UInt64](https://docs.microsoft.com/en-us/dotnet/api/system.uint64)
+ +### **llama_sample_top_p(SafeLLamaContextHandle, IntPtr, Single, UInt64)** + +Nucleus sampling described in academic paper "The Curious Case of Neural Text Degeneration" https://arxiv.org/abs/1904.09751 + +```csharp +public static void llama_sample_top_p(SafeLLamaContextHandle ctx, IntPtr candidates, float p, ulong min_keep) +``` + +#### Parameters + +`ctx` [SafeLLamaContextHandle](./llama.native.safellamacontexthandle.md)
+ +`candidates` [IntPtr](https://docs.microsoft.com/en-us/dotnet/api/system.intptr)
+Pointer to LLamaTokenDataArray + +`p` [Single](https://docs.microsoft.com/en-us/dotnet/api/system.single)
+ +`min_keep` [UInt64](https://docs.microsoft.com/en-us/dotnet/api/system.uint64)
+ +### **llama_sample_tail_free(SafeLLamaContextHandle, IntPtr, Single, UInt64)** + +Tail Free Sampling described in https://www.trentonbricken.com/Tail-Free-Sampling/. + +```csharp +public static void llama_sample_tail_free(SafeLLamaContextHandle ctx, IntPtr candidates, float z, ulong min_keep) +``` + +#### Parameters + +`ctx` [SafeLLamaContextHandle](./llama.native.safellamacontexthandle.md)
+ +`candidates` [IntPtr](https://docs.microsoft.com/en-us/dotnet/api/system.intptr)
+Pointer to LLamaTokenDataArray + +`z` [Single](https://docs.microsoft.com/en-us/dotnet/api/system.single)
+ +`min_keep` [UInt64](https://docs.microsoft.com/en-us/dotnet/api/system.uint64)
+ +### **llama_sample_typical(SafeLLamaContextHandle, IntPtr, Single, UInt64)** + +Locally Typical Sampling implementation described in the paper https://arxiv.org/abs/2202.00666. + +```csharp +public static void llama_sample_typical(SafeLLamaContextHandle ctx, IntPtr candidates, float p, ulong min_keep) +``` + +#### Parameters + +`ctx` [SafeLLamaContextHandle](./llama.native.safellamacontexthandle.md)
+ +`candidates` [IntPtr](https://docs.microsoft.com/en-us/dotnet/api/system.intptr)
+Pointer to LLamaTokenDataArray + +`p` [Single](https://docs.microsoft.com/en-us/dotnet/api/system.single)
+ +`min_keep` [UInt64](https://docs.microsoft.com/en-us/dotnet/api/system.uint64)
+ +### **llama_sample_temperature(SafeLLamaContextHandle, IntPtr, Single)** + +```csharp +public static void llama_sample_temperature(SafeLLamaContextHandle ctx, IntPtr candidates, float temp) +``` + +#### Parameters + +`ctx` [SafeLLamaContextHandle](./llama.native.safellamacontexthandle.md)
+ +`candidates` [IntPtr](https://docs.microsoft.com/en-us/dotnet/api/system.intptr)
+ +`temp` [Single](https://docs.microsoft.com/en-us/dotnet/api/system.single)
+ +### **llama_sample_token_mirostat(SafeLLamaContextHandle, IntPtr, Single, Single, Int32, Single*)** + +Mirostat 1.0 algorithm described in the paper https://arxiv.org/abs/2007.14966. Uses tokens instead of words. + +```csharp +public static int llama_sample_token_mirostat(SafeLLamaContextHandle ctx, IntPtr candidates, float tau, float eta, int m, Single* mu) +``` + +#### Parameters + +`ctx` [SafeLLamaContextHandle](./llama.native.safellamacontexthandle.md)
+ +`candidates` [IntPtr](https://docs.microsoft.com/en-us/dotnet/api/system.intptr)
+A vector of `llama_token_data` containing the candidate tokens, their probabilities (p), and log-odds (logit) for the current position in the generated text. + +`tau` [Single](https://docs.microsoft.com/en-us/dotnet/api/system.single)
+The target cross-entropy (or surprise) value you want to achieve for the generated text. A higher value corresponds to more surprising or less predictable text, while a lower value corresponds to less surprising or more predictable text. + +`eta` [Single](https://docs.microsoft.com/en-us/dotnet/api/system.single)
+The learning rate used to update `mu` based on the error between the target and observed surprisal of the sampled word. A larger learning rate will cause `mu` to be updated more quickly, while a smaller learning rate will result in slower updates. + +`m` [Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+The number of tokens considered in the estimation of `s_hat`. This is an arbitrary value that is used to calculate `s_hat`, which in turn helps to calculate the value of `k`. In the paper, they use `m = 100`, but you can experiment with different values to see how it affects the performance of the algorithm. + +`mu` [Single*](https://docs.microsoft.com/en-us/dotnet/api/system.single*)
+Maximum cross-entropy. This value is initialized to be twice the target cross-entropy (`2 * tau`) and is updated in the algorithm based on the error between the target and observed surprisal. + +#### Returns + +[Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +### **llama_sample_token_mirostat_v2(SafeLLamaContextHandle, IntPtr, Single, Single, Single*)** + +Mirostat 2.0 algorithm described in the paper https://arxiv.org/abs/2007.14966. Uses tokens instead of words. + +```csharp +public static int llama_sample_token_mirostat_v2(SafeLLamaContextHandle ctx, IntPtr candidates, float tau, float eta, Single* mu) +``` + +#### Parameters + +`ctx` [SafeLLamaContextHandle](./llama.native.safellamacontexthandle.md)
+ +`candidates` [IntPtr](https://docs.microsoft.com/en-us/dotnet/api/system.intptr)
+A vector of `llama_token_data` containing the candidate tokens, their probabilities (p), and log-odds (logit) for the current position in the generated text. + +`tau` [Single](https://docs.microsoft.com/en-us/dotnet/api/system.single)
+The target cross-entropy (or surprise) value you want to achieve for the generated text. A higher value corresponds to more surprising or less predictable text, while a lower value corresponds to less surprising or more predictable text. + +`eta` [Single](https://docs.microsoft.com/en-us/dotnet/api/system.single)
+The learning rate used to update `mu` based on the error between the target and observed surprisal of the sampled word. A larger learning rate will cause `mu` to be updated more quickly, while a smaller learning rate will result in slower updates. + +`mu` [Single*](https://docs.microsoft.com/en-us/dotnet/api/system.single*)
+Maximum cross-entropy. This value is initialized to be twice the target cross-entropy (`2 * tau`) and is updated in the algorithm based on the error between the target and observed surprisal. + +#### Returns + +[Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +### **llama_sample_token_greedy(SafeLLamaContextHandle, IntPtr)** + +Selects the token with the highest probability. + +```csharp +public static int llama_sample_token_greedy(SafeLLamaContextHandle ctx, IntPtr candidates) +``` + +#### Parameters + +`ctx` [SafeLLamaContextHandle](./llama.native.safellamacontexthandle.md)
+ +`candidates` [IntPtr](https://docs.microsoft.com/en-us/dotnet/api/system.intptr)
+Pointer to LLamaTokenDataArray + +#### Returns + +[Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +### **llama_sample_token(SafeLLamaContextHandle, IntPtr)** + +Randomly selects a token from the candidates based on their probabilities. + +```csharp +public static int llama_sample_token(SafeLLamaContextHandle ctx, IntPtr candidates) +``` + +#### Parameters + +`ctx` [SafeLLamaContextHandle](./llama.native.safellamacontexthandle.md)
+ +`candidates` [IntPtr](https://docs.microsoft.com/en-us/dotnet/api/system.intptr)
+Pointer to LLamaTokenDataArray + +#### Returns + +[Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
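+
+A short hedged sketch contrasting the two selection strategies (again assuming `ctx` and a marshalled `candidatesPtr`):
+
+```csharp
+// Deterministic: always picks the most probable candidate.
+int greedyToken  = NativeApi.llama_sample_token_greedy(ctx, candidatesPtr);
+
+// Stochastic: draws from the candidate distribution; the outcome depends
+// on the context's RNG state (see llama_set_rng_seed below).
+int sampledToken = NativeApi.llama_sample_token(ctx, candidatesPtr);
+```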
+ +### **llama_empty_call()** + +```csharp +public static bool llama_empty_call() +``` + +#### Returns + +[Boolean](https://docs.microsoft.com/en-us/dotnet/api/system.boolean)
+ +### **llama_context_default_params()** + +```csharp +public static LLamaContextParams llama_context_default_params() +``` + +#### Returns + +[LLamaContextParams](./llama.native.llamacontextparams.md)
+ +### **llama_mmap_supported()** + +```csharp +public static bool llama_mmap_supported() +``` + +#### Returns + +[Boolean](https://docs.microsoft.com/en-us/dotnet/api/system.boolean)
+ +### **llama_mlock_supported()** + +```csharp +public static bool llama_mlock_supported() +``` + +#### Returns + +[Boolean](https://docs.microsoft.com/en-us/dotnet/api/system.boolean)
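+
+These two capability checks are typically run once at startup, for example before deciding whether to enable memory mapping or locking in the model parameters (a sketch, assuming the static class `NativeApi`):
+
+```csharp
+bool canMmap  = NativeApi.llama_mmap_supported();
+bool canMlock = NativeApi.llama_mlock_supported();
+Console.WriteLine($"mmap supported: {canMmap}, mlock supported: {canMlock}");
+```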
+ +### **llama_init_from_file(String, LLamaContextParams)** + +Loads a ggml llama model from file. + Allocates (almost) all memory needed for the model. + Returns NULL on failure. + +```csharp +public static IntPtr llama_init_from_file(string path_model, LLamaContextParams params_) +``` + +#### Parameters + +`path_model` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +`params_` [LLamaContextParams](./llama.native.llamacontextparams.md)
+ +#### Returns + +[IntPtr](https://docs.microsoft.com/en-us/dotnet/api/system.intptr)
+ +### **llama_init_backend()** + +Not a great API - very likely to change. + Initializes the llama + ggml backend. + Call once at the start of the program. + +```csharp +public static void llama_init_backend() +``` + +### **llama_free(IntPtr)** + +Frees all allocated memory. + +```csharp +public static void llama_free(IntPtr ctx) +``` + +#### Parameters + +`ctx` [IntPtr](https://docs.microsoft.com/en-us/dotnet/api/system.intptr)
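+
+A minimal lifecycle sketch tying the calls above together (the model path is a placeholder; error handling is reduced to a bare exception):
+
+```csharp
+NativeApi.llama_init_backend(); // once, at program start
+
+LLamaContextParams p = NativeApi.llama_context_default_params();
+IntPtr ctx = NativeApi.llama_init_from_file("path/to/model.bin", p);
+if (ctx == IntPtr.Zero)
+    throw new Exception("Failed to load the model.");
+
+try
+{
+    // ... evaluate tokens, sample, etc. ...
+}
+finally
+{
+    NativeApi.llama_free(ctx); // release all native memory
+}
+```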
+ +### **llama_apply_lora_from_file(SafeLLamaContextHandle, String, String, Int32)** + +Apply a LoRA adapter to a loaded model. + `path_base_model` is the path to a higher-quality model to use as a base for + the layers modified by the adapter. It can be NULL to use the currently loaded model. + The model needs to be reloaded before applying a new adapter; otherwise the adapter + will be applied on top of the previous one. + +```csharp +public static int llama_apply_lora_from_file(SafeLLamaContextHandle ctx, string path_lora, string path_base_model, int n_threads) +``` + +#### Parameters + +`ctx` [SafeLLamaContextHandle](./llama.native.safellamacontexthandle.md)
+ +`path_lora` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +`path_base_model` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +`n_threads` [Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +#### Returns + +[Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+Returns 0 on success + +### **llama_get_kv_cache_token_count(SafeLLamaContextHandle)** + +Returns the number of tokens in the KV cache + +```csharp +public static int llama_get_kv_cache_token_count(SafeLLamaContextHandle ctx) +``` + +#### Parameters + +`ctx` [SafeLLamaContextHandle](./llama.native.safellamacontexthandle.md)
+ +#### Returns + +[Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +### **llama_set_rng_seed(SafeLLamaContextHandle, Int32)** + +Sets the current rng seed. + +```csharp +public static void llama_set_rng_seed(SafeLLamaContextHandle ctx, int seed) +``` + +#### Parameters + +`ctx` [SafeLLamaContextHandle](./llama.native.safellamacontexthandle.md)
+ +`seed` [Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +### **llama_get_state_size(SafeLLamaContextHandle)** + +Returns the maximum size in bytes of the state (rng, logits, embedding + and kv_cache) - will often be smaller after compacting tokens + +```csharp +public static ulong llama_get_state_size(SafeLLamaContextHandle ctx) +``` + +#### Parameters + +`ctx` [SafeLLamaContextHandle](./llama.native.safellamacontexthandle.md)
+ +#### Returns + +[UInt64](https://docs.microsoft.com/en-us/dotnet/api/system.uint64)
+ +### **llama_copy_state_data(SafeLLamaContextHandle, Byte[])** + +Copies the state to the specified destination address. + The destination needs to have enough memory allocated. + Returns the number of bytes copied. + +```csharp +public static ulong llama_copy_state_data(SafeLLamaContextHandle ctx, Byte[] dest) +``` + +#### Parameters + +`ctx` [SafeLLamaContextHandle](./llama.native.safellamacontexthandle.md)
+ +`dest` [Byte[]](https://docs.microsoft.com/en-us/dotnet/api/system.byte)
+ +#### Returns + +[UInt64](https://docs.microsoft.com/en-us/dotnet/api/system.uint64)
+ +### **llama_set_state_data(SafeLLamaContextHandle, Byte[])** + +Sets the state, reading from the specified address. + Returns the number of bytes read. + +```csharp +public static ulong llama_set_state_data(SafeLLamaContextHandle ctx, Byte[] src) +``` + +#### Parameters + +`ctx` [SafeLLamaContextHandle](./llama.native.safellamacontexthandle.md)
+ +`src` [Byte[]](https://docs.microsoft.com/en-us/dotnet/api/system.byte)
+ +#### Returns + +[UInt64](https://docs.microsoft.com/en-us/dotnet/api/system.uint64)
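+
+Together with `llama_get_state_size`, these two calls allow snapshotting and later restoring a context. A hedged sketch (assuming `ctx` is a valid handle):
+
+```csharp
+// Snapshot: allocate a buffer of the maximum state size and fill it.
+ulong size = NativeApi.llama_get_state_size(ctx);
+byte[] state = new byte[size];
+ulong written = NativeApi.llama_copy_state_data(ctx, state);
+
+// ... run more inference that should later be undone ...
+
+// Restore: roll the context back to the snapshot.
+ulong read = NativeApi.llama_set_state_data(ctx, state);
+```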
+ +### **llama_load_session_file(SafeLLamaContextHandle, String, Int32[], UInt64, UInt64*)** + +Load session file + +```csharp +public static bool llama_load_session_file(SafeLLamaContextHandle ctx, string path_session, Int32[] tokens_out, ulong n_token_capacity, UInt64* n_token_count_out) +``` + +#### Parameters + +`ctx` [SafeLLamaContextHandle](./llama.native.safellamacontexthandle.md)
+ +`path_session` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +`tokens_out` [Int32[]](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +`n_token_capacity` [UInt64](https://docs.microsoft.com/en-us/dotnet/api/system.uint64)
+ +`n_token_count_out` [UInt64*](https://docs.microsoft.com/en-us/dotnet/api/system.uint64*)
+ +#### Returns + +[Boolean](https://docs.microsoft.com/en-us/dotnet/api/system.boolean)
+ +### **llama_save_session_file(SafeLLamaContextHandle, String, Int32[], UInt64)** + +Save session file + +```csharp +public static bool llama_save_session_file(SafeLLamaContextHandle ctx, string path_session, Int32[] tokens, ulong n_token_count) +``` + +#### Parameters + +`ctx` [SafeLLamaContextHandle](./llama.native.safellamacontexthandle.md)
+ +`path_session` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +`tokens` [Int32[]](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +`n_token_count` [UInt64](https://docs.microsoft.com/en-us/dotnet/api/system.uint64)
+ +#### Returns + +[Boolean](https://docs.microsoft.com/en-us/dotnet/api/system.boolean)
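+
+A hedged sketch of a save/load round trip (assuming `sessionTokens` holds the tokens evaluated so far; the file name and the buffer capacity are placeholders):
+
+```csharp
+bool saved = NativeApi.llama_save_session_file(
+    ctx, "session.bin", sessionTokens, (ulong)sessionTokens.Length);
+
+var buffer = new Int32[2048]; // must be large enough for the stored session
+ulong count = 0;
+bool loaded;
+unsafe
+{
+    loaded = NativeApi.llama_load_session_file(
+        ctx, "session.bin", buffer, (ulong)buffer.Length, &count);
+}
+// On success, the first (int)count entries of buffer are the session tokens.
+```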
+ +### **llama_eval(SafeLLamaContextHandle, Int32[], Int32, Int32, Int32)** + +Run the llama inference to obtain the logits and probabilities for the next token. + `tokens` + `n_tokens` is the provided batch of new tokens to process; + `n_past` is the number of tokens to reuse from previous eval calls. + +```csharp +public static int llama_eval(SafeLLamaContextHandle ctx, Int32[] tokens, int n_tokens, int n_past, int n_threads) +``` + +#### Parameters + +`ctx` [SafeLLamaContextHandle](./llama.native.safellamacontexthandle.md)
+ +`tokens` [Int32[]](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +`n_tokens` [Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +`n_past` [Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +`n_threads` [Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +#### Returns + +[Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+Returns 0 on success + +### **llama_eval_with_pointer(SafeLLamaContextHandle, Int32*, Int32, Int32, Int32)** + +```csharp +public static int llama_eval_with_pointer(SafeLLamaContextHandle ctx, Int32* tokens, int n_tokens, int n_past, int n_threads) +``` + +#### Parameters + +`ctx` [SafeLLamaContextHandle](./llama.native.safellamacontexthandle.md)
+ +`tokens` [Int32*](https://docs.microsoft.com/en-us/dotnet/api/system.int32*)
+ +`n_tokens` [Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +`n_past` [Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +`n_threads` [Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +#### Returns + +[Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +### **llama_tokenize(SafeLLamaContextHandle, String, Encoding, Int32[], Int32, Boolean)** + +Convert the provided text into tokens. + The `tokens` buffer must be large enough to hold the resulting tokens. + Returns the number of tokens on success, no more than `n_max_tokens`. + Returns a negative number on failure: the number of tokens that would have been returned. + +```csharp +public static int llama_tokenize(SafeLLamaContextHandle ctx, string text, Encoding encoding, Int32[] tokens, int n_max_tokens, bool add_bos) +``` + +#### Parameters + +`ctx` [SafeLLamaContextHandle](./llama.native.safellamacontexthandle.md)
+ +`text` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +`encoding` [Encoding](https://docs.microsoft.com/en-us/dotnet/api/system.text.encoding)
+ +`tokens` [Int32[]](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +`n_max_tokens` [Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +`add_bos` [Boolean](https://docs.microsoft.com/en-us/dotnet/api/system.boolean)
+ +#### Returns + +[Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
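+
+A hedged sketch of the two-pass pattern this contract suggests (requires `using System.Text;`; interpreting the negated return value as the required buffer size follows the description above):
+
+```csharp
+var tokens = new Int32[64];
+int n = NativeApi.llama_tokenize(ctx, "Hello, world!", Encoding.UTF8, tokens, tokens.Length, true);
+if (n < 0)
+{
+    // The buffer was too small; retry with the size reported by the failure.
+    tokens = new Int32[-n];
+    n = NativeApi.llama_tokenize(ctx, "Hello, world!", Encoding.UTF8, tokens, tokens.Length, true);
+}
+// tokens[0..n) now holds the prompt tokens.
+```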
+ +### **llama_tokenize_native(SafeLLamaContextHandle, SByte[], Int32[], Int32, Boolean)** + +```csharp +public static int llama_tokenize_native(SafeLLamaContextHandle ctx, SByte[] text, Int32[] tokens, int n_max_tokens, bool add_bos) +``` + +#### Parameters + +`ctx` [SafeLLamaContextHandle](./llama.native.safellamacontexthandle.md)
+ +`text` [SByte[]](https://docs.microsoft.com/en-us/dotnet/api/system.sbyte)
+ +`tokens` [Int32[]](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +`n_max_tokens` [Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +`add_bos` [Boolean](https://docs.microsoft.com/en-us/dotnet/api/system.boolean)
+ +#### Returns + +[Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +### **llama_n_vocab(SafeLLamaContextHandle)** + +```csharp +public static int llama_n_vocab(SafeLLamaContextHandle ctx) +``` + +#### Parameters + +`ctx` [SafeLLamaContextHandle](./llama.native.safellamacontexthandle.md)
+ +#### Returns + +[Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +### **llama_n_ctx(SafeLLamaContextHandle)** + +```csharp +public static int llama_n_ctx(SafeLLamaContextHandle ctx) +``` + +#### Parameters + +`ctx` [SafeLLamaContextHandle](./llama.native.safellamacontexthandle.md)
+ +#### Returns + +[Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +### **llama_n_embd(SafeLLamaContextHandle)** + +```csharp +public static int llama_n_embd(SafeLLamaContextHandle ctx) +``` + +#### Parameters + +`ctx` [SafeLLamaContextHandle](./llama.native.safellamacontexthandle.md)
+ +#### Returns + +[Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +### **llama_get_logits(SafeLLamaContextHandle)** + +Token logits obtained from the last call to llama_eval(). + The logits for the last token are stored in the last row. + They can be mutated in order to change the probabilities of the next token. + Rows: n_tokens. + Cols: n_vocab. + +```csharp +public static Single* llama_get_logits(SafeLLamaContextHandle ctx) +``` + +#### Parameters + +`ctx` [SafeLLamaContextHandle](./llama.native.safellamacontexthandle.md)
+ +#### Returns + +[Single*](https://docs.microsoft.com/en-us/dotnet/api/system.single*)
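+
+A hedged sketch of reading the logits directly, assuming the last `llama_eval` call computed logits only for the final token (the usual default), so the returned pointer addresses that row:
+
+```csharp
+int nVocab = NativeApi.llama_n_vocab(ctx);
+int best = 0;
+unsafe
+{
+    Single* logits = NativeApi.llama_get_logits(ctx);
+    for (int i = 1; i < nVocab; i++)      // manual greedy decoding
+        if (logits[i] > logits[best])
+            best = i;
+}
+```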
+ +### **llama_get_embeddings(SafeLLamaContextHandle)** + +Get the embeddings for the input + shape: [n_embd] (1-dimensional) + +```csharp +public static Single* llama_get_embeddings(SafeLLamaContextHandle ctx) +``` + +#### Parameters + +`ctx` [SafeLLamaContextHandle](./llama.native.safellamacontexthandle.md)
+ +#### Returns + +[Single*](https://docs.microsoft.com/en-us/dotnet/api/system.single*)
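+
+A hedged sketch of copying the embedding vector into managed memory (the context presumably has to be created with embeddings enabled in its parameters):
+
+```csharp
+int nEmbd = NativeApi.llama_n_embd(ctx);
+var embedding = new float[nEmbd];
+unsafe
+{
+    Single* src = NativeApi.llama_get_embeddings(ctx);
+    for (int i = 0; i < nEmbd; i++)
+        embedding[i] = src[i];
+}
+```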
+ +### **llama_token_to_str(SafeLLamaContextHandle, Int32)** + +Token Id -> String. Uses the vocabulary in the provided context + +```csharp +public static IntPtr llama_token_to_str(SafeLLamaContextHandle ctx, int token) +``` + +#### Parameters + +`ctx` [SafeLLamaContextHandle](./llama.native.safellamacontexthandle.md)
+ +`token` [Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +#### Returns + +[IntPtr](https://docs.microsoft.com/en-us/dotnet/api/system.intptr)
+Pointer to a string. + +### **llama_token_bos()** + +```csharp +public static int llama_token_bos() +``` + +#### Returns + +[Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +### **llama_token_eos()** + +```csharp +public static int llama_token_eos() +``` + +#### Returns + +[Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +### **llama_token_nl()** + +```csharp +public static int llama_token_nl() +``` + +#### Returns + +[Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
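+
+Putting the calls above together, a minimal greedy generation loop might look like the following sketch (requires `using System.Text;` and `using System.Runtime.InteropServices;`; `promptTokens` is assumed to come from `llama_tokenize`, the thread count and step limit are placeholders, and `Marshal.PtrToStringAnsi` is only an approximation for non-ASCII token text):
+
+```csharp
+int nPast = 0;
+NativeApi.llama_eval(ctx, promptTokens, promptTokens.Length, nPast, 4);
+nPast += promptTokens.Length;
+
+var sb = new StringBuilder();
+for (int step = 0; step < 64; step++)
+{
+    int nVocab = NativeApi.llama_n_vocab(ctx);
+    int next = 0;
+    unsafe
+    {
+        // Assumes logits were computed only for the final token (the default),
+        // so the returned pointer addresses that row directly.
+        Single* logits = NativeApi.llama_get_logits(ctx);
+        for (int i = 1; i < nVocab; i++)
+            if (logits[i] > logits[next]) next = i;
+    }
+    if (next == NativeApi.llama_token_eos())
+        break;
+
+    sb.Append(Marshal.PtrToStringAnsi(NativeApi.llama_token_to_str(ctx, next)));
+    NativeApi.llama_eval(ctx, new[] { next }, 1, nPast, 4);
+    nPast++;
+}
+Console.WriteLine(sb.ToString());
+```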
diff --git a/docs/xmldocs/llama.native.safellamacontexthandle.md b/docs/xmldocs/llama.native.safellamacontexthandle.md new file mode 100644 index 00000000..ea713984 --- /dev/null +++ b/docs/xmldocs/llama.native.safellamacontexthandle.md @@ -0,0 +1,56 @@ +# SafeLLamaContextHandle + +Namespace: LLama.Native + +```csharp +public class SafeLLamaContextHandle : SafeLLamaHandleBase, System.IDisposable +``` + +Inheritance [Object](https://docs.microsoft.com/en-us/dotnet/api/system.object) → [CriticalFinalizerObject](https://docs.microsoft.com/en-us/dotnet/api/system.runtime.constrainedexecution.criticalfinalizerobject) → [SafeHandle](https://docs.microsoft.com/en-us/dotnet/api/system.runtime.interopservices.safehandle) → [SafeLLamaHandleBase](./llama.native.safellamahandlebase.md) → [SafeLLamaContextHandle](./llama.native.safellamacontexthandle.md)
+Implements [IDisposable](https://docs.microsoft.com/en-us/dotnet/api/system.idisposable) + +## Properties + +### **IsInvalid** + +```csharp +public bool IsInvalid { get; } +``` + +#### Property Value + +[Boolean](https://docs.microsoft.com/en-us/dotnet/api/system.boolean)
+ +### **IsClosed** + +```csharp +public bool IsClosed { get; } +``` + +#### Property Value + +[Boolean](https://docs.microsoft.com/en-us/dotnet/api/system.boolean)
+ +## Constructors + +### **SafeLLamaContextHandle(IntPtr)** + +```csharp +public SafeLLamaContextHandle(IntPtr handle) +``` + +#### Parameters + +`handle` [IntPtr](https://docs.microsoft.com/en-us/dotnet/api/system.intptr)
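+
+A hedged usage sketch: wrapping the raw pointer returned by `llama_init_from_file` so the native context is released deterministically (the model path is a placeholder):
+
+```csharp
+IntPtr raw = NativeApi.llama_init_from_file(
+    "path/to/model.bin", NativeApi.llama_context_default_params());
+
+using (var handle = new SafeLLamaContextHandle(raw))
+{
+    // Pass `handle` to the NativeApi methods that take a SafeLLamaContextHandle.
+}   // Dispose() triggers ReleaseHandle(), freeing the native context.
+```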
+ +## Methods + +### **ReleaseHandle()** + +```csharp +protected bool ReleaseHandle() +``` + +#### Returns + +[Boolean](https://docs.microsoft.com/en-us/dotnet/api/system.boolean)
diff --git a/docs/xmldocs/llama.native.safellamahandlebase.md b/docs/xmldocs/llama.native.safellamahandlebase.md new file mode 100644 index 00000000..1c9f8ef8 --- /dev/null +++ b/docs/xmldocs/llama.native.safellamahandlebase.md @@ -0,0 +1,44 @@ +# SafeLLamaHandleBase + +Namespace: LLama.Native + +```csharp +public abstract class SafeLLamaHandleBase : System.Runtime.InteropServices.SafeHandle, System.IDisposable +``` + +Inheritance [Object](https://docs.microsoft.com/en-us/dotnet/api/system.object) → [CriticalFinalizerObject](https://docs.microsoft.com/en-us/dotnet/api/system.runtime.constrainedexecution.criticalfinalizerobject) → [SafeHandle](https://docs.microsoft.com/en-us/dotnet/api/system.runtime.interopservices.safehandle) → [SafeLLamaHandleBase](./llama.native.safellamahandlebase.md)
+Implements [IDisposable](https://docs.microsoft.com/en-us/dotnet/api/system.idisposable) + +## Properties + +### **IsInvalid** + +```csharp +public bool IsInvalid { get; } +``` + +#### Property Value + +[Boolean](https://docs.microsoft.com/en-us/dotnet/api/system.boolean)
+ +### **IsClosed** + +```csharp +public bool IsClosed { get; } +``` + +#### Property Value + +[Boolean](https://docs.microsoft.com/en-us/dotnet/api/system.boolean)
+ +## Methods + +### **ToString()** + +```csharp +public string ToString() +``` + +#### Returns + +[String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
diff --git a/docs/xmldocs/llama.oldversion.chatcompletion.md b/docs/xmldocs/llama.oldversion.chatcompletion.md new file mode 100644 index 00000000..af1dd253 --- /dev/null +++ b/docs/xmldocs/llama.oldversion.chatcompletion.md @@ -0,0 +1,188 @@ +# ChatCompletion + +Namespace: LLama.OldVersion + +```csharp +public class ChatCompletion : System.IEquatable`1[[LLama.OldVersion.ChatCompletion, LLamaSharp, Version=0.4.0.0, Culture=neutral, PublicKeyToken=null]] +``` + +Inheritance [Object](https://docs.microsoft.com/en-us/dotnet/api/system.object) → [ChatCompletion](./llama.oldversion.chatcompletion.md)
+Implements [IEquatable<ChatCompletion>](https://docs.microsoft.com/en-us/dotnet/api/system.iequatable-1) + +## Properties + +### **Id** + +```csharp +public string Id { get; set; } +``` + +#### Property Value + +[String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +### **Object** + +```csharp +public string Object { get; set; } +``` + +#### Property Value + +[String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +### **Created** + +```csharp +public int Created { get; set; } +``` + +#### Property Value + +[Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +### **Model** + +```csharp +public string Model { get; set; } +``` + +#### Property Value + +[String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +### **Choices** + +```csharp +public ChatCompletionChoice[] Choices { get; set; } +``` + +#### Property Value + +[ChatCompletionChoice[]](./llama.oldversion.chatcompletionchoice.md)
+ +### **Usage** + +```csharp +public CompletionUsage Usage { get; set; } +``` + +#### Property Value + +[CompletionUsage](./llama.oldversion.completionusage.md)
+ +## Constructors + +### **ChatCompletion(String, String, Int32, String, ChatCompletionChoice[], CompletionUsage)** + +```csharp +public ChatCompletion(string Id, string Object, int Created, string Model, ChatCompletionChoice[] Choices, CompletionUsage Usage) +``` + +#### Parameters + +`Id` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +`Object` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +`Created` [Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +`Model` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +`Choices` [ChatCompletionChoice[]](./llama.oldversion.chatcompletionchoice.md)
+ +`Usage` [CompletionUsage](./llama.oldversion.completionusage.md)
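+
+A hedged construction sketch (the enum member, the ids, and the `null` usage value are illustrative placeholders, not prescribed values):
+
+```csharp
+var message = new ChatCompletionMessage(default(ChatRole), "Hello!", null);
+var choice  = new ChatCompletionChoice(0, message, "stop");
+var completion = new ChatCompletion(
+    "cmpl-1", "chat.completion", 0, "llama", new[] { choice }, null);
+```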
+ +## Methods + +### **ToString()** + +```csharp +public string ToString() +``` + +#### Returns + +[String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +### **PrintMembers(StringBuilder)** + +```csharp +protected bool PrintMembers(StringBuilder builder) +``` + +#### Parameters + +`builder` [StringBuilder](https://docs.microsoft.com/en-us/dotnet/api/system.text.stringbuilder)
+ +#### Returns + +[Boolean](https://docs.microsoft.com/en-us/dotnet/api/system.boolean)
+ +### **GetHashCode()** + +```csharp +public int GetHashCode() +``` + +#### Returns + +[Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +### **Equals(Object)** + +```csharp +public bool Equals(object obj) +``` + +#### Parameters + +`obj` [Object](https://docs.microsoft.com/en-us/dotnet/api/system.object)
+ +#### Returns + +[Boolean](https://docs.microsoft.com/en-us/dotnet/api/system.boolean)
+ +### **Equals(ChatCompletion)** + +```csharp +public bool Equals(ChatCompletion other) +``` + +#### Parameters + +`other` [ChatCompletion](./llama.oldversion.chatcompletion.md)
+ +#### Returns + +[Boolean](https://docs.microsoft.com/en-us/dotnet/api/system.boolean)
+ +### **<Clone>$()** + +```csharp +public ChatCompletion $() +``` + +#### Returns + +[ChatCompletion](./llama.oldversion.chatcompletion.md)
+ +### **Deconstruct(String&, String&, Int32&, String&, ChatCompletionChoice[]&, CompletionUsage&)** + +```csharp +public void Deconstruct(String& Id, String& Object, Int32& Created, String& Model, ChatCompletionChoice[]& Choices, CompletionUsage& Usage) +``` + +#### Parameters + +`Id` [String&](https://docs.microsoft.com/en-us/dotnet/api/system.string&)
+ +`Object` [String&](https://docs.microsoft.com/en-us/dotnet/api/system.string&)
+ +`Created` [Int32&](https://docs.microsoft.com/en-us/dotnet/api/system.int32&)
+ +`Model` [String&](https://docs.microsoft.com/en-us/dotnet/api/system.string&)
+ +`Choices` [ChatCompletionChoice[]&](./llama.oldversion.chatcompletionchoice&.md)
+ +`Usage` [CompletionUsage&](./llama.oldversion.completionusage&.md)
diff --git a/docs/xmldocs/llama.oldversion.chatcompletionchoice.md b/docs/xmldocs/llama.oldversion.chatcompletionchoice.md new file mode 100644 index 00000000..c5f80d7b --- /dev/null +++ b/docs/xmldocs/llama.oldversion.chatcompletionchoice.md @@ -0,0 +1,146 @@ +# ChatCompletionChoice + +Namespace: LLama.OldVersion + +```csharp +public class ChatCompletionChoice : System.IEquatable`1[[LLama.OldVersion.ChatCompletionChoice, LLamaSharp, Version=0.4.0.0, Culture=neutral, PublicKeyToken=null]] +``` + +Inheritance [Object](https://docs.microsoft.com/en-us/dotnet/api/system.object) → [ChatCompletionChoice](./llama.oldversion.chatcompletionchoice.md)
+Implements [IEquatable<ChatCompletionChoice>](https://docs.microsoft.com/en-us/dotnet/api/system.iequatable-1) + +## Properties + +### **Index** + +```csharp +public int Index { get; set; } +``` + +#### Property Value + +[Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +### **Message** + +```csharp +public ChatCompletionMessage Message { get; set; } +``` + +#### Property Value + +[ChatCompletionMessage](./llama.oldversion.chatcompletionmessage.md)
+ +### **FinishReason** + +```csharp +public string FinishReason { get; set; } +``` + +#### Property Value + +[String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +## Constructors + +### **ChatCompletionChoice(Int32, ChatCompletionMessage, String)** + +```csharp +public ChatCompletionChoice(int Index, ChatCompletionMessage Message, string FinishReason) +``` + +#### Parameters + +`Index` [Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +`Message` [ChatCompletionMessage](./llama.oldversion.chatcompletionmessage.md)
+ +`FinishReason` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +## Methods + +### **ToString()** + +```csharp +public string ToString() +``` + +#### Returns + +[String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +### **PrintMembers(StringBuilder)** + +```csharp +protected bool PrintMembers(StringBuilder builder) +``` + +#### Parameters + +`builder` [StringBuilder](https://docs.microsoft.com/en-us/dotnet/api/system.text.stringbuilder)
+ +#### Returns + +[Boolean](https://docs.microsoft.com/en-us/dotnet/api/system.boolean)
+ +### **GetHashCode()** + +```csharp +public int GetHashCode() +``` + +#### Returns + +[Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +### **Equals(Object)** + +```csharp +public bool Equals(object obj) +``` + +#### Parameters + +`obj` [Object](https://docs.microsoft.com/en-us/dotnet/api/system.object)
+ +#### Returns + +[Boolean](https://docs.microsoft.com/en-us/dotnet/api/system.boolean)
+ +### **Equals(ChatCompletionChoice)** + +```csharp +public bool Equals(ChatCompletionChoice other) +``` + +#### Parameters + +`other` [ChatCompletionChoice](./llama.oldversion.chatcompletionchoice.md)
+ +#### Returns + +[Boolean](https://docs.microsoft.com/en-us/dotnet/api/system.boolean)
+ +### **<Clone>$()** + +```csharp +public ChatCompletionChoice $() +``` + +#### Returns + +[ChatCompletionChoice](./llama.oldversion.chatcompletionchoice.md)
+ +### **Deconstruct(Int32&, ChatCompletionMessage&, String&)** + +```csharp +public void Deconstruct(Int32& Index, ChatCompletionMessage& Message, String& FinishReason) +``` + +#### Parameters + +`Index` [Int32&](https://docs.microsoft.com/en-us/dotnet/api/system.int32&)
+ +`Message` [ChatCompletionMessage&](./llama.oldversion.chatcompletionmessage&.md)
+ +`FinishReason` [String&](https://docs.microsoft.com/en-us/dotnet/api/system.string&)
diff --git a/docs/xmldocs/llama.oldversion.chatcompletionchunk.md b/docs/xmldocs/llama.oldversion.chatcompletionchunk.md new file mode 100644 index 00000000..a15a033e --- /dev/null +++ b/docs/xmldocs/llama.oldversion.chatcompletionchunk.md @@ -0,0 +1,174 @@ +# ChatCompletionChunk + +Namespace: LLama.OldVersion + +```csharp +public class ChatCompletionChunk : System.IEquatable`1[[LLama.OldVersion.ChatCompletionChunk, LLamaSharp, Version=0.4.0.0, Culture=neutral, PublicKeyToken=null]] +``` + +Inheritance [Object](https://docs.microsoft.com/en-us/dotnet/api/system.object) → [ChatCompletionChunk](./llama.oldversion.chatcompletionchunk.md)
+Implements [IEquatable<ChatCompletionChunk>](https://docs.microsoft.com/en-us/dotnet/api/system.iequatable-1) + +## Properties + +### **Id** + +```csharp +public string Id { get; set; } +``` + +#### Property Value + +[String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +### **Model** + +```csharp +public string Model { get; set; } +``` + +#### Property Value + +[String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +### **Object** + +```csharp +public string Object { get; set; } +``` + +#### Property Value + +[String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +### **Created** + +```csharp +public int Created { get; set; } +``` + +#### Property Value + +[Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +### **Choices** + +```csharp +public ChatCompletionChunkChoice[] Choices { get; set; } +``` + +#### Property Value + +[ChatCompletionChunkChoice[]](./llama.oldversion.chatcompletionchunkchoice.md)
+ +## Constructors + +### **ChatCompletionChunk(String, String, String, Int32, ChatCompletionChunkChoice[])** + +```csharp +public ChatCompletionChunk(string Id, string Model, string Object, int Created, ChatCompletionChunkChoice[] Choices) +``` + +#### Parameters + +`Id` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +`Model` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +`Object` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +`Created` [Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +`Choices` [ChatCompletionChunkChoice[]](./llama.oldversion.chatcompletionchunkchoice.md)
+ +## Methods + +### **ToString()** + +```csharp +public string ToString() +``` + +#### Returns + +[String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +### **PrintMembers(StringBuilder)** + +```csharp +protected bool PrintMembers(StringBuilder builder) +``` + +#### Parameters + +`builder` [StringBuilder](https://docs.microsoft.com/en-us/dotnet/api/system.text.stringbuilder)
+ +#### Returns + +[Boolean](https://docs.microsoft.com/en-us/dotnet/api/system.boolean)
+ +### **GetHashCode()** + +```csharp +public int GetHashCode() +``` + +#### Returns + +[Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +### **Equals(Object)** + +```csharp +public bool Equals(object obj) +``` + +#### Parameters + +`obj` [Object](https://docs.microsoft.com/en-us/dotnet/api/system.object)
+ +#### Returns + +[Boolean](https://docs.microsoft.com/en-us/dotnet/api/system.boolean)
+ +### **Equals(ChatCompletionChunk)** + +```csharp +public bool Equals(ChatCompletionChunk other) +``` + +#### Parameters + +`other` [ChatCompletionChunk](./llama.oldversion.chatcompletionchunk.md)
+ +#### Returns + +[Boolean](https://docs.microsoft.com/en-us/dotnet/api/system.boolean)
+ +### **<Clone>$()** + +```csharp +public ChatCompletionChunk $() +``` + +#### Returns + +[ChatCompletionChunk](./llama.oldversion.chatcompletionchunk.md)
+ +### **Deconstruct(String&, String&, String&, Int32&, ChatCompletionChunkChoice[]&)** + +```csharp +public void Deconstruct(String& Id, String& Model, String& Object, Int32& Created, ChatCompletionChunkChoice[]& Choices) +``` + +#### Parameters + +`Id` [String&](https://docs.microsoft.com/en-us/dotnet/api/system.string&)
+ +`Model` [String&](https://docs.microsoft.com/en-us/dotnet/api/system.string&)
+ +`Object` [String&](https://docs.microsoft.com/en-us/dotnet/api/system.string&)
+ +`Created` [Int32&](https://docs.microsoft.com/en-us/dotnet/api/system.int32&)
+ +`Choices` [ChatCompletionChunkChoice[]&](./llama.oldversion.chatcompletionchunkchoice&.md)
diff --git a/docs/xmldocs/llama.oldversion.chatcompletionchunkchoice.md b/docs/xmldocs/llama.oldversion.chatcompletionchunkchoice.md new file mode 100644 index 00000000..16e2954e --- /dev/null +++ b/docs/xmldocs/llama.oldversion.chatcompletionchunkchoice.md @@ -0,0 +1,146 @@ +# ChatCompletionChunkChoice + +Namespace: LLama.OldVersion + +```csharp +public class ChatCompletionChunkChoice : System.IEquatable`1[[LLama.OldVersion.ChatCompletionChunkChoice, LLamaSharp, Version=0.4.0.0, Culture=neutral, PublicKeyToken=null]] +``` + +Inheritance [Object](https://docs.microsoft.com/en-us/dotnet/api/system.object) → [ChatCompletionChunkChoice](./llama.oldversion.chatcompletionchunkchoice.md)
+Implements [IEquatable<ChatCompletionChunkChoice>](https://docs.microsoft.com/en-us/dotnet/api/system.iequatable-1) + +## Properties + +### **Index** + +```csharp +public int Index { get; set; } +``` + +#### Property Value + +[Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +### **Delta** + +```csharp +public ChatCompletionChunkDelta Delta { get; set; } +``` + +#### Property Value + +[ChatCompletionChunkDelta](./llama.oldversion.chatcompletionchunkdelta.md)
+ +### **FinishReason** + +```csharp +public string FinishReason { get; set; } +``` + +#### Property Value + +[String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +## Constructors + +### **ChatCompletionChunkChoice(Int32, ChatCompletionChunkDelta, String)** + +```csharp +public ChatCompletionChunkChoice(int Index, ChatCompletionChunkDelta Delta, string FinishReason) +``` + +#### Parameters + +`Index` [Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +`Delta` [ChatCompletionChunkDelta](./llama.oldversion.chatcompletionchunkdelta.md)
+ +`FinishReason` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +## Methods + +### **ToString()** + +```csharp +public string ToString() +``` + +#### Returns + +[String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +### **PrintMembers(StringBuilder)** + +```csharp +protected bool PrintMembers(StringBuilder builder) +``` + +#### Parameters + +`builder` [StringBuilder](https://docs.microsoft.com/en-us/dotnet/api/system.text.stringbuilder)
+ +#### Returns + +[Boolean](https://docs.microsoft.com/en-us/dotnet/api/system.boolean)
+ +### **GetHashCode()** + +```csharp +public int GetHashCode() +``` + +#### Returns + +[Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +### **Equals(Object)** + +```csharp +public bool Equals(object obj) +``` + +#### Parameters + +`obj` [Object](https://docs.microsoft.com/en-us/dotnet/api/system.object)
+ +#### Returns + +[Boolean](https://docs.microsoft.com/en-us/dotnet/api/system.boolean)
+ +### **Equals(ChatCompletionChunkChoice)** + +```csharp +public bool Equals(ChatCompletionChunkChoice other) +``` + +#### Parameters + +`other` [ChatCompletionChunkChoice](./llama.oldversion.chatcompletionchunkchoice.md)
+ +#### Returns + +[Boolean](https://docs.microsoft.com/en-us/dotnet/api/system.boolean)
+ +### **<Clone>$()** + +```csharp +public ChatCompletionChunkChoice $() +``` + +#### Returns + +[ChatCompletionChunkChoice](./llama.oldversion.chatcompletionchunkchoice.md)
+ +### **Deconstruct(Int32&, ChatCompletionChunkDelta&, String&)** + +```csharp +public void Deconstruct(Int32& Index, ChatCompletionChunkDelta& Delta, String& FinishReason) +``` + +#### Parameters + +`Index` [Int32&](https://docs.microsoft.com/en-us/dotnet/api/system.int32&)
+ +`Delta` [ChatCompletionChunkDelta&](./llama.oldversion.chatcompletionchunkdelta&.md)
+ +`FinishReason` [String&](https://docs.microsoft.com/en-us/dotnet/api/system.string&)
diff --git a/docs/xmldocs/llama.oldversion.chatcompletionchunkdelta.md b/docs/xmldocs/llama.oldversion.chatcompletionchunkdelta.md new file mode 100644 index 00000000..a924879d --- /dev/null +++ b/docs/xmldocs/llama.oldversion.chatcompletionchunkdelta.md @@ -0,0 +1,132 @@ +# ChatCompletionChunkDelta + +Namespace: LLama.OldVersion + +```csharp +public class ChatCompletionChunkDelta : System.IEquatable`1[[LLama.OldVersion.ChatCompletionChunkDelta, LLamaSharp, Version=0.4.0.0, Culture=neutral, PublicKeyToken=null]] +``` + +Inheritance [Object](https://docs.microsoft.com/en-us/dotnet/api/system.object) → [ChatCompletionChunkDelta](./llama.oldversion.chatcompletionchunkdelta.md)
+Implements [IEquatable<ChatCompletionChunkDelta>](https://docs.microsoft.com/en-us/dotnet/api/system.iequatable-1) + +## Properties + +### **Role** + +```csharp +public string Role { get; set; } +``` + +#### Property Value + +[String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +### **Content** + +```csharp +public string Content { get; set; } +``` + +#### Property Value + +[String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +## Constructors + +### **ChatCompletionChunkDelta(String, String)** + +```csharp +public ChatCompletionChunkDelta(string Role, string Content) +``` + +#### Parameters + +`Role` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +`Content` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +## Methods + +### **ToString()** + +```csharp +public string ToString() +``` + +#### Returns + +[String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +### **PrintMembers(StringBuilder)** + +```csharp +protected bool PrintMembers(StringBuilder builder) +``` + +#### Parameters + +`builder` [StringBuilder](https://docs.microsoft.com/en-us/dotnet/api/system.text.stringbuilder)
+ +#### Returns + +[Boolean](https://docs.microsoft.com/en-us/dotnet/api/system.boolean)
+ +### **GetHashCode()** + +```csharp +public int GetHashCode() +``` + +#### Returns + +[Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +### **Equals(Object)** + +```csharp +public bool Equals(object obj) +``` + +#### Parameters + +`obj` [Object](https://docs.microsoft.com/en-us/dotnet/api/system.object)
+ +#### Returns + +[Boolean](https://docs.microsoft.com/en-us/dotnet/api/system.boolean)
+ +### **Equals(ChatCompletionChunkDelta)** + +```csharp +public bool Equals(ChatCompletionChunkDelta other) +``` + +#### Parameters + +`other` [ChatCompletionChunkDelta](./llama.oldversion.chatcompletionchunkdelta.md)
+ +#### Returns + +[Boolean](https://docs.microsoft.com/en-us/dotnet/api/system.boolean)
+ +### **<Clone>$()** + +```csharp +public ChatCompletionChunkDelta $() +``` + +#### Returns + +[ChatCompletionChunkDelta](./llama.oldversion.chatcompletionchunkdelta.md)
+ +### **Deconstruct(String&, String&)** + +```csharp +public void Deconstruct(String& Role, String& Content) +``` + +#### Parameters + +`Role` [String&](https://docs.microsoft.com/en-us/dotnet/api/system.string&)
+ +`Content` [String&](https://docs.microsoft.com/en-us/dotnet/api/system.string&)
diff --git a/docs/xmldocs/llama.oldversion.chatcompletionmessage.md b/docs/xmldocs/llama.oldversion.chatcompletionmessage.md new file mode 100644 index 00000000..2856c180 --- /dev/null +++ b/docs/xmldocs/llama.oldversion.chatcompletionmessage.md @@ -0,0 +1,146 @@ +# ChatCompletionMessage + +Namespace: LLama.OldVersion + +```csharp +public class ChatCompletionMessage : System.IEquatable`1[[LLama.OldVersion.ChatCompletionMessage, LLamaSharp, Version=0.4.0.0, Culture=neutral, PublicKeyToken=null]] +``` + +Inheritance [Object](https://docs.microsoft.com/en-us/dotnet/api/system.object) → [ChatCompletionMessage](./llama.oldversion.chatcompletionmessage.md)
+Implements [IEquatable<ChatCompletionMessage>](https://docs.microsoft.com/en-us/dotnet/api/system.iequatable-1) + +## Properties + +### **Role** + +```csharp +public ChatRole Role { get; set; } +``` + +#### Property Value + +[ChatRole](./llama.oldversion.chatrole.md)
+ +### **Content** + +```csharp +public string Content { get; set; } +``` + +#### Property Value + +[String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +### **Name** + +```csharp +public string Name { get; set; } +``` + +#### Property Value + +[String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +## Constructors + +### **ChatCompletionMessage(ChatRole, String, String)** + +```csharp +public ChatCompletionMessage(ChatRole Role, string Content, string Name) +``` + +#### Parameters + +`Role` [ChatRole](./llama.oldversion.chatrole.md)
+ +`Content` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +`Name` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +## Methods + +### **ToString()** + +```csharp +public string ToString() +``` + +#### Returns + +[String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +### **PrintMembers(StringBuilder)** + +```csharp +protected bool PrintMembers(StringBuilder builder) +``` + +#### Parameters + +`builder` [StringBuilder](https://docs.microsoft.com/en-us/dotnet/api/system.text.stringbuilder)
+ +#### Returns + +[Boolean](https://docs.microsoft.com/en-us/dotnet/api/system.boolean)
+ +### **GetHashCode()** + +```csharp +public int GetHashCode() +``` + +#### Returns + +[Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +### **Equals(Object)** + +```csharp +public bool Equals(object obj) +``` + +#### Parameters + +`obj` [Object](https://docs.microsoft.com/en-us/dotnet/api/system.object)
+ +#### Returns + +[Boolean](https://docs.microsoft.com/en-us/dotnet/api/system.boolean)
+ +### **Equals(ChatCompletionMessage)** + +```csharp +public bool Equals(ChatCompletionMessage other) +``` + +#### Parameters + +`other` [ChatCompletionMessage](./llama.oldversion.chatcompletionmessage.md)
+ +#### Returns + +[Boolean](https://docs.microsoft.com/en-us/dotnet/api/system.boolean)
+ +### **<Clone>$()** + +```csharp +public ChatCompletionMessage $() +``` + +#### Returns + +[ChatCompletionMessage](./llama.oldversion.chatcompletionmessage.md)
+ +### **Deconstruct(ChatRole&, String&, String&)** + +```csharp +public void Deconstruct(ChatRole& Role, String& Content, String& Name) +``` + +#### Parameters + +`Role` [ChatRole&](./llama.oldversion.chatrole&.md)
+ +`Content` [String&](https://docs.microsoft.com/en-us/dotnet/api/system.string&)
+ +`Name` [String&](https://docs.microsoft.com/en-us/dotnet/api/system.string&)
diff --git a/docs/xmldocs/llama.oldversion.chatmessagerecord.md b/docs/xmldocs/llama.oldversion.chatmessagerecord.md new file mode 100644 index 00000000..8722f4bd --- /dev/null +++ b/docs/xmldocs/llama.oldversion.chatmessagerecord.md @@ -0,0 +1,132 @@ +# ChatMessageRecord + +Namespace: LLama.OldVersion + +```csharp +public class ChatMessageRecord : System.IEquatable`1[[LLama.OldVersion.ChatMessageRecord, LLamaSharp, Version=0.4.0.0, Culture=neutral, PublicKeyToken=null]] +``` + +Inheritance [Object](https://docs.microsoft.com/en-us/dotnet/api/system.object) → [ChatMessageRecord](./llama.oldversion.chatmessagerecord.md)
+Implements [IEquatable<ChatMessageRecord>](https://docs.microsoft.com/en-us/dotnet/api/system.iequatable-1) + +## Properties + +### **Message** + +```csharp +public ChatCompletionMessage Message { get; set; } +``` + +#### Property Value + +[ChatCompletionMessage](./llama.oldversion.chatcompletionmessage.md)
+ +### **Time** + +```csharp +public DateTime Time { get; set; } +``` + +#### Property Value + +[DateTime](https://docs.microsoft.com/en-us/dotnet/api/system.datetime)
+ +## Constructors + +### **ChatMessageRecord(ChatCompletionMessage, DateTime)** + +```csharp +public ChatMessageRecord(ChatCompletionMessage Message, DateTime Time) +``` + +#### Parameters + +`Message` [ChatCompletionMessage](./llama.oldversion.chatcompletionmessage.md)
+ +`Time` [DateTime](https://docs.microsoft.com/en-us/dotnet/api/system.datetime)
+ +## Methods + +### **ToString()** + +```csharp +public string ToString() +``` + +#### Returns + +[String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +### **PrintMembers(StringBuilder)** + +```csharp +protected bool PrintMembers(StringBuilder builder) +``` + +#### Parameters + +`builder` [StringBuilder](https://docs.microsoft.com/en-us/dotnet/api/system.text.stringbuilder)
+ +#### Returns + +[Boolean](https://docs.microsoft.com/en-us/dotnet/api/system.boolean)
+ +### **GetHashCode()** + +```csharp +public int GetHashCode() +``` + +#### Returns + +[Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +### **Equals(Object)** + +```csharp +public bool Equals(object obj) +``` + +#### Parameters + +`obj` [Object](https://docs.microsoft.com/en-us/dotnet/api/system.object)
+ +#### Returns + +[Boolean](https://docs.microsoft.com/en-us/dotnet/api/system.boolean)
+ +### **Equals(ChatMessageRecord)** + +```csharp +public bool Equals(ChatMessageRecord other) +``` + +#### Parameters + +`other` [ChatMessageRecord](./llama.oldversion.chatmessagerecord.md)
+ +#### Returns + +[Boolean](https://docs.microsoft.com/en-us/dotnet/api/system.boolean)
+ +### **<Clone>$()** + +```csharp +public ChatMessageRecord $() +``` + +#### Returns + +[ChatMessageRecord](./llama.oldversion.chatmessagerecord.md)
+ +### **Deconstruct(ChatCompletionMessage&, DateTime&)** + +```csharp +public void Deconstruct(ChatCompletionMessage& Message, DateTime& Time) +``` + +#### Parameters + +`Message` [ChatCompletionMessage&](./llama.oldversion.chatcompletionmessage&.md)
+ +`Time` [DateTime&](https://docs.microsoft.com/en-us/dotnet/api/system.datetime&)
diff --git a/docs/xmldocs/llama.oldversion.chatrole.md b/docs/xmldocs/llama.oldversion.chatrole.md new file mode 100644 index 00000000..469f42dc --- /dev/null +++ b/docs/xmldocs/llama.oldversion.chatrole.md @@ -0,0 +1,15 @@ +# ChatRole + +Namespace: LLama.OldVersion + +```csharp +public enum ChatRole +``` + +Inheritance [Object](https://docs.microsoft.com/en-us/dotnet/api/system.object) → [ValueType](https://docs.microsoft.com/en-us/dotnet/api/system.valuetype) → [Enum](https://docs.microsoft.com/en-us/dotnet/api/system.enum) → [ChatRole](./llama.oldversion.chatrole.md)
+Implements [IComparable](https://docs.microsoft.com/en-us/dotnet/api/system.icomparable), [IFormattable](https://docs.microsoft.com/en-us/dotnet/api/system.iformattable), [IConvertible](https://docs.microsoft.com/en-us/dotnet/api/system.iconvertible) + +## Fields + +| Name | Value | Description | +| --- | --: | --- | diff --git a/docs/xmldocs/llama.oldversion.chatsession-1.md b/docs/xmldocs/llama.oldversion.chatsession-1.md new file mode 100644 index 00000000..4fcbeebf --- /dev/null +++ b/docs/xmldocs/llama.oldversion.chatsession-1.md @@ -0,0 +1,93 @@ +# ChatSession<T> + +Namespace: LLama.OldVersion + +```csharp +public class ChatSession +``` + +#### Type Parameters + +`T`
+ +Inheritance [Object](https://docs.microsoft.com/en-us/dotnet/api/system.object) → [ChatSession<T>](./llama.oldversion.chatsession-1.md) + +## Constructors + +### **ChatSession(T)** + +```csharp +public ChatSession(T model) +``` + +#### Parameters + +`model` T
+ +## Methods + +### **Chat(String, String, String)** + +```csharp +public IEnumerable Chat(string text, string prompt, string encoding) +``` + +#### Parameters + +`text` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +`prompt` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +`encoding` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +#### Returns + +[IEnumerable<String>](https://docs.microsoft.com/en-us/dotnet/api/system.collections.generic.ienumerable-1)
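+
+A hedged usage sketch of this class with the fluent configuration methods documented below (the model value, prompt file, and encoding are placeholders, and passing `null` for the in-call prompt is an assumption):
+
+```csharp
+var session = new ChatSession<LLamaModel>(model)
+    .WithPromptFile("prompt.txt", "UTF-8")
+    .WithAntiprompt(new[] { "User:" });
+
+foreach (var output in session.Chat("Hello, what can you do?", null, "UTF-8"))
+{
+    Console.Write(output);
+}
+```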
+ +### **WithPrompt(String, String)** + +```csharp +public ChatSession WithPrompt(string prompt, string encoding) +``` + +#### Parameters + +`prompt` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +`encoding` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +#### Returns + +[ChatSession<T>](./llama.oldversion.chatsession-1.md)
+ +### **WithPromptFile(String, String)** + +```csharp +public ChatSession WithPromptFile(string promptFilename, string encoding) +``` + +#### Parameters + +`promptFilename` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +`encoding` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +#### Returns + +[ChatSession<T>](./llama.oldversion.chatsession-1.md)
+ +### **WithAntiprompt(String[])** + +Sets the keywords (antiprompts) used to split the chat AI's output and detect where it should be cut off. + +```csharp +public ChatSession WithAntiprompt(String[] antiprompt) +``` + +#### Parameters + +`antiprompt` [String[]](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +#### Returns + +[ChatSession<T>](./llama.oldversion.chatsession-1.md)
diff --git a/docs/xmldocs/llama.oldversion.completion.md b/docs/xmldocs/llama.oldversion.completion.md new file mode 100644 index 00000000..39765402 --- /dev/null +++ b/docs/xmldocs/llama.oldversion.completion.md @@ -0,0 +1,188 @@ +# Completion + +Namespace: LLama.OldVersion + +```csharp +public class Completion : System.IEquatable`1[[LLama.OldVersion.Completion, LLamaSharp, Version=0.4.0.0, Culture=neutral, PublicKeyToken=null]] +``` + +Inheritance [Object](https://docs.microsoft.com/en-us/dotnet/api/system.object) → [Completion](./llama.oldversion.completion.md)
+Implements [IEquatable<Completion>](https://docs.microsoft.com/en-us/dotnet/api/system.iequatable-1) + +## Properties + +### **Id** + +```csharp +public string Id { get; set; } +``` + +#### Property Value + +[String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +### **Object** + +```csharp +public string Object { get; set; } +``` + +#### Property Value + +[String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +### **Created** + +```csharp +public int Created { get; set; } +``` + +#### Property Value + +[Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +### **Model** + +```csharp +public string Model { get; set; } +``` + +#### Property Value + +[String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +### **Choices** + +```csharp +public CompletionChoice[] Choices { get; set; } +``` + +#### Property Value + +[CompletionChoice[]](./llama.oldversion.completionchoice.md)
+ +### **Usage** + +```csharp +public CompletionUsage Usage { get; set; } +``` + +#### Property Value + +[CompletionUsage](./llama.oldversion.completionusage.md)
+ +## Constructors + +### **Completion(String, String, Int32, String, CompletionChoice[], CompletionUsage)** + +```csharp +public Completion(string Id, string Object, int Created, string Model, CompletionChoice[] Choices, CompletionUsage Usage) +``` + +#### Parameters + +`Id` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +`Object` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +`Created` [Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +`Model` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +`Choices` [CompletionChoice[]](./llama.oldversion.completionchoice.md)
+ +`Usage` [CompletionUsage](./llama.oldversion.completionusage.md)
+ +## Methods + +### **ToString()** + +```csharp +public string ToString() +``` + +#### Returns + +[String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +### **PrintMembers(StringBuilder)** + +```csharp +protected bool PrintMembers(StringBuilder builder) +``` + +#### Parameters + +`builder` [StringBuilder](https://docs.microsoft.com/en-us/dotnet/api/system.text.stringbuilder)
+ +#### Returns + +[Boolean](https://docs.microsoft.com/en-us/dotnet/api/system.boolean)
+ +### **GetHashCode()** + +```csharp +public int GetHashCode() +``` + +#### Returns + +[Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +### **Equals(Object)** + +```csharp +public bool Equals(object obj) +``` + +#### Parameters + +`obj` [Object](https://docs.microsoft.com/en-us/dotnet/api/system.object)
+ +#### Returns + +[Boolean](https://docs.microsoft.com/en-us/dotnet/api/system.boolean)
+ +### **Equals(Completion)** + +```csharp +public bool Equals(Completion other) +``` + +#### Parameters + +`other` [Completion](./llama.oldversion.completion.md)
+ +#### Returns + +[Boolean](https://docs.microsoft.com/en-us/dotnet/api/system.boolean)
+ +### **<Clone>$()** + +```csharp +public Completion $() +``` + +#### Returns + +[Completion](./llama.oldversion.completion.md)
+ +### **Deconstruct(String&, String&, Int32&, String&, CompletionChoice[]&, CompletionUsage&)** + +```csharp +public void Deconstruct(String& Id, String& Object, Int32& Created, String& Model, CompletionChoice[]& Choices, CompletionUsage& Usage) +``` + +#### Parameters + +`Id` [String&](https://docs.microsoft.com/en-us/dotnet/api/system.string&)
+ +`Object` [String&](https://docs.microsoft.com/en-us/dotnet/api/system.string&)
+ +`Created` [Int32&](https://docs.microsoft.com/en-us/dotnet/api/system.int32&)
+ +`Model` [String&](https://docs.microsoft.com/en-us/dotnet/api/system.string&)
+ +`Choices` [CompletionChoice[]&](./llama.oldversion.completionchoice&.md)
+ +`Usage` [CompletionUsage&](./llama.oldversion.completionusage&.md)
diff --git a/docs/xmldocs/llama.oldversion.completionchoice.md b/docs/xmldocs/llama.oldversion.completionchoice.md new file mode 100644 index 00000000..e09df723 --- /dev/null +++ b/docs/xmldocs/llama.oldversion.completionchoice.md @@ -0,0 +1,160 @@ +# CompletionChoice + +Namespace: LLama.OldVersion + +```csharp +public class CompletionChoice : System.IEquatable`1[[LLama.OldVersion.CompletionChoice, LLamaSharp, Version=0.4.0.0, Culture=neutral, PublicKeyToken=null]] +``` + +Inheritance [Object](https://docs.microsoft.com/en-us/dotnet/api/system.object) → [CompletionChoice](./llama.oldversion.completionchoice.md)
+Implements [IEquatable<CompletionChoice>](https://docs.microsoft.com/en-us/dotnet/api/system.iequatable-1) + +## Properties + +### **Text** + +```csharp +public string Text { get; set; } +``` + +#### Property Value + +[String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +### **Index** + +```csharp +public int Index { get; set; } +``` + +#### Property Value + +[Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +### **Logprobs** + +```csharp +public CompletionLogprobs Logprobs { get; set; } +``` + +#### Property Value + +[CompletionLogprobs](./llama.oldversion.completionlogprobs.md)
+ +### **FinishReason** + +```csharp +public string FinishReason { get; set; } +``` + +#### Property Value + +[String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +## Constructors + +### **CompletionChoice(String, Int32, CompletionLogprobs, String)** + +```csharp +public CompletionChoice(string Text, int Index, CompletionLogprobs Logprobs, string FinishReason) +``` + +#### Parameters + +`Text` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +`Index` [Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +`Logprobs` [CompletionLogprobs](./llama.oldversion.completionlogprobs.md)
+ +`FinishReason` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +## Methods + +### **ToString()** + +```csharp +public string ToString() +``` + +#### Returns + +[String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +### **PrintMembers(StringBuilder)** + +```csharp +protected bool PrintMembers(StringBuilder builder) +``` + +#### Parameters + +`builder` [StringBuilder](https://docs.microsoft.com/en-us/dotnet/api/system.text.stringbuilder)
+ +#### Returns + +[Boolean](https://docs.microsoft.com/en-us/dotnet/api/system.boolean)
+ +### **GetHashCode()** + +```csharp +public int GetHashCode() +``` + +#### Returns + +[Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +### **Equals(Object)** + +```csharp +public bool Equals(object obj) +``` + +#### Parameters + +`obj` [Object](https://docs.microsoft.com/en-us/dotnet/api/system.object)
+ +#### Returns + +[Boolean](https://docs.microsoft.com/en-us/dotnet/api/system.boolean)
+ +### **Equals(CompletionChoice)** + +```csharp +public bool Equals(CompletionChoice other) +``` + +#### Parameters + +`other` [CompletionChoice](./llama.oldversion.completionchoice.md)
+ +#### Returns + +[Boolean](https://docs.microsoft.com/en-us/dotnet/api/system.boolean)
+ +### **<Clone>$()** + +```csharp +public CompletionChoice $() +``` + +#### Returns + +[CompletionChoice](./llama.oldversion.completionchoice.md)
+ +### **Deconstruct(String&, Int32&, CompletionLogprobs&, String&)** + +```csharp +public void Deconstruct(String& Text, Int32& Index, CompletionLogprobs& Logprobs, String& FinishReason) +``` + +#### Parameters + +`Text` [String&](https://docs.microsoft.com/en-us/dotnet/api/system.string&)
+ +`Index` [Int32&](https://docs.microsoft.com/en-us/dotnet/api/system.int32&)
+ +`Logprobs` [CompletionLogprobs&](./llama.oldversion.completionlogprobs&.md)
+ +`FinishReason` [String&](https://docs.microsoft.com/en-us/dotnet/api/system.string&)
diff --git a/docs/xmldocs/llama.oldversion.completionchunk.md b/docs/xmldocs/llama.oldversion.completionchunk.md new file mode 100644 index 00000000..cc2ccec8 --- /dev/null +++ b/docs/xmldocs/llama.oldversion.completionchunk.md @@ -0,0 +1,174 @@ +# CompletionChunk + +Namespace: LLama.OldVersion + +```csharp +public class CompletionChunk : System.IEquatable`1[[LLama.OldVersion.CompletionChunk, LLamaSharp, Version=0.4.0.0, Culture=neutral, PublicKeyToken=null]] +``` + +Inheritance [Object](https://docs.microsoft.com/en-us/dotnet/api/system.object) → [CompletionChunk](./llama.oldversion.completionchunk.md)
+Implements [IEquatable<CompletionChunk>](https://docs.microsoft.com/en-us/dotnet/api/system.iequatable-1) + +## Properties + +### **Id** + +```csharp +public string Id { get; set; } +``` + +#### Property Value + +[String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +### **Object** + +```csharp +public string Object { get; set; } +``` + +#### Property Value + +[String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +### **Created** + +```csharp +public int Created { get; set; } +``` + +#### Property Value + +[Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +### **Model** + +```csharp +public string Model { get; set; } +``` + +#### Property Value + +[String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +### **Choices** + +```csharp +public CompletionChoice[] Choices { get; set; } +``` + +#### Property Value + +[CompletionChoice[]](./llama.oldversion.completionchoice.md)
+ +## Constructors + +### **CompletionChunk(String, String, Int32, String, CompletionChoice[])** + +```csharp +public CompletionChunk(string Id, string Object, int Created, string Model, CompletionChoice[] Choices) +``` + +#### Parameters + +`Id` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +`Object` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +`Created` [Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +`Model` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +`Choices` [CompletionChoice[]](./llama.oldversion.completionchoice.md)
+ +## Methods + +### **ToString()** + +```csharp +public string ToString() +``` + +#### Returns + +[String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +### **PrintMembers(StringBuilder)** + +```csharp +protected bool PrintMembers(StringBuilder builder) +``` + +#### Parameters + +`builder` [StringBuilder](https://docs.microsoft.com/en-us/dotnet/api/system.text.stringbuilder)
+ +#### Returns + +[Boolean](https://docs.microsoft.com/en-us/dotnet/api/system.boolean)
+ +### **GetHashCode()** + +```csharp +public int GetHashCode() +``` + +#### Returns + +[Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +### **Equals(Object)** + +```csharp +public bool Equals(object obj) +``` + +#### Parameters + +`obj` [Object](https://docs.microsoft.com/en-us/dotnet/api/system.object)
+ +#### Returns + +[Boolean](https://docs.microsoft.com/en-us/dotnet/api/system.boolean)
+ +### **Equals(CompletionChunk)** + +```csharp +public bool Equals(CompletionChunk other) +``` + +#### Parameters + +`other` [CompletionChunk](./llama.oldversion.completionchunk.md)
+ +#### Returns + +[Boolean](https://docs.microsoft.com/en-us/dotnet/api/system.boolean)
+
+### **&lt;Clone&gt;$()**
+
+```csharp
+public CompletionChunk <Clone>$()
+```
+
+#### Returns
+
+[CompletionChunk](./llama.oldversion.completionchunk.md)
+
+### **Deconstruct(String&, String&, Int32&, String&, CompletionChoice[]&)**
+
+```csharp
+public void Deconstruct(String& Id, String& Object, Int32& Created, String& Model, CompletionChoice[]& Choices)
+```
+
+#### Parameters
+
+`Id` [String&](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+
+`Object` [String&](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+
+`Created` [Int32&](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+
+`Model` [String&](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+
+`Choices` [CompletionChoice[]&](./llama.oldversion.completionchoice.md)
diff --git a/docs/xmldocs/llama.oldversion.completionlogprobs.md b/docs/xmldocs/llama.oldversion.completionlogprobs.md new file mode 100644 index 00000000..8c20201e --- /dev/null +++ b/docs/xmldocs/llama.oldversion.completionlogprobs.md @@ -0,0 +1,160 @@ +# CompletionLogprobs + +Namespace: LLama.OldVersion + +```csharp +public class CompletionLogprobs : System.IEquatable`1[[LLama.OldVersion.CompletionLogprobs, LLamaSharp, Version=0.4.0.0, Culture=neutral, PublicKeyToken=null]] +``` + +Inheritance [Object](https://docs.microsoft.com/en-us/dotnet/api/system.object) → [CompletionLogprobs](./llama.oldversion.completionlogprobs.md)
+Implements [IEquatable<CompletionLogprobs>](https://docs.microsoft.com/en-us/dotnet/api/system.iequatable-1) + +## Properties + +### **TextOffset** + +```csharp +public Int32[] TextOffset { get; set; } +``` + +#### Property Value + +[Int32[]](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +### **TokenLogProbs** + +```csharp +public Single[] TokenLogProbs { get; set; } +``` + +#### Property Value + +[Single[]](https://docs.microsoft.com/en-us/dotnet/api/system.single)
+ +### **Tokens** + +```csharp +public String[] Tokens { get; set; } +``` + +#### Property Value + +[String[]](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +### **TopLogprobs** + +```csharp +public Dictionary`2[] TopLogprobs { get; set; } +``` + +#### Property Value + +[Dictionary`2[]](https://docs.microsoft.com/en-us/dotnet/api/system.collections.generic.dictionary-2)
+ +## Constructors + +### **CompletionLogprobs(Int32[], Single[], String[], Dictionary`2[])** + +```csharp +public CompletionLogprobs(Int32[] TextOffset, Single[] TokenLogProbs, String[] Tokens, Dictionary`2[] TopLogprobs) +``` + +#### Parameters + +`TextOffset` [Int32[]](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +`TokenLogProbs` [Single[]](https://docs.microsoft.com/en-us/dotnet/api/system.single)
+ +`Tokens` [String[]](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +`TopLogprobs` [Dictionary`2[]](https://docs.microsoft.com/en-us/dotnet/api/system.collections.generic.dictionary-2)
+ +## Methods + +### **ToString()** + +```csharp +public string ToString() +``` + +#### Returns + +[String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +### **PrintMembers(StringBuilder)** + +```csharp +protected bool PrintMembers(StringBuilder builder) +``` + +#### Parameters + +`builder` [StringBuilder](https://docs.microsoft.com/en-us/dotnet/api/system.text.stringbuilder)
+ +#### Returns + +[Boolean](https://docs.microsoft.com/en-us/dotnet/api/system.boolean)
+ +### **GetHashCode()** + +```csharp +public int GetHashCode() +``` + +#### Returns + +[Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +### **Equals(Object)** + +```csharp +public bool Equals(object obj) +``` + +#### Parameters + +`obj` [Object](https://docs.microsoft.com/en-us/dotnet/api/system.object)
+ +#### Returns + +[Boolean](https://docs.microsoft.com/en-us/dotnet/api/system.boolean)
+ +### **Equals(CompletionLogprobs)** + +```csharp +public bool Equals(CompletionLogprobs other) +``` + +#### Parameters + +`other` [CompletionLogprobs](./llama.oldversion.completionlogprobs.md)
+ +#### Returns + +[Boolean](https://docs.microsoft.com/en-us/dotnet/api/system.boolean)
+
+### **&lt;Clone&gt;$()**
+
+```csharp
+public CompletionLogprobs <Clone>$()
+```
+
+#### Returns
+
+[CompletionLogprobs](./llama.oldversion.completionlogprobs.md)
+
+### **Deconstruct(Int32[]&, Single[]&, String[]&, Dictionary`2[]&)**
+
+```csharp
+public void Deconstruct(Int32[]& TextOffset, Single[]& TokenLogProbs, String[]& Tokens, Dictionary`2[]& TopLogprobs)
+```
+
+#### Parameters
+
+`TextOffset` [Int32[]&](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+
+`TokenLogProbs` [Single[]&](https://docs.microsoft.com/en-us/dotnet/api/system.single)
+
+`Tokens` [String[]&](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+
+`TopLogprobs` [Dictionary`2[]&](https://docs.microsoft.com/en-us/dotnet/api/system.collections.generic.dictionary-2)
diff --git a/docs/xmldocs/llama.oldversion.completionusage.md b/docs/xmldocs/llama.oldversion.completionusage.md new file mode 100644 index 00000000..ec996c50 --- /dev/null +++ b/docs/xmldocs/llama.oldversion.completionusage.md @@ -0,0 +1,146 @@ +# CompletionUsage + +Namespace: LLama.OldVersion + +```csharp +public class CompletionUsage : System.IEquatable`1[[LLama.OldVersion.CompletionUsage, LLamaSharp, Version=0.4.0.0, Culture=neutral, PublicKeyToken=null]] +``` + +Inheritance [Object](https://docs.microsoft.com/en-us/dotnet/api/system.object) → [CompletionUsage](./llama.oldversion.completionusage.md)
+Implements [IEquatable<CompletionUsage>](https://docs.microsoft.com/en-us/dotnet/api/system.iequatable-1) + +## Properties + +### **PromptTokens** + +```csharp +public int PromptTokens { get; set; } +``` + +#### Property Value + +[Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +### **CompletionTokens** + +```csharp +public int CompletionTokens { get; set; } +``` + +#### Property Value + +[Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +### **TotalTokens** + +```csharp +public int TotalTokens { get; set; } +``` + +#### Property Value + +[Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +## Constructors + +### **CompletionUsage(Int32, Int32, Int32)** + +```csharp +public CompletionUsage(int PromptTokens, int CompletionTokens, int TotalTokens) +``` + +#### Parameters + +`PromptTokens` [Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +`CompletionTokens` [Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +`TotalTokens` [Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +## Methods + +### **ToString()** + +```csharp +public string ToString() +``` + +#### Returns + +[String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +### **PrintMembers(StringBuilder)** + +```csharp +protected bool PrintMembers(StringBuilder builder) +``` + +#### Parameters + +`builder` [StringBuilder](https://docs.microsoft.com/en-us/dotnet/api/system.text.stringbuilder)
+ +#### Returns + +[Boolean](https://docs.microsoft.com/en-us/dotnet/api/system.boolean)
+ +### **GetHashCode()** + +```csharp +public int GetHashCode() +``` + +#### Returns + +[Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +### **Equals(Object)** + +```csharp +public bool Equals(object obj) +``` + +#### Parameters + +`obj` [Object](https://docs.microsoft.com/en-us/dotnet/api/system.object)
+ +#### Returns + +[Boolean](https://docs.microsoft.com/en-us/dotnet/api/system.boolean)
+ +### **Equals(CompletionUsage)** + +```csharp +public bool Equals(CompletionUsage other) +``` + +#### Parameters + +`other` [CompletionUsage](./llama.oldversion.completionusage.md)
+ +#### Returns + +[Boolean](https://docs.microsoft.com/en-us/dotnet/api/system.boolean)
+
+### **&lt;Clone&gt;$()**
+
+```csharp
+public CompletionUsage <Clone>$()
+```
+
+#### Returns
+
+[CompletionUsage](./llama.oldversion.completionusage.md)
+
+### **Deconstruct(Int32&, Int32&, Int32&)**
+
+```csharp
+public void Deconstruct(Int32& PromptTokens, Int32& CompletionTokens, Int32& TotalTokens)
+```
+
+#### Parameters
+
+`PromptTokens` [Int32&](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+
+`CompletionTokens` [Int32&](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+
+`TotalTokens` [Int32&](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
diff --git a/docs/xmldocs/llama.oldversion.embedding.md b/docs/xmldocs/llama.oldversion.embedding.md new file mode 100644 index 00000000..e1fa7a89 --- /dev/null +++ b/docs/xmldocs/llama.oldversion.embedding.md @@ -0,0 +1,160 @@ +# Embedding + +Namespace: LLama.OldVersion + +```csharp +public class Embedding : System.IEquatable`1[[LLama.OldVersion.Embedding, LLamaSharp, Version=0.4.0.0, Culture=neutral, PublicKeyToken=null]] +``` + +Inheritance [Object](https://docs.microsoft.com/en-us/dotnet/api/system.object) → [Embedding](./llama.oldversion.embedding.md)
+Implements [IEquatable<Embedding>](https://docs.microsoft.com/en-us/dotnet/api/system.iequatable-1) + +## Properties + +### **Object** + +```csharp +public string Object { get; set; } +``` + +#### Property Value + +[String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +### **Model** + +```csharp +public string Model { get; set; } +``` + +#### Property Value + +[String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +### **Data** + +```csharp +public EmbeddingData[] Data { get; set; } +``` + +#### Property Value + +[EmbeddingData[]](./llama.oldversion.embeddingdata.md)
+ +### **Usage** + +```csharp +public EmbeddingUsage Usage { get; set; } +``` + +#### Property Value + +[EmbeddingUsage](./llama.oldversion.embeddingusage.md)
+ +## Constructors + +### **Embedding(String, String, EmbeddingData[], EmbeddingUsage)** + +```csharp +public Embedding(string Object, string Model, EmbeddingData[] Data, EmbeddingUsage Usage) +``` + +#### Parameters + +`Object` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +`Model` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +`Data` [EmbeddingData[]](./llama.oldversion.embeddingdata.md)
+ +`Usage` [EmbeddingUsage](./llama.oldversion.embeddingusage.md)
+ +## Methods + +### **ToString()** + +```csharp +public string ToString() +``` + +#### Returns + +[String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +### **PrintMembers(StringBuilder)** + +```csharp +protected bool PrintMembers(StringBuilder builder) +``` + +#### Parameters + +`builder` [StringBuilder](https://docs.microsoft.com/en-us/dotnet/api/system.text.stringbuilder)
+ +#### Returns + +[Boolean](https://docs.microsoft.com/en-us/dotnet/api/system.boolean)
+ +### **GetHashCode()** + +```csharp +public int GetHashCode() +``` + +#### Returns + +[Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +### **Equals(Object)** + +```csharp +public bool Equals(object obj) +``` + +#### Parameters + +`obj` [Object](https://docs.microsoft.com/en-us/dotnet/api/system.object)
+ +#### Returns + +[Boolean](https://docs.microsoft.com/en-us/dotnet/api/system.boolean)
+ +### **Equals(Embedding)** + +```csharp +public bool Equals(Embedding other) +``` + +#### Parameters + +`other` [Embedding](./llama.oldversion.embedding.md)
+ +#### Returns + +[Boolean](https://docs.microsoft.com/en-us/dotnet/api/system.boolean)
+
+### **&lt;Clone&gt;$()**
+
+```csharp
+public Embedding <Clone>$()
+```
+
+#### Returns
+
+[Embedding](./llama.oldversion.embedding.md)
+
+### **Deconstruct(String&, String&, EmbeddingData[]&, EmbeddingUsage&)**
+
+```csharp
+public void Deconstruct(String& Object, String& Model, EmbeddingData[]& Data, EmbeddingUsage& Usage)
+```
+
+#### Parameters
+
+`Object` [String&](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+
+`Model` [String&](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+
+`Data` [EmbeddingData[]&](./llama.oldversion.embeddingdata.md)
+
+`Usage` [EmbeddingUsage&](./llama.oldversion.embeddingusage.md)
diff --git a/docs/xmldocs/llama.oldversion.embeddingdata.md b/docs/xmldocs/llama.oldversion.embeddingdata.md new file mode 100644 index 00000000..34f58e77 --- /dev/null +++ b/docs/xmldocs/llama.oldversion.embeddingdata.md @@ -0,0 +1,146 @@ +# EmbeddingData + +Namespace: LLama.OldVersion + +```csharp +public class EmbeddingData : System.IEquatable`1[[LLama.OldVersion.EmbeddingData, LLamaSharp, Version=0.4.0.0, Culture=neutral, PublicKeyToken=null]] +``` + +Inheritance [Object](https://docs.microsoft.com/en-us/dotnet/api/system.object) → [EmbeddingData](./llama.oldversion.embeddingdata.md)
+Implements [IEquatable<EmbeddingData>](https://docs.microsoft.com/en-us/dotnet/api/system.iequatable-1) + +## Properties + +### **Index** + +```csharp +public int Index { get; set; } +``` + +#### Property Value + +[Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +### **Object** + +```csharp +public string Object { get; set; } +``` + +#### Property Value + +[String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +### **Embedding** + +```csharp +public Single[] Embedding { get; set; } +``` + +#### Property Value + +[Single[]](https://docs.microsoft.com/en-us/dotnet/api/system.single)
+ +## Constructors + +### **EmbeddingData(Int32, String, Single[])** + +```csharp +public EmbeddingData(int Index, string Object, Single[] Embedding) +``` + +#### Parameters + +`Index` [Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +`Object` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +`Embedding` [Single[]](https://docs.microsoft.com/en-us/dotnet/api/system.single)
+ +## Methods + +### **ToString()** + +```csharp +public string ToString() +``` + +#### Returns + +[String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +### **PrintMembers(StringBuilder)** + +```csharp +protected bool PrintMembers(StringBuilder builder) +``` + +#### Parameters + +`builder` [StringBuilder](https://docs.microsoft.com/en-us/dotnet/api/system.text.stringbuilder)
+ +#### Returns + +[Boolean](https://docs.microsoft.com/en-us/dotnet/api/system.boolean)
+ +### **GetHashCode()** + +```csharp +public int GetHashCode() +``` + +#### Returns + +[Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +### **Equals(Object)** + +```csharp +public bool Equals(object obj) +``` + +#### Parameters + +`obj` [Object](https://docs.microsoft.com/en-us/dotnet/api/system.object)
+ +#### Returns + +[Boolean](https://docs.microsoft.com/en-us/dotnet/api/system.boolean)
+ +### **Equals(EmbeddingData)** + +```csharp +public bool Equals(EmbeddingData other) +``` + +#### Parameters + +`other` [EmbeddingData](./llama.oldversion.embeddingdata.md)
+ +#### Returns + +[Boolean](https://docs.microsoft.com/en-us/dotnet/api/system.boolean)
+
+### **&lt;Clone&gt;$()**
+
+```csharp
+public EmbeddingData <Clone>$()
+```
+
+#### Returns
+
+[EmbeddingData](./llama.oldversion.embeddingdata.md)
+
+### **Deconstruct(Int32&, String&, Single[]&)**
+
+```csharp
+public void Deconstruct(Int32& Index, String& Object, Single[]& Embedding)
+```
+
+#### Parameters
+
+`Index` [Int32&](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+
+`Object` [String&](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+
+`Embedding` [Single[]&](https://docs.microsoft.com/en-us/dotnet/api/system.single)
diff --git a/docs/xmldocs/llama.oldversion.embeddingusage.md b/docs/xmldocs/llama.oldversion.embeddingusage.md new file mode 100644 index 00000000..f6d39441 --- /dev/null +++ b/docs/xmldocs/llama.oldversion.embeddingusage.md @@ -0,0 +1,132 @@ +# EmbeddingUsage + +Namespace: LLama.OldVersion + +```csharp +public class EmbeddingUsage : System.IEquatable`1[[LLama.OldVersion.EmbeddingUsage, LLamaSharp, Version=0.4.0.0, Culture=neutral, PublicKeyToken=null]] +``` + +Inheritance [Object](https://docs.microsoft.com/en-us/dotnet/api/system.object) → [EmbeddingUsage](./llama.oldversion.embeddingusage.md)
+Implements [IEquatable<EmbeddingUsage>](https://docs.microsoft.com/en-us/dotnet/api/system.iequatable-1) + +## Properties + +### **PromptTokens** + +```csharp +public int PromptTokens { get; set; } +``` + +#### Property Value + +[Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +### **TotalTokens** + +```csharp +public int TotalTokens { get; set; } +``` + +#### Property Value + +[Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +## Constructors + +### **EmbeddingUsage(Int32, Int32)** + +```csharp +public EmbeddingUsage(int PromptTokens, int TotalTokens) +``` + +#### Parameters + +`PromptTokens` [Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +`TotalTokens` [Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +## Methods + +### **ToString()** + +```csharp +public string ToString() +``` + +#### Returns + +[String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +### **PrintMembers(StringBuilder)** + +```csharp +protected bool PrintMembers(StringBuilder builder) +``` + +#### Parameters + +`builder` [StringBuilder](https://docs.microsoft.com/en-us/dotnet/api/system.text.stringbuilder)
+ +#### Returns + +[Boolean](https://docs.microsoft.com/en-us/dotnet/api/system.boolean)
+ +### **GetHashCode()** + +```csharp +public int GetHashCode() +``` + +#### Returns + +[Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +### **Equals(Object)** + +```csharp +public bool Equals(object obj) +``` + +#### Parameters + +`obj` [Object](https://docs.microsoft.com/en-us/dotnet/api/system.object)
+ +#### Returns + +[Boolean](https://docs.microsoft.com/en-us/dotnet/api/system.boolean)
+ +### **Equals(EmbeddingUsage)** + +```csharp +public bool Equals(EmbeddingUsage other) +``` + +#### Parameters + +`other` [EmbeddingUsage](./llama.oldversion.embeddingusage.md)
+ +#### Returns + +[Boolean](https://docs.microsoft.com/en-us/dotnet/api/system.boolean)
+
+### **&lt;Clone&gt;$()**
+
+```csharp
+public EmbeddingUsage <Clone>$()
+```
+
+#### Returns
+
+[EmbeddingUsage](./llama.oldversion.embeddingusage.md)
+
+### **Deconstruct(Int32&, Int32&)**
+
+```csharp
+public void Deconstruct(Int32& PromptTokens, Int32& TotalTokens)
+```
+
+#### Parameters
+
+`PromptTokens` [Int32&](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+
+`TotalTokens` [Int32&](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
diff --git a/docs/xmldocs/llama.oldversion.ichatmodel.md b/docs/xmldocs/llama.oldversion.ichatmodel.md new file mode 100644 index 00000000..ce7b7134 --- /dev/null +++ b/docs/xmldocs/llama.oldversion.ichatmodel.md @@ -0,0 +1,63 @@ +# IChatModel + +Namespace: LLama.OldVersion + +```csharp +public interface IChatModel +``` + +## Properties + +### **Name** + +```csharp +public abstract string Name { get; } +``` + +#### Property Value + +[String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +## Methods + +### **Chat(String, String, String)** + +```csharp +IEnumerable Chat(string text, string prompt, string encoding) +``` + +#### Parameters + +`text` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +`prompt` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +`encoding` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +#### Returns + +[IEnumerable<String>](https://docs.microsoft.com/en-us/dotnet/api/system.collections.generic.ienumerable-1)
+
+### **InitChatPrompt(String, String)**
+
+Initialize a prompt for the chat, from which the next prompts will be produced automatically during the chat.
+
+```csharp
+void InitChatPrompt(string prompt, string encoding)
+```
+
+#### Parameters
+
+`prompt` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +`encoding` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +### **InitChatAntiprompt(String[])** + +```csharp +void InitChatAntiprompt(String[] antiprompt) +``` + +#### Parameters + +`antiprompt` [String[]](https://docs.microsoft.com/en-us/dotnet/api/system.string)
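+
+## Example
+
+A minimal consumption sketch (not from the original docs): `model` stands for any `IChatModel` implementation, the inner prompt is left empty and UTF-8 is assumed for the encoding:
+
+```csharp
+static void RunChat(IChatModel model, string userInput)
+{
+    // Chat streams the response piece by piece.
+    foreach (var output in model.Chat(userInput, "", "UTF-8"))
+    {
+        Console.Write(output);
+    }
+}
+```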
diff --git a/docs/xmldocs/llama.oldversion.llamaembedder.md b/docs/xmldocs/llama.oldversion.llamaembedder.md new file mode 100644 index 00000000..0259316d --- /dev/null +++ b/docs/xmldocs/llama.oldversion.llamaembedder.md @@ -0,0 +1,50 @@ +# LLamaEmbedder + +Namespace: LLama.OldVersion + +```csharp +public class LLamaEmbedder : System.IDisposable +``` + +Inheritance [Object](https://docs.microsoft.com/en-us/dotnet/api/system.object) → [LLamaEmbedder](./llama.oldversion.llamaembedder.md)
+Implements [IDisposable](https://docs.microsoft.com/en-us/dotnet/api/system.idisposable) + +## Constructors + +### **LLamaEmbedder(LLamaParams)** + +```csharp +public LLamaEmbedder(LLamaParams params) +``` + +#### Parameters + +`params` [LLamaParams](./llama.oldversion.llamaparams.md)
+ +## Methods + +### **GetEmbeddings(String, Int32, Boolean, String)** + +```csharp +public Single[] GetEmbeddings(string text, int n_thread, bool add_bos, string encoding) +``` + +#### Parameters + +`text` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +`n_thread` [Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +`add_bos` [Boolean](https://docs.microsoft.com/en-us/dotnet/api/system.boolean)
+ +`encoding` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +#### Returns + +[Single[]](https://docs.microsoft.com/en-us/dotnet/api/system.single)
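+
+#### Example
+
+A hypothetical usage sketch; it assumes `LLamaParams` provides defaults so that only the model path (a placeholder below) needs to be set:
+
+```csharp
+var embedder = new LLamaEmbedder(new LLamaParams(model: "path/to/model.bin"));
+// 4 threads, prepend the BOS token, UTF-8 encoding.
+float[] embeddings = embedder.GetEmbeddings("Hello, world!", 4, true, "UTF-8");
+```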
+ +### **Dispose()** + +```csharp +public void Dispose() +``` diff --git a/docs/xmldocs/llama.oldversion.llamamodel.md b/docs/xmldocs/llama.oldversion.llamamodel.md new file mode 100644 index 00000000..4f014907 --- /dev/null +++ b/docs/xmldocs/llama.oldversion.llamamodel.md @@ -0,0 +1,362 @@ +# LLamaModel + +Namespace: LLama.OldVersion + +```csharp +public class LLamaModel : IChatModel, System.IDisposable +``` + +Inheritance [Object](https://docs.microsoft.com/en-us/dotnet/api/system.object) → [LLamaModel](./llama.oldversion.llamamodel.md)
+Implements [IChatModel](./llama.oldversion.ichatmodel.md), [IDisposable](https://docs.microsoft.com/en-us/dotnet/api/system.idisposable) + +## Properties + +### **Name** + +```csharp +public string Name { get; set; } +``` + +#### Property Value + +[String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +### **Verbose** + +```csharp +public bool Verbose { get; set; } +``` + +#### Property Value + +[Boolean](https://docs.microsoft.com/en-us/dotnet/api/system.boolean)
+ +### **NativeHandle** + +```csharp +public SafeLLamaContextHandle NativeHandle { get; } +``` + +#### Property Value + +[SafeLLamaContextHandle](./llama.native.safellamacontexthandle.md)
+
+## Constructors
+
+### **LLamaModel(String, String, Boolean, Int32, Int32, Int32, Int32, Int32, Int32, Int32, Dictionary&lt;Int32, Single&gt;, Int32, Single, Single, Single, Single, Single, Int32, Single, Single, Int32, Single, Single, String, String, String, String, List&lt;String&gt;, String, String, Boolean, Boolean, Boolean, Boolean, Boolean, Boolean, Boolean, Boolean, Boolean, Boolean, Boolean, Boolean, Boolean, Boolean, String)**
+
+Please refer to `LLamaParams` for the meaning of each argument. Be sure to set `n_gpu_layers`;
+ otherwise 20 layers will be loaded to the GPU by default.
+
+```csharp
+public LLamaModel(string model_path, string model_name, bool verbose, int seed, int n_threads, int n_predict, int n_ctx, int n_batch, int n_keep, int n_gpu_layers, Dictionary logit_bias, int top_k, float top_p, float tfs_z, float typical_p, float temp, float repeat_penalty, int repeat_last_n, float frequency_penalty, float presence_penalty, int mirostat, float mirostat_tau, float mirostat_eta, string prompt, string path_session, string input_prefix, string input_suffix, List antiprompt, string lora_adapter, string lora_base, bool memory_f16, bool random_prompt, bool use_color, bool interactive, bool embedding, bool interactive_first, bool prompt_cache_all, bool instruct, bool penalize_nl, bool perplexity, bool use_mmap, bool use_mlock, bool mem_test, bool verbose_prompt, string encoding)
+```
+
+#### Parameters
+
+`model_path` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+The model file path. + +`model_name` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+The model name. + +`verbose` [Boolean](https://docs.microsoft.com/en-us/dotnet/api/system.boolean)
+Whether to print details when running the model. + +`seed` [Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +`n_threads` [Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +`n_predict` [Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +`n_ctx` [Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +`n_batch` [Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +`n_keep` [Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +`n_gpu_layers` [Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +`logit_bias` [Dictionary<Int32, Single>](https://docs.microsoft.com/en-us/dotnet/api/system.collections.generic.dictionary-2)
+ +`top_k` [Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +`top_p` [Single](https://docs.microsoft.com/en-us/dotnet/api/system.single)
+ +`tfs_z` [Single](https://docs.microsoft.com/en-us/dotnet/api/system.single)
+ +`typical_p` [Single](https://docs.microsoft.com/en-us/dotnet/api/system.single)
+ +`temp` [Single](https://docs.microsoft.com/en-us/dotnet/api/system.single)
+ +`repeat_penalty` [Single](https://docs.microsoft.com/en-us/dotnet/api/system.single)
+ +`repeat_last_n` [Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +`frequency_penalty` [Single](https://docs.microsoft.com/en-us/dotnet/api/system.single)
+ +`presence_penalty` [Single](https://docs.microsoft.com/en-us/dotnet/api/system.single)
+ +`mirostat` [Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +`mirostat_tau` [Single](https://docs.microsoft.com/en-us/dotnet/api/system.single)
+ +`mirostat_eta` [Single](https://docs.microsoft.com/en-us/dotnet/api/system.single)
+ +`prompt` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +`path_session` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +`input_prefix` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +`input_suffix` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +`antiprompt` [List<String>](https://docs.microsoft.com/en-us/dotnet/api/system.collections.generic.list-1)
+ +`lora_adapter` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +`lora_base` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +`memory_f16` [Boolean](https://docs.microsoft.com/en-us/dotnet/api/system.boolean)
+ +`random_prompt` [Boolean](https://docs.microsoft.com/en-us/dotnet/api/system.boolean)
+ +`use_color` [Boolean](https://docs.microsoft.com/en-us/dotnet/api/system.boolean)
+ +`interactive` [Boolean](https://docs.microsoft.com/en-us/dotnet/api/system.boolean)
+ +`embedding` [Boolean](https://docs.microsoft.com/en-us/dotnet/api/system.boolean)
+ +`interactive_first` [Boolean](https://docs.microsoft.com/en-us/dotnet/api/system.boolean)
+ +`prompt_cache_all` [Boolean](https://docs.microsoft.com/en-us/dotnet/api/system.boolean)
+ +`instruct` [Boolean](https://docs.microsoft.com/en-us/dotnet/api/system.boolean)
+ +`penalize_nl` [Boolean](https://docs.microsoft.com/en-us/dotnet/api/system.boolean)
+ +`perplexity` [Boolean](https://docs.microsoft.com/en-us/dotnet/api/system.boolean)
+ +`use_mmap` [Boolean](https://docs.microsoft.com/en-us/dotnet/api/system.boolean)
+ +`use_mlock` [Boolean](https://docs.microsoft.com/en-us/dotnet/api/system.boolean)
+ +`mem_test` [Boolean](https://docs.microsoft.com/en-us/dotnet/api/system.boolean)
+ +`verbose_prompt` [Boolean](https://docs.microsoft.com/en-us/dotnet/api/system.boolean)
+ +`encoding` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+
+### **LLamaModel(LLamaParams, String, Boolean, String)**
+
+Please refer to `LLamaParams` for the meaning of each argument. Be sure to set `n_gpu_layers`;
+ otherwise 20 layers will be loaded to the GPU by default.
+
+```csharp
+public LLamaModel(LLamaParams params, string name, bool verbose, string encoding)
+```
+
+#### Parameters
+
+`params` [LLamaParams](./llama.oldversion.llamaparams.md)
+The LLamaModel params + +`name` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+Model name + +`verbose` [Boolean](https://docs.microsoft.com/en-us/dotnet/api/system.boolean)
+Whether to output detailed information.
+
+`encoding` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +#### Exceptions + +[RuntimeError](./llama.exceptions.runtimeerror.md)
+ +## Methods + +### **WithPrompt(String, String)** + +Apply a prompt to the model. + +```csharp +public LLamaModel WithPrompt(string prompt, string encoding) +``` + +#### Parameters + +`prompt` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +`encoding` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +#### Returns + +[LLamaModel](./llama.oldversion.llamamodel.md)
+ +#### Exceptions + +[ArgumentException](https://docs.microsoft.com/en-us/dotnet/api/system.argumentexception)
+ +### **WithPromptFile(String)** + +Apply the prompt file to the model. + +```csharp +public LLamaModel WithPromptFile(string promptFileName) +``` + +#### Parameters + +`promptFileName` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +#### Returns + +[LLamaModel](./llama.oldversion.llamamodel.md)
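+
+#### Example
+
+Since both methods return the model itself, they can be chained. A sketch with placeholder paths, assuming `LLamaParams` provides defaults for unspecified arguments:
+
+```csharp
+var model = new LLamaModel(new LLamaParams(model: "path/to/model.bin"), "my_model", false, "UTF-8")
+    .WithPromptFile("prompt.txt"); // or: .WithPrompt("Some prompt...", "UTF-8")
+```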
+ +### **InitChatPrompt(String, String)** + +```csharp +public void InitChatPrompt(string prompt, string encoding) +``` + +#### Parameters + +`prompt` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +`encoding` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +### **InitChatAntiprompt(String[])** + +```csharp +public void InitChatAntiprompt(String[] antiprompt) +``` + +#### Parameters + +`antiprompt` [String[]](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +### **Chat(String, String, String)** + +Chat with the LLaMa model under interactive mode. + +```csharp +public IEnumerable Chat(string text, string prompt, string encoding) +``` + +#### Parameters + +`text` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +`prompt` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +`encoding` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +#### Returns + +[IEnumerable<String>](https://docs.microsoft.com/en-us/dotnet/api/system.collections.generic.ienumerable-1)
+ +#### Exceptions + +[ArgumentException](https://docs.microsoft.com/en-us/dotnet/api/system.argumentexception)
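+
+#### Example
+
+A sketch of an interactive loop, assuming `model` was initialized in interactive mode, with an empty inner prompt and UTF-8 encoding:
+
+```csharp
+while (true)
+{
+    var input = Console.ReadLine();
+    foreach (var output in model.Chat(input, "", "UTF-8"))
+    {
+        Console.Write(output);
+    }
+}
+```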
+
+### **SaveState(String)**
+
+Save the state to the specified path.
+
+```csharp
+public void SaveState(string filename)
+```
+
+#### Parameters
+
+`filename` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+
+### **LoadState(String, Boolean)**
+
+Load the state from the specified path.
+
+```csharp
+public void LoadState(string filename, bool clearPreviousEmbed)
+```
+
+#### Parameters
+
+`filename` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +`clearPreviousEmbed` [Boolean](https://docs.microsoft.com/en-us/dotnet/api/system.boolean)
+Whether to clear the inputs previously embedded by this model.
+
+#### Exceptions
+
+[RuntimeError](./llama.exceptions.runtimeerror.md)
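+
+#### Example
+
+A sketch of a save/restore round trip; the file name is a placeholder:
+
+```csharp
+model.SaveState("model.st");
+// ... later, restore the state and clear the previously embedded inputs:
+model.LoadState("model.st", true);
+```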
+ +### **Tokenize(String, String)** + +Tokenize a string. + +```csharp +public List Tokenize(string text, string encoding) +``` + +#### Parameters + +`text` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+The utf-8 encoded string to tokenize. + +`encoding` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +#### Returns + +[List<Int32>](https://docs.microsoft.com/en-us/dotnet/api/system.collections.generic.list-1)
+A list of tokens. + +#### Exceptions + +[RuntimeError](./llama.exceptions.runtimeerror.md)
+If the tokenization failed. + +### **DeTokenize(IEnumerable<Int32>)** + +Detokenize a list of tokens. + +```csharp +public string DeTokenize(IEnumerable tokens) +``` + +#### Parameters + +`tokens` [IEnumerable<Int32>](https://docs.microsoft.com/en-us/dotnet/api/system.collections.generic.ienumerable-1)
+The list of tokens to detokenize. + +#### Returns + +[String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+The detokenized string. + +### **Call(String, String)** + +Call the model to run inference. + +```csharp +public IEnumerable Call(string text, string encoding) +``` + +#### Parameters + +`text` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +`encoding` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +#### Returns + +[IEnumerable<String>](https://docs.microsoft.com/en-us/dotnet/api/system.collections.generic.ienumerable-1)
+ +#### Exceptions + +[RuntimeError](./llama.exceptions.runtimeerror.md)
+ +### **Dispose()** + +```csharp +public void Dispose() +``` diff --git a/docs/xmldocs/llama.oldversion.llamaparams.md b/docs/xmldocs/llama.oldversion.llamaparams.md new file mode 100644 index 00000000..ce242f59 --- /dev/null +++ b/docs/xmldocs/llama.oldversion.llamaparams.md @@ -0,0 +1,357 @@ +# LLamaParams + +Namespace: LLama.OldVersion + +```csharp +public struct LLamaParams +``` + +Inheritance [Object](https://docs.microsoft.com/en-us/dotnet/api/system.object) → [ValueType](https://docs.microsoft.com/en-us/dotnet/api/system.valuetype) → [LLamaParams](./llama.oldversion.llamaparams.md) + +## Fields + +### **seed** + +```csharp +public int seed; +``` + +### **n_threads** + +```csharp +public int n_threads; +``` + +### **n_predict** + +```csharp +public int n_predict; +``` + +### **n_ctx** + +```csharp +public int n_ctx; +``` + +### **n_batch** + +```csharp +public int n_batch; +``` + +### **n_keep** + +```csharp +public int n_keep; +``` + +### **n_gpu_layers** + +```csharp +public int n_gpu_layers; +``` + +### **logit_bias** + +```csharp +public Dictionary logit_bias; +``` + +### **top_k** + +```csharp +public int top_k; +``` + +### **top_p** + +```csharp +public float top_p; +``` + +### **tfs_z** + +```csharp +public float tfs_z; +``` + +### **typical_p** + +```csharp +public float typical_p; +``` + +### **temp** + +```csharp +public float temp; +``` + +### **repeat_penalty** + +```csharp +public float repeat_penalty; +``` + +### **repeat_last_n** + +```csharp +public int repeat_last_n; +``` + +### **frequency_penalty** + +```csharp +public float frequency_penalty; +``` + +### **presence_penalty** + +```csharp +public float presence_penalty; +``` + +### **mirostat** + +```csharp +public int mirostat; +``` + +### **mirostat_tau** + +```csharp +public float mirostat_tau; +``` + +### **mirostat_eta** + +```csharp +public float mirostat_eta; +``` + +### **model** + +```csharp +public string model; +``` + +### **prompt** + +```csharp +public string prompt; +``` + +### **path_session** + +```csharp +public string path_session; +``` + +### **input_prefix** + +```csharp +public string input_prefix; +``` + +### **input_suffix** + +```csharp +public string input_suffix; +``` + +### **antiprompt** + +```csharp +public List antiprompt; +``` + +### **lora_adapter** + +```csharp +public string lora_adapter; +``` + +### **lora_base** + +```csharp +public string lora_base; +``` + +### **memory_f16** + +```csharp +public bool memory_f16; +``` + +### **random_prompt** + +```csharp +public bool random_prompt; +``` + +### **use_color** + +```csharp +public bool use_color; +``` + +### **interactive** + +```csharp +public bool interactive; +``` + +### **prompt_cache_all** + +```csharp +public bool prompt_cache_all; +``` + +### **embedding** + +```csharp +public bool embedding; +``` + +### **interactive_first** + +```csharp +public bool interactive_first; +``` + +### **instruct** + +```csharp +public bool instruct; +``` + +### **penalize_nl** + +```csharp +public bool penalize_nl; +``` + +### **perplexity** + +```csharp +public bool perplexity; +``` + +### **use_mmap** + +```csharp +public bool use_mmap; +``` + +### **use_mlock** + +```csharp +public bool use_mlock; +``` + +### **mem_test** + +```csharp +public bool mem_test; +``` + +### **verbose_prompt** + +```csharp +public bool verbose_prompt; +``` + +## Constructors + +### **LLamaParams(Int32, Int32, Int32, Int32, Int32, Int32, Int32, Dictionary<Int32, Single>, Int32, Single, Single, Single, Single, Single, Int32, Single, Single, Int32, Single, Single, 
String, String, String, String, String, List<String>, String, String, Boolean, Boolean, Boolean, Boolean, Boolean, Boolean, Boolean, Boolean, Boolean, Boolean, Boolean, Boolean, Boolean, Boolean)** + +```csharp +LLamaParams(int seed, int n_threads, int n_predict, int n_ctx, int n_batch, int n_keep, int n_gpu_layers, Dictionary logit_bias, int top_k, float top_p, float tfs_z, float typical_p, float temp, float repeat_penalty, int repeat_last_n, float frequency_penalty, float presence_penalty, int mirostat, float mirostat_tau, float mirostat_eta, string model, string prompt, string path_session, string input_prefix, string input_suffix, List antiprompt, string lora_adapter, string lora_base, bool memory_f16, bool random_prompt, bool use_color, bool interactive, bool prompt_cache_all, bool embedding, bool interactive_first, bool instruct, bool penalize_nl, bool perplexity, bool use_mmap, bool use_mlock, bool mem_test, bool verbose_prompt) +``` + +#### Parameters + +`seed` [Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +`n_threads` [Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +`n_predict` [Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +`n_ctx` [Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +`n_batch` [Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +`n_keep` [Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +`n_gpu_layers` [Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +`logit_bias` [Dictionary<Int32, Single>](https://docs.microsoft.com/en-us/dotnet/api/system.collections.generic.dictionary-2)
+ +`top_k` [Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +`top_p` [Single](https://docs.microsoft.com/en-us/dotnet/api/system.single)
+ +`tfs_z` [Single](https://docs.microsoft.com/en-us/dotnet/api/system.single)
+ +`typical_p` [Single](https://docs.microsoft.com/en-us/dotnet/api/system.single)
+ +`temp` [Single](https://docs.microsoft.com/en-us/dotnet/api/system.single)
+ +`repeat_penalty` [Single](https://docs.microsoft.com/en-us/dotnet/api/system.single)
+ +`repeat_last_n` [Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +`frequency_penalty` [Single](https://docs.microsoft.com/en-us/dotnet/api/system.single)
+ +`presence_penalty` [Single](https://docs.microsoft.com/en-us/dotnet/api/system.single)
+ +`mirostat` [Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +`mirostat_tau` [Single](https://docs.microsoft.com/en-us/dotnet/api/system.single)
+ +`mirostat_eta` [Single](https://docs.microsoft.com/en-us/dotnet/api/system.single)
+ +`model` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +`prompt` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +`path_session` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +`input_prefix` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +`input_suffix` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +`antiprompt` [List<String>](https://docs.microsoft.com/en-us/dotnet/api/system.collections.generic.list-1)
+ +`lora_adapter` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +`lora_base` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +`memory_f16` [Boolean](https://docs.microsoft.com/en-us/dotnet/api/system.boolean)
+ +`random_prompt` [Boolean](https://docs.microsoft.com/en-us/dotnet/api/system.boolean)
+ +`use_color` [Boolean](https://docs.microsoft.com/en-us/dotnet/api/system.boolean)
+ +`interactive` [Boolean](https://docs.microsoft.com/en-us/dotnet/api/system.boolean)
+ +`prompt_cache_all` [Boolean](https://docs.microsoft.com/en-us/dotnet/api/system.boolean)
+ +`embedding` [Boolean](https://docs.microsoft.com/en-us/dotnet/api/system.boolean)
+ +`interactive_first` [Boolean](https://docs.microsoft.com/en-us/dotnet/api/system.boolean)
+ +`instruct` [Boolean](https://docs.microsoft.com/en-us/dotnet/api/system.boolean)
+ +`penalize_nl` [Boolean](https://docs.microsoft.com/en-us/dotnet/api/system.boolean)
+ +`perplexity` [Boolean](https://docs.microsoft.com/en-us/dotnet/api/system.boolean)
+ +`use_mmap` [Boolean](https://docs.microsoft.com/en-us/dotnet/api/system.boolean)
+ +`use_mlock` [Boolean](https://docs.microsoft.com/en-us/dotnet/api/system.boolean)
+ +`mem_test` [Boolean](https://docs.microsoft.com/en-us/dotnet/api/system.boolean)
+ +`verbose_prompt` [Boolean](https://docs.microsoft.com/en-us/dotnet/api/system.boolean)
diff --git a/docs/xmldocs/llama.resettablellamamodel.md b/docs/xmldocs/llama.resettablellamamodel.md
new file mode 100644
index 00000000..b43646a3
--- /dev/null
+++ b/docs/xmldocs/llama.resettablellamamodel.md
@@ -0,0 +1,101 @@
+# ResettableLLamaModel
+
+Namespace: LLama
+
+A LLamaModel that can be reset. Note that using this class consumes about 10% more memory.
+
+```csharp
+public class ResettableLLamaModel : LLamaModel, System.IDisposable
+```
+
+Inheritance [Object](https://docs.microsoft.com/en-us/dotnet/api/system.object) → [LLamaModel](./llama.llamamodel.md) → [ResettableLLamaModel](./llama.resettablellamamodel.md)
+Implements [IDisposable](https://docs.microsoft.com/en-us/dotnet/api/system.idisposable) + +## Properties + +### **OriginalState** + +The initial state of the model + +```csharp +public Byte[] OriginalState { get; set; } +``` + +#### Property Value + +[Byte[]](https://docs.microsoft.com/en-us/dotnet/api/system.byte)
+ +### **ContextSize** + +The context size. + +```csharp +public int ContextSize { get; } +``` + +#### Property Value + +[Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +### **Params** + +The model params set for this model. + +```csharp +public ModelParams Params { get; set; } +``` + +#### Property Value + +[ModelParams](./llama.common.modelparams.md)
+
+### **NativeHandle**
+
+The native handle, which is passed to the native APIs. Please avoid using it
+ unless you know how the native API is used.
+
+```csharp
+public SafeLLamaContextHandle NativeHandle { get; }
+```
+
+#### Property Value
+
+[SafeLLamaContextHandle](./llama.native.safellamacontexthandle.md)
+ +### **Encoding** + +The encoding set for this model to deal with text input. + +```csharp +public Encoding Encoding { get; } +``` + +#### Property Value + +[Encoding](https://docs.microsoft.com/en-us/dotnet/api/system.text.encoding)
+ +## Constructors + +### **ResettableLLamaModel(ModelParams, String)** + + + +```csharp +public ResettableLLamaModel(ModelParams Params, string encoding) +``` + +#### Parameters + +`Params` [ModelParams](./llama.common.modelparams.md)
+ +`encoding` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
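+
+#### Example
+
+A hypothetical sketch with a placeholder model path; `Reset` (documented below) restores the state captured at construction time:
+
+```csharp
+var model = new ResettableLLamaModel(new ModelParams("path/to/model.bin"), "UTF-8");
+// ... run some inference through an executor ...
+model.Reset(); // back to the initial state
+```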
+ +## Methods + +### **Reset()** + +Reset the state to the initial state. + +```csharp +public void Reset() +``` diff --git a/docs/xmldocs/llama.statefulexecutorbase.md b/docs/xmldocs/llama.statefulexecutorbase.md new file mode 100644 index 00000000..6cd169e1 --- /dev/null +++ b/docs/xmldocs/llama.statefulexecutorbase.md @@ -0,0 +1,234 @@ +# StatefulExecutorBase + +Namespace: LLama + +The base class for stateful LLama executors. + +```csharp +public abstract class StatefulExecutorBase : LLama.Abstractions.ILLamaExecutor +``` + +Inheritance [Object](https://docs.microsoft.com/en-us/dotnet/api/system.object) → [StatefulExecutorBase](./llama.statefulexecutorbase.md)
+Implements [ILLamaExecutor](./llama.abstractions.illamaexecutor.md)
+
+## Properties
+
+### **Model**
+
+The model used by the executor.
+
+```csharp
+public LLamaModel Model { get; }
+```
+
+#### Property Value
+
+[LLamaModel](./llama.llamamodel.md)
+ +## Methods + +### **WithSessionFile(String)** + +This API is currently not verified. + +```csharp +public StatefulExecutorBase WithSessionFile(string filename) +``` + +#### Parameters + +`filename` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +#### Returns + +[StatefulExecutorBase](./llama.statefulexecutorbase.md)
+ +#### Exceptions + +[ArgumentNullException](https://docs.microsoft.com/en-us/dotnet/api/system.argumentnullexception)
+ +[RuntimeError](./llama.exceptions.runtimeerror.md)
+
+### **SaveSessionFile(String)**
+
+This API has not yet been verified.
+
+```csharp
+public void SaveSessionFile(string filename)
+```
+
+#### Parameters
+
+`filename` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +### **HandleRunOutOfContext(Int32)** + +After running out of the context, take some tokens from the original prompt and recompute the logits in batches. + +```csharp +protected void HandleRunOutOfContext(int tokensToKeep) +``` + +#### Parameters + +`tokensToKeep` [Int32](https://docs.microsoft.com/en-us/dotnet/api/system.int32)
+ +### **TryReuseMathingPrefix()** + +Try to reuse the matching prefix from the session file. + +```csharp +protected void TryReuseMathingPrefix() +``` + +### **GetLoopCondition(InferStateArgs)** + +Decide whether to continue the loop. + +```csharp +protected abstract bool GetLoopCondition(InferStateArgs args) +``` + +#### Parameters + +`args` [InferStateArgs](./llama.statefulexecutorbase.inferstateargs.md)
+ +#### Returns + +[Boolean](https://docs.microsoft.com/en-us/dotnet/api/system.boolean)
+ +### **PreprocessInputs(String, InferStateArgs)** + +Preprocess the inputs before the inference. + +```csharp +protected abstract void PreprocessInputs(string text, InferStateArgs args) +``` + +#### Parameters + +`text` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +`args` [InferStateArgs](./llama.statefulexecutorbase.inferstateargs.md)
+ +### **PostProcess(InferenceParams, InferStateArgs, IEnumerable`1&)** + +Do some post processing after the inference. + +```csharp +protected abstract bool PostProcess(InferenceParams inferenceParams, InferStateArgs args, IEnumerable`1& extraOutputs) +``` + +#### Parameters + +`inferenceParams` [InferenceParams](./llama.common.inferenceparams.md)
+ +`args` [InferStateArgs](./llama.statefulexecutorbase.inferstateargs.md)
+
+`extraOutputs` [IEnumerable`1&](https://docs.microsoft.com/en-us/dotnet/api/system.collections.generic.ienumerable-1)
+ +#### Returns + +[Boolean](https://docs.microsoft.com/en-us/dotnet/api/system.boolean)
+ +### **InferInternal(InferenceParams, InferStateArgs)** + +The core inference logic. + +```csharp +protected abstract void InferInternal(InferenceParams inferenceParams, InferStateArgs args) +``` + +#### Parameters + +`inferenceParams` [InferenceParams](./llama.common.inferenceparams.md)
+ +`args` [InferStateArgs](./llama.statefulexecutorbase.inferstateargs.md)
+ +### **SaveState(String)** + +Save the current state to a file. + +```csharp +public abstract void SaveState(string filename) +``` + +#### Parameters + +`filename` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +### **GetStateData()** + +Get the current state data. + +```csharp +public abstract ExecutorBaseState GetStateData() +``` + +#### Returns + +[ExecutorBaseState](./llama.statefulexecutorbase.executorbasestate.md)
+ +### **LoadState(ExecutorBaseState)** + +Load the state from data. + +```csharp +public abstract void LoadState(ExecutorBaseState data) +``` + +#### Parameters + +`data` [ExecutorBaseState](./llama.statefulexecutorbase.executorbasestate.md)
+ +### **LoadState(String)** + +Load the state from a file. + +```csharp +public abstract void LoadState(string filename) +``` + +#### Parameters + +`filename` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +### **Infer(String, InferenceParams, CancellationToken)** + +Execute the inference. + +```csharp +public IEnumerable Infer(string text, InferenceParams inferenceParams, CancellationToken cancellationToken) +``` + +#### Parameters + +`text` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +`inferenceParams` [InferenceParams](./llama.common.inferenceparams.md)
+ +`cancellationToken` [CancellationToken](https://docs.microsoft.com/en-us/dotnet/api/system.threading.cancellationtoken)
+ +#### Returns + +[IEnumerable<String>](https://docs.microsoft.com/en-us/dotnet/api/system.collections.generic.ienumerable-1)
+ +### **InferAsync(String, InferenceParams, CancellationToken)** + +Execute the inference asynchronously. + +```csharp +public IAsyncEnumerable InferAsync(string text, InferenceParams inferenceParams, CancellationToken cancellationToken) +``` + +#### Parameters + +`text` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +`inferenceParams` [InferenceParams](./llama.common.inferenceparams.md)
+ +`cancellationToken` [CancellationToken](https://docs.microsoft.com/en-us/dotnet/api/system.threading.cancellationtoken)
+ +#### Returns + +[IAsyncEnumerable<String>](https://docs.microsoft.com/en-us/dotnet/api/system.collections.generic.iasyncenumerable-1)
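+
+## Example
+
+A sketch using [InteractiveExecutor](./llama.interactiveexecutor.md), one of the stateful executors; the model path is a placeholder and optional parameters are assumed to have defaults:
+
+```csharp
+var model = new LLamaModel(new ModelParams("path/to/model.bin"));
+StatefulExecutorBase executor = new InteractiveExecutor(model);
+foreach (var output in executor.Infer("Hello, ", new InferenceParams()))
+{
+    Console.Write(output);
+}
+executor.SaveState("executor.st"); // persist the conversation state
+```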
diff --git a/docs/xmldocs/llama.statelessexecutor.md b/docs/xmldocs/llama.statelessexecutor.md
new file mode 100644
index 00000000..60db8326
--- /dev/null
+++ b/docs/xmldocs/llama.statelessexecutor.md
@@ -0,0 +1,80 @@
+# StatelessExecutor
+
+Namespace: LLama
+
+This executor infers each input as a one-time job. Previous inputs do not affect
+ the response to the current input.
+
+```csharp
+public class StatelessExecutor : LLama.Abstractions.ILLamaExecutor
+```
+
+Inheritance [Object](https://docs.microsoft.com/en-us/dotnet/api/system.object) → [StatelessExecutor](./llama.statelessexecutor.md)
+Implements [ILLamaExecutor](./llama.abstractions.illamaexecutor.md)
+
+## Properties
+
+### **Model**
+
+The model used by the executor when running inference.
+
+```csharp
+public LLamaModel Model { get; }
+```
+
+#### Property Value
+
+[LLamaModel](./llama.llamamodel.md)
+ +## Constructors + +### **StatelessExecutor(LLamaModel)** + + + +```csharp +public StatelessExecutor(LLamaModel model) +``` + +#### Parameters + +`model` [LLamaModel](./llama.llamamodel.md)
+The LLama model. + +## Methods + +### **Infer(String, InferenceParams, CancellationToken)** + +```csharp +public IEnumerable Infer(string text, InferenceParams inferenceParams, CancellationToken cancellationToken) +``` + +#### Parameters + +`text` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +`inferenceParams` [InferenceParams](./llama.common.inferenceparams.md)
+ +`cancellationToken` [CancellationToken](https://docs.microsoft.com/en-us/dotnet/api/system.threading.cancellationtoken)
+ +#### Returns + +[IEnumerable<String>](https://docs.microsoft.com/en-us/dotnet/api/system.collections.generic.ienumerable-1)
+ +### **InferAsync(String, InferenceParams, CancellationToken)** + +```csharp +public IAsyncEnumerable InferAsync(string text, InferenceParams inferenceParams, CancellationToken token) +``` + +#### Parameters + +`text` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +`inferenceParams` [InferenceParams](./llama.common.inferenceparams.md)
+ +`token` [CancellationToken](https://docs.microsoft.com/en-us/dotnet/api/system.threading.cancellationtoken)
+ +#### Returns + +[IAsyncEnumerable<String>](https://docs.microsoft.com/en-us/dotnet/api/system.collections.generic.iasyncenumerable-1)
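+
+## Example
+
+A sketch with a placeholder model path, assuming the optional parameters have defaults:
+
+```csharp
+var model = new LLamaModel(new ModelParams("path/to/model.bin"));
+var executor = new StatelessExecutor(model);
+// Each call is an independent job: no state is carried over between prompts.
+foreach (var output in executor.Infer("Question: what is a llama? Answer:", new InferenceParams()))
+{
+    Console.Write(output);
+}
+```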
diff --git a/mkdocs.yml b/mkdocs.yml index c2b69435..fb5aa2c4 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -26,4 +26,65 @@ nav: - BotSharp: HighLevelApps/bot-sharp.md - More: - Logger: More/log.md -theme: readthedocs \ No newline at end of file + - API Reference: + - index: ./xmldocs/index.md + - llama.abstractions.ihistorytransform: ./xmldocs/llama.abstractions.ihistorytransform.md + - llama.abstractions.illamaexecutor: ./xmldocs/llama.abstractions.illamaexecutor.md + - llama.abstractions.itextstreamtransform: ./xmldocs/llama.abstractions.itextstreamtransform.md + - llama.abstractions.itexttransform: ./xmldocs/llama.abstractions.itexttransform.md + - llama.chatsession: ./xmldocs/llama.chatsession.md + - llama.common.authorrole: ./xmldocs/llama.common.authorrole.md + - llama.common.chathistory: ./xmldocs/llama.common.chathistory.md + - llama.common.fixedsizequeue-1: ./xmldocs/llama.common.fixedsizequeue-1.md + - llama.common.illamalogger: ./xmldocs/llama.common.illamalogger.md + - llama.common.inferenceparams: ./xmldocs/llama.common.inferenceparams.md + - llama.common.llamadefaultlogger: ./xmldocs/llama.common.llamadefaultlogger.md + - llama.common.mirostatetype: ./xmldocs/llama.common.mirostatetype.md + - llama.common.modelparams: ./xmldocs/llama.common.modelparams.md + - llama.exceptions.runtimeerror: ./xmldocs/llama.exceptions.runtimeerror.md + - llama.extensions.dictionaryextension: ./xmldocs/llama.extensions.dictionaryextension.md + - llama.instructexecutor: ./xmldocs/llama.instructexecutor.md + - llama.interactiveexecutor: ./xmldocs/llama.interactiveexecutor.md + - llama.llamaembedder: ./xmldocs/llama.llamaembedder.md + - llama.llamamodel: ./xmldocs/llama.llamamodel.md + - llama.llamaquantizer: ./xmldocs/llama.llamaquantizer.md + - llama.llamatransforms: ./xmldocs/llama.llamatransforms.md + - llama.native.llamacontextparams: ./xmldocs/llama.native.llamacontextparams.md + - llama.native.llamaftype: ./xmldocs/llama.native.llamaftype.md + - llama.native.llamatokendata: ./xmldocs/llama.native.llamatokendata.md + - llama.native.llamatokendataarray: ./xmldocs/llama.native.llamatokendataarray.md + - llama.native.llamatokendataarraynative: ./xmldocs/llama.native.llamatokendataarraynative.md + - llama.native.nativeapi: ./xmldocs/llama.native.nativeapi.md + - llama.native.safellamacontexthandle: ./xmldocs/llama.native.safellamacontexthandle.md + - llama.native.safellamahandlebase: ./xmldocs/llama.native.safellamahandlebase.md + - llama.oldversion.chatcompletion: ./xmldocs/llama.oldversion.chatcompletion.md + - llama.oldversion.chatcompletionchoice: ./xmldocs/llama.oldversion.chatcompletionchoice.md + - llama.oldversion.chatcompletionchunk: ./xmldocs/llama.oldversion.chatcompletionchunk.md + - llama.oldversion.chatcompletionchunkchoice: ./xmldocs/llama.oldversion.chatcompletionchunkchoice.md + - llama.oldversion.chatcompletionchunkdelta: ./xmldocs/llama.oldversion.chatcompletionchunkdelta.md + - llama.oldversion.chatcompletionmessage: ./xmldocs/llama.oldversion.chatcompletionmessage.md + - llama.oldversion.chatmessagerecord: ./xmldocs/llama.oldversion.chatmessagerecord.md + - llama.oldversion.chatrole: ./xmldocs/llama.oldversion.chatrole.md + - llama.oldversion.chatsession-1: ./xmldocs/llama.oldversion.chatsession-1.md + - llama.oldversion.completion: ./xmldocs/llama.oldversion.completion.md + - llama.oldversion.completionchoice: ./xmldocs/llama.oldversion.completionchoice.md + - llama.oldversion.completionchunk: ./xmldocs/llama.oldversion.completionchunk.md + - 
llama.oldversion.completionlogprobs: ./xmldocs/llama.oldversion.completionlogprobs.md + - llama.oldversion.completionusage: ./xmldocs/llama.oldversion.completionusage.md + - llama.oldversion.embedding: ./xmldocs/llama.oldversion.embedding.md + - llama.oldversion.embeddingdata: ./xmldocs/llama.oldversion.embeddingdata.md + - llama.oldversion.embeddingusage: ./xmldocs/llama.oldversion.embeddingusage.md + - llama.oldversion.ichatmodel: ./xmldocs/llama.oldversion.ichatmodel.md + - llama.oldversion.llamaembedder: ./xmldocs/llama.oldversion.llamaembedder.md + - llama.oldversion.llamamodel: ./xmldocs/llama.oldversion.llamamodel.md + - llama.oldversion.llamaparams: ./xmldocs/llama.oldversion.llamaparams.md + - llama.resettablellamamodel: ./xmldocs/llama.resettablellamamodel.md + - llama.statefulexecutorbase: ./xmldocs/llama.statefulexecutorbase.md + - llama.statelessexecutor: ./xmldocs/llama.statelessexecutor.md + +theme: + name: material + +extra: + version: + provider: mike \ No newline at end of file diff --git a/site/404.html b/site/404.html new file mode 100644 index 00000000..1b5587ac --- /dev/null +++ b/site/404.html @@ -0,0 +1,1526 @@ + + + + + + + + + + + + + + + + + + LLamaSharp Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

404 - Not found

+ + + + + + + + + \ No newline at end of file diff --git a/site/Architecher/index.html b/site/Architecher/index.html new file mode 100644 index 00000000..83da42f3 --- /dev/null +++ b/site/Architecher/index.html @@ -0,0 +1,1629 @@ + + + + + + + + + + + + + + + + + + + + + + Architecher - LLamaSharp Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + +

Architecture

+

Architecture of the main functions

+

The figure below shows the core framework structure, which is separated into four levels.

+
    +
  • LLamaModel: The holder of a model, which directly interacts with the native library and provides some basic APIs such as tokenization and embedding. Currently it includes three classes: LLamaModel, LLamaEmbedder and LLamaQuantizer.
  • +
  • LLamaExecutors: Executors which define the way to run the LLama model. They provide text-to-text APIs to make the model easy to use. Currently we provide three kinds of executors: InteractiveExecutor, InstructExecutor and StatelessExecutor.
  • +
  • ChatSession: A wrapper of InteractiveExecutor and LLamaModel, which supports interactive tasks and saving/re-loading sessions. It also provides a flexible way to customize text processing via IHistoryTransform, ITextTransform and ITextStreamTransform.
  • +
  • High-level Applications: Applications that provide higher-level integration. For example, BotSharp provides integration for vector search, Chatbot UI and Web APIs. semantic-kernel provides various APIs for operations related to LLMs. If you've made an integration, please tell us and we'll add it to the doc!
  • +
+

structure_image

+ +

Since LLamaModel interacts with the native library, it's not recommended to use its methods directly unless you know what you are doing. The same applies to NativeApi, which is not included in the architecture figure above.

+

ChatSession is recommended when you want to build an application similar to ChatGPT, or any chat bot, because it works best with InteractiveExecutor. Though other executors can also be passed as a parameter to initialize a ChatSession, this is not encouraged if you are new to LLamaSharp and LLMs.

+

High-level applications, such as BotSharp, are supposed to be used when you want to concentrate on the parts not related to the LLM. For example, if you want to deploy a chat bot to help you remember your schedules, using BotSharp may be a good choice.

+

Note that the APIs of the high-level applications may not be stable yet. Please take that into account when using them.

+ + + + + + +
+
+ + +
+ +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/site/ChatSession/basic-usages/index.html b/site/ChatSession/basic-usages/index.html new file mode 100644 index 00000000..1b9a837e --- /dev/null +++ b/site/ChatSession/basic-usages/index.html @@ -0,0 +1,1653 @@ + + + + + + + + + + + + + + + + + + + + + + Basic Usages - LLamaSharp Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + +

Basic usages of ChatSession

+

ChatSession is a higher-level abstraction than the executors. In the context of a chat application like ChatGPT, a "chat session" refers to an interactive conversation or exchange of messages between the user and the chatbot. It represents a continuous flow of communication where the user enters input or asks questions, and the chatbot responds accordingly. A chat session typically starts when the user initiates a conversation with the chatbot and continues until the interaction comes to a natural end or is explicitly terminated by either the user or the system. During a chat session, the chatbot maintains the context of the conversation, remembers previous messages, and generates appropriate responses based on the user's inputs and the ongoing dialogue.

+

Initialize a session

+

Currently, the only parameter that is accepted is an ILLamaExecutor, because this is the only parameter that we're sure will exist in all future versions. Since it's a high-level abstraction, we're conservative about the API design. In the future, more kinds of constructors may be added.

+
InteractiveExecutor ex = new(new LLamaModel(new ModelParams(modelPath)));
+ChatSession session = new ChatSession(ex);
+
+

Chat with the bot

+

The Chat API accepts two kinds of input: ChatHistory and String. The String API is quite similar to that of the executors, while the ChatHistory API aims to provide more flexible usage. For example, suppose you had a chat with the bot in session A before you opened session B. Session B has no memory of what you said before, so you can feed the history of A to B.

+
string prompt = "What is C#?";
+
+foreach (var text in session.Chat(prompt, new InferenceParams() { Temperature = 0.6f, AntiPrompts = new List<string> { "User:" } })) // the inference params should be changed depending on your statement
+{
+    Console.Write(text);
+}
+
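To feed session A's history into session B as described above, pass a ChatHistory object to the same Chat API. Below is a minimal sketch; sessionA and sessionB are hypothetical names, so adapt them to your own code.

```cs
// Feed the history of an earlier session into a fresh one.
ChatHistory historyFromSessionA = sessionA.History;

foreach (var text in sessionB.Chat(historyFromSessionA, new InferenceParams() { Temperature = 0.6f, AntiPrompts = new List<string> { "User:" } }))
{
    Console.Write(text);
}
```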
+

Get the history

+

Currently, History is a property of ChatSession.

+
foreach(var rec in session.History.Messages)
+{
+    Console.WriteLine($"{rec.AuthorRole}: {rec.Content}");
+}
+
+ + + + + + +
+
+ + +
+ +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/site/ChatSession/save-load-session/index.html b/site/ChatSession/save-load-session/index.html new file mode 100644 index 00000000..dcbfefb5 --- /dev/null +++ b/site/ChatSession/save-load-session/index.html @@ -0,0 +1,1565 @@ + + + + + + + + + + + + + + + + + + + + + + Save/Load Session - LLamaSharp Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + +

Save/Load Chat Session

+

Generally, a chat session may need to be switched, which requires the ability to load and save sessions.

+

When building a chat bot app, it's NOT encouraged to initialize many chat sessions and keep them in memory waiting to be switched, because the memory consumption of both CPU and GPU is expensive. It's recommended to save the current session before switching to a new one, and to load the file when switching back.

+

The API is also quite simple: the files will be saved into a directory you specify. If the path does not exist, a new directory will be created.

+
string savePath = "<save dir>";
+session.SaveSession(savePath);
+
+session.LoadSession(savePath);
+
+ + + + + + +
+
+ + +
+ +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/site/ChatSession/transforms/index.html b/site/ChatSession/transforms/index.html new file mode 100644 index 00000000..60f5918e --- /dev/null +++ b/site/ChatSession/transforms/index.html @@ -0,0 +1,1845 @@ + + + + + + + + + + + + + + + + + + + + + + Transoforms - LLamaSharp Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + +

Transforms in Chat Session

+

There are three important elements in ChatSession: input, output and history. Besides, there are some conversions between them. Since how they are processed varies under different conditions, LLamaSharp hands this part of the power over to the users.

+

Currently, three kinds of processes can be customized, as introduced below.

+

Input transform

+

In general, the input of the chat API is a text (without streaming), so ChatSession processes it in a pipeline. If you want to use your own customized transform, define a transform that implements ITextTransform and add it to the pipeline of ChatSession.

+
public interface ITextTransform
+{
+    string Transform(string text);
+}
+
+
public class MyInputTransform1 : ITextTransform
+{
+    public string Transform(string text)
+    {
+        return $"Question: {text}\n";
+    }
+}
+
+public class MyInputTransform2 : ITextTransform
+{
+    public string Transform(string text)
+    {
+        return text + "Answer: ";
+    }
+}
+
+session.AddInputTransform(new MyInputTransform1()).AddInputTransform(new MyInputTransform2());
+
+

Output transform

+

Different from the input, the output of the chat API is a text stream. Therefore you need to process it word by word, instead of getting the full text at once.

+

Its interface takes an IEnumerable<string> as input, which is actually a lazily-yielded sequence.

+
public interface ITextStreamTransform
+{
+    IEnumerable<string> Transform(IEnumerable<string> tokens);
+    IAsyncEnumerable<string> TransformAsync(IAsyncEnumerable<string> tokens);
+}
+
+

When implementing it, you could throw a not-implemented exception in one of them if you only need to use the chat API synchronously or asynchronously.

+

Different from the input transform pipeline, the output transform only supports one transform.

+
session.WithOutputTransform(new MyOutputTransform());
+
+

Here's an example of how to implement the interface. In this example, the transform detects whether certain keywords appear in the response and removes them.

+
/// <summary>
+/// A text output transform that removes the keywords from the response.
+/// </summary>
+public class KeywordTextOutputStreamTransform : ITextStreamTransform
+{
+    HashSet<string> _keywords;
+    int _maxKeywordLength;
+    bool _removeAllMatchedTokens;
+
+    /// <summary>
+    /// 
+    /// </summary>
+    /// <param name="keywords">Keywords that you want to remove from the response.</param>
+    /// <param name="redundancyLength">The extra length when searching for the keyword. For example, if your only keyword is "highlight", 
+    /// maybe the token you get is "\r\nhighligt". In this condition, if redundancyLength=0, the token cannot be successfully matched because the length of "\r\nhighligt" (10)
+    /// has already exceeded the maximum length of the keywords (8). On the contrary, setting redundancyLengyh >= 2 leads to successful match.
+    /// The larger the redundancyLength is, the lower the processing speed. But as an experience, it won't introduce too much performance impact when redundancyLength <= 5 </param>
+    /// <param name="removeAllMatchedTokens">If set to true, when getting a matched keyword, all the related tokens will be removed. Otherwise only the part of keyword will be removed.</param>
+    public KeywordTextOutputStreamTransform(IEnumerable<string> keywords, int redundancyLength = 3, bool removeAllMatchedTokens = false)
+    {
+        _keywords = new(keywords);
+        _maxKeywordLength = keywords.Select(x => x.Length).Max() + redundancyLength;
+        _removeAllMatchedTokens = removeAllMatchedTokens;
+    }
+    /// <inheritdoc />
+    public IEnumerable<string> Transform(IEnumerable<string> tokens)
+    {
+        var window = new Queue<string>();
+
+        foreach (var s in tokens)
+        {
+            window.Enqueue(s);
+            var current = string.Join("", window);
+            if (_keywords.Any(x => current.Contains(x)))
+            {
+                var matchedKeyword = _keywords.First(x => current.Contains(x));
+                int total = window.Count;
+                for (int i = 0; i < total; i++)
+                {
+                    window.Dequeue();
+                }
+                if (!_removeAllMatchedTokens)
+                {
+                    yield return current.Replace(matchedKeyword, "");
+                }
+            }
+            if (current.Length >= _maxKeywordLength)
+            {
+                if (_keywords.Any(x => current.Contains(x)))
+                {
+                    var matchedKeyword = _keywords.First(x => current.Contains(x));
+                    int total = window.Count;
+                    for (int i = 0; i < total; i++)
+                    {
+                        window.Dequeue();
+                    }
+                    if (!_removeAllMatchedTokens)
+                    {
+                        yield return current.Replace(matchedKeyword, "");
+                    }
+                }
+                else
+                {
+                    int total = window.Count;
+                    for (int i = 0; i < total; i++)
+                    {
+                        yield return window.Dequeue();
+                    }
+                }
+            }
+        }
+        int totalCount = window.Count;
+        for (int i = 0; i < totalCount; i++)
+        {
+            yield return window.Dequeue();
+        }
+    }
+    /// <inheritdoc />
+    public async IAsyncEnumerable<string> TransformAsync(IAsyncEnumerable<string> tokens)
+    {
+        throw new NotImplementedException(); // This is implemented in `LLamaTransforms` but we ignore it here.
+    }
+}
+
+

History transform

+

The chat history can be converted to or from a text, which is exactly what its interface expresses.

+
public interface IHistoryTransform
+{
+    string HistoryToText(ChatHistory history);
+    ChatHistory TextToHistory(AuthorRole role, string text);
+}
+
+

Similar to the output transform, the history transform is added in the following way:

+
session.WithHistoryTransform(new MyHistoryTransform());
+
+

The implementation is quite flexible, depending on what you want the history messages to look like. Here's an example, which is the default history transform in LLamaSharp.

+
/// <summary>
+/// The default history transform.
+/// Uses plain text with the following format:
+/// [Author]: [Message]
+/// </summary>
+public class DefaultHistoryTransform : IHistoryTransform
+{
+    private readonly string defaultUserName = "User";
+    private readonly string defaultAssistantName = "Assistant";
+    private readonly string defaultSystemName = "System";
+    private readonly string defaultUnknownName = "??";
+
+    string _userName;
+    string _assistantName;
+    string _systemName;
+    string _unknownName;
+    bool _isInstructMode;
+    public DefaultHistoryTransform(string? userName = null, string? assistantName = null, 
+        string? systemName = null, string? unknownName = null, bool isInstructMode = false)
+    {
+        _userName = userName ?? defaultUserName;
+        _assistantName = assistantName ?? defaultAssistantName;
+        _systemName = systemName ?? defaultSystemName;
+        _unknownName = unknownName ?? defaultUnknownName;
+        _isInstructMode = isInstructMode;
+    }
+
+    public virtual string HistoryToText(ChatHistory history)
+    {
+        StringBuilder sb = new();
+        foreach (var message in history.Messages)
+        {
+            if (message.AuthorRole == AuthorRole.User)
+            {
+                sb.AppendLine($"{_userName}: {message.Content}");
+            }
+            else if (message.AuthorRole == AuthorRole.System)
+            {
+                sb.AppendLine($"{_systemName}: {message.Content}");
+            }
+            else if (message.AuthorRole == AuthorRole.Unknown)
+            {
+                sb.AppendLine($"{_unknownName}: {message.Content}");
+            }
+            else if (message.AuthorRole == AuthorRole.Assistant)
+            {
+                sb.AppendLine($"{_assistantName}: {message.Content}");
+            }
+        }
+        return sb.ToString();
+    }
+
+    public virtual ChatHistory TextToHistory(AuthorRole role, string text)
+    {
+        ChatHistory history = new ChatHistory();
+        history.AddMessage(role, TrimNamesFromText(text, role));
+        return history;
+    }
+
+    public virtual string TrimNamesFromText(string text, AuthorRole role)
+    {
+        if (role == AuthorRole.User && text.StartsWith($"{_userName}:"))
+        {
+            text = text.Substring($"{_userName}:".Length).TrimStart();
+        }
+        else if (role == AuthorRole.Assistant && text.EndsWith($"{_assistantName}:"))
+        {
+            text = text.Substring(0, text.Length - $"{_assistantName}:".Length).TrimEnd();
+        }
+        if (_isInstructMode && role == AuthorRole.Assistant && text.EndsWith("\n> "))
+        {
+            text = text.Substring(0, text.Length - "\n> ".Length).TrimEnd();
+        }
+        return text;
+    }
+}
+
+ + + + + + +
+
+ + +
+ +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/site/ContributingGuide/index.html b/site/ContributingGuide/index.html new file mode 100644 index 00000000..33169a5b --- /dev/null +++ b/site/ContributingGuide/index.html @@ -0,0 +1,1711 @@ + + + + + + + + + + + + + + + + + + + + + + Contributing Guide - LLamaSharp Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+ +
+ + + +
+
+ + + + +

LLamaSharp Contributing Guide

+

Hi, welcome to develop LLamaSharp with us together! We are always open to every contributor and any form of contribution! If you want to actively maintain this library together with us, please contact us to get write access after some PRs. (Email: AsakusaRinne@gmail.com)

+

In this page, we'd like to introduce how to make contributions here easily. 😊

+

Compile the native library from source

+

Firstly, please clone the llama.cpp repository and follow the instructions in the llama.cpp readme to configure your local environment.

+

If you want to support cuBLAS in the compilation, please make sure that you've installed CUDA.

+

When building from source, please add -DBUILD_SHARED_LIBS=ON to the cmake command. For example, when building with cuBLAS but without OpenBLAS, use the following command:

+
cmake .. -DLLAMA_CUBLAS=ON -DBUILD_SHARED_LIBS=ON
+
+

After running cmake --build . --config Release, you will find the llama.dll, llama.so or llama.dylib in your build directory. After pasting it into LLamaSharp/LLama/runtimes and renaming it to libllama.dll, libllama.so or libllama.dylib, you can use it as the native library in LLamaSharp.

+

Add a new feature to LLamaSharp

+

After the framework refactoring in v0.4.0, LLamaSharp will try to maintain backward compatibility. However, breaking changes are acceptable in the following cases:

+
    +
  1. Due to breaking changes in llama.cpp, making a breaking change in LLamaSharp will help to maintain a good abstraction and user-friendly APIs.
  2. +
  3. A very important feature cannot be implemented unless some parts are refactored.
  4. +
  5. After some discussion, an agreement was reached that making the breaking change is reasonable.
  6. +
+

If a new feature can be added without introducing any breaking change, please open a PR directly rather than opening an issue first. We will never refuse a PR but will help to improve it, unless it's malicious.

+

When adding the feature, please take care of the namespace and the naming convention. For example, if you are adding an integration for WPF, please put the code under the namespace LLama.WPF or LLama.Integration.WPF instead of the root namespace. The naming of LLamaSharp follows the Pascal naming convention, but in parts that are invisible to users, you can do whatever you want.

+

Find the problem and fix the BUG

+

If the issue is related to the LLM's internal behavior, such as endlessly generating a response, the best way to find the problem is to run a comparison test between llama.cpp and LLamaSharp.

+

You could use exactly the same prompt, model and parameters to run the inference in llama.cpp and LLamaSharp respectively, to see if it's really a problem caused by the implementation in LLamaSharp.

+

If the experiment shows that it works well in llama.cpp but not in LLamaSharp, the search for the problem can begin. While the cause could be anything, the best way, I think, is to add log printing to the llama.cpp code and use the recompiled library in LLamaSharp. That way, when running LLamaSharp, you can see what happens inside the native library.

+

After finding the cause, a painful but happy process comes. When working on the BUG fix, there's only one rule to follow: keep the examples working well. If a modification fixes the BUG but impacts other functions, it is not a good fix.

+

During the BUG fix process, please don't hesitate to discuss with us when you get stuck on something.

+

Add integrations

+

All kinds of integrations are welcome here! Currently the following integrations are under work or on our schedule:

+
    +
  1. BotSharp
  2. +
  3. semantic-kernel
  4. +
  5. Unity
  6. +
+

Besides, for other integrations, such as ASP.NET Core, SQL, Blazor and so on, we'd appreciate your help. If your time is limited, providing an example also means a lot!

+

Add examples

+

There are mainly two ways to add an example:

+
    +
  1. Add the example to LLama.Examples of the repository.
  2. +
  3. Put the example in another repository and add a link to it in the readme or docs of LLamaSharp.
  4. +
+

Add documents

+

LLamaSharp uses mkdocs to build the documentation; please follow the mkdocs tutorial to add or modify documents in LLamaSharp.

+ + + + + + +
+
+ + +
+ +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/site/GetStarted/index.html b/site/GetStarted/index.html new file mode 100644 index 00000000..bbe18825 --- /dev/null +++ b/site/GetStarted/index.html @@ -0,0 +1,1748 @@ + + + + + + + + + + + + + + + + + + + + + + Get Started - LLamaSharp Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + +

Get Started

+

Install packages

+

Firstly, search for LLamaSharp in the NuGet package manager and install it.

+
PM> Install-Package LLamaSharp
+
+

Then, search and install one of the following backends:

+
LLamaSharp.Backend.Cpu
+LLamaSharp.Backend.Cuda11
+LLamaSharp.Backend.Cuda12
+
+

Here's the mapping between the backend packages, LLamaSharp versions and the verified model samples provided by LLamaSharp. If you're not sure which model is available for a version, please try our sample model.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
| LLamaSharp.Backend | LLamaSharp | Verified Model Resources | llama.cpp commit id |
|--------------------|------------|--------------------------|---------------------|
| - | v0.2.0 | This version is not recommended to use. | - |
| - | v0.2.1 | WizardLM, Vicuna (filenames with "old") | - |
| v0.2.2 | v0.2.2, v0.2.3 | WizardLM, Vicuna (filenames without "old") | 63d2046 |
| v0.3.0 | v0.3.0 | LLamaSharpSamples v0.3.0, WizardLM | 7e4ea5b |
+

Download a model

+

One of the following models could be okay:

+ +

Note that because llama.cpp is under rapid development and often introduces breaking changes, some model weights on Hugging Face that work with one version may be invalid with another. If it's your first time configuring LLamaSharp, we suggest using the verified model weights in the table above.

+

Run the program

+

Please create a console program with a dotnet runtime >= netstandard 2.0 (>= net6.0 is recommended). Then, paste the following code into Program.cs:

+
using LLama.Common;
+using LLama;
+
+string modelPath = "<Your model path>"; // change it to your own model path
+var prompt = "Transcript of a dialog, where the User interacts with an Assistant named Bob. Bob is helpful, kind, honest, good at writing, and never fails to answer the User's requests immediately and with precision.\r\n\r\nUser: Hello, Bob.\r\nBob: Hello. How may I help you today?\r\nUser: Please tell me the largest city in Europe.\r\nBob: Sure. The largest city in Europe is Moscow, the capital of Russia.\r\nUser:"; // use the "chat-with-bob" prompt here.
+
+// Initialize a chat session
+var ex = new InteractiveExecutor(new LLamaModel(new ModelParams(modelPath, contextSize: 1024, seed: 1337, gpuLayerCount: 5)));
+ChatSession session = new ChatSession(ex);
+
+// show the prompt
+Console.WriteLine();
+Console.Write(prompt);
+
+// run the inference in a loop to chat with LLM
+while (true)
+{
+    foreach (var text in session.Chat(prompt, new InferenceParams() { Temperature = 0.6f, AntiPrompts = new List<string> { "User:" } }))
+    {
+        Console.Write(text);
+    }
+
+    Console.ForegroundColor = ConsoleColor.Green;
+    prompt = Console.ReadLine();
+    Console.ForegroundColor = ConsoleColor.White;
+}
+
+

After starting it, you'll see the following output.

+
Please input your model path: D:\development\llama\weights\wizard-vicuna-13B.ggmlv3.q4_1.bin
+llama.cpp: loading model from D:\development\llama\weights\wizard-vicuna-13B.ggmlv3.q4_1.bin
+llama_model_load_internal: format     = ggjt v3 (latest)
+llama_model_load_internal: n_vocab    = 32000
+llama_model_load_internal: n_ctx      = 1024
+llama_model_load_internal: n_embd     = 5120
+llama_model_load_internal: n_mult     = 256
+llama_model_load_internal: n_head     = 40
+llama_model_load_internal: n_layer    = 40
+llama_model_load_internal: n_rot      = 128
+llama_model_load_internal: ftype      = 3 (mostly Q4_1)
+llama_model_load_internal: n_ff       = 13824
+llama_model_load_internal: n_parts    = 1
+llama_model_load_internal: model size = 13B
+llama_model_load_internal: ggml ctx size = 7759.48 MB
+llama_model_load_internal: mem required  = 9807.48 MB (+ 1608.00 MB per state)
+....................................................................................................
+llama_init_from_file: kv self size  =  800.00 MB
+
+Transcript of a dialog, where the User interacts with an Assistant named Bob. Bob is helpful, kind, honest, good at writing, and never fails to answer the User's requests immediately and with precision.
+
+User: Hello, Bob.
+Bob: Hello. How may I help you today?
+User: Please tell me the largest city in Europe.
+Bob: Sure. The largest city in Europe is Moscow, the capital of Russia.
+User:
+
+

Now, enjoy chatting with LLM!

+ + + + + + +
+
+ + +
+ +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/site/HighLevelApps/bot-sharp/index.html b/site/HighLevelApps/bot-sharp/index.html new file mode 100644 index 00000000..726d08de --- /dev/null +++ b/site/HighLevelApps/bot-sharp/index.html @@ -0,0 +1,1558 @@ + + + + + + + + + + + + + + + + + + + + + + BotSharp - LLamaSharp Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + +

The Usage of BotSharp Integration

+

This document is still a work in progress; please check back later. Thank you for your support! :)

+ + + + + + +
+
+ + +
+ +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/site/LLamaExecutors/differences/index.html b/site/LLamaExecutors/differences/index.html new file mode 100644 index 00000000..ba524e9b --- /dev/null +++ b/site/LLamaExecutors/differences/index.html @@ -0,0 +1,1667 @@ + + + + + + + + + + + + + + + + + + + + + + Differences of Executors - LLamaSharp Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + +

Differences of Executors

+ +

Differences between the executors

+

There are currently three kinds of executors provided: InteractiveExecutor, InstructExecutor and StatelessExecutor.

+

In a word, InteractiveExecutor is suitable for continuously getting answers to your questions from the LLM. InstructExecutor lets the LLM execute your instructions, such as "continue writing". StatelessExecutor is best for one-time jobs, because a previous inference has no impact on the current one.

+

Interactive mode & Instruct mode

+

Both take "completing the prompt" as the goal of generating the response. For example, if you input Long long ago, there was a fox who wanted to make friends with humans. One day, then the LLM will continue writing the story.

+

Under interactive mode, you play the role of the user and the LLM serves as the assistant. It will then help you with your questions or requests.

+

Under instruct mode, you give the LLM some instructions and it follows them, as sketched below.

+
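For instance, here's a minimal sketch of running instruct mode; the model path is a placeholder, and we assume InstructExecutor's default instruction prefix/suffix.

```cs
// Instruct mode: the executor wraps your input with the instruction template.
var executor = new InstructExecutor(new LLamaModel(new ModelParams("<modelPath>")));
string instruction = "Continue writing the following story: Long long ago, there was a fox...";

foreach (var text in executor.Infer(instruction, new InferenceParams() { Temperature = 0.8f }))
{
    Console.Write(text);
}
```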

Though their behaviors sound similar, the results can differ a lot depending on your prompt. For example, "chat-with-bob" performs well under interactive mode and alpaca does well under instruct mode.

+
// chat-with-bob
+
+Transcript of a dialog, where the User interacts with an Assistant named Bob. Bob is helpful, kind, honest, good at writing, and never fails to answer the User's requests immediately and with precision.
+
+User: Hello, Bob.
+Bob: Hello. How may I help you today?
+User: Please tell me the largest city in Europe.
+Bob: Sure. The largest city in Europe is Moscow, the capital of Russia.
+User:
+
+
// alpaca
+
+Below is an instruction that describes a task. Write a response that appropriately completes the request.
+
+

Therefore, please modify the prompt correspondingly when switching from one mode to the other.

+

Stateful mode and stateless mode

+

Despite the differences between interactive mode and instruct mode, both are stateful modes. That is, your previous questions/instructions will impact the current response from the LLM. On the contrary, the stateless executor has no such "memory". No matter how many times you talk to it, it will only concentrate on what you say this time.

+

Since the stateless executor has no memory of previous conversations, you need to input your question together with the whole prompt to get a better answer.

+

For example, if you feed Q: Who is Trump? A: to the stateless executor, it may give the following answer with the anti-prompt Q:.

+
Donald J. Trump, born June 14, 1946, is an American businessman, television personality, politician and the 45th President of the United States (2017-2021). # Anexo:Torneo de Hamburgo 2022 (individual masculino)
+
+## Presentación previa
+
+* Defensor del título:  Daniil Medvédev
+
+

It seems that things went well at first. However, after answering the question itself, the LLM began to talk about other things until the answer reached the token count limit. The reason for this strange behavior is that the anti-prompt cannot be matched. With this input, the LLM cannot decide whether to append the string "A: " at the end of the response.

+

As an improvement, let's take the following text as the input:

+
Q: What is the capital of the USA? A: Washington. Q: What is the sum of 1 and 2? A: 3. Q: Who is Trump? A: 
+
+

Then, I got the following answer with the anti-prompt Q:.

+
45th president of the United States.
+
+

This time, by repeating the same pattern of Q: xxx? A: xxx., the LLM outputs the anti-prompt we want, which helps it decide where to stop the generation.
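To make this concrete, here's a minimal sketch of running the stateless executor with an anti-prompt; the model path is a placeholder, and we assume StatelessExecutor is constructed from a LLamaModel like the other executors.

```cs
var executor = new StatelessExecutor(new LLamaModel(new ModelParams("<modelPath>")));
string prompt = "Q: What is the capital of the USA? A: Washington. Q: Who is Trump? A: ";

// The anti-prompt "Q:" tells the executor where the answer ends.
foreach (var text in executor.Infer(prompt, new InferenceParams() { AntiPrompts = new List<string> { "Q:" } }))
{
    Console.Write(text);
}
```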

+ + + + + + +
+
+ + +
+ +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/site/LLamaExecutors/parameters/index.html b/site/LLamaExecutors/parameters/index.html new file mode 100644 index 00000000..d7e5ad41 --- /dev/null +++ b/site/LLamaExecutors/parameters/index.html @@ -0,0 +1,1689 @@ + + + + + + + + + + + + + + + + + + + + + + Inference Parameters - LLamaSharp Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + +

Inference Parameters

+

Different from LLamaModel, when using an executor, InferenceParams is passed to the Infer method instead of the constructor. This is because executors only define the way to run the model, so in each run you can change the settings for that particular inference.

+
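For example, the sampling settings can differ per call because the parameters travel with each Infer invocation. A minimal sketch (the model path and prompts are placeholders):

```cs
var executor = new InteractiveExecutor(new LLamaModel(new ModelParams("<modelPath>")));

// More random sampling for creative tasks...
var creative = new InferenceParams() { Temperature = 1.0f, TopP = 0.95f };
// ...and more deterministic sampling for factual ones, in the very next call.
var precise = new InferenceParams() { Temperature = 0.2f, RepeatPenalty = 1.1f };

foreach (var text in executor.Infer("Write a haiku about the sea.", creative)) Console.Write(text);
foreach (var text in executor.Infer("List three prime numbers.", precise)) Console.Write(text);
```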

InferenceParams

+

Namespace: LLama.Common

+
public class InferenceParams
+
+

Inheritance ObjectInferenceParams

+

Properties

+

TokensKeep

+

number of tokens to keep from initial prompt

+
public int TokensKeep { get; set; }
+
+

Property Value

+

Int32

+

MaxTokens

+

how many new tokens to predict (n_predict); set to -1 to generate responses infinitely until completion.

+
public int MaxTokens { get; set; }
+
+

Property Value

+

Int32

+

LogitBias

+

logit bias for specific tokens

+
public Dictionary<int, float> LogitBias { get; set; }
+
+

Property Value

+

Dictionary<Int32, Single>

+

AntiPrompts

+

Sequences where the model will stop generating further tokens.

+
public IEnumerable<string> AntiPrompts { get; set; }
+
+

Property Value

+

IEnumerable<String>

+

PathSession

+

path to file for saving/loading model eval state

+
public string PathSession { get; set; }
+
+

Property Value

+

String

+

InputSuffix

+

string to suffix user inputs with

+
public string InputSuffix { get; set; }
+
+

Property Value

+

String

+

InputPrefix

+

string to prefix user inputs with

+
public string InputPrefix { get; set; }
+
+

Property Value

+

String

+

TopK

+

0 or lower to use vocab size

+
public int TopK { get; set; }
+
+

Property Value

+

Int32

+

TopP

+

1.0 = disabled

+
public float TopP { get; set; }
+
+

Property Value

+

Single

+

TfsZ

+

1.0 = disabled

+
public float TfsZ { get; set; }
+
+

Property Value

+

Single

+

TypicalP

+

1.0 = disabled

+
public float TypicalP { get; set; }
+
+

Property Value

+

Single

+

Temperature

+

1.0 = disabled

+
public float Temperature { get; set; }
+
+

Property Value

+

Single

+

RepeatPenalty

+

1.0 = disabled

+
public float RepeatPenalty { get; set; }
+
+

Property Value

+

Single

+

RepeatLastTokensCount

+

last n tokens to penalize (0 = disable penalty, -1 = context size) (repeat_last_n)

+
public int RepeatLastTokensCount { get; set; }
+
+

Property Value

+

Int32

+

FrequencyPenalty

+

frequency penalty coefficient + 0.0 = disabled

+
public float FrequencyPenalty { get; set; }
+
+

Property Value

+

Single

+

PresencePenalty

+

presence penalty coefficient + 0.0 = disabled

+
public float PresencePenalty { get; set; }
+
+

Property Value

+

Single

+

Mirostat

+

Mirostat uses tokens instead of words. + algorithm described in the paper https://arxiv.org/abs/2007.14966. + 0 = disabled, 1 = mirostat, 2 = mirostat 2.0

+
public MiroStateType Mirostat { get; set; }
+
+

Property Value

+

MiroStateType

+

MirostatTau

+

target entropy

+
public float MirostatTau { get; set; }
+
+

Property Value

+

Single

+

MirostatEta

+

learning rate

+
public float MirostatEta { get; set; }
+
+

Property Value

+

Single

+

PenalizeNL

+

consider newlines as a repeatable token (penalize_nl)

+
public bool PenalizeNL { get; set; }
+
+

Property Value

+

Boolean

+ + + + + + +
+
+ + +
+ +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/site/LLamaExecutors/save-load-state/index.html b/site/LLamaExecutors/save-load-state/index.html new file mode 100644 index 00000000..39a256d9 --- /dev/null +++ b/site/LLamaExecutors/save-load-state/index.html @@ -0,0 +1,1576 @@ + + + + + + + + + + + + + + + + + + + + + + Save/Load State - LLamaSharp Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + +

Save/Load State of Executor

+

Similar to LLamaModel, an executor also has its state, which can be saved and loaded. Note that in most cases, the state of the executor and the state of the model should be loaded and saved at the same time.

+

To decouple the model and the executor, we provide APIs to save/load state for the model and the executor respectively. However, during inference, the processed information leaves a footprint in LLamaModel's native context. Therefore, if you just load a state from another executor but keep the model unmodified, strange things may happen. The same applies to loading only the model state.

+

Is there a case that requires loading only one of them? The answer is YES. For example, after resetting the model state, if you don't want the inference to start from the new position, leaving the executor unmodified is okay. Anyway, this flexible usage may cause some unexpected behaviors, so please make sure you know what you're doing before using it this way.

+

In a future version, we'll open access to some variables inside the executor to support more flexible usage.

+

The APIs to load/save the state of the executors are similar to those of LLamaModel. However, note that StatelessExecutor doesn't have such APIs because it's stateless itself. Besides, the output of GetStateData is an object of type ExecutorBaseState.

+
LLamaModel model = new LLamaModel(new ModelParams("<modelPath>"));
+InteractiveExecutor executor = new InteractiveExecutor(model);
+// do some things...
+executor.SaveState("executor.st");
+var stateData = model.GetStateData();
+
+InteractiveExecutor executor2 = new InteractiveExecutor(model);
+executor2.LoadState(stateData);
+// do some things...
+
+InteractiveExecutor executor3 = new InteractiveExecutor(model);
+executor3.LoadState("executor.st");
+// do some things...
+
+ + + + + + +
+
+ + +
+ +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/site/LLamaExecutors/text-to-text-apis/index.html b/site/LLamaExecutors/text-to-text-apis/index.html new file mode 100644 index 00000000..e6682adc --- /dev/null +++ b/site/LLamaExecutors/text-to-text-apis/index.html @@ -0,0 +1,1569 @@ + + + + + + + + + + + + + + + + + + + + + + Text-to-Text APIs - LLamaSharp Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + +

Text-to-Text APIs of the executors

+

All the executors implement the interface ILLamaExecutor, which provides two APIs to execute text-to-text tasks.

+
public interface ILLamaExecutor
+{
+    public LLamaModel Model { get; }
+
+    IEnumerable<string> Infer(string text, InferenceParams? inferenceParams = null, CancellationToken token = default);
+
+    IAsyncEnumerable<string> InferAsync(string text, InferenceParams? inferenceParams = null, CancellationToken token = default);
+}
+
+

Just pass the text to the executor with the inference parameters. For the inference parameters, please refer to the executor inference parameters doc.

+

Both APIs return a lazily-yielded enumerable. Therefore, when receiving the output, you can directly use foreach to act on each word as soon as you get it, instead of waiting for the whole process to complete.
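Here's a minimal sketch of consuming the asynchronous API; executor stands for any ILLamaExecutor implementation and the prompt is a placeholder.

```cs
// InferAsync yields tokens as they are generated; await foreach consumes them one by one.
await foreach (var text in executor.InferAsync("What is C#?", new InferenceParams() { Temperature = 0.6f }))
{
    Console.Write(text);
}
```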

+ + + + + + +
+
+ + +
+ +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/site/LLamaModel/embeddings/index.html b/site/LLamaModel/embeddings/index.html new file mode 100644 index 00000000..33dfb0db --- /dev/null +++ b/site/LLamaModel/embeddings/index.html @@ -0,0 +1,1564 @@ + + + + + + + + + + + + + + + + + + + + + + Get Embeddings - LLamaSharp Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + +

Get Embeddings

+

Getting the embeddings of a text from an LLM is sometimes useful, for example, to train other MLP models.

+

To get the embeddings, please initialize a LLamaEmbedder and then call GetEmbeddings.

+
var embedder = new LLamaEmbedder(new ModelParams("<modelPath>"));
+string text = "hello, LLM.";
+float[] embeddings = embedder.GetEmbeddings(text);
+
+

The output is a float array. Note that the length of the array depends on the model you load. If you want a smaller embedding size, please consider using a different model.
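For instance, embeddings can be compared with cosine similarity to measure how close two texts are. A minimal sketch (the helper below is our own, not part of LLamaSharp):

```cs
// Hypothetical helper: cosine similarity between two embedding vectors of equal length.
static float CosineSimilarity(float[] a, float[] b)
{
    float dot = 0f, normA = 0f, normB = 0f;
    for (int i = 0; i < a.Length; i++)
    {
        dot += a[i] * b[i];
        normA += a[i] * a[i];
        normB += b[i] * b[i];
    }
    return dot / (MathF.Sqrt(normA) * MathF.Sqrt(normB));
}

float[] e1 = embedder.GetEmbeddings("hello, LLM.");
float[] e2 = embedder.GetEmbeddings("hi, large language model.");
Console.WriteLine($"similarity: {CosineSimilarity(e1, e2)}");
```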

+ + + + + + +
+
+ + +
+ +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/site/LLamaModel/parameters/index.html b/site/LLamaModel/parameters/index.html new file mode 100644 index 00000000..d67c803b --- /dev/null +++ b/site/LLamaModel/parameters/index.html @@ -0,0 +1,1668 @@ + + + + + + + + + + + + + + + + + + + + + + Model Parameters - LLamaSharp Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + +

LLamaModel Parameters

+

When initializing a LLamaModel object, there are three parameters: ModelParams Params, string encoding = "UTF-8", and ILLamaLogger? logger = null.

+

The usage of logger will be further introduced in the logger doc. The encoding is the encoding you want to use when dealing with text via this model.

+

Most important of all is the ModelParams, which is defined as below. We'll explain the parameters step by step in this document.

+
public class ModelParams
+{
+    public int ContextSize { get; set; } = 512;
+    public int GpuLayerCount { get; set; } = 20;
+    public int Seed { get; set; } = 1686349486;
+    public bool UseFp16Memory { get; set; } = true;
+    public bool UseMemorymap { get; set; } = true;
+    public bool UseMemoryLock { get; set; } = false;
+    public bool Perplexity { get; set; } = false;
+    public string ModelPath { get; set; }
+    public string LoraAdapter { get; set; } = string.Empty;
+    public string LoraBase { get; set; } = string.Empty;
+    public int Threads { get; set; } = Math.Max(Environment.ProcessorCount / 2, 1);
+    public int BatchSize { get; set; } = 512;
+    public bool ConvertEosToNewLine { get; set; } = false;
+}
+
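For reference, here's a minimal sketch of constructing the parameters and the model; the path is a placeholder, and the named arguments follow the ModelParams constructor used in the Get Started guide.

```cs
// Build the parameters, then pass them to LLamaModel together with the text encoding.
var modelParams = new ModelParams("<modelPath>", contextSize: 1024, gpuLayerCount: 20);
var model = new LLamaModel(modelParams, encoding: "UTF-8");
```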
+

ModelParams

+

Namespace: LLama.Common

+
public class ModelParams
+
+

Inheritance ObjectModelParams

+

Properties

+

ContextSize

+

Model context size (n_ctx)

+
public int ContextSize { get; set; }
+
+

Property Value

+

Int32

+

GpuLayerCount

+

Number of layers to run in VRAM / GPU memory (n_gpu_layers)

+
public int GpuLayerCount { get; set; }
+
+

Property Value

+

Int32

+

Seed

+

Seed for the random number generator (seed)

+
public int Seed { get; set; }
+
+

Property Value

+

Int32

+

UseFp16Memory

+

Use f16 instead of f32 for memory kv (memory_f16)

+
public bool UseFp16Memory { get; set; }
+
+

Property Value

+

Boolean

+

UseMemorymap

+

Use mmap for faster loads (use_mmap)

+
public bool UseMemorymap { get; set; }
+
+

Property Value

+

Boolean

+

UseMemoryLock

+

Use mlock to keep model in memory (use_mlock)

+
public bool UseMemoryLock { get; set; }
+
+

Property Value

+

Boolean

+

Perplexity

+

Compute perplexity over the prompt (perplexity)

+
public bool Perplexity { get; set; }
+
+

Property Value

+

Boolean

+

ModelPath

+

Model path (model)

+
public string ModelPath { get; set; }
+
+

Property Value

+

String

+

LoraAdapter

+

lora adapter path (lora_adapter)

+
public string LoraAdapter { get; set; }
+
+

Property Value

+

String

+

LoraBase

+

base model path for the lora adapter (lora_base)

+
public string LoraBase { get; set; }
+
+

Property Value

+

String

+

Threads

+

Number of threads (-1 = autodetect) (n_threads)

+
public int Threads { get; set; }
+
+

Property Value

+

Int32

+

BatchSize

+

batch size for prompt processing (must be >=32 to use BLAS) (n_batch)

+
public int BatchSize { get; set; }
+
+

Property Value

+

Int32

+

ConvertEosToNewLine

+

Whether to convert eos to newline during the inference.

+
public bool ConvertEosToNewLine { get; set; }
+
+

Property Value

+

Boolean

+

EmbeddingMode

+

Whether to use embedding mode (embedding). Note that if this is set to true, the LLamaModel won't produce a text response anymore.

+
public bool EmbeddingMode { get; set; }
+
+

Property Value

+

Boolean

+ + + + + + +
+
+ + +
+ +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/site/LLamaModel/quantization/index.html b/site/LLamaModel/quantization/index.html new file mode 100644 index 00000000..06926bc1 --- /dev/null +++ b/site/LLamaModel/quantization/index.html @@ -0,0 +1,1574 @@ + + + + + + + + + + + + + + + + + + + + + + Quantization - LLamaSharp Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + +

Quantization

+

Quantization significantly accelerates model inference. Since there's little accuracy (performance) reduction when quantizing the model, don't hesitate to quantize it!

+

To quantize the model, please call Quantize from LLamaQuantizer, which is a static method.

+
string srcPath = "<model.bin>";
+string dstPath = "<model_q4_0.bin>";
+LLamaQuantizer.Quantize(srcPath, dstPath, "q4_0");
+// The following overload is also okay.
+// LLamaQuantizer.Quantize(srcPath, dstPath, LLamaFtype.LLAMA_FTYPE_MOSTLY_Q4_0);
+
+

After calling it, a quantized model file will be saved.

+

There are currently 5 types of quantization supported:

+
    +
  • q4_0
  • +
  • q4_1
  • +
  • q5_0
  • +
  • q5_1
  • +
  • q8_0
  • +
+ + + + + + +
+
+ + +
+ +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/site/LLamaModel/save-load-state/index.html b/site/LLamaModel/save-load-state/index.html new file mode 100644 index 00000000..86be6384 --- /dev/null +++ b/site/LLamaModel/save-load-state/index.html @@ -0,0 +1,1572 @@ + + + + + + + + + + + + + + + + + + + + + + Save/Load State - LLamaSharp Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + +

Save/Load State

+

There are two ways to load state: loading from a path and loading from a byte array. Correspondingly, state data can be extracted as a byte array or saved to a file.

+
LLamaModel model = new LLamaModel(new ModelParams("<modelPath>"));
+// do some things...
+model.SaveState("model.st");
+var stateData = model.GetStateData();
+model.Dispose();
+
+LLamaModel model2 = new LLamaModel(new ModelParams("<modelPath>"));
+model2.LoadState(stateData);
+// do some things...
+
+LLamaModel model3 = new LLamaModel(new ModelParams("<modelPath>"));
+model3.LoadState("model.st");
+// do some things...
+
+ + + + + + +
+
+ + +
+ +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/site/LLamaModel/tokenization/index.html b/site/LLamaModel/tokenization/index.html new file mode 100644 index 00000000..46bf9055 --- /dev/null +++ b/site/LLamaModel/tokenization/index.html @@ -0,0 +1,1631 @@ + + + + + + + + + + + + + + + + + + + + + + Tokenization - LLamaSharp Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + +

Tokenization/Detokenization

+

A pair of APIs for converting between text and tokens.

+

Tokenization

+

The basic usage is to call Tokenize after initializing the model.

+
LLamaModel model = new LLamaModel(new ModelParams("<modelPath>"));
+string text = "hello";
+int[] tokens = model.Tokenize(text).ToArray();
+
+

Depending on the model (or vocab), the output will vary.

+

Detokenization

+

Similar to tokenization, just pass an IEnumerable<int> to the Detokenize method.

+
LLamaModel model = new LLamaModel(new ModelParams("<modelPath>"));
+int[] tokens = new int[] {125, 2568, 13245};
+string text = model.Detokenize(tokens);
+
+ + + + + + +
+
+ + +
+ +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/site/NonEnglishUsage/Chinese/index.html b/site/NonEnglishUsage/Chinese/index.html new file mode 100644 index 00000000..e79f96f2 --- /dev/null +++ b/site/NonEnglishUsage/Chinese/index.html @@ -0,0 +1,1558 @@ + + + + + + + + + + + + + + + + + + + + + + Chinese - LLamaSharp Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + +

Use LLamaSharp with Chinese

+

It's supported now, but the document is still a work in progress. Please check back later. Thank you for your support! :)

+ + + + + + +
+
+ + +
+ +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/site/Tricks/index.html b/site/Tricks/index.html new file mode 100644 index 00000000..39c2e2b1 --- /dev/null +++ b/site/Tricks/index.html @@ -0,0 +1,1681 @@ + + + + + + + + + + + + + + + + + + + + + + Tricks for FAQ - LLamaSharp Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + + + + + +
+
+ + + + +

Tricks for FAQ

+

Sometimes, your application with an LLM and LLamaSharp may behave strangely. Before opening an issue to report the BUG, the following tricks may be worth a try.

+

Carefully set the anti-prompts

+

An anti-prompt, also called a "stop keyword", decides when to stop the response generation. Under interactive mode, the maximum token count is usually not set, which makes the LLM generate responses indefinitely. Therefore, setting the anti-prompt correctly helps a lot to avoid strange behaviors. For example, the prompt file chat-with-bob.txt has the following content:

+
Transcript of a dialog, where the User interacts with an Assistant named Bob. Bob is helpful, kind, honest, good at writing, and never fails to answer the User's requests immediately and with precision.
+
+User: Hello, Bob.
+Bob: Hello. How may I help you today?
+User: Please tell me the largest city in Europe.
+Bob: Sure. The largest city in Europe is Moscow, the capital of Russia.
+User:
+
+

Therefore, the anti-prompt should be set to "User:". If the last line of the prompt is removed, the LLM will automatically generate one question (user) and one response (bob) when running the chat session. Therefore, it's suggested to append the anti-prompt to the prompt when starting a chat session, as shown in the sketch below.
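A minimal sketch of setting the anti-prompt for a chat session; the prompt file path is a placeholder, and session is an initialized ChatSession.

```cs
// Trim the prompt so its last token matches the anti-prompt exactly.
var prompt = File.ReadAllText("chat-with-bob.txt").Trim();
var inferenceParams = new InferenceParams()
{
    AntiPrompts = new List<string> { "User:" }
};

foreach (var text in session.Chat(prompt, inferenceParams))
{
    Console.Write(text);
}
```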

+

What if an extra line is appended? The string "User:" in the prompt will then be followed by a "\n" character. Thus, when running the model, the automatic generation of a question-and-response pair may appear, because the anti-prompt is "User:" but the last token is "User:\n". Whether it appears is undefined behavior, depending on the implementation inside the LLamaExecutor. Anyway, since it may lead to unexpected behaviors, it's recommended to trim your prompt or carefully keep it consistent with your anti-prompt.

+

Pay attention to the length of prompt

+

Sometimes we want to input a long prompt to execute a task. However, the context size may limit the inference of the LLama model. Please ensure the inequality below holds.

+

$$ len(prompt) + len(response) < len(context) $$

+

In this inequality, len(response) refers to the number of tokens you expect the LLM to generate. A sketch of checking this with Tokenize follows.
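A minimal sketch of checking the prompt length before inference; the model path, context size and response estimate are placeholders.

```cs
using System.Linq;

var model = new LLamaModel(new ModelParams("<modelPath>", contextSize: 1024));
string prompt = "<your long prompt>";

int promptTokens = model.Tokenize(prompt).Count();
int expectedResponseTokens = 256; // your own estimate of len(response)

if (promptTokens + expectedResponseTokens >= 1024)
{
    Console.WriteLine("Warning: the prompt may be too long for the context size.");
}
```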

+

Try different executors with a prompt

+

Some prompts work well under interactive mode, such as chat-with-bob, while others may work well with instruct mode, such as alpaca. Besides, if your input is quite simple and a one-time job, such as "Q: what is the satellite of the earth? A: ", stateless mode will be a good choice.

+

If your chat bot performs badly, trying a different executor may make it work well.

+

Choose model weights depending on your task

+

The differences between models may lead to very different behaviors on the same task. For example, if you're building a chat bot for a non-English language, a model fine-tuned specifically for that language will have a huge effect on the performance.

+

Set the layer count you want to offload to GPU

+

Currently, the GpuLayerCount param, which decides the number of layers loaded into the GPU, is set to 20 by default. However, if you have powerful GPUs, setting it to a larger number will attain faster inference, as in the sketch below.
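For example (a sketch; the model path is a placeholder and the layer count depends on your VRAM):

```cs
// Offload more layers to the GPU via the named constructor argument.
var modelParams = new ModelParams("<modelPath>", gpuLayerCount: 40);
var model = new LLamaModel(modelParams);
```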

+ + + + + + +
+
+ + +
+ +
+ + + +
+
+
+
diff --git a/site/assets/images/favicon.png b/site/assets/images/favicon.png
new file mode 100644
index 0000000000000000000000000000000000000000..1cf13b9f9d978896599290a74f77d5dbe7d1655c
(binary file added: site/assets/images/favicon.png, 1870 bytes; GIT binary patch omitted)
diff --git a/site/assets/javascripts/bundle.a51614de.min.js b/site/assets/javascripts/bundle.a51614de.min.js
new file mode 100644
index 00000000..5afb7820
(minified JavaScript bundle added; generated content omitted)
(other generated site assets omitted)
diff --git a/site/index.html b/site/index.html
(HTML page chrome and navigation markup omitted; the rendered page text follows)
Overview

(LLamaSharp logo)

LLamaSharp is the C#/.NET binding of llama.cpp. It provides APIs to run inference with LLaMA models and to deploy them in native environments or on the web. It helps C# developers run LLMs (Large Language Models) locally and integrate them with C# apps.

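To make the above concrete, here is a minimal chat example. Treat it as a sketch built from the executors and chat session documented in this release: the model path is a placeholder, and parameter values such as `contextSize`, `seed`, and `Temperature` are illustrative assumptions rather than recommended settings.

```cs
using System;
using System.Collections.Generic;
using LLama;
using LLama.Common;

// Placeholder: point this at a quantized ggml model you downloaded yourself.
string modelPath = "<your model path>";

// Load the model and wrap it in an interactive executor and a chat session.
var model = new LLamaModel(new ModelParams(modelPath, contextSize: 1024, seed: 1337));
var executor = new InteractiveExecutor(model);
var session = new ChatSession(executor);

string prompt = "User: Hello, Bob.\nBob:";

// Chat() streams the reply piece by piece; AntiPrompts stops generation
// when the model starts speaking for the user again.
foreach (var text in session.Chat(prompt, new InferenceParams
{
    Temperature = 0.6f,
    AntiPrompts = new List<string> { "User:" }
}))
{
    Console.Write(text);
}
```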

Main features

• Model inference
• Model quantization
• Generating embeddings (see the sketch after this list)
• Interactive/Instruct/Stateless executor mode
• Chat session APIs
• Save/load the state
• Integration with other applications like BotSharp and semantic-kernel
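As a taste of the embedding feature mentioned above, the sketch below uses the `LLamaEmbedder` class documented in this release. The model path is a placeholder, and relying on the default arguments of `GetEmbeddings` is an assumption; check `llama.llamaembedder.md` for the exact signature.

```cs
using System;
using LLama;
using LLama.Common;

// Placeholder: reuse the same ggml model file as for inference.
string modelPath = "<your model path>";
var embedder = new LLamaEmbedder(new ModelParams(modelPath));

// Produces one float vector representing the whole input text.
float[] embedding = embedder.GetEmbeddings("Hello, LLamaSharp!");
Console.WriteLine($"Embedding dimension: {embedding.Length}");
```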

Essential insights for novice learners


If you are new to LLMs, here are some tips to help you get started with LLamaSharp. Even if you are experienced in this field, we still recommend taking a few minutes to read them, because some things behave differently compared to the cpp/python versions.

1. The main ability of LLamaSharp is to provide an efficient way to run inference of LLMs (Large Language Models) locally (and to fine-tune models in the future). The model weights, however, need to be downloaded from other resources, such as Hugging Face.
2. Since LLamaSharp supports multiple platforms, the NuGet package is split into LLamaSharp and LLama.Backend. After installing LLamaSharp, please install one of LLama.Backend.Cpu, LLama.Backend.Cuda11, or LLama.Backend.Cuda12. If you build from the source code, the dynamic libraries can be found in LLama/Runtimes; rename the one you want to use to libllama.dll.
3. LLaMA originally refers to the weights released by Meta (Facebook Research). Since then, many models have been fine-tuned based on it, such as Vicuna, GPT4All, and Pygmalion. Though all of these models are supported by LLamaSharp, some extra steps are necessary depending on the file format. There are mainly three kinds of files: .pth, .bin (ggml), and .bin (quantized). A .bin (quantized) file can be used directly by LLamaSharp. A .bin (ggml) file can also be used directly, but you will get higher inference speed after quantizing it (see the sketch after this list). A .pth file needs to be converted to a .bin (ggml) file first, following the instructions in llama.cpp.
4. LLamaSharp supports GPU acceleration, but it requires a CUDA installation. Please install CUDA 11 or CUDA 12 on your system before using LLamaSharp to enable the GPU. If you have another CUDA version, you can compile llama.cpp from source to get the DLL. For building from source, please refer to issue #5.
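As mentioned in tip 3, quantizing a ggml file usually improves inference speed. Below is a minimal sketch, assuming the static `LLamaQuantizer.Quantize` helper documented in this release; the paths are placeholders and "q4_1" is just one example quantization type (see `llama.llamaquantizer.md` for the valid values).

```cs
using System;
using LLama;

// Placeholders: source ggml file and destination for the quantized file.
string srcPath = "<path to the .bin (ggml) file>";
string dstPath = "<path to write the .bin (quantized) file>";

// Returns true on success; "q4_1" is an example quantization type string.
bool succeeded = LLamaQuantizer.Quantize(srcPath, dstPath, "q4_1");
Console.WriteLine(succeeded ? "Quantization succeeded." : "Quantization failed.");
```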

Join the development!


Community effort is always one of the most important things in open-source projects. Contributions of any kind are welcome here. For example, the following things mean a lot to LLamaSharp:

1. Open an issue when you find something wrong.
2. Open a PR if you've fixed something. Even just correcting a typo makes a great difference.
3. Help improve the documentation.
4. Write an example or blog post about how to integrate LLamaSharp with your apps.
5. Ask for a missing feature and discuss it with other developers.

If you'd like to get deeply involved in the development, please reach out to us in the Discord channel or send an email to AsakusaRinne@gmail.com. :)

(page footer and closing markup omitted)
diff --git a/site/media/LLamaSharpLogo.png b/site/media/LLamaSharpLogo.png
new file mode 100644
index 0000000000000000000000000000000000000000..62df789a6364461e9cbc09724e1c60caffc03759
(binary file added: site/media/LLamaSharpLogo.png, 41408 bytes; GIT binary patch omitted)
diff --git a/site/media/structure.jpg b/site/media/structure.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..a0b708bc6f4369b6036b4f7d98fcb37697e09a2b
(binary file added: site/media/structure.jpg, 164821 bytes; GIT binary patch omitted)
z1m#2hCPvrlYM&|@&FHunLC|upJs4ut={{;s=O+J$euvDO0Ec9TYwi~!u z%`#_4mT#|aT9U3Cxk3!~pV9jWdvd!nX*sCaIuEBP@>yn^MFOi8Y6vW5pR}XCGf=qM zumi%QMytOpIa!f_)yVeTri}VsUmN24+k=Z031NkLtIy2+mcCK!7tYEK$QCt|*apyn~nyun!eVJ4Z3to?eh25nh6yvO?^wC3iQU_Xx#vGYBw1(=&zVu8j zT*^hHJ=oTr*vJ?kAHSAmk9W7D>eT^zn2vbQOT>P%Lm5Y&v%Nson@7lcDF|c3v;cQn z>9c;!N|klp9X`WV=K;OV@R`K26mjp|ocjU-8i7jVcZo4WSrAG@DyZM2i8;&nbKrBV z6C_UD)9-qrPC8l6@+G7iU)Q^^TWyY#&+gY`Mfp-lMIhf#S!S@iTr8{_ZFLfq zU^E?>7+KqSc$vzWV>$oyE=PFL{ygKvjvXR**vp45p;&e4QgNVaXwH=<2M07OGyUmW zTCW#X`L7{OGUC+zZb&h-_8KQ(dB%{(XP{ufK?p9eyuIR;CBkajbF)2LBH~takMgxd##1W?v29+)XnCu@eCv+Yyksq z(m|n>7gV{tm6}`GP2Qz(Kb4ZEe^Qw$%4XcB*g)kR7W0f}KvE(@Br#LT;kE*MXVWe* zJ~xpV2Fm0+vsOf{(;sag)ghVL;R>s42%?mphsxQ0Dq?iZ-Hmy?nG`<5>F6 zUe6>f2eoWj;E$9ib!C4-RYW2o_r@|KbV2A3Rs5S_Ht{wIoTfY`GCBhd-@_IRw?QWB zA1pM(FRU$Rgr4BH|4DTRZQ&$y05xfc&A0w_o{PVv1RsW+G$*PjoH_&g$SS{ZGdJ|% z+i8%X5|U>6QfS?2cs~4_$Q}-R2dzVQ*sRY!w0V{h zdgCw3&Y&6@ROWl>{L+A(e4nqj?<|Ui<5U4%EHpGDXJsW$l!Ne``d(fSkBz*2 z6k6Hsc=T#tSjIAS((`JZTN@LNd5;VQ`DM4thns?b%!fvUZ~}~z6@eX0K;9zL*OtP6 z|AivnF3AY$1T4(@ojlx_PmryQ5=EaPECRY0ov_Z7YAhF?)!v<8gi2wT|p6B}9dt3zP= z^)3f8Kdsf@tk$3ZQow$h$3CCm>}I`!)EUUr6W#56OqvQr!7Ue$H^Uwc+#VQ{DED2kgj&q<4rq z(Hmd17T%O6Jf3}gy{Twj|PZ%r%iKwmr$!u zNTSQ?!{t=i=5yY<;$cyo?ur3R++fO^C=(c2pW)RIPEf?VVd3qq)2EsHfZ2LllpWuv zyCUCpV{-N6q5Cz{Yi*pP&8j&8!rr+)6VWmfBrt-LZE{(=8C8?TUBk!r>;lZUTc6dB z04yaPE8n5WhJ^dI_s@~_KmVO1x*^W> zt)W9PB>LV#TFG(EqlvI%6N|3>l`xzz?8~cOG@_S9xw<4skN(PrJ-HoP0XE=J7GF_Q zd7Hi2kO0qLNASSp!OY(E3R1?7|IEIL?_K#@*;%nreivLxl!n9S)KIkZXc74 z%0Y3sde;m8&n_T;QDE`uMG`WilLk4GHNAQHk7768-T1u86@O1F>bUur2Y?n+HE?IVvDacL15PAjs zpMmC$UY{N!`H>piFzoASc>Un;UtKbJ{izpTDdd1#r0@9xaEtK%m4^wB_ZbL$1U?u< zB_genv~_2o$dgFS>Qv|O3&0_#V9jwOo_#{2Dd$tQ9QY&1a)VLA=MIl>RmeSrNHvVu zddNU`uz#{^f#*G`+vT>NkHQo2-M5-#wA^6Z7aU2d?bs4RQS%n{K*SDTW~(9Z%JGqS z$csmL=qje=L>67SH0CJ$b?xZcluZRxWUHQmp4OHs6|`jx4z5*tZ6FK|+7KI=4Tab&C2ZHg99wZ&b6fWLiw_BxTc7ErV7XEPO zf@P}YCvFhmc8OWlMC3f7)!2lGVdJEESJoG$#AoLO+}IJskBwfhHXnU0wuZvkj+f^z zM1`Km`R8W_UJ&9^R_aK?NPH*IPEimlEcAL=Y#?1=zx&+YEWGo|Au|#CRL+tSQ9VDO z4~(Te!?&%*i0~5;|hi=2U!3x$2UVtx=5TbA}e?7XweTcP^J?u3~KO zTr?{63ZwTJ$b7;om^BB+<*&Awat(*gFz~zPRF{^*fS2;zT%I~Wwdf3l5C0mU^UybZ0+oiuCKW>Ac1hiAv0`&{_MwyLC&RsORa($~=jhh7!F#(yR?BHfsB92gdCc;nKWL453QkIiw?kV8NdZ$?QIM79!e7LZX*P^&C z>OQNw082?g6drN^d+cgzmJ5@VnW`_J`b5($&ymZIK<4a7liPh&)|CT|nn_7V?$BY^ zZ+$nKv-%B_O@u-?2VL}^oPnSmfrhC~QH-!xbeEbQTi7SR)T*It!Cz>`!|tK=psx>V zq~yIxgiay_-j6Gdbd)YQ4tVQT4hw;)R(2ZlMT2yYLyNibU0Ur^Fi>6P(gGc%GPP3s zkbCcUUOo%@c7c~kqqArtPxF30)2I>!=76VjQG#OlyQkI=LbjItHO=>Um+$5iKVn!; zgZ0)zK1dw56zIIJ5^YAo`cEu-LTZ4_$ch-o+6nc}i4;DgzK8 z63Eyc+}uX0{W`b?CDvjRhn$l#ZPD|-Z4OCdr$o)0H#D0Y zw=QHfrZ5QQLNc%Z;mrLX^ph)msLnv_@yWOo`119S05%FZ;9HCn+X~nS1Ba6hAi_=B zy@9nEn2r1zDZdjO#<|@!SgL8DAeu;aXPW}ffmC_*1Icn@jb)kR@FJr%euJ7HYjP_NuthIn6BUc zJO{$*E=!9KAzfe;vb-<-#h%8Q3fi@4mZnOf^CYhp7*BafYz#Yj_O~qhbV?tNN59iP zq&a>kT*nW$nWA9Yl3=;l$xwzKkdVgj#1BXiMU|bMs)ZCRw zgH0^)tgqq29om6A!_rSw$&{%r@y*rImCq`dqETWBvf z=Fm)O$|rrQnj1QVLZJob1QbVnCj@tWzV9EltQbqRG)7*$XK6v=z=Ib{;{*Wenvp{_ zc8uM<^=vi?&pt_TXyR35#Dnh)V5@rSoWz&HU0=iKjfLy_i|R@jui85n`@dt3bfT1! 
z_h?f8^pf54h%-h28r|!oa|ViHNjUX61Fe6kw4-botw-70^O{A7;(d;P%1wXNLXfoS z#|>|#=)l#(54E9^y1x8DK2&zBsm*lPB|IjcDPyx@dvdjV>a4V=;*^vP*%oN#$Cl-$ z!Iajl;L1T$RQTQ4Hi6dgI3I^@=|mQr&I2Y-1Xjh5JylF0b{Tkw(DqM&!<%F{tkg990RT^)$L~sQta;Pd?R;{LQb94wp6l6YM;N|5n%D!S!=AOP<;t_eG z=n-cceyx2BB=eGZGrERF{GA?M6Rbq(H1VcWdh$`IuIDvB6X$h!imI}Jrk&MeO6)B}@`XvHbSia2j{nO7=D9Kb;pP1aPS?E4s=5Qg$(Emi4q7n7l*TPz z&p^i%V?e|YUNe&NrJzJpNmieMz5KFQLZHfz8bbIrD!k;ybL~ZZi;ADI= z5M8G16kBU-MavcM?z1~a z(c86%Zc-i~#WSk)tnTP`YpU@YOv=OcjLOxGH@%*|92E(?P|mLRVqv0Q-ji0|>yh(D zKgSUG1;3r``I48Wm!F%HgN?Xv_s6{Cz# zE}j;Rb>0-2j_vm4K8ir{BDQ$R%f4z8hWVR4`|P}7ltSBc(B{q1TOQ-i|DDfOy2Ueh z^|Jc*DR0asQCe%DPx=+!C@4FNw_b2i65+P8@He7yj(U4}KjW^nzS`~@GIF?jKtqhx zC&e$zP27Paj`5t#IN_DY0;{eQ%JF)Eb*^Ho->jL7hYgdx^pevp!}+qwF@oZ8dY(Fg z4D%OUrKHwU%7#qqcgCT+t;L2x=JL`0&(jqdIEOLAymC%DorW6WqFlK(V>QANtiw+3 z4zf8rf%{q>p_7Z<9v`T_z7(>hx4)ony=3%qnF#b7-?>ac>5wVbWRIj#g#2U``!;h>j_*Xxl z^03HF3d^Ib86QqqhOsBAKk~cMzdAD4K8B4m_ioC{ss5bruZ~#Tb9PacVwvMXp~-(E z*Ya0CQa|W^=KMdGY%jX!ka-3GP&FprMNs~YLpG>wW{1luPM_GX?h#)Q7_BpkzZAi} z;bOtZo1-pU2xPt1B5wki(Xo`+Z7?4L58S{hRxsdbg7KL-j1CTLx7va zSp%{S?p=>9AZs&`LHIw3#b@!;Y;qdVE6LA+yK7|&^96sv$_=`#ZBcx0vVz+}wbz~JSf!ln(IUb59n6|3V~kqo1n=l_?5rL4XQACQ z3|srhJzic*d(=b|Sr8j=3{CFUD&W(}LOymbc$itW-CUfFc4(%H|2AH-;_SC#4}M4W zEe13v#?bQ2CTnn?pHw;%2wn&eFrMnc0wFqCSrnh0oFf8?JZWj<%QD*s8+1OEMJl7i z3sx}kfo6viW1eZ<6k1}siSqpe_)Rt|qY=?_5Yi86o=(N%wz~0BGsX`c_qe1K(6hB( zp>TbluChvR1@F|Z_bM|ES(!6#_N3)k5qAyQrFowd5yHD%r~%cl%v_I*GM*2+6svF zSMyqmHG+l;bup<_Rs<^!kZ)=vCVN!}q~6jo8y@;-nU|v2X2m3+th6t6XO^!Z2j=0{ z&7*Q+7;>7b*|V=6%EfnYT=udbS2d{C* zH1L8goSz?fTXt<~0;~S-^u`4s4pUex7BU??jH*GW^2>QE9J(f8F-?GR2!)gsqLzmE zPv$joNYwE$8s0#OguCxZnuH$yHjc$Zcuyhhs5SHF#K0!Ftyzyc+Kr#H=E8LYesP*U zYAad+1HhC^OF(%Bfpp#J)Yly^p3z?e{YJ@w?-{74rxADF*ZTw&GP@5gh5sDtH^!`< z03D|VV=rQkj^NZyYXg#WXCStD*jAbC&-Qs4p`zMP6mzS(X-sQAG8lf*abBz&1l(+Y zGZ^)TnT@_LjiyO)JK&mwem`inwX3~(1!osPPs80oUW^qF>#qRvO}KUHEKJL!VMife z3eh`V=SU>t$^Ig}9m3)~xc`_b?z z`6^N_+)y(dGL34LE>I~3u=rp1BGKww@TETfljY&7a^TUCo+T{ZG4Y_4oA?>%=dE5q zrJZhJF>(N6$T-iHd1A>Y!@Mj)*+b*{2LXVLkD^7fJe&oXko1AM#>6cfd)OKJCEO zs` z@po(iCL4ewQ0GJK&;vaTnEp_E+yxSZ8V^H0gPz3dUGGO0W$tm7RECJ#$6VJujyCY4 zTxuG93SX+&7haW?2C~we8fTz#I7YxGru%%8AuA1j!5i?=Q9q=K=j@IQ~$aJV?KbwXH!cD>coJ$K-1w6J$i%9Qn94+ z?4g|Xg&!4SS*`!*D*yP+ZR4$y{OQZ+jYpr`^IE)Q#fuTF!mg5H*j7iUG)q$ZTq$Wn&0H~&OEAdP z_qLo>y30>ee0A^HDjfy zU`;QcRB%v2b){=QdxxHqP> zFfPlX9<~v!oYp$vx&o&CJdssSr1+K^5LRgN&;a*|fwkLsXw-ZxfcMZAzx|=Jpt>J? 
zAcF_SB23HXr4eEYPB&vj*8NBo){Kh0Q7_^1TK$l<=Lku2&t8AWJB|He^Zk;7v&7|9 z@skf6LfS>dpD;5LE60e829(q|>C>~AiV-w!_o zi@m+%nEDwrNNyuv_09EiakXWVMtTX=wP$|sp&8hlfPAqm{eeGbRKY{QyCSI5XX9?hXFGot|QkH?X_sa%bl`AM7wMN>sp(P^S^Nh@{2?dZUqBq z9|;h(BhEl4T!n^6N^~OPgXLUu2Vaw;Q)bu>R(DI5OkUL5RVp?){?K7xG)1X8CQ`W2 z8516BHTv=}VrKOESfz`wK{blRO5CkH!SsN``{kptis*)(wP?(cjOFGeJEP_V4%NY7 zCgI$MtwIWVDD5^ELTm1r;}m4_5wl0~#@!0(k>n`fG#_w~-+-@3uAY>KZ z$l=8Nv8C&|yg+}F#cRns4p~j)6R~%q*=?UZ?d}f?^L}Pyqk9I5ailzb7I0kT@~y%9 zjsb1lxS1=?GGu6R{+V}453ojex`DFDKP(G=e=U$sVH<@HLvgOX!(!8s$CvwY)u=s0 zMEwadZbKc*vn#2L`kGJxqxtAI>C&+$+dVJ!m@X%clFWnX-UP58EJQvYMi828S71Y{VW-}lyU0sJ z*JI`k>f$Xd+){QsoxBD#v)0rcojz6RUe9ahYKh*#q}$Z^y1HNqNxDBF!B0JVI@F6py-i8v~8zs&H}Tq*|pb z6&2I+S9zM1?7d2pj$YP^CF`e-iVy^;SROG)NShMF@(Kf6a`*?o12;sgPr=Y^B0b|6O^^fbm82^S5Nc+SX@oS$RZXFcfB z)zGZv7!QB6dS{CxdI2Z+ZRVTsVR<>{`fl^}aQ*pQjxdY*^TNw%6!3Y?*SH$U9>QTZ z2m8`x3o|>_U9BDr+2sO++c@obSf0(^L$U1)WSH0&8txWtp)jyFyWy_lwqPPQM{vlM zjsi+V)W3G!99@fEJzp8IA#K((WkR`9agehro_46-6vCHS-n5vZ690LmH<#n|cG{O3 zFsoI7)^7xoWNRs;%#OEfdg_aPMH>VQRzf?K|AH`U+ z-l?xm*yLj%70KHG8yL zO_k}9?)Fh&X2hsAVY$2>eMVag0)*MMR%{NK?Gmb1ARH%%&HK=Uz$&2C6z}9(W`Vsil@3!&Mi`uJJ4mq;7hD2=U_G}H=GNu!#)N170?QgL5%JmCuI zfgOLN7w5~;5^s-Uq24Q2h-{{h@b4_ua}nxw&v2~aPtd8fL3e9XP#2ja7fjNSmfJQ- zR-dXW#~@yY?Pq>?dx{j(TIBG*ek_o6pa=+wz>$lxD;2hAaXub>U|FPN~t zVG-|9tZ7lmUF*xco=sR?a@&=Bl6@!HuwaH!{f5$d!9ZkdT64K8eeLa;D)zEu^_az{ zcbh^cS{}TM5?gwY;A`JBBx^;yzoJBKZxj}p?YfJeY`2_CGQMiE3#ruir&}Ir#TH14 zu)WL_8?aKXnJ8G97mrG^QgGlUo0Mq5vX@uVgDoFWsz>Ju!;U#G0{PhUkfdRWB6Rja zogkAEuihOcf^fEa*^B3=%_8bQ#BVi$t+z+5z-D7*RFYG7#Ks~<4i6Bam8+NX^pp*h zFB?d;hBM?g89sF`1%%^x91xK|a5OJ>f5^O{I%f3@)LkvUB>B7NH}wdRyu33|pr1kr z6diK_NFP+`kLc!iROY|YIo|vf5VOroH~@JVc+svM-)H`0~*l9}q&5)))dPN)ZHygPeh= zbX~$(*JRKyaz-ZJcD}x1*ITaRPx)XP7iWXJ98m$lHP!Ib6F83VBfq2)+#A_aSQXBf zY0ky2MPs)yEGl)p1wB7&?lDl7bX_f`BxK!>vu|mrR?0PJOwvkdOeB$n^wvz(XQ@EX zDJ=Jf1J17$m>3$Em>hV!)D?An&<|8h0fuhJ1^%UX2mpVF0aOA7@TqB7icvIQb0Iw3 zLMv?I`!^3V#Wb10gV$t@RFyh`ahbOMIqnapF>bkjSC*?63*ff488NeM0KB9ZCjC&* zNfATUGgMhs`!K&_mU1IInys3Y&erDTDCDKT+Px!y2PxpDa5f7hSq26!6 zxoV}G2r zp%?R|ZkaEbW_?q7{n)cQ<|Bl-qw-;gkUv=11t zh&_h#v!*)}Wgeoh2^fW3c+nq5eJWKT?(q<5nVloxTm*KTpwvYNez0Loi7WK7mi$QM zAkATWS<;Tf9iJ!}$XYbIV>qBtx>Yvs=lR7J9C)c}(sewfQ?n_%G6jw~_ihFh6ucqc zUd+AoHQcI9xO$8@=KTx1(v5~qH77NR{lmh+3+kT^MU-e(wU(CZKLj_aAD>^DI%qd21nCSdza0!MT=*8;0^1e0hHP=-o|mmo(hi`?u(vIo=1Wsy34D$n{KR|BaK;@e zs8*W(z#^(r=i$>UAHyzYf+z&lvu!+_o9|3A1*v#k4YK6$UH@cpWsdoxZ?IWkem$cu zS2K@`DtEpxllSdqh@xrc0BeYD~nB7Etf8HnC_xw%PAb9RP(ym`g+8S3U}! 
zvl5j{)%tV#1BH%&kac?>tsLuk5!E=)Y}}tTRGTDWP)z%C^E#%$Ih3b7cb|*6-X18=&ka z^2LF~gtf8kgAfa)Z4-!qZd`G|Fg`QI@nSiaY{n+#9NFvF8~%%PmQ)4yBblk{2zA>C zo|Fo7j!!gUW&iv5dxO#K_-JRHGFS*>8J3^C(D6Gvf=pg1c8Yv#o8hR?_@>UxdP>oF zuUeRScLfJJA-~DHsL`D2!p(2N>?a#xk)QEQ60%KX3oYG_xMUS1WuNuPhTXza$ob$p zyPMv|n>s0paSSa5V}a@u*F@e>CHsx4nW$&#%mD+;7vRc6<>c*e9+S_?i617vsZO@r zZqdViv+VPBv_gfbT#~WqXK@_Msq^wnF}s|cWB6T0zvUbOVI(I@muJ~<6VLILC*>c5 z7$_Ai_#+$7mF%Nlc_ET*ZmKiiQQI^EP?xbt1SR`eXmim8UCWchZ%I;-UN&5@A(66|jYce30|>z9|+1yQ#py5kb-h33O4IEo&C-GbNrh!_juyOx^@S zOJ+wCdM)}fua--XEa-bU(o7Cq7DC@7h$~TFljC6Xqg~#3B@SrHxiioFK(YU&{wHnVY?1PkO5c#T9_h*F`KOgVXjS2~<2CT< zjuHL4<{(C&IDTg@56=N;$SMsK(1c&C3W3xFuyD3QAu~cl@TEfdQIqo-$Zp05H;c>B zI3VTe0FqmOy93@%vVrUkK~82PH){FQ#=+XTUV*d=Knm~Y^`8SO98S{pY*OY8NXdlm z0vWfzO$6AdCaDK|5Q#%)o;Z*#9T5+#vZl*bY=ZUye^34#b0VHBEkG?0($=(ro=eJdg?`Sf*&;KZu&f zq{t3%A^HFQ&hKm}->!#0^Hod#6N5H=gyGVenb*2Hh@#*6-)(Z4ab^@vtkkYMx}7KE zGB|Go1Qp|dMTZ&cwREZo9xVq%-k24wvv-Hln zKxKjI4-jP>KpQZuT-Xqkhw;|1{PkplLr zR}XeK5_gh$B0&pyH6z|$@J{D}|C{by;1;H+r7=ne_2USy&}H}x8Sv8Iz>g-t+ktD4 zy>`gSX5{)h6npY`W91Aa(4%Qg`D+l|#X1X|Cs5+x3nbt}0e8z*st&R!w#ozeU%zGy zI6ZVfq>2lWs%l6{F;FQMT#wC&t0Q4f1zP!F4ym zd&*fS)ApDTXukoX``aMDsQ80bjsGO4T@VCn8{n#9vG$A z5|B!6J3RW>bWVmUeg(*)atkev{yB@PDd(Bp6DI6|_z-Tcn!6yc{cV_!he4h6O}?v8 zj;r_My!GBCk`hVB{zpWhqtFs3Ro8i@4(<@0ethmrV zec^&0+O|E2&96IUQ|^J}3c`v7_~g&z#q&SMnQ~4WEQQ@U7jhM)(eFttk&N(H)tJ_H zjsQ?J4hbMFK=ou`B%sSyh))6hU~Tx$P+p?9WonO{M+^2Q{35(Z5wbM}_|}nVecTm5 zc}2meP`Em$0SM2S z(6h`-Jpw={h)@ULpvlnICDayAub=I~EqLQ`AT4j{_1RT=e7ijsZPAt3(9N{ZNw&(8 zT{2HN(@ozq&NMzMFr{`GY98c~NsSRD{|@4RJ{Z2=>P zQt}yucIN7Y4bB3YyF#)*zRmh$2f%`Dy*Tqc6%U6i)SmZ5h&b^jO0Wgs6W%RJIg#Y2 zwCX8r@+=V`qazIIs)urFJMHD#?l#xkhsBGI>rXYIWyXAS3biM^ZL-(DeV2_6(6LE< z_J(8b+SAvn!Y%;s6~Fx2WF=wvX94-)F8C>Y?%>?Y3%$trGM2$? z0M=0npg2X?E^l3rqAe_Z96qdvds=$|@bjJ>NPL7)U&#oq$a)`pVA%PUGS$Yd#tOMDSq2!shWl(hxNw? 
z!cT#Ek00;Zp#N&bl3t0w?gVpehG@4S8OS=5?gkek!@hmNfG1}sKH4l>)v{c}Q%26T z(ObkZQp+aoQfVTS{WOb2Sj0qNZ@m>3V*dN8>*6`z4+$UqqFyps@Aw-9a8JggY2*Ky#xy~TPhxuUsiaUW6=-)qX*toSk44;E(ER9N13EY@Oq#qpey*?Z{CidtdSBSg~; zu7VOaK$M$*tZ>?86A*yonD?BzfZJ;i>&QNXcP~@{Zyu-~M#jMFd=fjta16tM<9kT_ z<{wo8E%_AIQyl;Y$BuOdf1PU(cgFxdz>mTO)A%t!ROQjn(bXAgfx&TEL@NCyN zsZC9?uZqt*5IlUmXRLvCoH=|uhCqr{;=j4ntO$I>JSP>oV9@+>9`Radez1o(qne)@ zEwk77Ad=&?RAG48bms|rj*#HzF+t0PYy&0Q5ZX^Cq+&Ax@ z(DzZJ>1QNjSVEPe^;?5F_9pR(?hq@2@-}Lr2%Y2S?;O|V^i1AwyT&P)zb}{?ZAp06+h@%G zg?<1ptc9tBhdIvZ9c4$Evoy^&UcQZ|0|!*e9wDQJ&vNjuMdpI%9*QGh9k4tmaKDuQ zMQnGDUdvbA*|_56wTYQqAM@L8KE3zx-MQ}^_{X8k>3Tu@RHRlWw_cZF>cpu;yWcQZ zm+SN|Z1=q4jBn-@Fnv@L=TC5xIGuTjHX@Q78Q%XO<^Fa-Dzop*$B#WL1|f4i#ZHe^ z$&S_r=_G<@DumV5>njI*;0{uqL?nXH_Q(EjWm_aqaKf~l!TBo&-9{bjV``m_+Mx+@ z_D3RRWH7Sjgh{(`s^gLig;&W4S#DRn2ZF%^g{Ep2{}qANBp=6SJmX0i>GH&R)++YB zd{!x7QDfl;nLhT;fIGK6us%>dgD*#)SFlrgW zzS<*=%1y^=EPI$)#hrVkY`n-JGCR_-Jl`MkmWC=95RmE?H#;^k4=}o=SL|P;4kS6-X-T7*R)P6_8OkDy-ugzG3HUxT*SZ zOLR&BD6^GKMZ4olG%-N1S)!B5lB@=!hRNN$ze?_YBj~{s!sAa;H8@vrBkCY2vJY~a zlxsP)t1*VF=ftTSp*<@#8YO%`)E`<2dk*ZMlJ{ITwk$r)V9LE?O-{U-_E>bSBCQBI zAd~_W0ykvfn)~QK=)cU$shA`<+B(*yWPUi_Rz%V_34LYgZ>$kV&nYGh{|ueAF-#HB zqUf2t!kM2+@8^MCQxJwSh|`+J#8^rAaqL$1X}=!Hn@(XI+GQ%}Xl0KVW)q?i#9M6v zze4Ek%*J45Him6^+35!;UZ)Z-m;~l!c-S<2GQ2-u^Hj*x^IAw7mI%(hXT4WK>X@#U zLgw9OjO|)h>efnAdts73jXI_S>ZpASI)1D6c8CHB5`fLs?wk1$uDS?m%K1KC{AFbR zgIUOb%x3;(-Rtse^^S0mO(0x`-!>tFV@17EM!+FadM$zQd5dBIt^u|Wyk5L!_64TSjaRqP`{-=Ld|Sm+*78p?Lw{Z5p7oMug|JaFMmxY>s>&zUr^mmT}rFBqc#(E ze!)^=yEFTf&2@VoW{scZ%tcbKM zrj&s+=X!Lg);e5l6O`< zt2_ufA61pqm|HX%%!^zNnrqTB#;cd`l*vdHya?oGoZyU`!p72LOBWgBGPN5H|Ztx-g~bh z-s#?Fm0i}p_w0Y~d(MB(dVG9HW@a+S7;}s{zVd$W8(u+4x#|Py26xW*Z9Ss*5xx`N z@JP|#7OVx{BbUVbSA4E$*GtR8W0qex(u#_$?%3!f%uBP-dQ7a# z0u&_|hD4dP?ynEJ4`+r!{5QDMD^P)xdX<*V<<5Oc{KAgQN$zNs!phis`}XV>5)(f; z=1nGw3mBimK*#=#BiCF^Pw2K37>s=J{trEyCU&Y;vjhX8FT3LYgeq(y?sB@4c$A zYQ3%wN>&QZTelEnk1)(ijloVU;E)}W-7>Uychg3t7+`6e*%R&4g*YmXv=k7OBnz6a z!Zxk&b+)+VvOa1z#yzdsy;o8x@#=N)^Yk6(-LM`|N}f4yIhHf%dC@zAkQW3srC(_@ zlH1-K7M6`G1jwR>co+?Gc&T?^f4W(OQINN%C8<4E49cwd@cNkLK2Fkt@0$}`4LJgZ zVDs6f(kRr`tU0YbmP&a?{dYNDeE!^2ZwD3zVoY)F7{2LP9!zqWDi+rNG^Qa=m}1B) z0k(RDN6$clf3V_O&~w|>-|Oy4+NwDl+a;NI_xNZ8oo0sd0)+w6nN{;EV@6|Q{kXMn z8VwvNaWRN|%g<{dmJYP`Ln`ehR@RL9hx=%U7^JdV=`PftoMLJ3_<{d;dq@jE{kopE zHq&#oqxPql{)==t0k6r`F;aLdu&&NslC&;Yhi5?G09mtwyb6Yyl-*I^*~&_#FUB0@ zKjr*snq@hpv3&2iu(jU7yx%;7ZH~n@&}Sk1I6i^;cw2olGikX&SuVe|=o`_JPqmDm zTdNVG{=@~C`5fnf6<3VT3rM*s%9&g|X1;Q)rGIvsP#m8lt23SkX@+XCWgC6=HJ}V4T!-@@l_pN^sjD$i-!@a} z56!`rok)=W*H7gEkD{EoDq1m@V7&qC!uBuZrb3R^krM{ntb8^m4ppD~OSR8$SdkF1 zryGX7rO%;&1n|^)+zAloAmiO13CfwUei$WWl&RoOl;Pk_>klI$S>1HG`QHD1?rAoZ z!AIykTsP|-PtJkqyOy}v$DI2tOp5|5uJhR!u&^7QtZGk4WjU+#Z*AmPxy7qiMVYs% zt)G4HX6WNy?z^Kk@ZzU*FemW19-$GUo~x)@KU5^0o;HhU;*Rl|dU5H)W!1~xAC@Yw z?-x@lMXRxjs8?yLcnZdar2hk#Mg5fN9NixNFY;K3Ows9ZB8=eP#zH{GK2XRD3Ab0Yl*5G_B)H*-P|J>6&GB1Q>1{m1T$*p%nIX`PlYgNnG8Yu40bS3pGYI@?cH(Gk!1CHwu%CWI@b90l` zJMoLRs-GEBVAYJi*3k@EVYESA%~78v%xk3kS(GC3KN0CES`T?NYgxLtHg(=0(6+R1 zh;Q4vHYYWA4RW{Pv3IAHwMp8RXDi&=6;C1D>VCh(H-njO146;GN&w) zSiLGUk>u-__Qe|5v4ZORr<=!$%i4lVX)kBNomO1(JA*1UeLfUS96nDe>)E`#>kd$) zUp!e%WH%wO=g6!!jQSUu2AOfi2V=)thT{NY)@o)_^jx{zAHN@eDeh&oX~!M`bwcF8 zUd~3zG`=fRz9LAm*iRVszQ)eY{=Nti()z;Iq7d+F84%Oh+5%LZI=sQ=Fs{6x<02^z zmh||`5Rrm0)oqL@wzFt>X<%!n`*U%XIP;a)AZY_4>yYt^0yy>5i9JW@GY>128cb!O zeK9iFw>)VA%{RLx%Eou$H367LoT}3#`5X^-Z&!v6yS%YdEqO=DTEU)>@o^$DEqDuCo!bW&I z&5l}hm*YfOWuk8v<12z*-chHzGpSWqOG82wsm$WY zKhA1q`83?{$a==7b>Dz)$8jRZoW2zS##b^VaS zQeBuz#LVh(`0CNue72*lPaMtyG;Nq7-J?kN)t#gMrH)Y!2E>pZq4%jCg)NaVt%!LB zrKII zI)#h2m95L#Jz&| 
zw4b@2y6iAle09-3tlwP)>UW3#H`nj}BMQw3g1D(Ufyk?Aw(}yKgfJD2-G@u%H|=y< zFSA)yxLHU+-tmMb zMpHUiWxs8_I`MTjYI1%IxTJ0$K4U7dPCcy!x~&D@abR@pTQ1~{9Gz{g&o(s&)er4P z5CsAqM?=JSABSv|bF7`!wiLUT^Re8mc=}orpW;f=!r4$>7|GPcN>GNdEj|c@%bGZ% zW0Wvoo?)b3l0s@`eQm(g_N7&n5_t9Vrf-r$z|ZgLoG z_4cI^vn>j#zf-7(7~{u0H%!gkc<0Dc?i?adrB~HEW@JUpv3#kzOW7{?pq{o=&FRM5 zA;J`@E6H7)m|9K+=`%GgFp-r?wAdMYP0&Kfc-gX&$Gpe*q^O*V)n&JArHNNNl7!b> zFDy_#{Vux~fN11XRGps6Bg~P4E63SQcrYBOY=6 z{_nVx?rucqO|=kJhxz|ncTz3r#;D`^O5yDFSAmP-H+|JskRO5*v?3;HgJkl!a)C_q zcO0MsC)tDJd=_<*+X+B8ymEE5C)yVq*->Z3`1Euic_Jko2V3S?rD7bcC|g4KtJ9r^ zv5>k@_Tn*daA+`r{x=7Ws&14=LDfY{w4r#^JAS3@E!ZOj>wFRDWxGI`-(Bvxt?)iG zEJtmBLCjE^T;QE8JO<8l(|&+rhq8tDm?H&mS}x$r+D=;`sEhbe!n&D?e3Lwe=vOe@xym#?ohCV1e>w@i-osw;?c}9 z``nJ~*}5m?nxD%mLqUy3$4|pz^PHGI7r^QCt+aJ{x!#2Fe8t3`VWXQ`>6>Qfouz0f z74TDKUd4$s6m~D?J!yx8SRJl1yA&WJNTVT#-Hq3j0`Vw8Jv)TYB;c-g7t+#HCGBehAzi9Z(4{z`+t~%|c z?pU@=TRr5_J^c#m_nn)`vddaDfw4my!ocFFkElWUeoRE8#&W{EFD+C{kh_z_T(3kxbWn~hg zLG8^je1%KKhEtw2Dx6U5&B9${vfMe=p@^tO=Ku;x);=j+oB8vRxp9$n&d)1fqcK1r zymU^e%T9?&Bkdi2*q!kNf{ z=uKzXN2xnL;V~2Hk47Z&_smI770(4-m_Q!LzaL*_ABxvIb_oMUFauE$a}Y=~g==U3(@jVCmLRDD!(^rQ41Af6rSU+sprcC zRR!q+tq*wLi{&vW+xOMkB`dAK9w5RB&_{?Yt|eWmjg-z#t6N?+O?Lz*dTYYd#i%z3 z5`yOiAbu8_u6x+2`!r3&I#xK>81m?l1@=*DQZ#`fQc7~r3PJ7(H?7{OVNa+>zBZL< zDDx*a0@W*#Vai_X*AQUVp$=mvj z4)YLyrvo6^^k(sIf=%LppG2zyWOsS7YCOMrqk3sS{m*sNS)EwG%$nlv*j2!VJ=;~t znjLV9jkWHp}rsl`c+Zuu_zTOwV_(w!S^g>&vWhAg${&I z)3j2fMI==7WYxW(N|ohEm9n~%_&gnXQtkSyHFlp36Uz34NLy(e>9pzxG4k0Kaf4dL z6f;$U%;GrrZjwkTd%yk9@w=3+m2UBewffrxm> z`8OOIfJxs8H;K`FxXkI+>d<@rYwA0-gy?uY0g=((N!M1Tnw>c_dvObPC6dQ$1t@}G zhiaNyRyrcr)Z<2ZTg$of$e!y|-ReIoK#12hqLhy!YPTMldc;E2s=~Ac^PkgKaedsY zxz)4~mc`$i;dmC-1l;8Q%p#c3tNpYm?e`{J>G5!dy-S<>#B!K3BPT56EggC5o(x)I z_|K?q$Ks(?ng*0tNtGEW4(x$1$ zgutT5#CD-Ks|5!wJMhcN)k0iYal{fgoOo)bs$?wc#?5k4F!;1XS}2fVM@j$`ET zur(skzSdb)mL*!8E1#{y24ia?C-xyTxYQzZkgKZnV%&NoVk-`dJTL9k)Iz>qX{>tDxPUJ3pNq;!^SojzV0(dbl znt`xx9As913IKFX=pVNu#ZtpIg6a0WIWUduY99aK_oR*_iB2JR0EMO8+8!X`;P->H z17ZiCW(+%l0t^|)022(XH#_Z|9uJco^d0B%*^u6H5`gPM0pb7T3eaP9q1D z_NRYol-Y0FT6-M<`SKXveuCO3Dp1>U+y1pp=#{&caSAbI5mnV-(&*y?L2w$;o0Nc9 z9_G=rFHBlap!6SUy<#8}{`V;+{@LS&*Wj1+$S|>QVh0t%=mKl-Su7#iRtw3r#MhvPJ((00LzGH}s#X4k6sw5fC3K_hP8V83?IPOdeZG8RZ~Bom5R1_chH1^ffWw* z0*J`*0o^$)aPRW!fVjY0$T9HbxO*ted{+9Xej{aM4|SsEGJS9x;BbM=Oa0Z4h2<%4 z(EjmT|LO7KEIu*zfO%c5uk)+P0Ud47^9vjz&;Wb;?eqA9X}lv`^4E?_O*^I0Dc zyP~GQ99V!+BIs%?4J;gR$>j8a`}b-(>jYihr(04q#8Ut4VM>p%u<0X{9{k^a4(tK= zF~I)vv>m0mi*!jX!S?5k!u2`5a|kzf!cKlF-9Eb}e+CEms@}n-4{pW-pya#9?#zEa z>+xB-P;mofmIRGjq2ycrF>xLu;k}$T0~pxkO4_+B()hx&aqa!qKfY$zCTnf%;XYBW zc(pJ+mYCfG`zF#m8JJ)Xw2m)u8809$wNnU4N$!z8`kUzgj&{Wf>r{okbxg0V9hmeS zy`mIw@A3%KDXx;MpssiexeLkY>Q=vYg{Ii;r#%WN@l&YzIe-Fci%$otSb^1TUi;!A z4zfTq$8olfx91+Vd3^#s&L%~is;T1&5-6VDP^TFYtd;33Q2XO{({Kt{{t`-01*BS zSoDu$|NNL-0?g}@FwDCu#f!I)qxd1B(KPI{sRsb8?=zeU2;TngXP<~%{#mp4kMI3! 
z-H89hc;W(XmpR=jsblgvK75lem#$a}<69#EfEo?3iqL=X*;Cr>-+`WA`s4WDWw872 zFfMpO*nY?!<0|9`0OgF?s6tG_PVroWuvPV#B;@aZ^jc)k{=rdzZPKM;i%-ZU1AJVAFKR;HdPs-tB~OHr}b?mZkG$pOe&6}GzD(5MaNW?6*}rt(%d z7}o}3>5dVGbtW#Z(uUCWZsW6$k4C4K4xcrJ?By;P%z7tP7If*qkOxawqdX1qgI;?i zHSY3*mQ{mQ3-BCr3FSS;sPDWZUsfL~l{f57FuTU%i|6lT)$c7H)=+!BWlQM%hDSxH zQ4&wM_gj;SFrw8ua6scZeUPLX(nXPJ?5_Tknr?-So2J)Fgf9v2m>c72a5)N(p^=}X zbP~e@wBtD59$Go(X={Jj?9p62RoOYjfcv=>1IE;2%d}ID*57omO0^Mv`Hm9`%g4dN zp}_qFS{nYQ+#h$E=(vM)OGtXRb1vI|)afWXX)7?ve0u3ff49r&H4Y9gj^pJR#NL`} zqLf>8V)$Fb0y2<_(ts5YjIp>bsH@!m2IF33ecOtJc4`K1VxJfeNbJq)Yd9cfcGiwt z%3iX}&XW_&GCEIAU$3^mk}(c9d;to%-Z%VV?fs{%z2CC}e?Ik>5RYQP@^?~8T=UUDT^8MQHIQ*OMK<%4}b{}bXFxJYkwHZ z<`(h5G;;};`)sC;rT9^pq3V0@UU#}9`R9wBckauK>TF_1t$+;&@Q$Vp!^*VbJx!3~ z5oDt9C}cSeI3^j!t9ZHs>_&ZsiiK_z=WzNJ)X!(7ZN^OR?-E@cftMA3J}a#&66+4R zI50a~e(&+e$$yN-A9Dj(PyhOrQab+Um70^c3?q0Y81k8u?`&v$3%p{68e%$6El-1v zF{Pw4eG^467XAgtUg(uy` z6||QQQM3LjE>JFs29nRc^2YinmWbpV!kK{7FT?zGaIZJ0{$_bqqzs7=n;F>`A4!>R z$o*3&WR6`_8%3?JsWHA4y(~ok^qK-EJf}dHuPtMuP~V9wDYeXKC$pM;BgQ;a!6f+d z%xjz;#rUjsH69VV7k!x;r8l5&5{z98{LhH%v+W|FX6nXw!Tp}#8JKp#Ww*l+QR8Y; z#@pQ;&Q@2ban}^JW_f7nUi3dV#P-WMFgn{{*_^P3xvCiyIZrCM3zbsK*o47_7BO>k z0}9czA~bzi^pt{!Ur?DI+3u9;BrQn|+h{N4P*N}XW6dJ_{R?Xsy`yP$~& z8YPuwv`H^jA(6LXd@lw#N^&qk>zel>vkCbj;=Nchei?8Hxmk{c{(z&rqK3&`uY}^w z^KWU_MbmfVB46xmAv3t;++K2uemFsX&YK1|X~i4~=k8Vr7O zf`;#ujp6le8@n2ri}sb?UR$a#OvGO7Lk!=F^ z@o?ANiCPNQ^eBfNKM0?%6Fdmu=Xn3sOPYzyzCCY*ZPS4f(I5YG$m*bUaPdpH5@0)0=T}_r{HdwqZ}Fb_>`v5E=n!$7VoiY z#U7hgdXhr=z12ezym>7b(ObK>5&&5{o$)(`4TVxO>2cANJdS=Q-&k2agoHxGwuH1b z;xrfdf6~he9!}T0jb5sZo8)e>COs7-CS`N=SmZ8ZKlw!K9e_3gIDmh!`u*IHupBh@#nHj} z@3lc{aQ!Co9e>@NZL}T>8=4Mt@x@(*cuPJY? zUBXjkS};0bvccfHtHU?8XTIa`F0D7m+F^=J#yaUw0#7HrE{&g*XE?iOHG$bpWvd5l z9AhHJ)))~IQ-Z{yB5OoMCrho3?pI^Nj50o#mjp_tS9H>Dc(u2SF8wmP#ee?}{|_78 z|BlnF6eBSIX6}~ChMbAv>5Ulnq^JzP0#Fz83Xc|v^YitURKzmRUmMu`9M|FP&vx&e zcYVi6L3`-@m1UIxZK=JpU@SFz`WXmkei-}_$^^eT0l)_LJm}6&AZG-HXBBh^)E-_D z5D)VOmUUbQYG)KAO712PFy6_uxN|hXPtJxQ6Je*N;;`>H55rOG)j(Nhv5F@%WYryD zOfiHk4c$UwVk7|^!}fQa)GlW!fX#|e|E8CGGSx}T`O~5MUp^y1?6+fN7<8MzNaND& z&;iuktcsLX7Sn_&gRg?w{9YzLg*cX7qKmw{B+3kL8z8!HXTrW6>RS)MvV%|;9@=p3 zJbSAyk$m29`{%to3Z))-Cx#yJqvu~tQLB&12idb@tU+G7Rn7*HAfTJJY=)K zigddgovOCCH?VIGDr~VF2=x$;yT-WquwRCGXb{_PwBA(ALH1cCN1`9z*a%^9)>ZTx;AvwGf=icIK((T;H(S6GP5C4(!KKC zX>)jqL99WB-_+h6Fzkoc3aLj^>5Q4Lr>NNI5|-Da?M6HW5PPbRVph2`q*O{rc2A)M zIj++x!`7CVaWmfWNw!_7y9p6Q*A?6_dB|RXFezLIjxO!lci#sf+d5iWu4#%%2cODi z?7v-*&1^EQ*gA}ZqGg1{SjJX~88^vXV0=v?vPw?Wwj+&=Y>#fga$2A+NAu@+GdC5? zcCsu;><=$I+OWSQ$v|Y8jR7ji5ZpATny9jvM$Pkwh}L%?-B>(<$=0cP8tE1=WKN}( z$#+AKQe9(g^?F;@{eT#ZuT~ci0TcYAy?W52b-sXlJ^V5`{6YCBKguZL8XwIt6y9#L zE!10Rn4_e~)UmhyK}SSJYYgt?Ec!RCI?fyJOSQOGv02e8(W4GT94{) z7UJv2vVOy(dtFG^ST(d%yL;n#buF{8npsQp*RN8LLA2V;Vu03( z10q75X5@*3Tup*oM6-|FrSJ!favr9UCj)Xx%9)LG)=VP1BugUThG8vsRL2+4(^|L- z1s&RET{}x|&~X}n-29B-&7%fp^H--&8I0p8gd;H*d0p6h7Vt4q){n%+pxnSac=Yr! 
z7<*|9xDH{4F`s3QXng^bYr@xPS)iD>dHdnMD^@kB34FDzTCQrl{jDr$k6d4W#0Tl6~Gpg7J;s%hy;h#XBs+dPg7 zLm}&NLnhrwY+iVU&O+D;eir%ZTwK{q{1Q|`JM+ZuD5vJYM##gaHTk%>$J42h^x4p2 z?_s>h$(|T5!wc^-+O)k3JKf2i26RPr9DLJB|H@-h1(i=j)o=|?C6ggL2ZJ$A z3%{|YxM|?B5E<;{1tw_8;_Pp+SFY-B%@hsGTB|BdMBWsVAUC>aJL|8SoT(sMx(r48 zw$Kq<6(l4VwQ@uSN~!9^%wOcwHPS|pDbC}wV_<~-yf?g(v{BDx-INYm>C}%3VNGSQ zSsQAN^&HA`>OJfC%;yDw$@171s$84$<WxyfG~S~rKYo0TvpsA4|18)C_o;F7`2P)J^u1p z=9)cGXK!%QbVnr-iK~%PKR2mE_Q{)N!;aLEVvwO1f$%(1VQ`JNvSa}LG~Rv1>8^I` ztiFGmlFz*XZtxQZuVwqo%OkJ6FIRl5@yefEjg8^+r+Ql{mJ&tC_c6|MnW?!pe>v$M#pOy=i<^C;_IL{oz_Ym`{CwMCPO&=MMLh}tLIKk0;`C3 zITcyA@}F16=~Pn*E(3-0@Ux>bthuT;Dg2ue7VyD04vRCyVE|o&?P_1o;+KZ^l@t$Y z@LQx(zCy&+YnVQbx|;R8iuIYps*%WtKK}F`#qqNJY+wD;q;!{+VT84;6w55T;ofws zikt`pwDR80;;7Z9YB{i^lp=2U`apRX)Bu$^Og|RLeMBv~M_t15G zwsguqoJI_n+b(a+l-Q9xmJJ+FZUWdbpmPD3kjFr{BNe_P|6)eU*9dFvGjmdRTV7u@5+q* zmY=m1YP5sKN+zEhO!rq7!Rx~di00@=0JH3LVCetNw$t}75Ju9vM#!Izb+$n2T)aON z_E$Yo*gxfH>j-Pye+iU(2>glg6^gqH#XdcP9PR*QV!DU65D=V(RQ-_^p*8 zGD800pYX@8S#~=aI0+tvr`h#I&4jgll)#ax|D!iqwjj;$p+3Zy1Rp|t?$biXO0WGc24t{x zco4NW4-MHRwmYQbGE3TBfZnQg2qM87M8zdkTup8wz>pAO4q(JGJU%YGGjv-pLvN)C z-1E9B0cheD)BlOQ<_h&~3-fL3!q<21a6QL+#^6_UI!LBu3*6cFKk9zT*&qw_fEBG9 z3D8D9e#h~p*P?5Z`auJ|pOyR97#{m1z<~)L2Vx}K&whAJYwJ-vfQ(V-6!36y zCj!FLP;KZ5R&Gzyv5qbt10>w@<^Pl^E-v*VY}N7SN2ZxdkK4ZD0?{|MJ06&3F z(E`<*045ma50rWS`lbrO>Xjfw+DY5W0oe5Cjnn%8rssGEut9!(gI1ED?WXaK5ee1D zf{p`F%R@lBW`Xj~UtSlcB195|@Yp=4Q5f(x5xfU*|E3#(I?-QV>q?M$@H-P~9R0(# zFdx}&P#1031Hu)^8lW~+6bZnJZUN=6zrJxIQ`(pv5ilBA1oSWTT|KZ~`dI8;pc3}y zmut*vqm7^KTZ=k@;7Q%iG3E?)fdNW_etyX_s3cV!V6Yfw6br;(yk@sg58>x0X1{q2 z%T5h5C9MBrO#Zu%i37n5&1C`dpUP}Ip5eL8AXZ;r|3vuOFb0tUqIushXON0u`DOR{ zc^CTew`2IQt%N@{!YocA91)V00W8M`IDkM9E%r8y-@SnqW&^lwbFga5hx?;3ruszs zqYcAFlL3tl=>qBQcQ8yx`uppAOU!_8kL|>InP8gTHbM0l<%qrW5Ny`-1CA5L4b<|m z%{L`L^yDHIbJlf=*dGJrP@3Jm067%X6!|oVvzp}hYtJCR zoT}#`0jMw_IVwc|7=-`p27wi(Ppze^D{)z?p zxC^Gzn7sEy1MpZCSG8a62g}R$zAZ56AOH4o*1Uy14Q&RfETsKOWkERpw~FW0VSnMU zol(>6Ja7Z@Hi3W{RhV`=^Kf#{9Dz`mMNzrT)$DUA zU?a~i*qLqxsIJ>`b|B&-E2)SJoE_j^{~4oMp%@5R2hN)0nM4!p;$2uJpJ&!!&==is zjxMTM6e5%n0DJ_By#Dz%{2Nmk;ctEw$hp!$v0hd{C`YtdvBE657+F_;wB|W*{9zD?#L}Jdyu_az6$+kyDA{K}} z{a=m&6179{<>BeWv~}+DfYNvCs!%|m@v;regBJLQ{Eh*S0>vgi=i&Zurm=P!jW|z|YDJSX9Funf%*hRzOK+)W zG$rB~KlOgSfPT;*{8I*-V96cl5M8~WqUAB64-gZ(cLE;vrc05y4fm6Pc72FaFko?& zo#+~^+k*$|A5CBQh*1-&R30^(9^O4pu*!FWqm|ChG=;Y8orHN^pu{Eb$K@YzOuY(z zFPaVo%t`zj@+k@{<+C~IC6+4w!JDrxdOh}a0|H7qy&w{1234f#S>MdBJtlY|)U)?Ojwt+;L}RDmX|)U zax7=#RP1rj?lTaVhot246kNB^V;(XJS==`n_u0U?{`%YtuC)rEz1cS#Ap}cQ^BW)} zb!UM{2&2g!t2FTmUhl1oHR=B1BkOTW{SZ_=M|Z0$g;SbgzaX6-+JZ0o)=Ov@WdB34 z$u>K`FWn>)NvbciGl?y>-XA-CMtnrG8ETiT=niCb-;zSHz@rocnbkNF98Sw@>eHO$ zmhP%}eW@KI;r>8hC;2MX0L!`JXwc~g%E@)TLOdd0QS`k@KQ@pl%q+V24lyk}n zd77V#SEi`7c$V8FU&C1fbq?{$D%P)YKc&_M^}G5uWXY6kcba3)CGU|)fs+zK@p%l1 z2mo?yJEt4qkFP48Y(07ZSi@@(>zae@Y4VU!Da+AcT%7QpqRxf(B_zsU&FJcTX=B4S z?tL5sL`k9I>8^S=63KN{UQ|TjKV?h7>FyicKZDB5)C%4Z;ht!=XY`1Y43`uY<7!=l zvyR`0QO)DeSxHAO+la9m(vyv?xaC~c$h%BIecMABsx((XGN35#c56AFZ6L94!Lzb` zQ>WI>kXhDGir{X8h24^5{myVIH+)m9=#+|Y9^O@s28HRxGGQWteP7S$^!ieb$ZTL~ z?W4KHtni0Y%!p4$LeG}>3o(+q+YMwq)_hO=yr{GK61qTB^)p}&j?Or&WF;;e{TL30 zh{xAvwppLJJwsm?UybZbXRr-TQ8E@KuA$m#M|SEKvo&7O8TudrX6-b8fjP<`s1gh5>Ptnd0oHTx=lw11R+fx_8X3#2!_{YqJGp<6?HJT3w z4oy4)&wM&&yq-22u3%~S;ttM?*K6djb3J)(|BSY_vddPH14${#ds3pc>R_yQi*KYp zwYqeNCg1KG`p%cFP70qHTS3#o3ovfu6m_glI6xn(Eor+Q9N~3UU~wc+;j!JYw&xpY z=738dg(K8fm?mA1E!_O9Sft78nX#?tC&MB~IAMGjK(>E4#pp-*AyeR9nBK%v)ZW={ zl#xx4L}BdJ|F~$jC!R$_MK#Jczv-tuKSPoIgyf8uu91xeDe_cO^l~>BE8d^%62VKKe;G#WDFxY$B%hCqCSFn#ir_o>o@8oQf9aGB?hIo2{Z7 
z=5Xq@*{G1z#I1URN&11RwRu`vSm$knqn)M2%5O@#>S41Cl>`21Ig?&-qp|jHUfZq7 z%&4N1Zk5FGDD@k}h+xR~= z$t_$Ru##Z9v(*dmqf%i2-$oA#yKuxt_W=f=YZmt5>Bk`~buHz33BXf&I``vpwDP+U ztT|L@f(^`$wlP`Cx+(3@7crn)Z?7(_1!E;ERG&CX9)^hx2>rSzVB$f zLOe2b!6!DvwzR2{Y}LxWpk$%wvsj$4?TZ-*_(% z9?*qo)$YVpXN`+=btgV(@MY`h7?;Ya7glzE={K-#`gFRDG$X#XR<8UlYNiPWQrGGX3FBbx4bxqi$b` z(5Cy&*hp$I>z1=~L-sKbu!l4hqfscp=YlJqsBQbf_lO7K>j*%&cg@piE~kd;h=XxV z0NaNW8VR-nZfrC3s-=Xn{-}zed4JZ%>4F{IPX6Kc_9&1r7{G!$*Ld&T)l1=t?)Qr$ zANxuj_Cjh93hcW#l=rc30Ois~or2CaS}=M!>4wU~Z_6&Rw`ZT?xE$feO$FyoRt-yc zr)JqqZoTT$(uxf@)O`So!FEW~n~YJselLD2^tpPlP@*(}h`WgdglD6*suHBr zzGBodP>L8-_s>YNa2ISrAK5?sLPC~sTX9g0|6bghvHujy!|1v-$&3#dI4QF1x+0eV z#XcHPZy2uY`$G7a>D_QhOA(UIx@6tg#?6L-B-FXBg26pZsZ~oUhd@XolaGMCPY(fx zD((!I=1MJTt`5l1a#Fv}_4fNrG^?N-thTVh5Z_0ss$P~d8v>IH=&l)9481?pLP33h5Zv{JQ^q;_b689_G?-Nj}B3hy(Rdpoei{Y5Jq>mn$2J>aU!=g1YTYOH_Y} z+gdwnPSZ40e7K<0Ccs3OVXR(bKCrA6d)-=jQMdzfNEGOP^(f25=ux7<04O9zGDOln z821bvr!&@L?C`Mkn<80X0w;ex*$d>|B4zEP>roRlf!TRy)=#Gvo5;)FdW^1pg~1R;j_erCH{D zXEg;Nzr5}?&T;okTZ$kB8F1~}Nb6=+u#6`pPc+-HxhQt?we=IDPlr9vJqDcQiIn@E ziz}6y2~TWv;#O1ihR#{Z0mn@lZk;9uEqq-^ObGLI^d6l156U{~7iS5Qj@#hGSBlhM_4J~73m0Zxf2hzmS#BU;G zM0)yjpV&K#%ap%zAXE3R%z6}AFge2DFX1DW`|YuG{+Yby=(`o5>G!TP7n}&w=jgcF zl{R_6zvOalK606r!3#Dj+Ez_n{M>J2bQ!^%bNkm|lFj-Q@nDS*4Ai_n{zdj2FKXev zU6MUshH{X_inHa{)GGKjS3M$9oymvIP48PTvoskPzhkOAgQ`vwa$hk-=0DFT#OIX6 zzW_`3Cd6{Gs6;}gVwJS8jQs_URu*zXLdcSa?0mX=@17Eo7D#&?7Fw(*ejZt(F@_E+^Rg%Gp$(bB9rH8WWiUtLQ5uh}7UY@e^> zvq9-oX5bHCE&#LQThntb#Z5)XrZyA;r)UNKow;<`o>`~x#%4>n5c0LS(*_Iq>V~wS zX1$Eb!F!Dg+X@83fQCRoh7Ff4)dOZ8blq-?zPzL72p9X z)cHUxgG zJU68j1lkW55S96^soh=#+_jG=XTvU0wBelMz4UQE##qb-1>12VmCZ*rwUO@=8CSC< ztj`WOJfGcpK=t^^(es4)^9$7T`Q1EGJRkvXv5l=pVPBWVb09W&hzP&qn6BNPD6J1O z4+lw42=Xay#udyelGLX)^AwDsBI&o)pCwQmERVlYFX9%h%X2vYqVG`FbS`jCD`)ZX zUHV19xia16J^5r&6Rs7zfuxGUnAw`!>=}>KK04F9cB|~>U$Z+MRt(%OiElT!tCsC& zbgTC5OTJ|aOy#P&`8TD9OLXcCY<|EmqVB(QvE%XhogvZe-OnIg6 zatH*AU@5TKwfj|d_IMwNarl?70MP&YIzUuEj(n~t4& zU^dUYnVtlSQHlbm6+MUfpQo;V?cl>o=7*7-M3=Torv^8}C?|5K-Qt01>}R0TymZ@X z{H*mTyG%BV<{>8s5Z-(8dGK><=02oRCfO>y&FA!#g#BL zXv{V89C!2mcN|N%Dbv{V+Ulcurh#C-vKK^;=r<=(&6Hf%z5ySi+@|E+HVH07a>veCdY)1E*MfCN*jqO$>xOzXa zB-7fawVqa-#c+l7XL||(1lh3{Ku&dZtuA)1llg#dqf6n&yQeC&MmUQ?PV`sc&or?V z=#<{=ERww1lCPKRy<+|o_P|1;YrD+w8!Rtw~cqCOfzvEnK2X4^E&Vj1dxy3qFNra@@a*`VlMfZm2x#>#=J(_-0{;5Z5^lGl5- zc{R+5W2<6piMGYNF=lLHWZ;kvf7M0bti&xsKEqx-iD)=6O<&tWPpG=86tBa>(prEj zaD*##`oiMPA;&ezP5T%|86r$EaRPjrVG|Ua0XyQQWvCn!?by|p_9J0d^VXB(Z==ca zda07g-|=Em8C886P;9SkE;OD)7hL!-wkejlZ873r-rXam61pW=-cIV*x?2w$ue@Z5 z?ZmTN<8KxC3Tjx7-L-JASN~bOlv+N;ZEEf3~C+#qT2*& zefxAzqVOG#ybJ!6|FCaU0lrzMsv&!9>xc?sSOL@f3>|d`*ic507>c^ z)1x_HP*#p`a(pkq$&5=uUnE-kt%&T@j-_+X-EFOlMnEqqo@$DVgvrlP12W_m9?*LH z*zRl-DR322hEw(}ICONG4t-eIBfaaFd}L^TR&sMKuh><$qeIY|2A-&j=GkP_eIzd= zTf8?kshM!^sa=mk)<9g1%V)sfERjsrvKqedlS)dim`6M058sgs6MMsR>E0LR*Z95$ zb}i0FKo&{eBp&H%B<(M~u^6q?J(ocZU}lpuT+7-Ing&Yao&O*9-a9OcEn6E0QBgq< z6(nZ?K|mzO7LY7aa#TSPkep)^L?kBxB{jLpNY0?-+~h`b&JEq<(B1yZnLFor&V*;~ zJoEeJe)l_n^aEYhRlD}C9oBx=yWSOd`_f#s9lcZ6R-!_s>Q%j|x^EjzUSUuRCuY1z z9bvja7Z}A=e*F(=6`PQUdk-}eRc@-Tly=r?M~85YF-O%7;Ab2DE#Fm1YAcwQwqRd0 z--~$gNQ5n;`H>i4EB;w>viQ~_@!1sUSlQM2mo0t?i2pl|Bcyj<_QZBEGZ-){7m1&- z6%RECLb(IL6&DnaAoNPjV4p^j7d3V3ZO@_736T8S8@W(VC%^$&+xrVel%uY6WE--u zKV6x1-F~^pYlM6zsz1U5PGBSy#+#Nl)%>tFMI^+!xNT71aK`3byrJCsd{&>V6HZF< z7nn>ZeVzWbF+n_5GMM@4gNui-DqvxK)bm9_bokvNih~jt`oy4 zG7{k)fbnhE;rC1ABx&G)D$I2HY@FY5EL-HMU1GURhoa&-TSoB)rTv(*{F}G_ zyj(sGykQqSd`KU>h2ACK>6ld9&X%#TN|YROCN86R`|Z)WKVmC&F7VYzo{Hx30{<)j zhZ;Y!(`yoP{N&}VA=UJEf0oHR33||C153$o 
ze;d`FJK61!KCcj+rNJ9_(ady1YjZXMs$5{{a&!Y+`02>slPC*|$BHab)K-{Y)@cgL z+BCVf%{kRO@@96KNU(MfPTNX>{!%b|3)PHf)*04QenlT=x=$K?d>O}Au5{U_VggOK z04CO@Dw5GwCqG@Sc_As$BXmZ1OWO+sUJy-@@62ewHsfv<6Tcl~OR?;SFup&AuY4{f zxavW2l-!%BGzi3fZIsG5zRYdEq4SZ?@xhWk`4q&<9P(TrLI7IE2Ub!O&^cQ!mL1&1 z-iobSZ1%9oQ-}cde85?d(*<_7GVb6!@ZTT9Ja1Yk;fm8 zs7;$yEr$pBr%XsJn%`9Es8M&UFU38dn)B!AlVApSO#-IOS5T2N-Or`v1^KtKr^sC^ zb2b#Aq%5w^aHE~ut{M**_*juKODl40I^ja&VfTQ;RB9a2Aeloj_41`rJFQ@cR*>l# zT}MOnq8vIL;7A(YI)ketD*&J@1PLC2VS6gHTHBwG85c5Wo$C+ctAFDVg1_Ztm6b6--< z|4R0Ww5H*egu2qJr^&R-g!YNxHrvzNF9C3`8@-h(5Wy%GZGXt`>IC7f?MHI}y*DBg zsJhN6++Y!Q6~Lj?k3a`=<=zl5x@@lAWzIgb{tSK>S%Q@kiNx5OtQpXe@HZ;SV*9bM zy4PZJfO_fl-t#g0W8wr9MqDM+s9uxZ%zjkzr7VSRH=7h zq)l)l{CaBj$-Kxh{wDiDDlh7GX@zyZQl;P1Ks7t>FI|P~yV)v5iG{n4@eY9_Ifb5w zD~-@#FNvXd*KJ*Iyq=?Z_z=2z{h1p<>&^A}V?mwL!1}m8OA&+&XmtO8K*LM;-5xi= zy;@M6J4MlGgcR6M7!+>1Wj8auo)9%5&s6L7KD-fAI=GaM7;kV_7;#8T4Nuv)cUQeg z^)70>9#dYzXD?uF&3oHt?Ia1sME8heNQ^FBGVL`hmu2R@{kt*ye0a5U?}M z2j52TA__Ra?McA)z>PIK7=iIIQ$(rHfG77JBk{Mv)rjrMktXJi1XMp!rp>rn=Cidv zS@Mk$VKhg|2|&zXdOUXs#DbsQgx@iOw$hzavH&|8IY9d(zPdo_b}lS7pEpoX#eP+= z>DL*ew)q`E&JloO?7$mFtnI3&mvq5!M}V;n7evlsWdRetm>9#s4%$&&1@>~Yce*wu z?g3y95e(Y|K6s1{6C&@K^2dtE{XRtI^&TkR1i|crKc|6U_{#0JsSPotUGz$}6&|G~51A%Fq_Vy6jQ=$peHhXDWT+?;X(-R%x476gX+89H5{pWL$zOOfBVtvSyab)pn z4~g5=1t%(op~sAJM4-tpQ)f0M{8cxTavxLdhemSK&BEx4RHz74vS}@BI`u2kCA3Pn znm~i}qfQEym%<2oAJ0EX{l-Rh7B46VekCqM(x=|j$=f#~z%tu|d+iZP%~B^&^| z|8k8fIPN(6c0sI$X1)Yc9UVQux;-lFuEi zU}6d-6F_>eAxsbTHJ%nMm)-B=f8sG}Ph183xor@fPAv*8O;srPuAG~}% z5%I3h&h7Ocd$ZMFwZhXhv-`lUQMqAm_LjZZod*BtSWdI4r%G!#1_cu=Bxb0nRmp4!P4K?(yn&>U-# zYCjBvpFe*EM*ef$oZrRy%>5<|NKXe##RzSI-2;m37Havyuxh-s0783PLU!cPlMS0c zf2(f&jze-sbxDTT!qyvQqvV+zuWRf&9r2KeiP$aAc^v$icfKkPUI&ga>z1}N?37s>SgR4% z=;6;_I5VOd1Fqzr8%8IOmzCEs0?Qj=CEq zR_ul)keg}BDA$gWNL6X$NGst+^>yQM|YVJ-SbXhxxgDY?poc#H0a z(|X0$V=U}-R}9OO%g>5Ez;mEDXOH-l`8lF#{*H7jn|JHJ(H}ztaU7)fBOMJ{{Sh{t zV)i+mju$`I&VDtM$_fvM@w@&M^QWrUp`EijUVT?orl(^qfX|vrYoyXD`2MpLi7CdG zzLZ-yxwI)(;Yg$G#^caJ>*931bno-%4#^fCx?Iv}G}mc^phw#NQ4TO+l6@8FEuozxc27)Qb0gLAligplLJB*}C7XmHX?b_q7B8Wph40;|ty;-c9M})sAqjmBr z)5LHyK4E-(4@MEx{rn~wyY#IAaaW%>Wwty8P8(5u1Y=5nr~h*lP0_Nwd)31)tGZfL zZ)}qOK~;>q zBN8xDQsfcTRxg?~Lo}%W_5O;DBqQ_vN0aW0al%b{uSsId2M12xAQg(XN-XQGzxf5* zhq0`FLFbEu7gme%x8MnbhI;p(D%48Xzr&+J<|HADXgLu2K$g8Rbv@l@{sd+udRSns zd<((eH298F3bw)|^<(B@SGPOxpV-T$wmWjXOAcA@LoU1M+lF0#S898y&DeCZ_Ia(= z@v{d@qo(VQMlJ=&GlEFcZ;geF;t$pI7jF3mdJ-#7I~|vRs)wBkFbd#`w93RB?d3n&U68+2FkJy-AF3BaE#arqUkAlFC@W42qhRCDlnT zyCHwJBA?EtXrrz(g1V{hP`B+^*O5M7WFXUUemA$QG0;`|y;s>96Zuq>BupCibh60u zjO!C|D>-*&X&_hM4W5tOeM+R=uqI`D^%GYePlKe`=VEqKET@#3_d2UQ;MtaBv2$p) z>UZNTi6&)_zTPlP$yVr`zy5@qp9L-{H`@r8hMZVrZJd1?yMCCZLu(RO-e-syKm*9H zcEc@}Q0qm{DZ#3a%%fVF0z{W4ZW+Fq9q}rM^g?{JH&owb+Ss6hz`>NHFo4iyr@Qm{ zz7Fp~vBbT|v(s8&5E>`z+OojMQj}!`qKtkbB?MSBMnpwQ}1dgG-)8|BiUz{QOVb0ep6u9FmO%Wlp{jWmTgR#`4-x(# zVRsYM$il*O!Gx$dUHo%AB3j9us#*{kFhRUAn=^dK32GnT zpPHUv7khT|>h-T(J;`QtD5%wV$s#d<)RA2y-Py8RgYQ=N=B`4BxVa+tT3 z?3_CM6^d%I(l?|n_NgSM&)LOVWNWUF}z?sG=H?hO0zzBwLTr- zFJ29-49|a`S*PWY|KL^(5$ICClSfWo&WSdGr~Gz65S_iFE55sme5h;Uki$C?zb*^34K%->z{j2 zn(fG$JbSG+zJ-UTe5#%iH@tR0R-(2MCFJ1yyqn z4h5Qqq>zJd=B2|zI$R5^H5v#ze#24v5cD3{zG0o?AHFSEWP2J@Zeh|}tC^F2wcC~R zWDoVMykdk=2!HJvu?#g^C)A`T;j4asUa2+v$`~`@6VDnVXfCYS(GV7F3t+6yTc0FZ z_-r{PLOVAdz?3m_t(4(Aan~^!g}n^VObPM0u2QU(D6YBHm31l>k?$KQxpQ6@|DZ)m zQ4ju9Ti|y61EA1WA7sKP4V#}^I)#g7C_gvN6H6>AjTaj7eP8SYwgb~vPMt`>bjqqz z#g#l-1m3Gp^B6jJrfsyV4O$;yRNE7a<{w9!+P%1VAuF~wvdxUnW7bVb(J5aek~!t&LANstpZ(9w3i&B4gxx zDA9F%2`5J4m2IC-Wm&Z3A`R$CqnB4i`8+imV93Bub?BA~b}OvzSwcD9d<5+ZvK~x5 
z4TWC2y$O#*Hvb*w*FmM~6Hlbro3T4fD0bKge4Mp(C|; z7&^l@InBVHoj?dJz36L?C3V@{JF>n&)krOOzcBwRxw*qq^r!mAx&WH9|>k_Al@7FVc>|-I(;N%VMmkpt|F) zu2ZF6-g$fL^hGV%i$1;J`zi{ovHFv)LAK6D0!sDbG33h=F9xp8Q-`5un`|$MRBYu9 z=~zLd=k#RNDrO=w^^hV)N_WRogcO+XhG}>YtEMC7;K)J~Lc`+7;>r|6G;c4X)Kxs5 zs@;_?F^_9gNS~Ac!u=?do~7x83r3RQ0-QQS`mGEDgbB3Ur+h z>6SM13fqeXS_NVKzSXd#MwXqT+7Y)%gNkAO+qLEAl5qwWUzX@hitHyZmt=2{IrQ4U zJPKX>j#JM}bcYrS6<+5^M-@kV=PA58vZHuxxq$p)3eC$#hG%KdQh7_w&X_koTQ;ZY zT2DK|$J;L|=p@mh>#0yz&KW!70LCg2Nv03>C4yBgvQ6Dk(mU&v0q4|=f{-}p(OO75 zvZ%i*rFaGidN7}3sEIaBUZQ6cqZMz~pFtvy%E4>hd6Vy%$VtKjuZpetJ8l>je|U5f z$pG3v6vEKv7s;m57VS(IxfE{CJ(uN}i&SzIXwWN~)c4L*je+$g>PqO@2@O0tUQ9>s ze6r3y0iPRtO4~bZ8r|kv#S>D?oP*EipTOmtYDN=}$O)!8Z3t?f?=3MO9~2>2R^8Dw zcpL4Mt3p!jpO+kvlFQDWTPk8B9sQ#w*SzX{IBG;?!`wY~j;L?8GFl07N%;N@#I ze#RFJ>&_BE2gTzez{Kmu3U$t5EJCfxNBeo75+OL%FRWjvNPLWES{yW`xhD*>8iFd z>-fR?MY6iE?i=U1^3R%Dg}-VvG2b9YN{oSWuvGiSp-zXY1;RBb7bASw%m!gfAv zjo2n7JRSVIxV(EMwj_5m&}YV-Z$V<+W!{H|?eo|n%>&5iSr`uMUG**v4+jy-kUUtrb{ah{iliL z&@GYpg!*m4@<&v=?1K1nG`#wnt+i^Jv7;Zy6rsX{UI1nl4G>Vb{K5L05@g^mqWvG> zp1QG-y|cYLPvWM#`G>GOalJffWxbKZ6eXqU`?jzA4B|CyKR^tf`bi(`Xp%A?Uiopa zakrk8`&cHu86|8nnZN9pZ7SANopdV^CkbXGx+;3wkEW$S?x9z!KRKO!yPHh=So?B^ zRo+Uf#sg!W`<*C$PgN9AQMRmUS#u&A42c&<_sESj#61oQz$DGf>Ipw+P$l=9;gv?pOP;-_D?CwMg3s>JT)_)F zKg^l6O3cZ;An6Xa7gHLd>85+S>D{l`q7^4Unp`}Hsklx3BBFJ{fNhNb;(bAKO}|w6 z>#%*g6qi{>WF;inr~jxH8o7O5b^4far^7J{T*uMIS-kTmd*9mTuKyELFBQ$DP4+c3 zv&bCgGCwi;8W@#d^fo#FTvxFr@O7?(g5iPfiB=s5jWq$!DFBWRIHMRd3#u=Xv>}Gd z?Y+g_H#WLQ{;E0d0F+0Ey-8z>4WJCdUILwr@>ouW-RbuvJ#3i~oc$>8`e4c5GXfuf zxHyF!)wt$;^}|NJK5Lmr>2_=n|xlDj-ek9g3BxYavy!HYUqALM5eNY+B!Yc@m!$sm@^t& z(2myMWNULJsW$WrpE%%Os(V+NXX&SXcIG~&B318|cD9t`zNPg%)yg9N^<99cTZbyu zrjue{3v_17Ro<+NrR?E#2pLL^^+cB4x%yhV85fhk6O1xy__sskY^Xsoe!?#(c%jR+6=jUPLAl`+W9`Qa?-u4e;Q;OGYG5doW;nd2* z9}cKeXrl$aE|o}UY^fxuFjdv5g{@_L2HlgGjy{3soCLJS8{e&n5REr;8(3e7q@dV3 z#1BDwjFqBT%5yqbCM|ih+;AAIdnA09iCa>c2=2iqpSrxr){_;B(UHrZaKn=tzL?+| z0cQ_U5WIL%G9Werl42)uW$gK|m@;8C*UhV%EjyjKj)RaletMJd8uK?Ztr6&bRyu|^ z+?YL~!p%B2_q}edxSe}i;UoT)kzVi$DpeAT#qssJK)ZERswM#OhpP>FiD#ZDCzu89 z6em9}Q%y4<{s}o?M4+R&&Xi0 zdsd52ZAM`CIWNx>x-C;X!-gAnD4roz+b)MQME|M>j z6cTvJT19e`u+$pNcB5(57V@ zdP~f;ZWzp%$Vf!|f_UQuE}r2fR5^UeR$^nDh)92iN_)?^Ljz+KkF<$fO?0WuvtY*f zAHBwN&@xzIh2w{zbXZYA@3|Ox6!HgdAAWla`iL{Gbk+E3%dKNQn?_*?3%RFFCxMId zoP2qFN$znwYb`ybWu_`niM1SYA}fvsRRAak{|P8YeMSPB zk+MxE1&^@N#Ni>@$~K>E&?)2amCx+q?Qc_P;FU&20MRLi2B+{lSzk%V%S~-TXP<1C z6ofl_bn#%rpFDGoEgzm%S3flrS~qi;IHC=h(6XEiXH|&_>|5i%w@;hmjVz(a(!&gxZ1Sohe-sH!{IQ)(S%WaTE80!-R%9D1jpd{eYd|E)*Kdt2KV zR%zly+XCk=<_% zPVc_{x$^O2!u%;@O{Zw-gi8_UokfAnY+ad`oP~lsQhrR#AD-bqlAk|9OxEE#p)@f}?uA}JmdU1m(*m3bS{~XJVxu1(1?XZq*KRh{hpH4Be#0qZA&}`6Kv!Fo6 zLr!uJ4<~jSqTiblB+UP+ZTVaI!raZ-6Pry1>x(uqzIQ_;TX9FA(twZJ<$Tem$evHx z_97Xo<7%`!HlNc$X(PfdF@a-+FO z9pS~QTSk|H+WIwMDoPfdX*Dr?6ER%|tax~?Vvt_`wjrqkzMYTvx5xu8l0*ocV#IWgmI|1>L35*ew{2rUV@-0=h6UNd@MzdS%^lPm(tW5g0`^@6&aYp3hkrH^HMjJ1^-iX1l!vyzc= zZ`P_kR$gaL!jcltg2YNZQ$C%1D#Fz6kcpbyFCGhw4#^#G{CW`r9@OEb&2uCw#pK<( zvh%p(nT}AmFw@2qZJc6=M&3P}w&mG1i(It)s51?_6)EBXxLwEkrjyLPX@d%vswe|dLibNxawB3s z?!~H4#&(<yuvgX3g(`n2l)6J>0c z3zZNEqJO-%Eq+Nsq-Q6y?38;SLqz>}t?!hs#l6vW()=Bt$u@!K))+XEfjPpAo)E+F z5yc;L&3LUD&y3+-^l1%lw&I=*tmG`<|rvtCJ{bj=Q;R0=broP40aA$53 z^_uqf$yG3#QJ*&N4J>>1!OJD~^HDnc@v;Wqg(=T|MeLMDc(1{8mx3F-v9J;wiOSk} z#LazqgS?Ws7yDAa^?png-9k%^v{dyG$8B{pZLB<{kD#N3Vc57SYW{mYVQb2=8a7+Y zXWkuLSuu$kbw(;o`V}FadUaJn5{9;c-Q5>*bzWW%Y|2Dk-r>k5rsRozzTSrW6|h~d z!N>VG_&htRV`b)EJh+&-6XkK#xnS)0Mb_}poJ%*4zZ@LjwSI)qzh^YDZ^>=|jWDRq z3KV#}$IEECaMIeu>rg?4V-@jL;wE>D_c#uW$IN_Ma&Ykl*$U^e;l0mmd{vg zr|h4ph~J;J36ZQ!=#`88D@9`2Q2)u0;$=Z% 
zRVfk5@jBGIa)Yj;! zjFDee;1cr@-spICa~OY;rwP3w$NY1nK{Auh=B3218Sh_MNvWvE*3YSn*je^dhA(pJ zAzoE^cD&AkeWcKgO>J1>AD=XoPCL2PukaLci{a&@tWvXz_@mAhV{aK-X&Q}K2akF^ zfg?sIQ|6eVOO_T*45J5Gdk7nlbBtHDTJLw9aWn^4$O=@$X{8_EeA$XEkAu(Jl9qj} zlAQ7?tM20Sce)8S3+t--{bfzHBRewfS^c-uOzjR1n9}nPv{V}PV5u6`#TjZL+Dg(M zTOE}BJ^7gQ7uO^)!?-m8so<8f!|RH>nknWD3H{MJ&#cqky2X7D`L#owdHC}!ga!9+ zTeOHWY4o<~7pJ_b8n5;-WLF?q$?NdRC|^r8CFs4AdnMj4C-WqK^n>P0mf?NM zu%~shsh>(V^%b`(AtdH30=R$*U(tzHeKWb$9eci4PQm0iTA1IH0UM*^N=7R19AuZz zpxM5wUqHPuY)x(GsmP(FmxJx9Pz%pDssv-sv7PP2alH}KVmRXi0>Wo&Kt(m3W&p7{ z1&49R4}_90=}B$Bj8U>@2N&JDS1VdveG_`IFg{2c6z-fCYuQh2oFmksx|X>1?bAg0 z%{~Ao1++o)3XF?v7ZsY-J4hm13Q2z@gcjC~BG=Gi+MvUcGc6!=Y!-l=f_=>X#dzE2 zfbFOL{@0MNKdGeR{JXyYq)=P4x$Q#|85b% z|DA$Nttv5aEn*RToCZKw%FfP};K=1>W^<*5)8;zhx)I4 zI#~blkLa{=X#e=l-*3b}+UI{-|B0IN5`w4KztbjbS09VGyM}||fZreum)yj;y~&~I zl&QMF^+5ec(BgrAtJWF`{DN23-^}UH&A1JmY-ELg)a}U{yz4SkQ4VmMUuuLce|Qr@d2qwPsyb@w(v7T-xT<)g zKKT48in`@wFO+V@sCp>wu`4LrDClo*hLp(zoxtx;Fu?C3oPjy|&k@hRs!;r)y7uo$ z8ULF_k;W+U7w+dO(5EiTR)F3|{)ySCs`+Dq`}y-PGCTkBD*q4urFQ*|K498!!~?xt zoUun3l1;#pc?)bq-C5QCUB<`H_t8R7VWkCtsBJCE#!PV6D?o!Q1kgiq^C;;Hf#4m8 zM*y?eATi-z+Lz<)NdUGAh8Z!Mv4h+IkAM4)Q)dP12T!lkcTXIR+?w_~E0 z)9dTSj=vm={Iw9_rD@?m3jV2C|2H@FGOEdsDgW+Xys581sM9fcb`W6PY;7Un_=?LsGs}wLM-V4-TpZ%W{(Zq-hO1_ z=4})GRo!U!ms%Hc`{myG#5>s{jr9e}Aj|vViND{Q9~{`JsLp74;!NUagN44=%~^7g1-DUMr1N z>@LoNkt(-mIgbMRV-_pF*tEYc=JmPRRsIfypEIKeKleK>u6w@-%QJ@aWlJdJ(Xz^T z${jfaa{hn`1`ML|LO|o68yILo*plYcQ(*fdD$$6UYeRW(O#X6G9h34B2wf`uG1Lyd z0j*kCEB1=F=XrDR;(<;D*jx`#@i+M~fJA^q0st}u47|;AV6;Pm&^OMGkHNRv*Gpbw z2LRm&h;H+4^v~sw0r&(6M0~<B>0SGqd(F3?Vt_0g=>C z_>nd`>pEZEpVLNz?=p|Ebr)puB|XT^2#Nk>WBg+`MOwd5LSW>Us5d_A_FNZ-%WS}X z-~rB2VQ9U1d1LChRF1sa>xmL6$*-L&nv28DlHVqm`P60PfjR>U(&v}6pCagx5HPqGp^viy>rZ+T~EmF}n0a%lE~lCx{l-*J2*PvFoYox{!V zIQ=IK-o^>0eV_#`Bk-UG-T48yBl1K`$K>F45Ees+DpAddEYZQ*e#IZ#a1m?8l4`** zml%L7O>$^}=^Lo!3<#&ogLj7j9O&ST?d%Y|ioWs34gWoDxCRQDrENy)1aTIpkCG&= zE=xUmt(iO7{}s(%(xQAv4ONwrdphZ#S00UHal4ekPaHfC$T!SYM52T1R55QsM@eJE z-!ie1vy4EP_h&F`?%9lujg7V3w#DSQ$j>hs$dvW#Yqc6#FZlC&Y${PI{f9kB4qosb zM{qu1^PrygIbh!8Z(;qT0G-TCEU>Z72iVYQS)k)}Kmp9d`?j}7^S;j&i9c@nUxW4c zqw(L+mIlUmkm8*|mEfwzZAY$_K&>lca4VT7C}$|Gp16c`_%(aiA5iJ)-^!r<9j99g zb!V*kKxEg-9|=W&pd$e{%$?D=R*r<6z%$TcxoD68fO9!g{>do)Z>~|q#wXU+7mZ2d z7pIwKlsMeqF40HUL8qtzCU>}_TxTlVFuyR<7^C*`>a4E+Gc(NaW&2&2Vk>ekq@+Pm zlpH61R>jN@cJVA=;v5dO(65SIY7}|Rb-xUSyDH6gF`XXWGxrg8x&AqqTWdNkIJI%kEoO`m^?XxdpkiR7u=*Lz z`;sLb#qZSqDa?Yopw;_uZ=%>6DYqooR{v=U#T^sdRmxvx^Jw|rO1*|*eyFnZ&9Zm7 z(1p1RCRRsMC(|<4uiFA$PCtFNb351?Jt~-lwu(-1T;(d_Hm%(?V`PwG{bUI(&yKTEI?6nA!gkUJa{6v+>f`BT64gj`HP{JWLa@;N9vAH|Fg- z*G@*DvJVybPp|=DxAZeVomiwP&^p&Lhi4=T1~PTXiv|J7yz^5-`*WP zWl)i|YXs{DESQynTcjPyj`px!{pDij9*&XvJ*f7+wRVpWm)*spw$2;ZY-ecl4I)45 zU#;BkZV;bW!nlkeVxGju2Z_>jD0%vI(k_pMn=bSpr96YjC)m%)P{uoJO@1MoH(P7= z*sUzME{G+uvXL$jR{i|;gP z^`0J!IMHx`Iww~@**j;>=uUS!^H&ru%YG?-2)&4rqYG#Mkd`qyJo}ujquomN-5m#g zInJZ(>siITD{tniCz2z4;)1>B5mNBB=+~di43*ihlE4F}V4NFal)F1i$m2E+CF|;b zn$bsPvgB8933ai1MBp>+7r&ZeMcxTT%xT6ITQk3Hi{(-oJ;Ki+wv}!d9`>({PuaQo z*2kz9SkG6cs?T2$gDCdUnUOF+{SMmN>spR#>@Td@X9&@qvRGp925qhgbjON4h*E2eK-%J$}-YQ&WB zHN0HnG?7N(>3yqUB%!Q^(W)rhi$qjp zs(W2X2T# z7NkQp1|Mmvc-cuOJk3ikhbw(`+@PgW*5BJ>TlTKEu0pq!Za~x?s?vmeaKAN+H9&gj zZ&sauVbJV`CFaVGV?@u)8|~+A+w9EgR#!w{)9)lZftj0*V#zy!6))c@o!qfF^8;gM zpShdomxMC}bc*>CCnu4MXjuF)UW9me0cB@v5<=u_2`AT5Qwab4&4Nl_nlIfjpTjvT zZAx1-=jFo0Bq&SIh&aeD21n%nIX?8n+P4 zfW3Q;?Dcv3y~GznftZ}>hx&o?rk$VoAkN;6uu3P7pq8yEgL^=P5mQYJKj#X0k2#HB z_i7#YRYd8DtE20)x7}5XX4AE=UQXW;+O*4PD7$W;UX^H(-dLmX>e5SlhF#mpP_6=&L|Y9WbL?Zui|h3;dJce{tzUA)^Vn3BgWe$kl=)vYK0~JoI)+9&1&q*`}k_cXaMJT;(#4H^+Nz 
zL0yVB!je&}$2a?q3;u)Q5&cKj+kc6mgA0T%{#|nxus#3oxin(e-)$j(-cJ6r|Kt7E z68h@)6st)5zIoObLsUPx(e7n63~R%se(Z;9=t7$J@Jj)YO~8+|xVr^@ zC%|LUd(xeqfX=QK0{>r+e!5`(0bsB$Cj%TNHGKDl-sUT_Cq7fk<3px(fz!d{57(F> zO--HT5)j21W!Dzk8|BFx_sgdhdCOS23?qC?=ZV0O%MFPkv;5QA!6vmCxo~M$c@*B1 zTMMTR(w37~tlsnTMQZ1DV!SERZ*IlsUneWWCIgI^;Xq{OJ=@A;nu)s(nNZe7_drsf z{82;Knh%rO;vhK`kzbLUtT=&A?KxAH(k&ZNNrLb~D(LgC$6MMJsw5|%)>0+tq_H9s zzUKg{{wh^^2mTwPFOD7VB`g_5FPICri}|%OGm$E_wrMB6hDOT#L`Rgx&7oC;xQtv1pb#D%0GZe&@?FAt4bjM(ilWroJ*p`Y5t=Gtsc=8Zv8mlPoc_ zpE*~J1rT5NOP3T#LbP7qD8YZ!}l4E&pm4k$KI)Ryl2nH4VKLnh~V8@zRQ5 zQvAM04m8h8Vx0T>%wc!@i*(nEDQ+}E7iz^FxUVS`=*ndo@(H}LcV^Hd8cI!jz$yfj z+%Rmsoo$fh1g*YmLS3+|hZZX@|q_SIg z+tsw;`;HTIW>gqha<}AxrG#}*{K&Kbt$ejeTh@L=4Q(%C zvLPdc$yhx^d%`kPu~T`eNFN~WUYOBUUX!BNoGDF+QhkBDYt(>cKth5moJ* zX3`9P&{j5u@!F>( zJDbagFGQV|fT_j6SQI4?UAV4YHR(N<@=&F?ccpIL`uV8eG?2xY-McVlchqp#I!XJGtq9P}Z6<)rp461ed5^uHTL{lD>kDDb2l z_VElWwuSCoc{6AmK)L95Nx*PjQ0&-uoWFZ+5Wjv%=3iRxekQQu{&u;UZvgnRck$ay zb_KCPMhK_HCjRwiCUy{UKd>P;?D!KA)sVQ|

    iLh@<%WgKg?M+RYI1#=VqjDQ5+r zlsA&kQ0A_)>FOrO`dldxXgcj0zq6S_@DPB95R~1&jFRQ^B-VhJKYPQcR^l*cPBD8!Lw+*33rIFx``|% z1djDi2cW_ik-rvq2-@|*-4=r-=ayvp{B-MyBJkQI-T4vQkEg&nqBS4T)ER32^~T%U z&HD&q%=v9^)$=)DPh#FR_P<>X2`Fawi+Iqd{W#JC+h6Yb)5AW+9?#RZ_MP^GoC*GV z*I%DC?>mks@{eZtpVbU7?Z2L`G{L`gkGwO&YqQ~WuTrb|-wFeUSW)PI8ms@GP`;SS zQ8Wnk+k*IEwS53jq;U#`-s&&%%Tv|L7ui^3;G_+DDZ^M+cUhJ&%j|~3 zbxwO4+}~07|M>oo6TJrZ#`HIi} z$;tR%sgVE0|A{Z6KlaLXY|Hnz4eno|d1Ru$YrPh+Imz)pVtwV& zrne*oX;xlMyHYK?MR!|#2wP##|Afitf7>(shbLF}odV>nw~}5%qmiu8QmQm#URVro zH-5AhXh9PO_+$a+>@HvgQXWil6q#COoAnvfCb^cbnjrJd0brs6d<^iG!vQq;r*9tC zKb&;nuLPT;=d=JR=vof=j6e=B@Q69&IeB{DU8;k*(wC2JtzI^}L|Wk1Cl;l4CnXC$>JE~-+~n1C@PsR-ZoOV&zE zFSJnG@-VCOZY!4~%(CUVLGJ4qA-NRN;!LA3`>Hg=zf33vw{mFVJP=ISIJx;|zgKXI zNqJFn-JMz+)_UH(JeW0NGnze{bgw%4YByf>Azh%edPnKw=#0R}YWeujI3(bYtIEfB zo#l)v$p82KCgx{U?M}&7j9FQHy<4H zJdXa60rSVX|DM73&$O3YGlB{5LD>BjALSpM{r}i| z&#Sf`H^q2}l-@oC^g35sREt2}(|qCFe|mgd!)&IVZ_EhawkH zly}?bbocw7d;2`6Pv8FTckb8!e5_rIy=Sex)|z9EG3F-L+~$AKlKf8u75>k<=Zx58 zTsgvis<%g2--vIlr2b9Bz7SRc?IUslE9m5eNpc=ZR)dfFcfe=RYCmdl8@+C3IQRF^ z-CHhhpz|J9mVM|tv{Z$tlE>FZN%0R&E&sr5`(LOP`q$t6V{_5J84p+MP8A%0$S=KE zontS1FX zC;1OnFxS6S#zAAR0maxwYrOsyT5_Fthx-rYuJ);5*owWhz25uMAoqn|9aDf772$V# zmnriOoeOO7W!XTYum#!FG`%y{p!s=9#dmP{iq4WRwrxfj$@6BcO;)`>Xx0ADdRPCb z)BFGI*-HcuF--+qdTm@f^WE4`QibAY%APxs0H1WQ&FF+A@yG~SJgxIEhAt;zY$JvK zSsa_#ND#6K^%5-&9DH?~n1I77c!}qb?E0n|It~pqq%D7cZWMk&bXx6XUaJZr1DuXi z1DZD31Xi1tWYLek0D4?n(E{)Xd})6m+)FyqpLT3^2OTo%IWa`w2e2V6>uy*}_#emr zw;^#RmVTscNxK;RaTH`cDj4Ha?xlz!hMA4;H_;vuz+21z)4OEB^cT)~fmdnu{mtC7 zNtR&0AEU*5ne*~a{{<&X^*T|z77gnXufp#;S72-T3Hs}Uviv?mnOFQJjwoPj^|Ydn z-;dVz1zx$oj`D{HoIxQwhXAsDIV-|5$5Mj#hhcsnf!*nb$rX&mKPQaIWdHfPPz1EN z`UwJ3njbV2acu&s%Lf|ERAE{m7eJE`4t|dKI?r{8mC3MeSN84uO${F~>j#>rlTheN z)hE?RLF4BU8LS@*Zh^3=Q$noTcmYJgY z55Bm}=v+1^3-#HUQJrjbaGeaYInj&CwDg75`QCS7y_Ut zczQ;cx0tVgr8mBC&TA|p%oVSvajMPPw|zG#2>`?(vVm>K7`9**j?x;Er7 zr)qT4~k zlMb;%r-`9LYlIadUN+D0H3U>T$_q&ooZj+5fgAXfTjRe~fJn9`8nK2P`spb{y$&OF zpWh5gf1tCvN0Ym=HC^E=q+*;&#vaasC z_^TA{ACRH>ZOa6WSlnNVxlL=JF%G%HNSxE2_BX|VTsP3n!d2H?z%lmM0e%DM?3M#Q zZ@1^wZiQI@^vC3sK|pyWY>iTOw7xU}guSZ3;e7xw!hZ=n2wH!ad8I;z-u)%38HKgj z7s5tqua*IChkw6Tn@5^<+NEjZ&8eGVSR#ffSs(KiU~&M$>VLnqCV>Sr+Wr$HLQpue z>#%@K+Fm&6#Go?&aw$pQuWdMaR4Ob z5vu?>=sA^k09rNALa%N{|10`iy5HKM9#D4t%P%RrVtqlrfT@AoBaX;-y&k8~+lEZb zr({1QUQ^=#8?%Nj?oab>@?XC>2KCQ;0d_>$3TJmn?G^2`?F3aTjO!~K@S16l?$!Ww zzE2n0xzQ_Z6K!^5G2`)rf!Vm~^{SHqv-qSl>95P`e;>mCueKfmrkJ73qkk5Q;&;|k zD}}#TVox*F^uDpyS^mR~txe5l%@L2&$OMZmeN;HG_ z1*{+r@L=HMO>@BjU%QyOjaxK@aqP#B_{MR%UMeKH_ob>!N2F7o^F3ms(qR{v46@#h zQ7@+f78m_Z-_-aSmPrP>4U>f7tM#B>O1-~1YJl>bV|w`+y63ZKQ$+biSW}6i<&q|5 z>BZz}S60o_V3tswy;mwdAo6Y@ib0Zb6#tdN81GJ?L-4I~U(@NH=ah+9DYy56CX$FM z-o`F)uaK0>Vl5KjZ%w1)R$hh9O6GrKi|KGS&mE~{1K&c)Ol(@j#Yt|cnqc6$tgRu7 zH#@Cd3yf|9Ua;E>`@l|9KFJj+PQM}g6J!b7(;E;i%_SY*S11c|Vm<^q=a$w>Seu)* zr`0obad>Sp-;_V=+8CNtab8NrL77cVi$T|8UA?c@jn)|`njxPeCNfc3N1g5&-M8sA zxrtf1oqzj!{eB{OMU0_M@*XE-w7JF%SD>@z?&rOg+NjjliNe?iNtu(`o$vB_?O{LW z9{{Uhu^?uC;Rm@-ab61@GC>SW?@8Mf@etXp20^oZS07on7N~>mxl>;i>Hy+ zeuE_G?cHGx>YE!*;IAcMq=CBP^0i1e4%_2(!2x2d)Y0&cY?klRdgr8p>;jL+&>Rjh zGCd3Xn%L>(cV}{fPeZ1_0qb?aB{xvUEhg?x>y&)Yvl)|oXH^VNPgeNUI|*ex%$KS^ z5pbEwyqU$KU%7Z#a%e1zqY%RLLEMsL5IeLQoR8i*YPq94`E8GUL6RM(W%?F;nf*Np zjbltr`0lz;Anh}uRT@{4Ow+}|+xiBf-20mb+w6m}qcLwfY<^p*5>!$EW;Zqv@l%ukS{f?bH%X7;z~ zu!0#HMMx>oT>ZIo)1TU11Rc4ZhWo;fumG!}U%hm+1dWz~wgX{Ct-^e$>(iXrr@qpGwD)EKeYGt9j7O2h^em#=w&^At>%Q7K$G*J}U%f(Y63NZqeY`2^6 z;(!za%XiEJ;cM00*34^9o$XKiU!z|MA^Ri5lmIkf^PWuPZIsw!9CYn0Y|~n=okb&3 zG5vjy5<~cygBbTuP`gutk-ru8mw`_lI^VP|g1r=Y48(FAGo_rmg@Mj8rY*NL2q 
zDbIt7IQ^t?g1W47ezaWMHI4F(qEhvc`-#Ph__=A6!G4-BkzT2K6VQ=;2Nz1qMs?NR zX*M9cx6f{#5BXV~_Wr!C!l1bbbb5n{G7==#!E@nU9IlMS4~8vWbMK+2vRoo9X64Vl z4I&0XTC~BUmL|oU5nE@mYVDQM&=K#WkGLy1CE!@g%!#)ym zcJ=irh3!@a0VoOlqy$^Ksy`D7lGG8XiEo_NUQjx;hY-eRjooi?##^1<|Q2gI){P;S>F>e zwwAz>od|h5T~gJDrPFDt(IBo!JDwD_LL~GoCeLD$aEtVw`JG-w^hW`H98sbf5{<>= zOvcKKFfM&w*VIpFRu4s2&P0ZG3lNP--bJ7oXpm$=ap@=MhP>0Injvn9Y;U#lEWd3? zYMjvCL@r88{p!xCj6iTwYR+)@4#b?#_i#L`^j%i$c|VKBZ@P)1sW5{H$nKN&>2n6T zb`i+>hA*0t@7j2Kb{62YP4hD*JT<*|NB~BWeAclUKZKhZx9N{CbI_4=+w6r{kXHm6b?Kh8?M^&uFdpAT-^piGj%QK}J z$Qly~@sCbaq9hD|hx6@-YeWp;3)FX3`;aT7?2^vuFsif7#wAM4XL)`uT`uG3Xwk|f z(%oqzyrZUPEtQjJZK^}7(f7Y-?O%>lE|6YM3)nipss6JAqNe-71OT9y< zwJaHpsKZ}bH^H;RDs+f7(7C89LG~l~fiS>uczA>|vBhnnG z8H?sT*pk>NY$JLh#@6c*@YU;sYlotqi}x1;kCSyJ^tusMsrg${x0O|uQo@03Yd`TP zNHhnX2keIDK=zfQ?xndwtOcI=TEba}xoxDLyY2=?GuK+9j}ni7%y3Qs-U4i1jofe~^fix+)Q?LgYIj!wA~lWXSYS zP|vZ4|6IA#+(I~C{_-FRtVNZt@H%{VUh-RKVw02Yt{7W#23%P|v;n7Y&m;?(8U$2W z$=oZi$fmu9?!(|es;oT2+$N~6CubZf3mXzeFCd0B19NRj=7$9*YW7yFV)ucrLH21b z*H0^aM-ci1h#C41S_XvLWo0P@lUrKpsFc{SVf}XzObH+91DU3@OPKM(m2s3^`8J}p zf*M{F#Vce*jJT+@!;rxZGoHbF;CHy-iQSnh@lG*I&MS2cE9agyGX{bgU9;ho`v);tzm{$ zkGlAY09V8=blqfD)e)6e*_&e4iCr#>X^iwU3q}Ep_qOihLXQT11A} zhu96DO{zVG4OorR_b7Lb%&r7&F;!lBQ)lE`26H5$N9EeHDdv{c%%9QPK#%IWxvHtg z(X)@AuC4m(?04-a@@6c}X4+|IBo~}7rBqfAP8nAO&v$~lGkyi>JMgQf9~uD;lgu?$ ziMuefqgTbEa~Iu%X@zl(`~qGChFDi%vnjw?;kf+#PSW1Yf;HSuL;WnW&Ux13;8k%+ zOCuj}V`d%h)&RSt;-btov3~FC;$6?tErVF>2=F{&dCk4W1h1XSZgKcq@s0|Wv$lg?o_axRxd0x&2GY!NWxVfT3a42kvag?qc zGk67#VTpnzHwj{!P|Ct}kR{>ZZw{lmbJK{nJMEb)%_M`WA$B}bs7W$uZA7X7-zlwV ziGAjThc^3pAl<~%*H@G?d2*8?Wj91qa9-I4ZGo3eww%wlYdb!d3sZUfM%eEho2S%yN;oo4ffvHBXzM)$65Q*T_gpcmL`jO_0MY6CQ*@F) z+gDk8xJ@!kGiyPsJlh9)Ny&_i$Dmec)<;b8P8*b!Uu;ubszh~{n@|cg!;S4{*5&@R zJI?!a;cvoX`iIqW#thuJ{_&Ruu~b4h=koqy!<$qmi_$bO4C}^dN*hdXHwTsb;Sr(! 
z*1fX~Ax^qx^z-g(w!Y2mhoe4a#_Ew;GG~Fyb#r7o)lH#l5_avmpOGbzDl}gimuqXp z>EGFXtBY%0I*4&`(qN==SO{4`mhfi;OeASW-4tTKXZ^r3e3@FeBnA892i(&90nqt@ zF0ZO?trSe7&tDaS#P{x|aGZQNPvu1ZTgB{LK0WAI7ffF9nh0=OVI@rj4Mz z6vI7M4V(8|!Ie3zO;3#z>fRmejSZhrHhkzm(+c!RuB-20D7GDZM>@JU%u_9mpcXJQ!J#GbtB8_3q!T!0UkB4+o=v$R zG9=#=_ROnS-n_a10kn+s{*nX6o6BA(5+6wPSq1~b5w{E$Gfx$gR z?O9VgD3SvXc!p*C8;-IEk$5&=Pqx{};xshP&;>%usE4t(pDq>&LQ^ za{x0+cEw=og9seL3yFgv*JD`iB)V##i)=1?t=j^9#clz}0t4yVZRG`>O;fo zf@(P-)ll~05>G1=Wj;N1y9;Ws^o*?`mxpXqdp@&Ym+c>~BvlT%U!4?lG$;t}W+P4W z#`K|SRe@PT3~AW-LOWqGH{s!aXem=(C#T`ZgVANOr9&zH=u2$~GrMLsf!PXKPsMiM z*0K=My3)35Hc3g=^wzK?kyKIR)N6cPHJd(su1;~p*w&@L?0H4Z~p61LLW7@#!zgc2Q|Yy>dlh_!o{#&Os$sZjO2MaqIU0dR|i*Ea0*yq=+B8;^|PRYE8F}GlX7ul44$!Fn;2wLMEyui~1@;nCsu<~_UxDQ*U1>`b@Do4d3PwA1G zSrY|qnountQB1DS!1gAQHIz5a=A~_H=bE|&snN7>13dkzOb!cHD+EsCn{x?`-{1{o%MfEn)6Vg{TcRFsYV% zn$t-YCC3m*F?*s(@wnj`FXSn*P?7ge*a#9TT8?_aW=eLjlqKM=Faq;H;)@qi4`*tLZ{M zW0SxUgI<%E$c`fxk1cj{xu(Y9FHOnLu>0UuqF;O9PtcjV(wN7)%G%jjrT39~GV=zZ zsb5dJPgYVyuGAapU|o+VF?9e*H>cJ_GW02*qk_i_bFNw6>;~%>J1s#@j5aWdN7$KYZ%+U81HiwVeU8$kYrRvFq;ixc~FoM^StN3OJm>aFnl~iT7GQlL-2tR>6i*EI_=e*pk@@sZ&-N zd#)0b!qTx3c$v2^v5%9xZCP7S*}8y7-a|TB>v{c>ZcqI@G#3`p_4}VOCsC!LD!dP9 z!^}wAc1S{-2Oq@WvA3wSpPAaq**=n)rO-&?0IlN7;ap6bW*BZy7K*wx_(*~%aUtY1 znG}5AKR2tjG1e^b91XjbyCK$aDpPXfq%CcQ%cZc(SWj6#dtYd1F1W}zR7^TTne6Cw zbu(xZb@De#UV3RJk_P_-kz7>XX(5i=qearzgYPbCS#w$?G2=Kh_n^OCH`R{a0h*;sTt)63Tm5gXG?F^@M~q zX!_xzpTum@u#^k?`}(f6e+xS!(PjuKp?fx%3u~#LuRg4n^~=k2&nXUlH+~jTt(^{7 zS4qTyPlsTv$I~aAAXpdo2Bwx^&Cr$kYO*`t1nlQ{0+c_!*Z#B|NG;P|5CG=!lL~!s zFd#tf@;V0=7E_N~oY%{_h3G-MIONJEP(`wj{d>x0f1|aTwhY1baw;lr-CP@GcVb=) z)Z@-K0)7qr->9dN7| zq?OqAY`PquZADfo=!~g&x*UWlki1eE`h?youJRd_8o^fe4Vsvsmp=i$jXD^cjM#w) zPw}1Xg~d$@T|O(cVx!RD6@!Zp74*aZE$yNafS#af zgVTTVS`b}Guyh^Tun;(pH>0zz>D?Xn_Ab_MA~dJ`;fIu(rBP4}XF7PkrS=+^3iHeD zMGeszk{jQAkC9slD@PGaj}JneQy{XH`N&;Bu=*j4(4j%RQ@PY#gwoQjfaPd^{(UNZ-BnAe{JImB8J*pr83{CyHsmgwrGh|kIjV3 zs3oq+CIh~Z6^ZbKe9ld7hLiPPvRkKv5YUb8if z`GkwOQO%Y$u_L%POQel_x%Vg$4)|kf_J15aM@-{0=oT>7iq2#^MYDJl2CArn$ zY!fO&3UN*PxjTV|nb9vSs8xx}OIr#;B0X<@n0Jwde5;7u-cWY%MMq!1GZgCAm{Ct}O+x7C9HKZ8s+^YA3(8q~U{qo3?8* z4<&;OM>FRzO5GV^OFd7Fx{OjG3RcbTSK24en|F%$X~L`UY~owHuzt0PgZX5z5mb-O zD<*b|r&t*~cgJ5|wvYSQu_aREPWEzx5#&=_ob8~iqZOglYL*?b5ht?+xmc%LzT?V0 zy|!S zz=2ap#TzH^QltdBbHzZ4jiF39##E3l%{EfSJNz!x$7@DWS#pbW}3|GieBy(71- z%nD0Uc2c-)2Uxhy5m|gBsHnI7s-JE%oZ>l*DxrUQGq-0|EnT&6>iF^rM#ImVxz}32 zY3}QW7+1L7-pPUAL*M?|URP(5%x7I8kI!f!&vB07tXjT&u60BYf88G`ka%sL>CHDV zHK5Jp)aJ5q$&2Mosb$6b-)%SD!ZMc${7&G^DwXx$s*V_4>nk-)5Pl=} zpGL{|b|CJC)*mgaa$JSUm}oU7_* zp{CS>F)k<(BMBPQPSf{bt9OwyeE+KT(RLt(4VWtE7qaD*Zha$i;D{|3;!)P93s#mJ zoFw+N^WEzMzfqekZBbQ@6bOld;I1JD}+|an8nS6 zj%Wv^RbF_3Xxz z*`_T!>xL7z_X33wZK(Naf#_`gbj+@~<13ACsl4y9K!J))vgOaphrd?U3Q2xPN_0UuV6cG<%(rRh6O7BC z9Wvsmd*VKb&@cLMrC|JA?)&9~Eu$Ns3mlbo&!bBp#oSy z>JINgR_YbIy>`M}|5*u5HtUPWA^CUG_AZoVJunq}=8)RSheY;NPgXxw^{DYAId+T3 z%xP07Xza9G^Cw*OnJ6f{@{E|=GCEQQ4DgBp9sz$rD*jK;!CM|-MysqPMAVT=8k|c`;U|b%UhI&l!9+C;-^_$Cz*PmFC}!ZCx|kcDT;T? 
[GIT binary patch data omitted]
literal 0
HcmV?d00001

diff --git a/site/sciprts/map_xml_files_to_yml.py b/site/sciprts/map_xml_files_to_yml.py
new file mode 100644
index 00000000..a2cf98ca
--- /dev/null
+++ b/site/sciprts/map_xml_files_to_yml.py
@@ -0,0 +1,16 @@
+import os
+
+def generate_string_list(folder_path, prefix):
+    file_names = os.listdir(folder_path)
+    string_list = []
+    for file_name in file_names:
+        new_string = f"- {'.'.join(file_name.split('.')[:-1])}: {prefix}{file_name}"
+        string_list.append(new_string)
+    return string_list
+
+folder_path = "./docs/xmldocs"
+prefix = "./xmldocs/"
+
+string_list = generate_string_list(folder_path, prefix)
+result = '\n'.join(string_list)
+print(result)
diff --git a/site/search/search_index.json b/site/search/search_index.json
new file mode 100644
index 00000000..5cf525c0
--- /dev/null
+++ b/site/search/search_index.json
@@ -0,0 +1 @@
+{"config":{"lang":["en"],"separator":"[\\s\\-]+","pipeline":["stopWordFilter"]},"docs":[{"location":"","title":"Overview","text":"

LLamaSharp is the C#/.NET binding of llama.cpp. It provides APIs to run inference with the LLaMa models and to deploy them in a native environment or on the Web. It helps C# developers deploy an LLM (Large Language Model) locally and integrate it with C# apps.

    "},{"location":"#main-features","title":"Main features","text":"
    • Model inference
    • Model quantization
    • Generating embeddings
    • Interactive/Instruct/Stateless executor mode
    • Chat session APIs
    • Save/load the state
    • Integration with other applications like BotSharp and semantic-kernel
    "},{"location":"#essential-insights-for-novice-learners","title":"Essential insights for novice learners","text":"

    If you are new to LLMs, here are some tips to help you get started with LLamaSharp. If you are experienced in this field, we'd still recommend taking a few minutes to read them, because some things behave differently compared to the cpp/python versions.

    1. The main ability of LLamaSharp is to provide an efficient way to run inference of LLMs (Large Language Models) locally (and to fine-tune models in the future). The model weights, however, need to be downloaded from other resources, such as huggingface.
    2. Since LLamaSharp supports multiple platforms, the NuGet package is split into LLamaSharp and LLama.Backend. After installing LLamaSharp, please install one of LLama.Backend.Cpu, LLama.Backend.Cuda11 and LLama.Backend.Cuda12. If you use the source code, the dynamic libraries can be found in LLama/Runtimes. Then rename the one you want to use to libllama.dll.
    3. LLaMA originally refers to the weights released by Meta (Facebook Research). After that, many models were fine-tuned based on it, such as Vicuna, GPT4All, and Pygmalion. Though all of these models are supported by LLamaSharp, some steps differ depending on the file format. There are mainly three kinds of files: .pth, .bin (ggml) and .bin (quantized). If you have a .bin (quantized) file, it can be used directly by LLamaSharp. If you have a .bin (ggml) file, you can also use it directly, but you will get a higher inference speed after quantization. If you have a .pth file, you need to follow the instructions in llama.cpp to convert it to a .bin (ggml) file first.
    4. LLamaSharp supports GPU acceleration, but it requires a CUDA installation. Please install CUDA 11 or CUDA 12 on your system before using LLamaSharp to enable the GPU. If you have another CUDA version, you can compile llama.cpp from source to get the dll. For building from source, please refer to issue #5.
    "},{"location":"#welcome-to-join-the-development","title":"Welcome to join the development!","text":"

    Community effort is always one of the most important things in open-source projects. Any contribution in any way is welcomed here. For example, the following things mean a lot for LLamaSharp:

    1. Open an issue when you find something wrong.
    2. Open a PR if you've fixed something. Even just correcting a typo makes a great difference.
    3. Help to optimize the documentation.
    4. Write an example or blog about how to integrate LLamaSharp with your APPs.
    5. Ask for a missing feature and discuss it with other developers.

    If you'd like to get deeply involved in the development, please reach out to us in the Discord channel or send an email to AsakusaRinne@gmail.com. :)

    "},{"location":"Architecher/","title":"Architecher","text":""},{"location":"Architecher/#architecher-of-main-functions","title":"Architecher of main functions","text":"

    The figure below shows the core framework structure, which is separated into four levels.

    • LLamaModel: The holder of a model, which directly interacts with the native library and provides some basic APIs such as tokenization and embedding. Currently it includes three classes: LLamaModel, LLamaEmbedder and LLamaQuantizer.
    • LLamaExecutors: Executors which define the way to run the LLaMA model. They provide text-to-text APIs to make the model easy to use. Currently we provide three kinds of executors: InteractiveExecutor, InstructExecutor and StatelessExecutor.
    • ChatSession: A wrapper around InteractiveExecutor and LLamaModel, which supports interactive tasks and saving/re-loading sessions. It also provides a flexible way to customize text processing via IHistoryTransform, ITextTransform and ITextStreamTransform.
    • High-level Applications: Applications that provide higher-level integration. For example, BotSharp provides integration for vector search, Chatbot UI and Web APIs, while semantic-kernel provides various APIs for operations related to LLMs. If you've made an integration, please tell us and add it to the doc!

    "},{"location":"Architecher/#recommended-usings","title":"Recommended usings","text":"

    Since LLamaModel interacts with the native library, it's not recommended to use its methods directly unless you know what you are doing. The same applies to NativeApi, which is not included in the architecture figure above.

    ChatSession is recommended when you want to build an application similar to ChatGPT, or a chatbot, because it works best with InteractiveExecutor. Though other executors are also allowed to be passed as a parameter to initialize a ChatSession, it's not encouraged if you are new to LLamaSharp and LLMs.

    High-level applications, such as BotSharp, are supposed to be used when you want to concentrate on the parts not related to the LLM. For example, if you want to deploy a chat bot to help you remember your schedule, using BotSharp may be a good choice.

    Note that the APIs of the high-level applications may not be stable yet. Please take that into account when using them.

    "},{"location":"ContributingGuide/","title":"LLamaSharp Contributing Guide","text":"

    Hi, welcome to develop LLamaSharp together with us! We are always open to every contributor and any form of contribution! If you want to actively maintain this library together, please contact us to get write access after some PRs. (Email: AsakusaRinne@gmail.com)

    On this page, we'd like to introduce how to make contributions here easily. \ud83d\ude0a

    "},{"location":"ContributingGuide/#compile-the-native-library-from-source","title":"Compile the native library from source","text":"

    Firstly, please clone the llama.cpp repository and follow the instructions in the llama.cpp README to configure your local environment.

    If you want to support cuBLAS in the compilation, please make sure that you've installed CUDA.

    When building from source, please add -DBUILD_SHARED_LIBS=ON to the cmake command. For example, when building with cuBLAS but without OpenBLAS, use the following command:

    cmake .. -DLLAMA_CUBLAS=ON -DBUILD_SHARED_LIBS=ON\n

    After running cmake --build . --config Release, you will find the llama.dll, llama.so or llama.dylib in your build directory. After copying it to LLamaSharp/LLama/runtimes and renaming it to libllama.dll, libllama.so or libllama.dylib, you can use it as the native library in LLamaSharp.

    "},{"location":"ContributingGuide/#add-a-new-feature-to-llamasharp","title":"Add a new feature to LLamaSharp","text":"

    After refactoring the framework in v0.4.0, LLamaSharp will try to maintain backward compatibility. However, in the following cases, a breaking change is acceptable:

    1. Due to some breaking changes in llama.cpp, making a breaking change helps to maintain a good abstraction and user-friendly APIs.
    2. A very important feature cannot be implemented unless some parts are refactored.
    3. After some discussions, an agreement was reached that making the breaking change is reasonable.

    If a new feature can be added without introducing any breaking change, please open a PR rather than opening an issue first. We will never refuse a PR but will help to improve it, unless it's malicious.

    When adding a feature, please take care of the namespace and the naming convention. For example, if you are adding an integration for WPF, please put the code under the namespace LLama.WPF or LLama.Integration.WPF instead of the root namespace. LLamaSharp follows the Pascal naming convention, but in parts that are invisible to users, you can do whatever you want.
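
    For example, a WPF integration could be laid out like this (a hypothetical sketch; the class name is illustrative):

    namespace LLama.Integration.WPF\n{\n    public class WpfIntegrationExample\n    {\n        // integration code goes here\n    }\n}\n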

    "},{"location":"ContributingGuide/#find-the-problem-and-fix-the-bug","title":"Find the problem and fix the BUG","text":"

    If the issue is related to the LLM's internal behaviors, such as endlessly generating the response, the best way to find the problem is to run a comparison test between llama.cpp and LLamaSharp.

    You can use exactly the same prompt, the same model and the same parameters to run the inference in llama.cpp and LLamaSharp respectively, to see whether it's really a problem caused by the implementation in LLamaSharp.

    If the experiment shows that it works well in llama.cpp but not in LLamaSharp, a search for the problem can be started. While the reasons for the problem can vary, the best way, in our opinion, is to add log prints to the llama.cpp code and use the recompiled library in LLamaSharp. Thus, when running LLamaSharp, you can see what happens inside the native library.

    After finding out the reason, a painful but happy process begins. When working on the bug fix, there's only one rule to follow: keep the examples working well. If the modification fixes the bug but impacts other functions, it is not a good fix.

    During the bug-fixing process, please don't hesitate to discuss things together when you get stuck on something.

    "},{"location":"ContributingGuide/#add-integrations","title":"Add integrations","text":"

    All kinds of integrations are welcome here! Currently the following integrations are under work or on our schedule:

    1. BotSharp
    2. semantic-kernel
    3. Unity

    Besides, for some other integrations, like ASP.NET Core, SQL, Blazor and so on, we'd appreciate your help. If your time is limited, providing an example also means a lot!

    "},{"location":"ContributingGuide/#add-examples","title":"Add examples","text":"

    There are mainly two ways to add an example:

    1. Add the example to LLama.Examples of the repository.
    2. Put the example in another repository and add the link to the README or docs of LLamaSharp.
    "},{"location":"ContributingGuide/#add-documents","title":"Add documents","text":"

    LLamaSharp uses mkdocs to build the documentation; please follow the mkdocs tutorial to add or modify documents in LLamaSharp.

    "},{"location":"GetStarted/","title":"Get Started","text":""},{"location":"GetStarted/#install-packages","title":"Install packages","text":"

    Firstly, search for LLamaSharp in the NuGet package manager and install it.

    PM> Install-Package LLamaSharp\n

    Then, search and install one of the following backends:

    LLamaSharp.Backend.Cpu\nLLamaSharp.Backend.Cuda11\nLLamaSharp.Backend.Cuda12\n

    Here's the mapping between the backends, LLamaSharp versions and the corresponding verified model samples provided by LLamaSharp. If you're not sure which model works with a version, please try our sample model.

    | LLamaSharp.Backend | LLamaSharp | Verified Model Resources | llama.cpp commit id |
    | - | - | - | - |
    | - | v0.2.0 | This version is not recommended to use. | - |
    | - | v0.2.1 | WizardLM, Vicuna (filenames with \"old\") | - |
    | v0.2.2 | v0.2.2, v0.2.3 | WizardLM, Vicuna (filenames without \"old\") | 63d2046 |
    | v0.3.0 | v0.3.0 | LLamaSharpSamples v0.3.0, WizardLM | 7e4ea5b |
    "},{"location":"GetStarted/#download-a-model","title":"Download a model","text":"

    One of the following models could be okay:

    • LLaMA \ud83e\udd99
    • Alpaca
    • GPT4All
    • Chinese LLaMA / Alpaca
    • Vigogne (French)
    • Vicuna
    • Koala
    • OpenBuddy \ud83d\udc36 (Multilingual)
    • Pygmalion 7B / Metharme 7B
    • WizardLM

    Note that because llama.cpp is under fast development now and often introduces breaking changes, some model weights on huggingface that work under one version may be invalid with another version. If it's your first time configuring LLamaSharp, we'd suggest using the verified model weights in the table above.

    "},{"location":"GetStarted/#run-the-program","title":"Run the program","text":"

    Please create a console program with a dotnet runtime >= netstandard 2.0 (>= net6.0 is recommended). Then, paste the following code into program.cs:

    using LLama.Common;\nusing LLama;\n\nstring modelPath = \"<Your model path>\"; // change it to your own model path\nvar prompt = \"Transcript of a dialog, where the User interacts with an Assistant named Bob. Bob is helpful, kind, honest, good at writing, and never fails to answer the User's requests immediately and with precision.\\r\\n\\r\\nUser: Hello, Bob.\\r\\nBob: Hello. How may I help you today?\\r\\nUser: Please tell me the largest city in Europe.\\r\\nBob: Sure. The largest city in Europe is Moscow, the capital of Russia.\\r\\nUser:\"; // use the \"chat-with-bob\" prompt here.\n\n// Initialize a chat session\nvar ex = new InteractiveExecutor(new LLamaModel(new ModelParams(modelPath, contextSize: 1024, seed: 1337, gpuLayerCount: 5)));\nChatSession session = new ChatSession(ex);\n\n// show the prompt\nConsole.WriteLine();\nConsole.Write(prompt);\n\n// run the inference in a loop to chat with LLM\nwhile (true)\n{\n    foreach (var text in session.Chat(prompt, new InferenceParams() { Temperature = 0.6f, AntiPrompts = new List<string> { \"User:\" } }))\n    {\n        Console.Write(text);\n    }\n\n    Console.ForegroundColor = ConsoleColor.Green;\n    prompt = Console.ReadLine();\n    Console.ForegroundColor = ConsoleColor.White;\n}\n

    After starting it, you'll see the following outputs.

    Please input your model path: D:\\development\\llama\\weights\\wizard-vicuna-13B.ggmlv3.q4_1.bin\nllama.cpp: loading model from D:\\development\\llama\\weights\\wizard-vicuna-13B.ggmlv3.q4_1.bin\nllama_model_load_internal: format     = ggjt v3 (latest)\nllama_model_load_internal: n_vocab    = 32000\nllama_model_load_internal: n_ctx      = 1024\nllama_model_load_internal: n_embd     = 5120\nllama_model_load_internal: n_mult     = 256\nllama_model_load_internal: n_head     = 40\nllama_model_load_internal: n_layer    = 40\nllama_model_load_internal: n_rot      = 128\nllama_model_load_internal: ftype      = 3 (mostly Q4_1)\nllama_model_load_internal: n_ff       = 13824\nllama_model_load_internal: n_parts    = 1\nllama_model_load_internal: model size = 13B\nllama_model_load_internal: ggml ctx size = 7759.48 MB\nllama_model_load_internal: mem required  = 9807.48 MB (+ 1608.00 MB per state)\n....................................................................................................\nllama_init_from_file: kv self size  =  800.00 MB\n\nTranscript of a dialog, where the User interacts with an Assistant named Bob. Bob is helpful, kind, honest, good at writing, and never fails to answer the User's requests immediately and with precision.\n\nUser: Hello, Bob.\nBob: Hello. How may I help you today?\nUser: Please tell me the largest city in Europe.\nBob: Sure. The largest city in Europe is Moscow, the capital of Russia.\nUser:\n

    Now, enjoy chatting with the LLM!

    "},{"location":"Tricks/","title":"Tricks for FAQ","text":"

    Sometimes, your application using LLMs and LLamaSharp may show strange behaviors. Before opening an issue to report a bug, the following tricks may be worth a try.

    "},{"location":"Tricks/#carefully-set-the-anti-prompts","title":"Carefully set the anti-prompts","text":"

    An anti-prompt, also called a \"stop keyword\", decides when to stop the response generation. Under interactive mode, the maximum token count is usually not set, which lets the LLM generate responses indefinitely. Therefore, setting the anti-prompt correctly helps a lot to avoid strange behaviors. For example, the prompt file chat-with-bob.txt has the following content:

    Transcript of a dialog, where the User interacts with an Assistant named Bob. Bob is helpful, kind, honest, good at writing, and never fails to answer the User's requests immediately and with precision.\n\nUser: Hello, Bob.\nBob: Hello. How may I help you today?\nUser: Please tell me the largest city in Europe.\nBob: Sure. The largest city in Europe is Moscow, the capital of Russia.\nUser:\n

    Therefore, the anti-prompt should be set to \"User:\". If the last line of the prompt is removed, the LLM will automatically generate a question (User) and a response (Bob) once when running the chat session. Therefore, the anti-prompt is suggested to be appended to the prompt when starting a chat session.

    What if an extra line is appended? The string \"User:\" in the prompt will be followed by a \"\\n\" character. Thus, when running the model, the automatic generation of a question/response pair may appear, because the anti-prompt is \"User:\" but the last token is \"User:\\n\". Whether it appears is undefined behavior, depending on the implementation inside the LLamaExecutor. Anyway, since it may lead to unexpected behaviors, it's recommended to trim your prompt or carefully keep it consistent with your anti-prompt.
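
    For example, the snippet below keeps the prompt's tail consistent with the anti-prompt (a minimal sketch based on the APIs shown in this document; session is assumed to be an initialized ChatSession):

    string prompt = File.ReadAllText(\"chat-with-bob.txt\").TrimEnd(); // trim the trailing newline so the prompt ends exactly with \"User:\"\nvar inferenceParams = new InferenceParams() { Temperature = 0.6f, AntiPrompts = new List<string> { \"User:\" } };\nforeach (var text in session.Chat(prompt, inferenceParams))\n{\n    Console.Write(text);\n}\n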

    "},{"location":"Tricks/#pay-attention-to-the-length-of-prompt","title":"Pay attention to the length of prompt","text":"

    Sometimes we want to input a long prompt to execute a task. However, the context size may limit the inference of the LLaMA model. Please ensure the inequality below holds.

    $$ len(prompt) + len(response) < len(context) $$

    In this inequality, len(response) refers to the number of tokens the LLM is expected to generate.
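
    As a rough check, you can tokenize the prompt first and compare the token count with the context size (a sketch; expectedResponseTokens is an estimate you choose yourself):

    var model = new LLamaModel(new ModelParams(\"<modelPath>\", contextSize: 1024));\nstring prompt = \"<your long prompt>\";\nint promptTokens = model.Tokenize(prompt).Count();\nint expectedResponseTokens = 256; // your own estimate of len(response)\nif (promptTokens + expectedResponseTokens >= 1024)\n{\n    Console.WriteLine(\"The prompt is likely too long for this context size.\");\n}\n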

    "},{"location":"Tricks/#try-differenct-executors-with-a-prompt","title":"Try differenct executors with a prompt","text":"

    Some prompts work well under interactive mode, such as chat-with-bob, while others may work well under instruct mode, such as alpaca. Besides, if your input is quite simple and a one-time job, such as \"Q: what is the satellite of the earth? A: \", stateless mode will be a good choice.

    If your chat bot performs badly, trying a different executor may make it work well.

    "},{"location":"Tricks/#choose-models-weight-depending-on-you-task","title":"Choose models weight depending on you task","text":"

    The differences between models may lead to very different behaviors on the same task. For example, if you're building a chat bot for a non-English language, a model specially fine-tuned for the language you want to use will have a huge effect on the performance.

    "},{"location":"Tricks/#set-the-layer-count-you-want-to-offload-to-gpu","title":"Set the layer count you want to offload to GPU","text":"

    Currently, the GpuLayerCount parameter, which decides the number of layers loaded into the GPU, is set to 20 by default. However, if you have a powerful GPU, setting it to a larger number will attain faster inference.
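
    For example (a sketch; the layer count that fits depends on your VRAM):

    // offload more layers to the GPU for faster inference (requires enough VRAM)\nvar model = new LLamaModel(new ModelParams(\"<modelPath>\", gpuLayerCount: 40));\n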

    "},{"location":"ChatSession/basic-usages/","title":"Basic usages of ChatSession","text":"

    ChatSession is a higher-level abstraction than the executors. In the context of a chat application like ChatGPT, a \"chat session\" refers to an interactive conversation or exchange of messages between the user and the chatbot. It represents a continuous flow of communication where the user enters input or asks questions, and the chatbot responds accordingly. A chat session typically starts when the user initiates a conversation with the chatbot and continues until the interaction comes to a natural end or is explicitly terminated by either the user or the system. During a chat session, the chatbot maintains the context of the conversation, remembers previous messages, and generates appropriate responses based on the user's inputs and the ongoing dialogue.

    "},{"location":"ChatSession/basic-usages/#initialize-a-session","title":"Initialize a session","text":"

    Currently, the only accepted parameter is an ILLamaExecutor, because this is the only parameter that we're sure will exist in all future versions. Since it's a high-level abstraction, we're conservative about the API design. In the future, more kinds of constructors may be added.

    InteractiveExecutor ex = new(new LLamaModel(new ModelParams(modelPath)));\nChatSession session = new ChatSession(ex);\n
    "},{"location":"ChatSession/basic-usages/#chat-with-the-bot","title":"Chat with the bot","text":"

    There are two kinds of input accepted by the Chat API: ChatHistory and String. The API with a string is quite similar to that of the executors. Meanwhile, the API with ChatHistory aims to provide more flexible usage. For example, suppose you had a chat with the bot in session A before you opened session B. Session B has no memory of what you said in A. Therefore, you can feed the history of A into B.

    string prompt = \"What is C#?\";\n\nforeach (var text in session.Chat(prompt, new InferenceParams() { Temperature = 0.6f, AntiPrompts = new List<string> { \"User:\" } })) // the inference params should be changed depending on your statement\n{\n    Console.Write(text);\n}\n
    "},{"location":"ChatSession/basic-usages/#get-the-history","title":"Get the history","text":"

    Currently History is a property of ChatSession.

    foreach(var rec in session.History.Messages)\n{\n    Console.WriteLine($\"{rec.AuthorRole}: {rec.Content}\");\n}\n
    "},{"location":"ChatSession/save-load-session/","title":"Save/Load Chat Session","text":"

    Generally, the chat session may need to be switched, which requires the ability to load and save sessions.

    When building a chat bot app, it's NOT encouraged to initialize many chat sessions and keep them in memory waiting to be switched to, because the memory consumption on both CPU and GPU is expensive. It's recommended to save the current session before switching to a new session, and to load the file when switching back to the session.

    The API is quite simple: the files will be saved into a directory you specify. If the path does not exist, a new directory will be created.

    string savePath = \"<save dir>\";\nsession.SaveSession(savePath);\n\nsession.LoadSession(savePath);\n
    "},{"location":"ChatSession/transforms/","title":"Transforms in Chat Session","text":"

    There are three important elements in ChatSession: input, output and history. Besides, there are some conversions between them. Since how they are processed varies under different conditions, LLamaSharp hands this part of the power over to the users.

    Currently, there are three kinds of processes that can be customized, as introduced below.

    "},{"location":"ChatSession/transforms/#input-transform","title":"Input transform","text":"

    In general, the input of the chat API is a text (without streaming), therefore ChatSession processes it in a pipeline. If you want to use a customized transform, you need to define a transform that implements ITextTransform and add it to the pipeline of ChatSession.

    public interface ITextTransform\n{\n    string Transform(string text);\n}\n
    public class MyInputTransform1 : ITextTransform\n{\n    public string Transform(string text)\n    {\n        return $\"Question: {text}\\n\";\n    }\n}\n\npublic class MyInputTransform2 : ITextTransform\n{\n    public string Transform(string text)\n    {\n        return text + \"Answer: \";\n    }\n}\n\nsession.AddInputTransform(new MyInputTransform1()).AddInputTransform(new MyInputTransform2());\n
    "},{"location":"ChatSession/transforms/#output-transform","title":"Output transform","text":"

    Different from the input, the output of the chat API is a text stream. Therefore you need to process it word by word, instead of getting the full text at once.

    Its interface takes an IEnumerable<string> as input, which is actually a yielded sequence.

    public interface ITextStreamTransform\n{\n    IEnumerable<string> Transform(IEnumerable<string> tokens);\n    IAsyncEnumerable<string> TransformAsync(IAsyncEnumerable<string> tokens);\n}\n

    When implementing it, you can throw a NotImplementedException in one of the methods if you only need to use the chat API synchronously or only asynchronously.

    Different from the input transform pipeline, the output transform only supports one transform.

    session.WithOutputTransform(new MyOutputTransform());\n

    Here's an example of how to implement the interface. In this example, the transform detects whether there are some keywords in the response and removes them.

    /// <summary>\n/// A text output transform that removes the keywords from the response.\n/// </summary>\npublic class KeywordTextOutputStreamTransform : ITextStreamTransform\n{\n    HashSet<string> _keywords;\n    int _maxKeywordLength;\n    bool _removeAllMatchedTokens;\n\n    /// <summary>\n    /// \n    /// </summary>\n    /// <param name=\"keywords\">Keywords that you want to remove from the response.</param>\n    /// <param name=\"redundancyLength\">The extra length when searching for the keyword. For example, if your only keyword is \"highlight\", \n    /// maybe the token you get is \"\\r\\nhighligt\". In this condition, if redundancyLength=0, the token cannot be successfully matched because the length of \"\\r\\nhighligt\" (10)\n    /// has already exceeded the maximum length of the keywords (8). On the contrary, setting redundancyLength >= 2 leads to a successful match.\n    /// The larger the redundancyLength is, the lower the processing speed. But as an experience, it won't introduce too much performance impact when redundancyLength <= 5 </param>\n    /// <param name=\"removeAllMatchedTokens\">If set to true, when getting a matched keyword, all the related tokens will be removed. Otherwise only the part of keyword will be removed.</param>\n    public KeywordTextOutputStreamTransform(IEnumerable<string> keywords, int redundancyLength = 3, bool removeAllMatchedTokens = false)\n    {\n        _keywords = new(keywords);\n        _maxKeywordLength = keywords.Select(x => x.Length).Max() + redundancyLength;\n        _removeAllMatchedTokens = removeAllMatchedTokens;\n    }\n    /// <inheritdoc />\n    public IEnumerable<string> Transform(IEnumerable<string> tokens)\n    {\n        var window = new Queue<string>();\n\n        foreach (var s in tokens)\n        {\n            window.Enqueue(s);\n            var current = string.Join(\"\", window);\n            if (_keywords.Any(x => current.Contains(x)))\n            {\n                var matchedKeyword = _keywords.First(x => current.Contains(x));\n                int total = window.Count;\n                for (int i = 0; i < total; i++)\n                {\n                    window.Dequeue();\n                }\n                if (!_removeAllMatchedTokens)\n                {\n                    yield return current.Replace(matchedKeyword, \"\");\n                }\n            }\n            if (current.Length >= _maxKeywordLength)\n            {\n                if (_keywords.Any(x => current.Contains(x)))\n                {\n                    var matchedKeyword = _keywords.First(x => current.Contains(x));\n                    int total = window.Count;\n                    for (int i = 0; i < total; i++)\n                    {\n                        window.Dequeue();\n                    }\n                    if (!_removeAllMatchedTokens)\n                    {\n                        yield return current.Replace(matchedKeyword, \"\");\n                    }\n                }\n                else\n                {\n                    int total = window.Count;\n                    for (int i = 0; i < total; i++)\n                    {\n                        yield return window.Dequeue();\n                    }\n                }\n            }\n        }\n        int totalCount = window.Count;\n        for (int i = 0; i < totalCount; i++)\n        {\n            yield return window.Dequeue();\n        }\n    }\n    /// <inheritdoc />\n    public async IAsyncEnumerable<string> TransformAsync(IAsyncEnumerable<string> tokens)\n    {\n        throw new NotImplementedException(); // This is implemented in `LLamaTransforms` but we ignore it here.\n    }\n}\n
    "},{"location":"ChatSession/transforms/#history-transform","title":"History transform","text":"

    The chat history can be converted to or from a text, which is exactly what this interface does.

    public interface IHistoryTransform\n{\n    string HistoryToText(ChatHistory history);\n    ChatHistory TextToHistory(AuthorRole role, string text);\n}\n

    Similar to the output transform, the history transform is added in the following way:

    session.WithHistoryTransform(new MyHistoryTransform());\n

    The implementation is quite flexible, depending on what you want the history message to be like. Here's an example, which is the default history transform in LLamaSharp.

    /// <summary>\n/// The default history transform.\n/// Uses plain text with the following format:\n/// [Author]: [Message]\n/// </summary>\npublic class DefaultHistoryTransform : IHistoryTransform\n{\n    private readonly string defaultUserName = \"User\";\n    private readonly string defaultAssistantName = \"Assistant\";\n    private readonly string defaultSystemName = \"System\";\n    private readonly string defaultUnknownName = \"??\";\n\n    string _userName;\n    string _assistantName;\n    string _systemName;\n    string _unknownName;\n    bool _isInstructMode;\n    public DefaultHistoryTransform(string? userName = null, string? assistantName = null, \n        string? systemName = null, string? unknownName = null, bool isInstructMode = false)\n    {\n        _userName = userName ?? defaultUserName;\n        _assistantName = assistantName ?? defaultAssistantName;\n        _systemName = systemName ?? defaultSystemName;\n        _unknownName = unknownName ?? defaultUnknownName;\n        _isInstructMode = isInstructMode;\n    }\n\n    public virtual string HistoryToText(ChatHistory history)\n    {\n        StringBuilder sb = new();\n        foreach (var message in history.Messages)\n        {\n            if (message.AuthorRole == AuthorRole.User)\n            {\n                sb.AppendLine($\"{_userName}: {message.Content}\");\n            }\n            else if (message.AuthorRole == AuthorRole.System)\n            {\n                sb.AppendLine($\"{_systemName}: {message.Content}\");\n            }\n            else if (message.AuthorRole == AuthorRole.Unknown)\n            {\n                sb.AppendLine($\"{_unknownName}: {message.Content}\");\n            }\n            else if (message.AuthorRole == AuthorRole.Assistant)\n            {\n                sb.AppendLine($\"{_assistantName}: {message.Content}\");\n            }\n        }\n        return sb.ToString();\n    }\n\n    public virtual ChatHistory TextToHistory(AuthorRole role, string text)\n    {\n        ChatHistory history = new ChatHistory();\n        history.AddMessage(role, TrimNamesFromText(text, role));\n        return history;\n    }\n\n    public virtual string TrimNamesFromText(string text, AuthorRole role)\n    {\n        if (role == AuthorRole.User && text.StartsWith($\"{_userName}:\"))\n        {\n            text = text.Substring($\"{_userName}:\".Length).TrimStart();\n        }\n        else if (role == AuthorRole.Assistant && text.EndsWith($\"{_assistantName}:\"))\n        {\n            text = text.Substring(0, text.Length - $\"{_assistantName}:\".Length).TrimEnd();\n        }\n        if (_isInstructMode && role == AuthorRole.Assistant && text.EndsWith(\"\\n> \"))\n        {\n            text = text.Substring(0, text.Length - \"\\n> \".Length).TrimEnd();\n        }\n        return text;\n    }\n}\n
    "},{"location":"HighLevelApps/bot-sharp/","title":"The Usage of BotSharp Integration","text":"

    This document is still a work in progress; please wait for a while. Thank you for your support! :)

    "},{"location":"LLamaExecutors/differences/","title":"Differences of Executors","text":""},{"location":"LLamaExecutors/differences/#differences-between-the-executors","title":"Differences between the executors","text":"

    There are currently three kinds of executors provided: InteractiveExecutor, InstructExecutor and StatelessExecutor.

    In a word, InteractiveExecutor is suitable for continuously getting answers to your questions from the LLM. InstructExecutor lets the LLM execute your instructions, such as \"continue writing\". StatelessExecutor is best for one-time jobs, because the previous inference has no impact on the current inference.
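
    All three executors are constructed from a model in the same way (a sketch; the InstructExecutor and StatelessExecutor constructors are assumed to mirror InteractiveExecutor's):

    var model = new LLamaModel(new ModelParams(\"<modelPath>\"));\nvar interactive = new InteractiveExecutor(model); // stateful, chat style\nvar instruct = new InstructExecutor(model); // stateful, instruction following\nvar stateless = new StatelessExecutor(model); // no memory between calls\n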

    "},{"location":"LLamaExecutors/differences/#interactive-mode-instruct-mode","title":"Interactive mode & Instruct mode","text":"

    Both of them take \"completing the prompt\" as the goal of generating the response. For example, if you input Long long ago, there was a fox who wanted to make friends with humans. One day, then the LLM will continue to write the story.

    Under interactive mode, you play the role of the user and the LLM plays the role of the assistant, helping you with your questions or requests.

    Under instruct mode, you give LLM some instructions and it follows.

    Though their behaviors sound similar, the choice could make many differences depending on your prompt. For example, \"chat-with-bob\" performs well under interactive mode and alpaca does well under instruct mode.

    // chat-with-bob\n\nTranscript of a dialog, where the User interacts with an Assistant named Bob. Bob is helpful, kind, honest, good at writing, and never fails to answer the User's requests immediately and with precision.\n\nUser: Hello, Bob.\nBob: Hello. How may I help you today?\nUser: Please tell me the largest city in Europe.\nBob: Sure. The largest city in Europe is Moscow, the capital of Russia.\nUser:\n
    // alpaca\n\nBelow is an instruction that describes a task. Write a response that appropriately completes the request.\n

    Therefore, please modify the prompt correspondingly when switching from one mode to the other.

    "},{"location":"LLamaExecutors/differences/#stateful-mode-and-stateless-mode","title":"Stateful mode and Stateless mode.","text":"

    Despite the differences between interactive mode and instruct mode, both of them are stateful modes. That is, your previous question/instruction will impact the current response from the LLM. On the contrary, the stateless executor does not have such a \"memory\". No matter how many times you talk to it, it will only concentrate on what you say this time.

    Since the stateless executor has no memory of previous conversations, you need to input your question together with the whole context as the prompt to get a better answer.

    For example, if you feed Q: Who is Trump? A: to the stateless executor, it may give the following answer with the anti-prompt Q:.

    Donald J. Trump, born June 14, 1946, is an American businessman, television personality, politician and the 45th President of the United States (2017-2021). # Anexo:Torneo de Hamburgo 2022 (individual masculino)\n\n## Presentaci\u00f3n previa\n\n* Defensor del t\u00edtulo:  Daniil Medv\u00e9dev\n

    It seems that things went well at first. However, after answering the question itself, the LLM began to talk about other things until the answer reached the token count limit. The reason for this strange behavior is that the anti-prompt cannot be matched. With this input, the LLM cannot decide whether to append the string \"A: \" at the end of the response.

    As an improvement, let's take the following text as the input:

    Q: What is the capital of the USA? A: Washington. Q: What is the sum of 1 and 2? A: 3. Q: Who is Trump? A: \n

    Then, I got the following answer with the anti-prompt Q:.

    45th president of the United States.\n

    This time, by repeating the same pattern of Q: xxx? A: xxx., the LLM outputs the anti-prompt we want, which helps to decide where to stop the generation.

    "},{"location":"LLamaExecutors/parameters/","title":"Inference Parameters","text":"

    Different from LLamaModel, when using an executor, InferenceParams is passed to the Infer method instead of the constructor. This is because executors only define the way to run the model, so in each run you can change the settings for that particular inference.
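
    For example, two consecutive runs of the same executor may use different settings (a sketch):

    var executor = new InteractiveExecutor(new LLamaModel(new ModelParams(\"<modelPath>\")));\n\n// a creative first run\nforeach (var text in executor.Infer(\"Tell me a story about a fox.\", new InferenceParams() { Temperature = 0.9f }))\n{\n    Console.Write(text);\n}\n\n// a more deterministic second run with the same executor\nforeach (var text in executor.Infer(\"Now summarize the story.\", new InferenceParams() { Temperature = 0.2f }))\n{\n    Console.Write(text);\n}\n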

    "},{"location":"LLamaExecutors/parameters/#inferenceparams","title":"InferenceParams","text":"

    Namespace: LLama.Common

    public class InferenceParams\n

    Inheritance Object \u2192 InferenceParams

    "},{"location":"LLamaExecutors/parameters/#properties","title":"Properties","text":""},{"location":"LLamaExecutors/parameters/#tokenskeep","title":"TokensKeep","text":"

    number of tokens to keep from initial prompt

    public int TokensKeep { get; set; }\n
    "},{"location":"LLamaExecutors/parameters/#property-value","title":"Property Value","text":"

    Int32

    "},{"location":"LLamaExecutors/parameters/#maxtokens","title":"MaxTokens","text":"

    how many new tokens to predict (n_predict); set to -1 to generate responses infinitely until completion.

    public int MaxTokens { get; set; }\n
    "},{"location":"LLamaExecutors/parameters/#property-value_1","title":"Property Value","text":"

    Int32

    "},{"location":"LLamaExecutors/parameters/#logitbias","title":"LogitBias","text":"

    logit bias for specific tokens

    public Dictionary<int, float> LogitBias { get; set; }\n
    "},{"location":"LLamaExecutors/parameters/#property-value_2","title":"Property Value","text":"

    Dictionary<Int32, Single>

    "},{"location":"LLamaExecutors/parameters/#antiprompts","title":"AntiPrompts","text":"

    Sequences where the model will stop generating further tokens.

    public IEnumerable<string> AntiPrompts { get; set; }\n
    "},{"location":"LLamaExecutors/parameters/#property-value_3","title":"Property Value","text":"

    IEnumerable<String>

    "},{"location":"LLamaExecutors/parameters/#pathsession","title":"PathSession","text":"

    path to file for saving/loading model eval state

    public string PathSession { get; set; }\n
    "},{"location":"LLamaExecutors/parameters/#property-value_4","title":"Property Value","text":"

    String

    "},{"location":"LLamaExecutors/parameters/#inputsuffix","title":"InputSuffix","text":"

    string to suffix user inputs with

    public string InputSuffix { get; set; }\n
    "},{"location":"LLamaExecutors/parameters/#property-value_5","title":"Property Value","text":"

    String

    "},{"location":"LLamaExecutors/parameters/#inputprefix","title":"InputPrefix","text":"

    string to prefix user inputs with

    public string InputPrefix { get; set; }\n
    "},{"location":"LLamaExecutors/parameters/#property-value_6","title":"Property Value","text":"

    String

    "},{"location":"LLamaExecutors/parameters/#topk","title":"TopK","text":"

    0 or lower to use vocab size

    public int TopK { get; set; }\n
    "},{"location":"LLamaExecutors/parameters/#property-value_7","title":"Property Value","text":"

    Int32

    "},{"location":"LLamaExecutors/parameters/#topp","title":"TopP","text":"

    1.0 = disabled

    public float TopP { get; set; }\n
    "},{"location":"LLamaExecutors/parameters/#property-value_8","title":"Property Value","text":"

    Single

    "},{"location":"LLamaExecutors/parameters/#tfsz","title":"TfsZ","text":"

    1.0 = disabled

    public float TfsZ { get; set; }\n
    "},{"location":"LLamaExecutors/parameters/#property-value_9","title":"Property Value","text":"

    Single

    "},{"location":"LLamaExecutors/parameters/#typicalp","title":"TypicalP","text":"

    1.0 = disabled

    public float TypicalP { get; set; }\n
    "},{"location":"LLamaExecutors/parameters/#property-value_10","title":"Property Value","text":"

    Single

    "},{"location":"LLamaExecutors/parameters/#temperature","title":"Temperature","text":"

    1.0 = disabled

    public float Temperature { get; set; }\n
    "},{"location":"LLamaExecutors/parameters/#property-value_11","title":"Property Value","text":"

    Single

    "},{"location":"LLamaExecutors/parameters/#repeatpenalty","title":"RepeatPenalty","text":"

    1.0 = disabled

    public float RepeatPenalty { get; set; }\n
    "},{"location":"LLamaExecutors/parameters/#property-value_12","title":"Property Value","text":"

    Single

    "},{"location":"LLamaExecutors/parameters/#repeatlasttokenscount","title":"RepeatLastTokensCount","text":"

    last n tokens to penalize (0 = disable penalty, -1 = context size) (repeat_last_n)

    public int RepeatLastTokensCount { get; set; }\n
    "},{"location":"LLamaExecutors/parameters/#property-value_13","title":"Property Value","text":"

    Int32

    "},{"location":"LLamaExecutors/parameters/#frequencypenalty","title":"FrequencyPenalty","text":"

    frequency penalty coefficient 0.0 = disabled

    public float FrequencyPenalty { get; set; }\n
    "},{"location":"LLamaExecutors/parameters/#property-value_14","title":"Property Value","text":"

    Single

    "},{"location":"LLamaExecutors/parameters/#presencepenalty","title":"PresencePenalty","text":"

    presence penalty coefficient 0.0 = disabled

    public float PresencePenalty { get; set; }\n
    "},{"location":"LLamaExecutors/parameters/#property-value_15","title":"Property Value","text":"

    Single

    "},{"location":"LLamaExecutors/parameters/#mirostat","title":"Mirostat","text":"

    Mirostat uses tokens instead of words. The algorithm is described in the paper https://arxiv.org/abs/2007.14966. 0 = disabled, 1 = mirostat, 2 = mirostat 2.0

    public MiroStateType Mirostat { get; set; }\n
    "},{"location":"LLamaExecutors/parameters/#property-value_16","title":"Property Value","text":"

    MiroStateType

    "},{"location":"LLamaExecutors/parameters/#mirostattau","title":"MirostatTau","text":"

    target entropy

    public float MirostatTau { get; set; }\n
    "},{"location":"LLamaExecutors/parameters/#property-value_17","title":"Property Value","text":"

    Single

    "},{"location":"LLamaExecutors/parameters/#mirostateta","title":"MirostatEta","text":"

    learning rate

    public float MirostatEta { get; set; }\n
    "},{"location":"LLamaExecutors/parameters/#property-value_18","title":"Property Value","text":"

    Single

    "},{"location":"LLamaExecutors/parameters/#penalizenl","title":"PenalizeNL","text":"

    consider newlines as a repeatable token (penalize_nl)

    public bool PenalizeNL { get; set; }\n
    "},{"location":"LLamaExecutors/parameters/#property-value_19","title":"Property Value","text":"

    Boolean

    "},{"location":"LLamaExecutors/save-load-state/","title":"Save/Load State of Executor","text":"

    Similar to LLamaModel, an executor also has its state, which can be saved and loaded. Note that in most cases, the state of the executor and the state of the model should be loaded and saved at the same time.

    To decouple the model and the executor, we provide APIs to save/load state for the model and the executor respectively. However, during the inference, the processed information leaves a footprint in LLamaModel's native context. Therefore, if you just load a state from another executor but keep the model unmodified, some strange things may happen. The same goes for loading the model state only.
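
    A minimal sketch of keeping the two states in lockstep (model and executor are assumed to be an initialized LLamaModel and InteractiveExecutor):

    // save the two states at the same time...\nmodel.SaveState(\"model.st\");\nexecutor.SaveState(\"executor.st\");\n\n// ...and restore them at the same time\nmodel.LoadState(\"model.st\");\nexecutor.LoadState(\"executor.st\");\n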

    Is there a case that requires loading only one of them? The answer is YES. For example, after resetting the model state, if you don't want the inference to start from the new position, leaving the executor unmodified is okay. But, anyway, this flexible usage may cause unexpected behaviors, so please make sure you know what you're doing before using it in this way.

    In the future version, we'll open the access for some variables inside the executor to support more flexible usages.

    The APIs to load/save the state of the executors are similar to those of LLamaModel. However, note that StatelessExecutor doesn't have such APIs because it's stateless itself. Besides, the output of GetStateData is an object of type ExecutorBaseState.

    LLamaModel model = new LLamaModel(new ModelParams(\"<modelPath>\"));\nInteractiveExecutor executor = new InteractiveExecutor(model);\n// do some things...\nexecutor.SaveState(\"executor.st\");\nvar stateData = model.GetStateData();\n\nInteractiveExecutor executor2 = new InteractiveExecutor(model);\nexecutor2.LoadState(stateData);\n// do some things...\n\nInteractiveExecutor executor3 = new InteractiveExecutor(model);\nexecutor3.LoadState(\"executor.st\");\n// do some things...\n
    "},{"location":"LLamaExecutors/text-to-text-apis/","title":"Text-to-Text APIs of the executors","text":"

    All the executors implement the interface ILLamaExecutor, which provides two APIs for executing text-to-text tasks.

    public interface ILLamaExecutor\n{\n    public LLamaModel Model { get; }\n\n    IEnumerable<string> Infer(string text, InferenceParams? inferenceParams = null, CancellationToken token = default);\n\n    IAsyncEnumerable<string> InferAsync(string text, InferenceParams? inferenceParams = null, CancellationToken token = default);\n}\n

    Just pass the text to the executor along with the inference parameters. For the inference parameters, please refer to the executor inference parameters doc.

    The output of both APIs is a yielded enumerable. Therefore, when receiving the output, you can directly use foreach to act on each word as you get it, instead of waiting for the whole process to complete.
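
    For example, the asynchronous API can be consumed with await foreach (a sketch; executor is any ILLamaExecutor instance):

    await foreach (var text in executor.InferAsync(\"What is C#?\", new InferenceParams() { AntiPrompts = new List<string> { \"Q:\" } }))\n{\n    Console.Write(text); // act on each piece of text as soon as it arrives\n}\n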

    "},{"location":"LLamaModel/embeddings/","title":"Get Embeddings","text":"

    Getting the embeddings of a text from an LLM is sometimes useful, for example, to train other MLP models.

    To get the embeddings, please initialize a LLamaEmbedder and then call GetEmbeddings.

    var embedder = new LLamaEmbedder(new ModelParams(\"<modelPath>\"));\nstring text = \"hello, LLM.\";\nfloat[] embeddings = embedder.GetEmbeddings(text);\n

    The output is a float array. Note that the length of the array is related to the model you load. If you want a smaller embedding size, please consider using a different model.
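
    As a usage example, the embeddings of two texts can be compared with cosine similarity (a sketch; the CosineSimilarity helper below is not part of LLamaSharp):

    float[] a = embedder.GetEmbeddings(\"hello, LLM.\");\nfloat[] b = embedder.GetEmbeddings(\"hi, large language model.\");\nConsole.WriteLine(CosineSimilarity(a, b));\n\n// a hypothetical helper, not part of LLamaSharp\nstatic float CosineSimilarity(float[] x, float[] y)\n{\n    float dot = 0, nx = 0, ny = 0;\n    for (int i = 0; i < x.Length; i++)\n    {\n        dot += x[i] * y[i];\n        nx += x[i] * x[i];\n        ny += y[i] * y[i];\n    }\n    return dot / (MathF.Sqrt(nx) * MathF.Sqrt(ny));\n}\n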

    "},{"location":"LLamaModel/parameters/","title":"LLamaModel Parameters","text":"

    When initializing a LLamaModel object, there are three parameters: ModelParams Params, string encoding = \"UTF-8\", and ILLamaLogger? logger = null.

    The usage of the logger will be further introduced in the logger doc. The encoding is the encoding you want to use when dealing with text via this model.

    The most important of all is ModelParams, which is defined below. We'll explain the parameters step by step in this document.

    public class ModelParams\n{\n    public int ContextSize { get; set; } = 512;\n    public int GpuLayerCount { get; set; } = 20;\n    public int Seed { get; set; } = 1686349486;\n    public bool UseFp16Memory { get; set; } = true;\n    public bool UseMemorymap { get; set; } = true;\n    public bool UseMemoryLock { get; set; } = false;\n    public bool Perplexity { get; set; } = false;\n    public string ModelPath { get; set; }\n    public string LoraAdapter { get; set; } = string.Empty;\n    public string LoraBase { get; set; } = string.Empty;\n    public int Threads { get; set; } = Math.Max(Environment.ProcessorCount / 2, 1);\n    public int BatchSize { get; set; } = 512;\n    public bool ConvertEosToNewLine { get; set; } = false;\n}\n
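
    Putting the three constructor parameters together (a sketch):

    // ModelParams is required; encoding and logger are optional\nvar model = new LLamaModel(new ModelParams(\"<modelPath>\"), \"UTF-8\", LLamaDefaultLogger.Default);\n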
    "},{"location":"LLamaModel/parameters/#modelparams","title":"ModelParams","text":"

    Namespace: LLama.Common

    public class ModelParams\n

    Inheritance Object \u2192 ModelParams

    "},{"location":"LLamaModel/parameters/#properties","title":"Properties","text":""},{"location":"LLamaModel/parameters/#contextsize","title":"ContextSize","text":"

    Model context size (n_ctx)

    public int ContextSize { get; set; }\n
    "},{"location":"LLamaModel/parameters/#property-value","title":"Property Value","text":"

    Int32

    "},{"location":"LLamaModel/parameters/#gpulayercount","title":"GpuLayerCount","text":"

    Number of layers to run in VRAM / GPU memory (n_gpu_layers)

    public int GpuLayerCount { get; set; }\n
    "},{"location":"LLamaModel/parameters/#property-value_1","title":"Property Value","text":"

    Int32

    "},{"location":"LLamaModel/parameters/#seed","title":"Seed","text":"

    Seed for the random number generator (seed)

    public int Seed { get; set; }\n
    "},{"location":"LLamaModel/parameters/#property-value_2","title":"Property Value","text":"

    Int32

    "},{"location":"LLamaModel/parameters/#usefp16memory","title":"UseFp16Memory","text":"

    Use f16 instead of f32 for memory kv (memory_f16)

    public bool UseFp16Memory { get; set; }\n
    "},{"location":"LLamaModel/parameters/#property-value_3","title":"Property Value","text":"

    Boolean

    "},{"location":"LLamaModel/parameters/#usememorymap","title":"UseMemorymap","text":"

    Use mmap for faster loads (use_mmap)

    public bool UseMemorymap { get; set; }\n
    "},{"location":"LLamaModel/parameters/#property-value_4","title":"Property Value","text":"

    Boolean

    "},{"location":"LLamaModel/parameters/#usememorylock","title":"UseMemoryLock","text":"

    Use mlock to keep model in memory (use_mlock)

    public bool UseMemoryLock { get; set; }\n
    "},{"location":"LLamaModel/parameters/#property-value_5","title":"Property Value","text":"

    Boolean

    "},{"location":"LLamaModel/parameters/#perplexity","title":"Perplexity","text":"

    Compute perplexity over the prompt (perplexity)

    public bool Perplexity { get; set; }\n
    "},{"location":"LLamaModel/parameters/#property-value_6","title":"Property Value","text":"

    Boolean

    "},{"location":"LLamaModel/parameters/#modelpath","title":"ModelPath","text":"

    Model path (model)

    public string ModelPath { get; set; }\n
    "},{"location":"LLamaModel/parameters/#property-value_7","title":"Property Value","text":"

    String

    "},{"location":"LLamaModel/parameters/#loraadapter","title":"LoraAdapter","text":"

    lora adapter path (lora_adapter)

    public string LoraAdapter { get; set; }\n
    "},{"location":"LLamaModel/parameters/#property-value_8","title":"Property Value","text":"

    String

    "},{"location":"LLamaModel/parameters/#lorabase","title":"LoraBase","text":"

    base model path for the lora adapter (lora_base)

    public string LoraBase { get; set; }\n
    "},{"location":"LLamaModel/parameters/#property-value_9","title":"Property Value","text":"

    String

    "},{"location":"LLamaModel/parameters/#threads","title":"Threads","text":"

    Number of threads (-1 = autodetect) (n_threads)

    public int Threads { get; set; }\n
    "},{"location":"LLamaModel/parameters/#property-value_10","title":"Property Value","text":"

    Int32

    "},{"location":"LLamaModel/parameters/#batchsize","title":"BatchSize","text":"

    batch size for prompt processing (must be >=32 to use BLAS) (n_batch)

    public int BatchSize { get; set; }\n
    "},{"location":"LLamaModel/parameters/#property-value_11","title":"Property Value","text":"

    Int32

    "},{"location":"LLamaModel/parameters/#converteostonewline","title":"ConvertEosToNewLine","text":"

    Whether to convert eos to newline during the inference.

    public bool ConvertEosToNewLine { get; set; }\n
    "},{"location":"LLamaModel/parameters/#property-value_12","title":"Property Value","text":"

    Boolean

    "},{"location":"LLamaModel/parameters/#embeddingmode","title":"EmbeddingMode","text":"

    Whether to use embedding mode (embedding). Note that if this is set to true, the LLamaModel won't produce text responses anymore.

    public bool EmbeddingMode { get; set; }\n
    "},{"location":"LLamaModel/parameters/#property-value_13","title":"Property Value","text":"

    Boolean

    "},{"location":"LLamaModel/quantization/","title":"Quantization","text":"

    Quantization significantly accelerates model inference. Since there's little accuracy (performance) reduction when quantizing the model, don't hesitate to quantize it!

    To quantize the model, please call Quantize from LLamaQuantizer, which is a static method.

    string srcPath = \"<model.bin>\";\nstring dstPath = \"<model_q4_0.bin>\";\nLLamaQuantizer.Quantize(srcPath, dstPath, \"q4_0\");\n// The following overload is also okay.\n// LLamaQuantizer.Quantize(srcPath, dstPath, LLamaFtype.LLAMA_FTYPE_MOSTLY_Q4_0);\n

    After calling it, a quantized model file will be saved.

    There are currently 5 types of quantization supported:

    • q4_0
    • q4_1
    • q5_0
    • q5_1
    • q8_0
    "},{"location":"LLamaModel/save-load-state/","title":"Save/Load State","text":"

    There are two ways to load state: loading from a path and loading from a byte array. Correspondingly, state data can be extracted as a byte array or saved to a file.

    LLamaModel model = new LLamaModel(new ModelParams(\"<modelPath>\"));\n// do some things...\nmodel.SaveState(\"model.st\");\nvar stateData = model.GetStateData();\nmodel.Dispose();\n\nLLamaModel model2 = new LLamaModel(new ModelParams(\"<modelPath>\"));\nmodel2.LoadState(stateData);\n// do some things...\n\nLLamaModel model3 = new LLamaModel(new ModelParams(\"<modelPath>\"));\nmodel3.LoadState(\"model.st\");\n// do some things...\n
    "},{"location":"LLamaModel/tokenization/","title":"Tokenization/Detokenization","text":"

    A pair of APIs for converting between text and tokens.

    "},{"location":"LLamaModel/tokenization/#tokenization","title":"Tokenization","text":"

    The basic usage is to call Tokenize after initializing the model.

    LLamaModel model = new LLamaModel(new ModelParams(\"<modelPath>\"));\nstring text = \"hello\";\nint[] tokens = model.Tokenize(text).ToArray();\n

    Depending on the model (or vocab), the output will vary.

    "},{"location":"LLamaModel/tokenization/#detokenization","title":"Detokenization","text":"

    Similar to tokenization, just pass an IEnumerable<int> to the Detokenize method.

    LLamaModel model = new LLamaModel(new ModelParams(\"<modelPath>\"));\nint[] tokens = new int[] {125, 2568, 13245};\nstring text = model.Detokenize(tokens);\n
    "},{"location":"More/log/","title":"The Logger in LLamaSharp","text":"

    LLamaSharp supports customized loggers because it can be used in many kinds of applications, like Winform/WPF, WebAPI and Blazor, where the preferred logger varies.

    "},{"location":"More/log/#define-customized-logger","title":"Define customized logger","text":"

    What you need to do is implement the ILLamaLogger interface.

    public interface ILLamaLogger\n{\n    public enum LogLevel\n    {\n        Info,\n        Debug,\n        Warning,\n        Error\n    }\n    void Log(string source, string message, LogLevel level);\n}\n

    The source specifies where the log message comes from, which could be a function, a class, etc.

    The message is the log message itself.

    The level is the severity of the information in the log. As shown above, there are four levels: info, debug, warning and error.

    The following is a simple example of a logger implementation:

    public sealed class LLamaDefaultLogger : ILLamaLogger\n{\n    private static readonly Lazy<LLamaDefaultLogger> _instance = new Lazy<LLamaDefaultLogger>(() => new LLamaDefaultLogger());\n\n    private bool _toConsole = true;\n    private bool _toFile = false;\n\n    private FileStream? _fileStream = null;\n    private StreamWriter _fileWriter = null;\n\n    public static LLamaDefaultLogger Default => _instance.Value;\n\n    private LLamaDefaultLogger()\n    {\n\n    }\n\n    public LLamaDefaultLogger EnableConsole()\n    {\n        _toConsole = true;\n        return this;\n    }\n\n    public LLamaDefaultLogger DisableConsole()\n    {\n        _toConsole = false;\n        return this;\n    }\n\n    public LLamaDefaultLogger EnableFile(string filename, FileMode mode = FileMode.Append)\n    {\n        _fileStream = new FileStream(filename, mode, FileAccess.Write);\n        _fileWriter = new StreamWriter(_fileStream);\n        _toFile = true;\n        return this;\n    }\n\n    public LLamaDefaultLogger DisableFile(string filename)\n    {\n        if (_fileWriter is not null)\n        {\n            _fileWriter.Close();\n            _fileWriter = null;\n        }\n        if (_fileStream is not null)\n        {\n            _fileStream.Close();\n            _fileStream = null;\n        }\n        _toFile = false;\n        return this;\n    }\n\n    public void Log(string source, string message, LogLevel level)\n    {\n        if (level == LogLevel.Info)\n        {\n            Info(message);\n        }\n        else if (level == LogLevel.Debug)\n        {\n\n        }\n        else if (level == LogLevel.Warning)\n        {\n            Warn(message);\n        }\n        else if (level == LogLevel.Error)\n        {\n            Error(message);\n        }\n    }\n\n    public void Info(string message)\n    {\n        message = MessageFormat(\"info\", message);\n        if (_toConsole)\n        {\n            Console.ForegroundColor = ConsoleColor.White;\n            Console.WriteLine(message);\n            Console.ResetColor();\n        }\n        if (_toFile)\n        {\n            Debug.Assert(_fileStream is not null);\n            Debug.Assert(_fileWriter is not null);\n            _fileWriter.WriteLine(message);\n        }\n    }\n\n    public void Warn(string message)\n    {\n        message = MessageFormat(\"warn\", message);\n        if (_toConsole)\n        {\n            Console.ForegroundColor = ConsoleColor.Yellow;\n            Console.WriteLine(message);\n            Console.ResetColor();\n        }\n        if (_toFile)\n        {\n            Debug.Assert(_fileStream is not null);\n            Debug.Assert(_fileWriter is not null);\n            _fileWriter.WriteLine(message);\n        }\n    }\n\n    public void Error(string message)\n    {\n        message = MessageFormat(\"error\", message);\n        if (_toConsole)\n        {\n            Console.ForegroundColor = ConsoleColor.Red;\n            Console.WriteLine(message);\n            Console.ResetColor();\n        }\n        if (_toFile)\n        {\n            Debug.Assert(_fileStream is not null);\n            Debug.Assert(_fileWriter is not null);\n            _fileWriter.WriteLine(message);\n        }\n    }\n\n    private string MessageFormat(string level, string message)\n    {\n        DateTime now = DateTime.Now;\n        string formattedDate = now.ToString(\"yyyy.MM.dd HH:mm:ss\");\n        return $\"[{formattedDate}][{level}]: {message}\";\n    }\n}\n
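
    A usage sketch based on the class above:

    var logger = LLamaDefaultLogger.Default.EnableConsole().EnableFile(\"llama.log\");\nlogger.Log(\"Program\", \"model loading started\", ILLamaLogger.LogLevel.Info);\nvar model = new LLamaModel(new ModelParams(\"<modelPath>\"), \"UTF-8\", logger);\n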
    "},{"location":"NonEnglishUsage/Chinese/","title":"Use LLamaSharp with Chinese","text":"

    It's supported now but the document is under work. Please wait for some time. Thank you for your support! :)

    "},{"location":"xmldocs/","title":"LLamaSharp","text":""},{"location":"xmldocs/#llama","title":"LLama","text":"

    ChatSession

    InstructExecutor

    InteractiveExecutor

    LLamaEmbedder

    LLamaModel

    LLamaQuantizer

    LLamaTransforms

    ResettableLLamaModel

    StatefulExecutorBase

    StatelessExecutor

    "},{"location":"xmldocs/#llamaabstractions","title":"LLama.Abstractions","text":"

    IHistoryTransform

    ILLamaExecutor

    ITextStreamTransform

    ITextTransform

    "},{"location":"xmldocs/#llamacommon","title":"LLama.Common","text":"

    AuthorRole

    ChatHistory

    FixedSizeQueue<T>

    ILLamaLogger

    InferenceParams

    LLamaDefaultLogger

    MiroStateType

    ModelParams

    "},{"location":"xmldocs/#llamaexceptions","title":"LLama.Exceptions","text":"

    RuntimeError

    "},{"location":"xmldocs/#llamaextensions","title":"LLama.Extensions","text":"

    DictionaryExtension

    "},{"location":"xmldocs/#llamanative","title":"LLama.Native","text":"

    LLamaContextParams

    LLamaFtype

    LLamaTokenData

    LLamaTokenDataArray

    LLamaTokenDataArrayNative

    NativeApi

    SafeLLamaContextHandle

    SafeLLamaHandleBase

    "},{"location":"xmldocs/#llamaoldversion","title":"LLama.OldVersion","text":"

    ChatCompletion

    ChatCompletionChoice

    ChatCompletionChunk

    ChatCompletionChunkChoice

    ChatCompletionChunkDelta

    ChatCompletionMessage

    ChatMessageRecord

    ChatRole

    ChatSession<T>

    Completion

    CompletionChoice

    CompletionChunk

    CompletionLogprobs

    CompletionUsage

    Embedding

    EmbeddingData

    EmbeddingUsage

    IChatModel

    LLamaEmbedder

    LLamaModel

    LLamaParams

    "},{"location":"xmldocs/llama.abstractions.ihistorytransform/","title":"IHistoryTransform","text":"

    Namespace: LLama.Abstractions

    Transform history to plain text and vice versa.

    public interface IHistoryTransform\n
    "},{"location":"xmldocs/llama.abstractions.ihistorytransform/#methods","title":"Methods","text":""},{"location":"xmldocs/llama.abstractions.ihistorytransform/#historytotextchathistory","title":"HistoryToText(ChatHistory)","text":"

    Convert a ChatHistory instance to plain text.

    string HistoryToText(ChatHistory history)\n
    "},{"location":"xmldocs/llama.abstractions.ihistorytransform/#parameters","title":"Parameters","text":"

    history ChatHistory The ChatHistory instance

    "},{"location":"xmldocs/llama.abstractions.ihistorytransform/#returns","title":"Returns","text":"

    String

    "},{"location":"xmldocs/llama.abstractions.ihistorytransform/#texttohistoryauthorrole-string","title":"TextToHistory(AuthorRole, String)","text":"

    Converts plain text to a ChatHistory instance.

    ChatHistory TextToHistory(AuthorRole role, string text)\n
    "},{"location":"xmldocs/llama.abstractions.ihistorytransform/#parameters_1","title":"Parameters","text":"

    role AuthorRole The role for the author.

    text String The chat history as plain text.

    "},{"location":"xmldocs/llama.abstractions.ihistorytransform/#returns_1","title":"Returns","text":"

    ChatHistory The updated history.
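For orientation, a minimal sketch of an implementation follows. This is a hypothetical transform, not the library's built-in one, and the `Message` property names (`AuthorRole`, `Content`) are assumptions made for illustration.

```csharp
using System.Text;
using LLama.Abstractions;
using LLama.Common;

// A hypothetical history transform that renders each message as "Author: content".
public class PlainTextHistoryTransform : IHistoryTransform
{
    public string HistoryToText(ChatHistory history)
    {
        var sb = new StringBuilder();
        foreach (var message in history.Messages)
        {
            // AuthorRole and Content are assumed member names of ChatHistory.Message.
            sb.AppendLine($"{message.AuthorRole}: {message.Content}");
        }
        return sb.ToString();
    }

    public ChatHistory TextToHistory(AuthorRole role, string text)
    {
        // Wrap the plain text back into a single-message history.
        var history = new ChatHistory();
        history.AddMessage(role, text);
        return history;
    }
}
```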

    "},{"location":"xmldocs/llama.abstractions.illamaexecutor/","title":"ILLamaExecutor","text":"

    Namespace: LLama.Abstractions

    A high level interface for LLama models.

    public interface ILLamaExecutor\n
    "},{"location":"xmldocs/llama.abstractions.illamaexecutor/#properties","title":"Properties","text":""},{"location":"xmldocs/llama.abstractions.illamaexecutor/#model","title":"Model","text":"

    The loaded model for this executor.

    public abstract LLamaModel Model { get; }\n
    "},{"location":"xmldocs/llama.abstractions.illamaexecutor/#property-value","title":"Property Value","text":"

    LLamaModel

    "},{"location":"xmldocs/llama.abstractions.illamaexecutor/#methods","title":"Methods","text":""},{"location":"xmldocs/llama.abstractions.illamaexecutor/#inferstring-inferenceparams-cancellationtoken","title":"Infer(String, InferenceParams, CancellationToken)","text":"

    Infers a response from the model.

    IEnumerable<string> Infer(string text, InferenceParams inferenceParams, CancellationToken token)\n
    "},{"location":"xmldocs/llama.abstractions.illamaexecutor/#parameters","title":"Parameters","text":"

    text String Your prompt

    inferenceParams InferenceParams Any additional parameters

    token CancellationToken A cancellation token.

    "},{"location":"xmldocs/llama.abstractions.illamaexecutor/#returns","title":"Returns","text":"

    IEnumerable<String>

    "},{"location":"xmldocs/llama.abstractions.illamaexecutor/#inferasyncstring-inferenceparams-cancellationtoken","title":"InferAsync(String, InferenceParams, CancellationToken)","text":"
    IAsyncEnumerable<string> InferAsync(string text, InferenceParams inferenceParams, CancellationToken token)\n
    "},{"location":"xmldocs/llama.abstractions.illamaexecutor/#parameters_1","title":"Parameters","text":"

    text String

    inferenceParams InferenceParams

    token CancellationToken

    "},{"location":"xmldocs/llama.abstractions.illamaexecutor/#returns_1","title":"Returns","text":"

    IAsyncEnumerable<String>

    "},{"location":"xmldocs/llama.abstractions.itextstreamtransform/","title":"ITextStreamTransform","text":"

    Namespace: LLama.Abstractions

    Takes a stream of tokens and transforms them.

    public interface ITextStreamTransform\n
    "},{"location":"xmldocs/llama.abstractions.itextstreamtransform/#methods","title":"Methods","text":""},{"location":"xmldocs/llama.abstractions.itextstreamtransform/#transformienumerablestring","title":"Transform(IEnumerable<String>)","text":"

    Takes a stream of tokens and transforms them, returning a new stream of tokens.

    IEnumerable<string> Transform(IEnumerable<string> tokens)\n
    "},{"location":"xmldocs/llama.abstractions.itextstreamtransform/#parameters","title":"Parameters","text":"

    tokens IEnumerable<String>

    "},{"location":"xmldocs/llama.abstractions.itextstreamtransform/#returns","title":"Returns","text":"

    IEnumerable<String>

    "},{"location":"xmldocs/llama.abstractions.itextstreamtransform/#transformasynciasyncenumerablestring","title":"TransformAsync(IAsyncEnumerable<String>)","text":"

    Takes a stream of tokens and transforms them, returning a new stream of tokens asynchronously.

    IAsyncEnumerable<string> TransformAsync(IAsyncEnumerable<string> tokens)\n
    "},{"location":"xmldocs/llama.abstractions.itextstreamtransform/#parameters_1","title":"Parameters","text":"

    tokens IAsyncEnumerable<String>

    "},{"location":"xmldocs/llama.abstractions.itextstreamtransform/#returns_1","title":"Returns","text":"

    IAsyncEnumerable<String>
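To make the shape of an implementation concrete, here is a minimal hypothetical sketch (not a transform shipped with the library) that upper-cases every token chunk as it streams through:

```csharp
using System.Collections.Generic;
using LLama.Abstractions;

// A hypothetical stream transform, shown only to illustrate ITextStreamTransform.
public class UpperCaseStreamTransform : ITextStreamTransform
{
    public IEnumerable<string> Transform(IEnumerable<string> tokens)
    {
        foreach (var token in tokens)
        {
            yield return token.ToUpperInvariant();
        }
    }

    public async IAsyncEnumerable<string> TransformAsync(IAsyncEnumerable<string> tokens)
    {
        // Async iterator: consume the incoming stream and re-yield transformed chunks.
        await foreach (var token in tokens)
        {
            yield return token.ToUpperInvariant();
        }
    }
}
```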

    "},{"location":"xmldocs/llama.abstractions.itexttransform/","title":"ITextTransform","text":"

    Namespace: LLama.Abstractions

    An interface for text transformations. These can be used to compose a pipeline of text transformations, such as: - Tokenization - Lowercasing - Punctuation removal - Trimming - etc.

    public interface ITextTransform\n
    "},{"location":"xmldocs/llama.abstractions.itexttransform/#methods","title":"Methods","text":""},{"location":"xmldocs/llama.abstractions.itexttransform/#transformstring","title":"Transform(String)","text":"

    Takes a string and transforms it.

    string Transform(string text)\n
    "},{"location":"xmldocs/llama.abstractions.itexttransform/#parameters","title":"Parameters","text":"

    text String

    "},{"location":"xmldocs/llama.abstractions.itexttransform/#returns","title":"Returns","text":"

    String
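A minimal hypothetical example (not part of the library) that trims surrounding whitespace from user input:

```csharp
using LLama.Abstractions;

// A hypothetical input transform illustrating ITextTransform.
public class TrimTransform : ITextTransform
{
    public string Transform(string text)
    {
        return text.Trim();
    }
}
```

Such a transform could then be appended to a session's input pipeline, for example via `ChatSession.AddInputTransform` (described below).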

    "},{"location":"xmldocs/llama.chatsession/","title":"ChatSession","text":"

    Namespace: LLama

    The main chat session class.

    public class ChatSession\n

    Inheritance Object \u2192 ChatSession

    "},{"location":"xmldocs/llama.chatsession/#fields","title":"Fields","text":""},{"location":"xmldocs/llama.chatsession/#outputtransform","title":"OutputTransform","text":"

    The output transform used in this session.

    public ITextStreamTransform OutputTransform;\n
    "},{"location":"xmldocs/llama.chatsession/#properties","title":"Properties","text":""},{"location":"xmldocs/llama.chatsession/#executor","title":"Executor","text":"

    The executor for this session.

    public ILLamaExecutor Executor { get; }\n
    "},{"location":"xmldocs/llama.chatsession/#property-value","title":"Property Value","text":"

    ILLamaExecutor

    "},{"location":"xmldocs/llama.chatsession/#history","title":"History","text":"

    The chat history for this session.

    public ChatHistory History { get; }\n
    "},{"location":"xmldocs/llama.chatsession/#property-value_1","title":"Property Value","text":"

    ChatHistory

    "},{"location":"xmldocs/llama.chatsession/#historytransform","title":"HistoryTransform","text":"

    The history transform used in this session.

    public IHistoryTransform HistoryTransform { get; set; }\n
    "},{"location":"xmldocs/llama.chatsession/#property-value_2","title":"Property Value","text":"

    IHistoryTransform

    "},{"location":"xmldocs/llama.chatsession/#inputtransformpipeline","title":"InputTransformPipeline","text":"

    The input transform pipeline used in this session.

    public List<ITextTransform> InputTransformPipeline { get; set; }\n
    "},{"location":"xmldocs/llama.chatsession/#property-value_3","title":"Property Value","text":"

    List<ITextTransform>

    "},{"location":"xmldocs/llama.chatsession/#constructors","title":"Constructors","text":""},{"location":"xmldocs/llama.chatsession/#chatsessionillamaexecutor","title":"ChatSession(ILLamaExecutor)","text":"
    public ChatSession(ILLamaExecutor executor)\n
    "},{"location":"xmldocs/llama.chatsession/#parameters","title":"Parameters","text":"

    executor ILLamaExecutor The executor for this session

    "},{"location":"xmldocs/llama.chatsession/#methods","title":"Methods","text":""},{"location":"xmldocs/llama.chatsession/#withhistorytransformihistorytransform","title":"WithHistoryTransform(IHistoryTransform)","text":"

    Use a custom history transform.

    public ChatSession WithHistoryTransform(IHistoryTransform transform)\n
    "},{"location":"xmldocs/llama.chatsession/#parameters_1","title":"Parameters","text":"

    transform IHistoryTransform

    "},{"location":"xmldocs/llama.chatsession/#returns","title":"Returns","text":"

    ChatSession

    "},{"location":"xmldocs/llama.chatsession/#addinputtransformitexttransform","title":"AddInputTransform(ITextTransform)","text":"

    Add a text transform to the input transform pipeline.

    public ChatSession AddInputTransform(ITextTransform transform)\n
    "},{"location":"xmldocs/llama.chatsession/#parameters_2","title":"Parameters","text":"

    transform ITextTransform

    "},{"location":"xmldocs/llama.chatsession/#returns_1","title":"Returns","text":"

    ChatSession

    "},{"location":"xmldocs/llama.chatsession/#withoutputtransformitextstreamtransform","title":"WithOutputTransform(ITextStreamTransform)","text":"

    Use a custom output transform.

    public ChatSession WithOutputTransform(ITextStreamTransform transform)\n
    "},{"location":"xmldocs/llama.chatsession/#parameters_3","title":"Parameters","text":"

    transform ITextStreamTransform

    "},{"location":"xmldocs/llama.chatsession/#returns_2","title":"Returns","text":"

    ChatSession

    "},{"location":"xmldocs/llama.chatsession/#savesessionstring","title":"SaveSession(String)","text":"
    public void SaveSession(string path)\n
    "},{"location":"xmldocs/llama.chatsession/#parameters_4","title":"Parameters","text":"

    path String The directory name to save the session. If the directory does not exist, a new directory will be created.

    "},{"location":"xmldocs/llama.chatsession/#loadsessionstring","title":"LoadSession(String)","text":"
    public void LoadSession(string path)\n
    "},{"location":"xmldocs/llama.chatsession/#parameters_5","title":"Parameters","text":"

    path String The directory name to load the session.

    "},{"location":"xmldocs/llama.chatsession/#chatchathistory-inferenceparams-cancellationtoken","title":"Chat(ChatHistory, InferenceParams, CancellationToken)","text":"

    Get the response from the LLama model with chat histories.

    public IEnumerable<string> Chat(ChatHistory history, InferenceParams inferenceParams, CancellationToken cancellationToken)\n
    "},{"location":"xmldocs/llama.chatsession/#parameters_6","title":"Parameters","text":"

    history ChatHistory

    inferenceParams InferenceParams

    cancellationToken CancellationToken

    "},{"location":"xmldocs/llama.chatsession/#returns_3","title":"Returns","text":"

    IEnumerable<String>

    "},{"location":"xmldocs/llama.chatsession/#chatstring-inferenceparams-cancellationtoken","title":"Chat(String, InferenceParams, CancellationToken)","text":"

    Get the response from the LLama model. Note that prompt could not only be the preset words, but also the question you want to ask.

    public IEnumerable<string> Chat(string prompt, InferenceParams inferenceParams, CancellationToken cancellationToken)\n
    "},{"location":"xmldocs/llama.chatsession/#parameters_7","title":"Parameters","text":"

    prompt String

    inferenceParams InferenceParams

    cancellationToken CancellationToken

    "},{"location":"xmldocs/llama.chatsession/#returns_4","title":"Returns","text":"

    IEnumerable<String>

    "},{"location":"xmldocs/llama.chatsession/#chatasyncchathistory-inferenceparams-cancellationtoken","title":"ChatAsync(ChatHistory, InferenceParams, CancellationToken)","text":"

    Get the response from the LLama model with chat histories.

    public IAsyncEnumerable<string> ChatAsync(ChatHistory history, InferenceParams inferenceParams, CancellationToken cancellationToken)\n
    "},{"location":"xmldocs/llama.chatsession/#parameters_8","title":"Parameters","text":"

    history ChatHistory

    inferenceParams InferenceParams

    cancellationToken CancellationToken

    "},{"location":"xmldocs/llama.chatsession/#returns_5","title":"Returns","text":"

    IAsyncEnumerable<String>

    "},{"location":"xmldocs/llama.chatsession/#chatasyncstring-inferenceparams-cancellationtoken","title":"ChatAsync(String, InferenceParams, CancellationToken)","text":"

    Get the response from the LLama model with chat histories asynchronously.

    public IAsyncEnumerable<string> ChatAsync(string prompt, InferenceParams inferenceParams, CancellationToken cancellationToken)\n
    "},{"location":"xmldocs/llama.chatsession/#parameters_9","title":"Parameters","text":"

    prompt String

    inferenceParams InferenceParams

    cancellationToken CancellationToken

    "},{"location":"xmldocs/llama.chatsession/#returns_6","title":"Returns","text":"

    IAsyncEnumerable<String>
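A minimal usage sketch, assuming the model path is a placeholder you replace with your own, that the optional constructor arguments of `ModelParams` and `LLamaModel` keep their defaults, and that the parameter values shown are illustrative only:

```csharp
using System;
using System.Collections.Generic;
using System.Threading;
using LLama;
using LLama.Common;

// "path/to/model.bin" is a placeholder.
var model = new LLamaModel(new ModelParams("path/to/model.bin"));
var session = new ChatSession(new InteractiveExecutor(model));

while (true)
{
    Console.Write("User: ");
    var prompt = Console.ReadLine() + "\n";

    // Stream the response chunk by chunk; stop when the model starts a new user turn.
    foreach (var text in session.Chat(prompt, new InferenceParams
             {
                 Temperature = 0.6f,
                 AntiPrompts = new List<string> { "User:" }
             }, CancellationToken.None))
    {
        Console.Write(text);
    }
}
```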

    "},{"location":"xmldocs/llama.common.authorrole/","title":"AuthorRole","text":"

    Namespace: LLama.Common

    public enum AuthorRole\n

    Inheritance Object \u2192 ValueType \u2192 Enum \u2192 AuthorRole Implements IComparable, IFormattable, IConvertible

    "},{"location":"xmldocs/llama.common.authorrole/#fields","title":"Fields","text":"Name Value Description"},{"location":"xmldocs/llama.common.chathistory/","title":"ChatHistory","text":"

Namespace: LLama.Common

The chat history class.

```csharp
public class ChatHistory
```

Inheritance: Object → ChatHistory

## Properties

### Messages

List of messages in the chat.

```csharp
public List<Message> Messages { get; }
```

**Property value:** `List<Message>`

## Constructors

### ChatHistory()

Create a new instance of the chat content class.

```csharp
public ChatHistory()
```

## Methods

### AddMessage(AuthorRole, String)

Add a message to the chat history.

```csharp
public void AddMessage(AuthorRole authorRole, string content)
```

**Parameters:**

- `authorRole` (`AuthorRole`): Role of the message author.
- `content` (`String`): Message content.
    "},{"location":"xmldocs/llama.common.fixedsizequeue-1/","title":"FixedSizeQueue<T>","text":"

    Namespace: LLama.Common

    A queue with fixed storage size. Currently it's only a naive implementation and needs to be further optimized in the future.

    public class FixedSizeQueue<T> : , System.Collections.IEnumerable\n
    "},{"location":"xmldocs/llama.common.fixedsizequeue-1/#type-parameters","title":"Type Parameters","text":"

    T

    Inheritance Object \u2192 FixedSizeQueue<T> Implements IEnumerable<T>, IEnumerable

    "},{"location":"xmldocs/llama.common.fixedsizequeue-1/#properties","title":"Properties","text":""},{"location":"xmldocs/llama.common.fixedsizequeue-1/#count","title":"Count","text":"
    public int Count { get; }\n
    "},{"location":"xmldocs/llama.common.fixedsizequeue-1/#property-value","title":"Property Value","text":"

    Int32

    "},{"location":"xmldocs/llama.common.fixedsizequeue-1/#capacity","title":"Capacity","text":"
    public int Capacity { get; }\n
    "},{"location":"xmldocs/llama.common.fixedsizequeue-1/#property-value_1","title":"Property Value","text":"

    Int32

    "},{"location":"xmldocs/llama.common.fixedsizequeue-1/#constructors","title":"Constructors","text":""},{"location":"xmldocs/llama.common.fixedsizequeue-1/#fixedsizequeueint32","title":"FixedSizeQueue(Int32)","text":"
    public FixedSizeQueue(int size)\n
    "},{"location":"xmldocs/llama.common.fixedsizequeue-1/#parameters","title":"Parameters","text":"

    size Int32

    "},{"location":"xmldocs/llama.common.fixedsizequeue-1/#fixedsizequeueint32-ienumerablet","title":"FixedSizeQueue(Int32, IEnumerable<T>)","text":"
    public FixedSizeQueue(int size, IEnumerable<T> data)\n
    "},{"location":"xmldocs/llama.common.fixedsizequeue-1/#parameters_1","title":"Parameters","text":"

    size Int32

    data IEnumerable<T>

    "},{"location":"xmldocs/llama.common.fixedsizequeue-1/#methods","title":"Methods","text":""},{"location":"xmldocs/llama.common.fixedsizequeue-1/#fillwitht","title":"FillWith(T)","text":"
    public FixedSizeQueue<T> FillWith(T value)\n
    "},{"location":"xmldocs/llama.common.fixedsizequeue-1/#parameters_2","title":"Parameters","text":"

    value T

    "},{"location":"xmldocs/llama.common.fixedsizequeue-1/#returns","title":"Returns","text":"

    FixedSizeQueue<T>

    "},{"location":"xmldocs/llama.common.fixedsizequeue-1/#enqueuet","title":"Enqueue(T)","text":"

    Enquene an element.

    public void Enqueue(T item)\n
    "},{"location":"xmldocs/llama.common.fixedsizequeue-1/#parameters_3","title":"Parameters","text":"

    item T

    "},{"location":"xmldocs/llama.common.fixedsizequeue-1/#toarray","title":"ToArray()","text":"
    public T[] ToArray()\n
    "},{"location":"xmldocs/llama.common.fixedsizequeue-1/#returns_1","title":"Returns","text":"

    T[]

    "},{"location":"xmldocs/llama.common.fixedsizequeue-1/#getenumerator","title":"GetEnumerator()","text":"
    public IEnumerator<T> GetEnumerator()\n
    "},{"location":"xmldocs/llama.common.fixedsizequeue-1/#returns_2","title":"Returns","text":"

    IEnumerator<T>
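A short usage sketch. Evicting the oldest element once capacity is reached is the expected behavior of a fixed-size queue; the concrete output shown in the comment is illustrative, not taken from the docs:

```csharp
using System;
using LLama.Common;

// A queue that holds at most 3 elements, pre-filled with zeros.
var queue = new FixedSizeQueue<int>(3).FillWith(0);
queue.Enqueue(1);
queue.Enqueue(2);

// The oldest elements should be dropped once capacity is exceeded.
Console.WriteLine(string.Join(", ", queue.ToArray()));  // e.g. "0, 1, 2"
```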

    "},{"location":"xmldocs/llama.common.illamalogger/","title":"ILLamaLogger","text":"

    Namespace: LLama.Common

    public interface ILLamaLogger\n
    "},{"location":"xmldocs/llama.common.illamalogger/#methods","title":"Methods","text":""},{"location":"xmldocs/llama.common.illamalogger/#logstring-string-loglevel","title":"Log(String, String, LogLevel)","text":"

    Write the log in cosutomized way

    void Log(string source, string message, LogLevel level)\n
    "},{"location":"xmldocs/llama.common.illamalogger/#parameters","title":"Parameters","text":"

    source String The source of the log. It may be a method name or class name.

    message String The message.

    level LogLevel The log level.
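A hypothetical implementation sketch, assuming `LogLevel` is the log-level enum referenced by this interface (its exact declaring scope is not shown in these docs):

```csharp
using System;
using LLama.Common;

// A hypothetical logger that prefixes each message with its source and level.
public class ConsoleSourceLogger : ILLamaLogger
{
    public void Log(string source, string message, LogLevel level)
    {
        Console.WriteLine($"[{level}] {source}: {message}");
    }
}
```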

    "},{"location":"xmldocs/llama.common.inferenceparams/","title":"InferenceParams","text":"

    Namespace: LLama.Common

    public class InferenceParams\n

    Inheritance Object \u2192 InferenceParams

    "},{"location":"xmldocs/llama.common.inferenceparams/#properties","title":"Properties","text":""},{"location":"xmldocs/llama.common.inferenceparams/#tokenskeep","title":"TokensKeep","text":"

    number of tokens to keep from initial prompt

    public int TokensKeep { get; set; }\n
    "},{"location":"xmldocs/llama.common.inferenceparams/#property-value","title":"Property Value","text":"

    Int32

    "},{"location":"xmldocs/llama.common.inferenceparams/#maxtokens","title":"MaxTokens","text":"

    how many new tokens to predict (n_predict), set to -1 to inifinitely generate response until it complete.

    public int MaxTokens { get; set; }\n
    "},{"location":"xmldocs/llama.common.inferenceparams/#property-value_1","title":"Property Value","text":"

    Int32

    "},{"location":"xmldocs/llama.common.inferenceparams/#logitbias","title":"LogitBias","text":"

    logit bias for specific tokens

    public Dictionary<int, float> LogitBias { get; set; }\n
    "},{"location":"xmldocs/llama.common.inferenceparams/#property-value_2","title":"Property Value","text":"

    Dictionary<Int32, Single>

    "},{"location":"xmldocs/llama.common.inferenceparams/#antiprompts","title":"AntiPrompts","text":"

    Sequences where the model will stop generating further tokens.

    public IEnumerable<string> AntiPrompts { get; set; }\n
    "},{"location":"xmldocs/llama.common.inferenceparams/#property-value_3","title":"Property Value","text":"

    IEnumerable<String>

    "},{"location":"xmldocs/llama.common.inferenceparams/#pathsession","title":"PathSession","text":"

    path to file for saving/loading model eval state

    public string PathSession { get; set; }\n
    "},{"location":"xmldocs/llama.common.inferenceparams/#property-value_4","title":"Property Value","text":"

    String

    "},{"location":"xmldocs/llama.common.inferenceparams/#inputsuffix","title":"InputSuffix","text":"

    string to suffix user inputs with

    public string InputSuffix { get; set; }\n
    "},{"location":"xmldocs/llama.common.inferenceparams/#property-value_5","title":"Property Value","text":"

    String

    "},{"location":"xmldocs/llama.common.inferenceparams/#inputprefix","title":"InputPrefix","text":"

    string to prefix user inputs with

    public string InputPrefix { get; set; }\n
    "},{"location":"xmldocs/llama.common.inferenceparams/#property-value_6","title":"Property Value","text":"

    String

    "},{"location":"xmldocs/llama.common.inferenceparams/#topk","title":"TopK","text":"

    0 or lower to use vocab size

    public int TopK { get; set; }\n
    "},{"location":"xmldocs/llama.common.inferenceparams/#property-value_7","title":"Property Value","text":"

    Int32

    "},{"location":"xmldocs/llama.common.inferenceparams/#topp","title":"TopP","text":"

    1.0 = disabled

    public float TopP { get; set; }\n
    "},{"location":"xmldocs/llama.common.inferenceparams/#property-value_8","title":"Property Value","text":"

    Single

    "},{"location":"xmldocs/llama.common.inferenceparams/#tfsz","title":"TfsZ","text":"

    1.0 = disabled

    public float TfsZ { get; set; }\n
    "},{"location":"xmldocs/llama.common.inferenceparams/#property-value_9","title":"Property Value","text":"

    Single

    "},{"location":"xmldocs/llama.common.inferenceparams/#typicalp","title":"TypicalP","text":"

    1.0 = disabled

    public float TypicalP { get; set; }\n
    "},{"location":"xmldocs/llama.common.inferenceparams/#property-value_10","title":"Property Value","text":"

    Single

    "},{"location":"xmldocs/llama.common.inferenceparams/#temperature","title":"Temperature","text":"

    1.0 = disabled

    public float Temperature { get; set; }\n
    "},{"location":"xmldocs/llama.common.inferenceparams/#property-value_11","title":"Property Value","text":"

    Single

    "},{"location":"xmldocs/llama.common.inferenceparams/#repeatpenalty","title":"RepeatPenalty","text":"

    1.0 = disabled

    public float RepeatPenalty { get; set; }\n
    "},{"location":"xmldocs/llama.common.inferenceparams/#property-value_12","title":"Property Value","text":"

    Single

    "},{"location":"xmldocs/llama.common.inferenceparams/#repeatlasttokenscount","title":"RepeatLastTokensCount","text":"

    last n tokens to penalize (0 = disable penalty, -1 = context size) (repeat_last_n)

    public int RepeatLastTokensCount { get; set; }\n
    "},{"location":"xmldocs/llama.common.inferenceparams/#property-value_13","title":"Property Value","text":"

    Int32

    "},{"location":"xmldocs/llama.common.inferenceparams/#frequencypenalty","title":"FrequencyPenalty","text":"

    frequency penalty coefficient 0.0 = disabled

    public float FrequencyPenalty { get; set; }\n
    "},{"location":"xmldocs/llama.common.inferenceparams/#property-value_14","title":"Property Value","text":"

    Single

    "},{"location":"xmldocs/llama.common.inferenceparams/#presencepenalty","title":"PresencePenalty","text":"

    presence penalty coefficient 0.0 = disabled

    public float PresencePenalty { get; set; }\n
    "},{"location":"xmldocs/llama.common.inferenceparams/#property-value_15","title":"Property Value","text":"

    Single

    "},{"location":"xmldocs/llama.common.inferenceparams/#mirostat","title":"Mirostat","text":"

    Mirostat uses tokens instead of words. algorithm described in the paper https://arxiv.org/abs/2007.14966. 0 = disabled, 1 = mirostat, 2 = mirostat 2.0

    public MiroStateType Mirostat { get; set; }\n
    "},{"location":"xmldocs/llama.common.inferenceparams/#property-value_16","title":"Property Value","text":"

    MiroStateType

    "},{"location":"xmldocs/llama.common.inferenceparams/#mirostattau","title":"MirostatTau","text":"

    target entropy

    public float MirostatTau { get; set; }\n
    "},{"location":"xmldocs/llama.common.inferenceparams/#property-value_17","title":"Property Value","text":"

    Single

    "},{"location":"xmldocs/llama.common.inferenceparams/#mirostateta","title":"MirostatEta","text":"

    learning rate

    public float MirostatEta { get; set; }\n
    "},{"location":"xmldocs/llama.common.inferenceparams/#property-value_18","title":"Property Value","text":"

    Single

    "},{"location":"xmldocs/llama.common.inferenceparams/#penalizenl","title":"PenalizeNL","text":"

    consider newlines as a repeatable token (penalize_nl)

    public bool PenalizeNL { get; set; }\n
    "},{"location":"xmldocs/llama.common.inferenceparams/#property-value_19","title":"Property Value","text":"

    Boolean

    "},{"location":"xmldocs/llama.common.inferenceparams/#constructors","title":"Constructors","text":""},{"location":"xmldocs/llama.common.inferenceparams/#inferenceparams_1","title":"InferenceParams()","text":"
    public InferenceParams()\n
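For illustration, typical sampling settings might be configured like this; the values are examples, not recommendations:

```csharp
using System.Collections.Generic;
using LLama.Common;

var inferenceParams = new InferenceParams
{
    MaxTokens = 256,       // stop after 256 new tokens
    Temperature = 0.8f,
    TopK = 40,
    TopP = 0.95f,
    RepeatPenalty = 1.1f,
    AntiPrompts = new List<string> { "User:" }  // stop when the model starts a new user turn
};
```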
    "},{"location":"xmldocs/llama.common.llamadefaultlogger/","title":"LLamaDefaultLogger","text":"

    Namespace: LLama.Common

    The default logger of LLamaSharp. On default it write to console. User methods of LLamaLogger.Default to change the behavior. It's more recommended to inherit ILLamaLogger to cosutomize the behavior.

    public sealed class LLamaDefaultLogger : ILLamaLogger\n

    Inheritance Object \u2192 LLamaDefaultLogger Implements ILLamaLogger

    "},{"location":"xmldocs/llama.common.llamadefaultlogger/#properties","title":"Properties","text":""},{"location":"xmldocs/llama.common.llamadefaultlogger/#default","title":"Default","text":"
    public static LLamaDefaultLogger Default { get; }\n
    "},{"location":"xmldocs/llama.common.llamadefaultlogger/#property-value","title":"Property Value","text":"

    LLamaDefaultLogger

    "},{"location":"xmldocs/llama.common.llamadefaultlogger/#methods","title":"Methods","text":""},{"location":"xmldocs/llama.common.llamadefaultlogger/#enableconsole","title":"EnableConsole()","text":"
    public LLamaDefaultLogger EnableConsole()\n
    "},{"location":"xmldocs/llama.common.llamadefaultlogger/#returns","title":"Returns","text":"

    LLamaDefaultLogger

    "},{"location":"xmldocs/llama.common.llamadefaultlogger/#disableconsole","title":"DisableConsole()","text":"
    public LLamaDefaultLogger DisableConsole()\n
    "},{"location":"xmldocs/llama.common.llamadefaultlogger/#returns_1","title":"Returns","text":"

    LLamaDefaultLogger

    "},{"location":"xmldocs/llama.common.llamadefaultlogger/#enablefilestring-filemode","title":"EnableFile(String, FileMode)","text":"
    public LLamaDefaultLogger EnableFile(string filename, FileMode mode)\n
    "},{"location":"xmldocs/llama.common.llamadefaultlogger/#parameters","title":"Parameters","text":"

    filename String

    mode FileMode

    "},{"location":"xmldocs/llama.common.llamadefaultlogger/#returns_2","title":"Returns","text":"

    LLamaDefaultLogger

    "},{"location":"xmldocs/llama.common.llamadefaultlogger/#disablefilestring","title":"DisableFile(String)","text":"
    public LLamaDefaultLogger DisableFile(string filename)\n
    "},{"location":"xmldocs/llama.common.llamadefaultlogger/#parameters_1","title":"Parameters","text":"

    filename String

    "},{"location":"xmldocs/llama.common.llamadefaultlogger/#returns_3","title":"Returns","text":"

    LLamaDefaultLogger

    "},{"location":"xmldocs/llama.common.llamadefaultlogger/#logstring-string-loglevel","title":"Log(String, String, LogLevel)","text":"
    public void Log(string source, string message, LogLevel level)\n
    "},{"location":"xmldocs/llama.common.llamadefaultlogger/#parameters_2","title":"Parameters","text":"

    source String

    message String

    level LogLevel

    "},{"location":"xmldocs/llama.common.llamadefaultlogger/#infostring","title":"Info(String)","text":"
    public void Info(string message)\n
    "},{"location":"xmldocs/llama.common.llamadefaultlogger/#parameters_3","title":"Parameters","text":"

    message String

    "},{"location":"xmldocs/llama.common.llamadefaultlogger/#warnstring","title":"Warn(String)","text":"
    public void Warn(string message)\n
    "},{"location":"xmldocs/llama.common.llamadefaultlogger/#parameters_4","title":"Parameters","text":"

    message String

    "},{"location":"xmldocs/llama.common.llamadefaultlogger/#errorstring","title":"Error(String)","text":"
    public void Error(string message)\n
    "},{"location":"xmldocs/llama.common.llamadefaultlogger/#parameters_5","title":"Parameters","text":"

    message String
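Since the configuration methods return the logger itself, calls can be chained. A short sketch ("llama.log" is a placeholder file name):

```csharp
using System.IO;
using LLama.Common;

// Route logs to both the console and a file.
var logger = LLamaDefaultLogger.Default
    .EnableConsole()
    .EnableFile("llama.log", FileMode.Append);

logger.Info("Model loading started.");
```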

    "},{"location":"xmldocs/llama.common.mirostatetype/","title":"MiroStateType","text":"

    Namespace: LLama.Common

    public enum MiroStateType\n

    Inheritance Object \u2192 ValueType \u2192 Enum \u2192 MiroStateType Implements IComparable, IFormattable, IConvertible

    "},{"location":"xmldocs/llama.common.mirostatetype/#fields","title":"Fields","text":"Name Value Description"},{"location":"xmldocs/llama.common.modelparams/","title":"ModelParams","text":"

Namespace: LLama.Common

```csharp
public class ModelParams
```

Inheritance: Object → ModelParams

## Properties

### ContextSize

Model context size (n_ctx).

```csharp
public int ContextSize { get; set; }
```

**Property value:** `Int32`

### GpuLayerCount

Number of layers to run in VRAM / GPU memory (n_gpu_layers).

```csharp
public int GpuLayerCount { get; set; }
```

**Property value:** `Int32`

### Seed

Seed for the random number generator (seed).

```csharp
public int Seed { get; set; }
```

**Property value:** `Int32`

### UseFp16Memory

Use f16 instead of f32 for the memory KV cache (memory_f16).

```csharp
public bool UseFp16Memory { get; set; }
```

**Property value:** `Boolean`

### UseMemorymap

Use mmap for faster loads (use_mmap).

```csharp
public bool UseMemorymap { get; set; }
```

**Property value:** `Boolean`

### UseMemoryLock

Use mlock to keep the model in memory (use_mlock).

```csharp
public bool UseMemoryLock { get; set; }
```

**Property value:** `Boolean`

### Perplexity

Compute perplexity over the prompt (perplexity).

```csharp
public bool Perplexity { get; set; }
```

**Property value:** `Boolean`

### ModelPath

Model path (model).

```csharp
public string ModelPath { get; set; }
```

**Property value:** `String`

### LoraAdapter

LoRA adapter path (lora_adapter).

```csharp
public string LoraAdapter { get; set; }
```

**Property value:** `String`

### LoraBase

Base model path for the LoRA adapter (lora_base).

```csharp
public string LoraBase { get; set; }
```

**Property value:** `String`

### Threads

Number of threads (-1 = autodetect) (n_threads).

```csharp
public int Threads { get; set; }
```

**Property value:** `Int32`

### BatchSize

Batch size for prompt processing (must be >= 32 to use BLAS) (n_batch).

```csharp
public int BatchSize { get; set; }
```

**Property value:** `Int32`

### ConvertEosToNewLine

Whether to convert EOS to newline during inference.

```csharp
public bool ConvertEosToNewLine { get; set; }
```

**Property value:** `Boolean`

### EmbeddingMode

Whether to use embedding mode (embedding). Note that if this is set to true, the LLamaModel will no longer produce text responses.

```csharp
public bool EmbeddingMode { get; set; }
```

**Property value:** `Boolean`

## Constructors

### ModelParams(String, Int32, Int32, Int32, Boolean, Boolean, Boolean, Boolean, String, String, Int32, Int32, Boolean, Boolean)

```csharp
public ModelParams(string modelPath, int contextSize, int gpuLayerCount, int seed, bool useFp16Memory, bool useMemorymap, bool useMemoryLock, bool perplexity, string loraAdapter, string loraBase, int threads, int batchSize, bool convertEosToNewLine, bool embeddingMode)
```

**Parameters:**

- `modelPath` (`String`): The model path.
- `contextSize` (`Int32`): Model context size (n_ctx).
- `gpuLayerCount` (`Int32`): Number of layers to run in VRAM / GPU memory (n_gpu_layers).
- `seed` (`Int32`): Seed for the random number generator (seed).
- `useFp16Memory` (`Boolean`): Whether to use f16 instead of f32 for the memory KV cache (memory_f16).
- `useMemorymap` (`Boolean`): Whether to use mmap for faster loads (use_mmap).
- `useMemoryLock` (`Boolean`): Whether to use mlock to keep the model in memory (use_mlock).
- `perplexity` (`Boolean`): Whether to compute perplexity over the prompt (perplexity).
- `loraAdapter` (`String`): LoRA adapter path (lora_adapter).
- `loraBase` (`String`): Base model path for the LoRA adapter (lora_base).
- `threads` (`Int32`): Number of threads (-1 = autodetect) (n_threads).
- `batchSize` (`Int32`): Batch size for prompt processing (must be >= 32 to use BLAS) (n_batch).
- `convertEosToNewLine` (`Boolean`): Whether to convert EOS to newline during inference.
- `embeddingMode` (`Boolean`): Whether to use embedding mode (embedding). Note that if this is set to true, the LLamaModel will no longer produce text responses.
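A construction sketch using the documented constructor; the path is a placeholder and the argument values are illustrative:

```csharp
using LLama.Common;

var modelParams = new ModelParams(
    modelPath: "path/to/model.bin",  // placeholder
    contextSize: 1024,
    gpuLayerCount: 5,
    seed: 1337,
    useFp16Memory: true,
    useMemorymap: true,
    useMemoryLock: false,
    perplexity: false,
    loraAdapter: "",
    loraBase: "",
    threads: -1,                     // autodetect
    batchSize: 512,
    convertEosToNewLine: false,
    embeddingMode: false);
```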

    "},{"location":"xmldocs/llama.exceptions.runtimeerror/","title":"RuntimeError","text":"

    Namespace: LLama.Exceptions

    public class RuntimeError : System.Exception, System.Runtime.Serialization.ISerializable\n

    Inheritance Object \u2192 Exception \u2192 RuntimeError Implements ISerializable

    "},{"location":"xmldocs/llama.exceptions.runtimeerror/#properties","title":"Properties","text":""},{"location":"xmldocs/llama.exceptions.runtimeerror/#targetsite","title":"TargetSite","text":"
    public MethodBase TargetSite { get; }\n
    "},{"location":"xmldocs/llama.exceptions.runtimeerror/#property-value","title":"Property Value","text":"

    MethodBase

    "},{"location":"xmldocs/llama.exceptions.runtimeerror/#message","title":"Message","text":"
    public string Message { get; }\n
    "},{"location":"xmldocs/llama.exceptions.runtimeerror/#property-value_1","title":"Property Value","text":"

    String

    "},{"location":"xmldocs/llama.exceptions.runtimeerror/#data","title":"Data","text":"
    public IDictionary Data { get; }\n
    "},{"location":"xmldocs/llama.exceptions.runtimeerror/#property-value_2","title":"Property Value","text":"

    IDictionary

    "},{"location":"xmldocs/llama.exceptions.runtimeerror/#innerexception","title":"InnerException","text":"
    public Exception InnerException { get; }\n
    "},{"location":"xmldocs/llama.exceptions.runtimeerror/#property-value_3","title":"Property Value","text":"

    Exception

    "},{"location":"xmldocs/llama.exceptions.runtimeerror/#helplink","title":"HelpLink","text":"
    public string HelpLink { get; set; }\n
    "},{"location":"xmldocs/llama.exceptions.runtimeerror/#property-value_4","title":"Property Value","text":"

    String

    "},{"location":"xmldocs/llama.exceptions.runtimeerror/#source","title":"Source","text":"
    public string Source { get; set; }\n
    "},{"location":"xmldocs/llama.exceptions.runtimeerror/#property-value_5","title":"Property Value","text":"

    String

    "},{"location":"xmldocs/llama.exceptions.runtimeerror/#hresult","title":"HResult","text":"
    public int HResult { get; set; }\n
    "},{"location":"xmldocs/llama.exceptions.runtimeerror/#property-value_6","title":"Property Value","text":"

    Int32

    "},{"location":"xmldocs/llama.exceptions.runtimeerror/#stacktrace","title":"StackTrace","text":"
    public string StackTrace { get; }\n
    "},{"location":"xmldocs/llama.exceptions.runtimeerror/#property-value_7","title":"Property Value","text":"

    String

    "},{"location":"xmldocs/llama.exceptions.runtimeerror/#constructors","title":"Constructors","text":""},{"location":"xmldocs/llama.exceptions.runtimeerror/#runtimeerror_1","title":"RuntimeError()","text":"
    public RuntimeError()\n
    "},{"location":"xmldocs/llama.exceptions.runtimeerror/#runtimeerrorstring","title":"RuntimeError(String)","text":"
    public RuntimeError(string message)\n
    "},{"location":"xmldocs/llama.exceptions.runtimeerror/#parameters","title":"Parameters","text":"

    message String

    "},{"location":"xmldocs/llama.extensions.dictionaryextension/","title":"DictionaryExtension","text":"

    Namespace: LLama.Extensions

    public static class DictionaryExtension\n

    Inheritance Object \u2192 DictionaryExtension

    "},{"location":"xmldocs/llama.extensions.dictionaryextension/#methods","title":"Methods","text":""},{"location":"xmldocs/llama.extensions.dictionaryextension/#deconstructt1-t2keyvaluepairt1-t2-t1-t2","title":"Deconstruct<T1, T2>(KeyValuePair<T1, T2>, T1&, T2&)","text":"
    public static void Deconstruct<T1, T2>(KeyValuePair<T1, T2> pair, T1& first, T2& second)\n
    "},{"location":"xmldocs/llama.extensions.dictionaryextension/#type-parameters","title":"Type Parameters","text":"

    T1

    T2

    "},{"location":"xmldocs/llama.extensions.dictionaryextension/#parameters","title":"Parameters","text":"

    pair KeyValuePair<T1, T2>

    first T1&

    second T2&

    "},{"location":"xmldocs/llama.extensions.dictionaryextension/#updatet1-t2dictionaryt1-t2-idictionaryt1-t2","title":"Update<T1, T2>(Dictionary<T1, T2>, IDictionary<T1, T2>)","text":"
    public static void Update<T1, T2>(Dictionary<T1, T2> dic, IDictionary<T1, T2> other)\n
    "},{"location":"xmldocs/llama.extensions.dictionaryextension/#type-parameters_1","title":"Type Parameters","text":"

    T1

    T2

    "},{"location":"xmldocs/llama.extensions.dictionaryextension/#parameters_1","title":"Parameters","text":"

    dic Dictionary<T1, T2>

    other IDictionary<T1, T2>

    "},{"location":"xmldocs/llama.extensions.dictionaryextension/#getordefaultt1-t2dictionaryt1-t2-t1-t2","title":"GetOrDefault<T1, T2>(Dictionary<T1, T2>, T1, T2)","text":"
    public static T2 GetOrDefault<T1, T2>(Dictionary<T1, T2> dic, T1 key, T2 defaultValue)\n
    "},{"location":"xmldocs/llama.extensions.dictionaryextension/#type-parameters_2","title":"Type Parameters","text":"

    T1

    T2

    "},{"location":"xmldocs/llama.extensions.dictionaryextension/#parameters_2","title":"Parameters","text":"

    dic Dictionary<T1, T2>

    key T1

    defaultValue T2

    "},{"location":"xmldocs/llama.extensions.dictionaryextension/#returns","title":"Returns","text":"

    T2
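A usage sketch, assuming these are declared as C# extension methods (the generated signatures omit the `this` modifier):

```csharp
using System;
using System.Collections.Generic;
using LLama.Extensions;

var ages = new Dictionary<string, int> { ["alice"] = 30 };

// GetOrDefault falls back to the supplied default when the key is missing.
int bob = ages.GetOrDefault("bob", 0);  // 0

// Deconstruct lets a KeyValuePair be unpacked directly in a foreach.
foreach (var (name, age) in ages)
{
    Console.WriteLine($"{name}: {age}");
}
```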

    "},{"location":"xmldocs/llama.instructexecutor/","title":"InstructExecutor","text":"

    Namespace: LLama

    The LLama executor for instruct mode.

    public class InstructExecutor : StatefulExecutorBase, LLama.Abstractions.ILLamaExecutor\n

    Inheritance Object \u2192 StatefulExecutorBase \u2192 InstructExecutor Implements ILLamaExecutor

    "},{"location":"xmldocs/llama.instructexecutor/#properties","title":"Properties","text":""},{"location":"xmldocs/llama.instructexecutor/#model","title":"Model","text":"

    The mode used by the executor.

    public LLamaModel Model { get; }\n
    "},{"location":"xmldocs/llama.instructexecutor/#property-value","title":"Property Value","text":"

    LLamaModel

    "},{"location":"xmldocs/llama.instructexecutor/#constructors","title":"Constructors","text":""},{"location":"xmldocs/llama.instructexecutor/#instructexecutorllamamodel-string-string","title":"InstructExecutor(LLamaModel, String, String)","text":"
    public InstructExecutor(LLamaModel model, string instructionPrefix, string instructionSuffix)\n
    "},{"location":"xmldocs/llama.instructexecutor/#parameters","title":"Parameters","text":"

    model LLamaModel

    instructionPrefix String

    instructionSuffix String

    "},{"location":"xmldocs/llama.instructexecutor/#methods","title":"Methods","text":""},{"location":"xmldocs/llama.instructexecutor/#getstatedata","title":"GetStateData()","text":"
    public ExecutorBaseState GetStateData()\n
    "},{"location":"xmldocs/llama.instructexecutor/#returns","title":"Returns","text":"

    ExecutorBaseState

    "},{"location":"xmldocs/llama.instructexecutor/#loadstateexecutorbasestate","title":"LoadState(ExecutorBaseState)","text":"
    public void LoadState(ExecutorBaseState data)\n
    "},{"location":"xmldocs/llama.instructexecutor/#parameters_1","title":"Parameters","text":"

    data ExecutorBaseState

    "},{"location":"xmldocs/llama.instructexecutor/#savestatestring","title":"SaveState(String)","text":"
    public void SaveState(string filename)\n
    "},{"location":"xmldocs/llama.instructexecutor/#parameters_2","title":"Parameters","text":"

    filename String

    "},{"location":"xmldocs/llama.instructexecutor/#loadstatestring","title":"LoadState(String)","text":"
    public void LoadState(string filename)\n
    "},{"location":"xmldocs/llama.instructexecutor/#parameters_3","title":"Parameters","text":"

    filename String

    "},{"location":"xmldocs/llama.instructexecutor/#getloopconditioninferstateargs","title":"GetLoopCondition(InferStateArgs)","text":"
    protected bool GetLoopCondition(InferStateArgs args)\n
    "},{"location":"xmldocs/llama.instructexecutor/#parameters_4","title":"Parameters","text":"

    args InferStateArgs

    "},{"location":"xmldocs/llama.instructexecutor/#returns_1","title":"Returns","text":"

    Boolean

    "},{"location":"xmldocs/llama.instructexecutor/#preprocessinputsstring-inferstateargs","title":"PreprocessInputs(String, InferStateArgs)","text":"
    protected void PreprocessInputs(string text, InferStateArgs args)\n
    "},{"location":"xmldocs/llama.instructexecutor/#parameters_5","title":"Parameters","text":"

    text String

    args InferStateArgs

    "},{"location":"xmldocs/llama.instructexecutor/#postprocessinferenceparams-inferstateargs-ienumerable1","title":"PostProcess(InferenceParams, InferStateArgs, IEnumerable`1&)","text":"
    protected bool PostProcess(InferenceParams inferenceParams, InferStateArgs args, IEnumerable`1& extraOutputs)\n
    "},{"location":"xmldocs/llama.instructexecutor/#parameters_6","title":"Parameters","text":"

    inferenceParams InferenceParams

    args InferStateArgs

    extraOutputs IEnumerable`1&

    "},{"location":"xmldocs/llama.instructexecutor/#returns_2","title":"Returns","text":"

    Boolean

    "},{"location":"xmldocs/llama.instructexecutor/#inferinternalinferenceparams-inferstateargs","title":"InferInternal(InferenceParams, InferStateArgs)","text":"
    protected void InferInternal(InferenceParams inferenceParams, InferStateArgs args)\n
    "},{"location":"xmldocs/llama.instructexecutor/#parameters_7","title":"Parameters","text":"

    inferenceParams InferenceParams

    args InferStateArgs
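An instruct-mode sketch, assuming the model path is a placeholder, the optional `ModelParams` and `LLamaModel` constructor arguments keep their defaults, and the Alpaca-style prefix/suffix strings are illustrative:

```csharp
using System;
using System.Threading;
using LLama;
using LLama.Common;

var model = new LLamaModel(new ModelParams("path/to/model.bin"));  // placeholder path
var executor = new InstructExecutor(model,
    instructionPrefix: "\n\n### Instruction:\n\n",
    instructionSuffix: "\n\n### Response:\n\n");

// Stream the instruct-mode response.
foreach (var text in executor.Infer("Write a haiku about llamas.",
         new InferenceParams { Temperature = 0.8f }, CancellationToken.None))
{
    Console.Write(text);
}
```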

    "},{"location":"xmldocs/llama.interactiveexecutor/","title":"InteractiveExecutor","text":"

    Namespace: LLama

    The LLama executor for interactive mode.

    public class InteractiveExecutor : StatefulExecutorBase, LLama.Abstractions.ILLamaExecutor\n

    Inheritance Object \u2192 StatefulExecutorBase \u2192 InteractiveExecutor Implements ILLamaExecutor

    "},{"location":"xmldocs/llama.interactiveexecutor/#properties","title":"Properties","text":""},{"location":"xmldocs/llama.interactiveexecutor/#model","title":"Model","text":"

    The mode used by the executor.

    public LLamaModel Model { get; }\n
    "},{"location":"xmldocs/llama.interactiveexecutor/#property-value","title":"Property Value","text":"

    LLamaModel

    "},{"location":"xmldocs/llama.interactiveexecutor/#constructors","title":"Constructors","text":""},{"location":"xmldocs/llama.interactiveexecutor/#interactiveexecutorllamamodel","title":"InteractiveExecutor(LLamaModel)","text":"
    public InteractiveExecutor(LLamaModel model)\n
    "},{"location":"xmldocs/llama.interactiveexecutor/#parameters","title":"Parameters","text":"

    model LLamaModel

    "},{"location":"xmldocs/llama.interactiveexecutor/#methods","title":"Methods","text":""},{"location":"xmldocs/llama.interactiveexecutor/#getstatedata","title":"GetStateData()","text":"
    public ExecutorBaseState GetStateData()\n
    "},{"location":"xmldocs/llama.interactiveexecutor/#returns","title":"Returns","text":"

    ExecutorBaseState

    "},{"location":"xmldocs/llama.interactiveexecutor/#loadstateexecutorbasestate","title":"LoadState(ExecutorBaseState)","text":"
    public void LoadState(ExecutorBaseState data)\n
    "},{"location":"xmldocs/llama.interactiveexecutor/#parameters_1","title":"Parameters","text":"

    data ExecutorBaseState

    "},{"location":"xmldocs/llama.interactiveexecutor/#savestatestring","title":"SaveState(String)","text":"
    public void SaveState(string filename)\n
    "},{"location":"xmldocs/llama.interactiveexecutor/#parameters_2","title":"Parameters","text":"

    filename String

    "},{"location":"xmldocs/llama.interactiveexecutor/#loadstatestring","title":"LoadState(String)","text":"
    public void LoadState(string filename)\n
    "},{"location":"xmldocs/llama.interactiveexecutor/#parameters_3","title":"Parameters","text":"

    filename String

    "},{"location":"xmldocs/llama.interactiveexecutor/#getloopconditioninferstateargs","title":"GetLoopCondition(InferStateArgs)","text":"

    Define whether to continue the loop to generate responses.

    protected bool GetLoopCondition(InferStateArgs args)\n
    "},{"location":"xmldocs/llama.interactiveexecutor/#parameters_4","title":"Parameters","text":"

    args InferStateArgs

    "},{"location":"xmldocs/llama.interactiveexecutor/#returns_1","title":"Returns","text":"

    Boolean

    "},{"location":"xmldocs/llama.interactiveexecutor/#preprocessinputsstring-inferstateargs","title":"PreprocessInputs(String, InferStateArgs)","text":"
    protected void PreprocessInputs(string text, InferStateArgs args)\n
    "},{"location":"xmldocs/llama.interactiveexecutor/#parameters_5","title":"Parameters","text":"

    text String

    args InferStateArgs

    "},{"location":"xmldocs/llama.interactiveexecutor/#postprocessinferenceparams-inferstateargs-ienumerable1","title":"PostProcess(InferenceParams, InferStateArgs, IEnumerable`1&)","text":"

    Return whether to break the generation.

    protected bool PostProcess(InferenceParams inferenceParams, InferStateArgs args, IEnumerable`1& extraOutputs)\n
    "},{"location":"xmldocs/llama.interactiveexecutor/#parameters_6","title":"Parameters","text":"

    inferenceParams InferenceParams

    args InferStateArgs

    extraOutputs IEnumerable`1&

    "},{"location":"xmldocs/llama.interactiveexecutor/#returns_2","title":"Returns","text":"

    Boolean

    "},{"location":"xmldocs/llama.interactiveexecutor/#inferinternalinferenceparams-inferstateargs","title":"InferInternal(InferenceParams, InferStateArgs)","text":"
    protected void InferInternal(InferenceParams inferenceParams, InferStateArgs args)\n
    "},{"location":"xmldocs/llama.interactiveexecutor/#parameters_7","title":"Parameters","text":"

    inferenceParams InferenceParams

    args InferStateArgs

    "},{"location":"xmldocs/llama.llamaembedder/","title":"LLamaEmbedder","text":"

    Namespace: LLama

    The embedder for LLama, which supports getting embeddings from text.

    public class LLamaEmbedder : System.IDisposable\n

    Inheritance Object \u2192 LLamaEmbedder Implements IDisposable

    "},{"location":"xmldocs/llama.llamaembedder/#constructors","title":"Constructors","text":""},{"location":"xmldocs/llama.llamaembedder/#llamaembeddermodelparams","title":"LLamaEmbedder(ModelParams)","text":"
    public LLamaEmbedder(ModelParams params)\n
    "},{"location":"xmldocs/llama.llamaembedder/#parameters","title":"Parameters","text":"

    params ModelParams

    "},{"location":"xmldocs/llama.llamaembedder/#methods","title":"Methods","text":""},{"location":"xmldocs/llama.llamaembedder/#getembeddingsstring-int32-boolean-string","title":"GetEmbeddings(String, Int32, Boolean, String)","text":"

    Get the embeddings of the text.

    public Single[] GetEmbeddings(string text, int threads, bool addBos, string encoding)\n
    "},{"location":"xmldocs/llama.llamaembedder/#parameters_1","title":"Parameters","text":"

    text String

    threads Int32 Threads used for inference.

    addBos Boolean Add bos to the text.

    encoding String

    "},{"location":"xmldocs/llama.llamaembedder/#returns","title":"Returns","text":"

    Single[]

    "},{"location":"xmldocs/llama.llamaembedder/#exceptions","title":"Exceptions","text":"

    RuntimeError

    "},{"location":"xmldocs/llama.llamaembedder/#dispose","title":"Dispose()","text":"
    public void Dispose()\n
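An embedding sketch, assuming the model path is a placeholder, the unshown `ModelParams` constructor arguments keep their defaults, and the argument values are illustrative:

```csharp
using System;
using LLama;
using LLama.Common;

// EmbeddingMode must be enabled; without it the model is set up for text generation.
var modelParams = new ModelParams("path/to/model.bin") { EmbeddingMode = true };
using var embedder = new LLamaEmbedder(modelParams);

float[] embedding = embedder.GetEmbeddings("Hello, world!",
    threads: -1, addBos: true, encoding: "UTF-8");
Console.WriteLine($"Embedding dimension: {embedding.Length}");
```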
    "},{"location":"xmldocs/llama.llamamodel/","title":"LLamaModel","text":"

    Namespace: LLama

    The abstraction of a LLama model, which holds the context in the native library.

    public class LLamaModel : System.IDisposable\n

    Inheritance Object \u2192 LLamaModel Implements IDisposable

    "},{"location":"xmldocs/llama.llamamodel/#properties","title":"Properties","text":""},{"location":"xmldocs/llama.llamamodel/#contextsize","title":"ContextSize","text":"

    The context size.

    public int ContextSize { get; }\n
    "},{"location":"xmldocs/llama.llamamodel/#property-value","title":"Property Value","text":"

    Int32

    "},{"location":"xmldocs/llama.llamamodel/#params","title":"Params","text":"

    The model params set for this model.

    public ModelParams Params { get; set; }\n
    "},{"location":"xmldocs/llama.llamamodel/#property-value_1","title":"Property Value","text":"

    ModelParams

    "},{"location":"xmldocs/llama.llamamodel/#nativehandle","title":"NativeHandle","text":"

    The native handle, which is used to be passed to the native APIs. Please avoid using it unless you know what is the usage of the Native API.

    public SafeLLamaContextHandle NativeHandle { get; }\n
    "},{"location":"xmldocs/llama.llamamodel/#property-value_2","title":"Property Value","text":"

    SafeLLamaContextHandle

    "},{"location":"xmldocs/llama.llamamodel/#encoding","title":"Encoding","text":"

    The encoding set for this model to deal with text input.

    public Encoding Encoding { get; }\n
    "},{"location":"xmldocs/llama.llamamodel/#property-value_3","title":"Property Value","text":"

    Encoding

    "},{"location":"xmldocs/llama.llamamodel/#constructors","title":"Constructors","text":""},{"location":"xmldocs/llama.llamamodel/#llamamodelmodelparams-string-illamalogger","title":"LLamaModel(ModelParams, String, ILLamaLogger)","text":"
    public LLamaModel(ModelParams Params, string encoding, ILLamaLogger logger)\n
    "},{"location":"xmldocs/llama.llamamodel/#parameters","title":"Parameters","text":"

    Params ModelParams Model params.

    encoding String Encoding to deal with text input.

    logger ILLamaLogger The logger.

    "},{"location":"xmldocs/llama.llamamodel/#methods","title":"Methods","text":""},{"location":"xmldocs/llama.llamamodel/#tokenizestring-boolean","title":"Tokenize(String, Boolean)","text":"

    Tokenize a string.

    public IEnumerable<int> Tokenize(string text, bool addBos)\n
    "},{"location":"xmldocs/llama.llamamodel/#parameters_1","title":"Parameters","text":"

    text String

    addBos Boolean Whether to add a bos to the text.

    "},{"location":"xmldocs/llama.llamamodel/#returns","title":"Returns","text":"

    IEnumerable<Int32>

    "},{"location":"xmldocs/llama.llamamodel/#detokenizeienumerableint32","title":"DeTokenize(IEnumerable<Int32>)","text":"

    Detokenize the tokens to text.

    public string DeTokenize(IEnumerable<int> tokens)\n
    "},{"location":"xmldocs/llama.llamamodel/#parameters_2","title":"Parameters","text":"

    tokens IEnumerable<Int32>

    "},{"location":"xmldocs/llama.llamamodel/#returns_1","title":"Returns","text":"

    String

    "},{"location":"xmldocs/llama.llamamodel/#savestatestring","title":"SaveState(String)","text":"

    Save the state to the specified path.

    public void SaveState(string filename)\n
    "},{"location":"xmldocs/llama.llamamodel/#parameters_3","title":"Parameters","text":"

    filename String

    "},{"location":"xmldocs/llama.llamamodel/#getstatedata","title":"GetStateData()","text":"

    Get the state data as a byte array.

    public Byte[] GetStateData()\n
    "},{"location":"xmldocs/llama.llamamodel/#returns_2","title":"Returns","text":"

    Byte[]

    "},{"location":"xmldocs/llama.llamamodel/#loadstatestring","title":"LoadState(String)","text":"

    Load the state from the specified path.

    public void LoadState(string filename)\n
    "},{"location":"xmldocs/llama.llamamodel/#parameters_4","title":"Parameters","text":"

    filename String

    "},{"location":"xmldocs/llama.llamamodel/#exceptions","title":"Exceptions","text":"

    RuntimeError

    "},{"location":"xmldocs/llama.llamamodel/#loadstatebyte","title":"LoadState(Byte[])","text":"

    Load the state from memory.

    public void LoadState(Byte[] stateData)\n
    "},{"location":"xmldocs/llama.llamamodel/#parameters_5","title":"Parameters","text":"

    stateData Byte[]

    "},{"location":"xmldocs/llama.llamamodel/#exceptions_1","title":"Exceptions","text":"

    RuntimeError
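    The state members above compose naturally; a minimal sketch (the file name is illustrative):

```csharp
// Persist the state to disk and restore it later.
model.SaveState("model_state.bin");
model.LoadState("model_state.bin");

// Or keep the state in memory instead of on disk.
byte[] state = model.GetStateData();
model.LoadState(state);
```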

    "},{"location":"xmldocs/llama.llamamodel/#samplellamatokendataarray-single-mirostatetype-single-single-int32-single-single-single","title":"Sample(LLamaTokenDataArray, Single, MiroStateType, Single, Single, Int32, Single, Single, Single)","text":"

    Performs the sampling. Please don't use it unless you fully understand what it does.

    public int Sample(LLamaTokenDataArray candidates, float temperature, MiroStateType mirostat, float mirostatTau, float mirostatEta, int topK, float topP, float tfsZ, float typicalP)\n
    "},{"location":"xmldocs/llama.llamamodel/#parameters_6","title":"Parameters","text":"

    candidates LLamaTokenDataArray

    temperature Single

    mirostat MiroStateType

    mirostatTau Single

    mirostatEta Single

    topK Int32

    topP Single

    tfsZ Single

    typicalP Single

    "},{"location":"xmldocs/llama.llamamodel/#returns_3","title":"Returns","text":"

    Int32

    "},{"location":"xmldocs/llama.llamamodel/#applypenaltyienumerableint32-dictionaryint32-single-int32-single-single-single-boolean","title":"ApplyPenalty(IEnumerable<Int32>, Dictionary<Int32, Single>, Int32, Single, Single, Single, Boolean)","text":"

    Applies penalties to the tokens. Please don't use it unless you fully understand what it does.

    public LLamaTokenDataArray ApplyPenalty(IEnumerable<int> lastTokens, Dictionary<int, float> logitBias, int repeatLastTokensCount, float repeatPenalty, float alphaFrequency, float alphaPresence, bool penalizeNL)\n
    "},{"location":"xmldocs/llama.llamamodel/#parameters_7","title":"Parameters","text":"

    lastTokens IEnumerable<Int32>

    logitBias Dictionary<Int32, Single>

    repeatLastTokensCount Int32

    repeatPenalty Single

    alphaFrequency Single

    alphaPresence Single

    penalizeNL Boolean

    "},{"location":"xmldocs/llama.llamamodel/#returns_4","title":"Returns","text":"

    LLamaTokenDataArray
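    For readers of the two low-level members above, a sketch of how they are typically chained. All numeric values are illustrative, `lastTokens` is assumed to hold the recently generated token ids, and the MiroStateType member used to disable mirostat is an assumption:

```csharp
// Apply penalties over recent tokens, then sample the next token.
LLamaTokenDataArray candidates = model.ApplyPenalty(
    lastTokens, logitBias: null, repeatLastTokensCount: 64,
    repeatPenalty: 1.1f, alphaFrequency: 0.0f, alphaPresence: 0.0f,
    penalizeNL: true);
int token = model.Sample(
    candidates, temperature: 0.8f,
    mirostat: MiroStateType.Disable, // member name assumed
    mirostatTau: 5.0f, mirostatEta: 0.1f,
    topK: 40, topP: 0.95f, tfsZ: 1.0f, typicalP: 1.0f);
```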

    "},{"location":"xmldocs/llama.llamamodel/#evalint32-int32","title":"Eval(Int32[], Int32)","text":"
    public int Eval(Int32[] tokens, int pastTokensCount)\n
    "},{"location":"xmldocs/llama.llamamodel/#parameters_8","title":"Parameters","text":"

    tokens Int32[]

    pastTokensCount Int32

    "},{"location":"xmldocs/llama.llamamodel/#returns_5","title":"Returns","text":"

    Int32 The updated pastTokensCount.

    "},{"location":"xmldocs/llama.llamamodel/#exceptions_2","title":"Exceptions","text":"

    RuntimeError
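    An evaluation sketch that keeps the past-token count in step with what has been fed in:

```csharp
// `prompt` is any input string; ToArray requires System.Linq.
int[] tokens = model.Tokenize(prompt, true).ToArray();
int pastTokensCount = 0;
pastTokensCount = model.Eval(tokens, pastTokensCount);
```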

    "},{"location":"xmldocs/llama.llamamodel/#generateresultienumerableint32","title":"GenerateResult(IEnumerable<Int32>)","text":"
    internal IEnumerable<string> GenerateResult(IEnumerable<int> ids)\n
    "},{"location":"xmldocs/llama.llamamodel/#parameters_9","title":"Parameters","text":"

    ids IEnumerable<Int32>

    "},{"location":"xmldocs/llama.llamamodel/#returns_6","title":"Returns","text":"

    IEnumerable<String>

    "},{"location":"xmldocs/llama.llamamodel/#dispose","title":"Dispose()","text":"
    public void Dispose()\n
    "},{"location":"xmldocs/llama.llamaquantizer/","title":"LLamaQuantizer","text":"

    Namespace: LLama

    The quantizer used to quantize models.

    public static class LLamaQuantizer\n

    Inheritance Object \u2192 LLamaQuantizer

    "},{"location":"xmldocs/llama.llamaquantizer/#methods","title":"Methods","text":""},{"location":"xmldocs/llama.llamaquantizer/#quantizestring-string-llamaftype-int32","title":"Quantize(String, String, LLamaFtype, Int32)","text":"

    Quantize the model.

    public static bool Quantize(string srcFileName, string dstFilename, LLamaFtype ftype, int nthread)\n
    "},{"location":"xmldocs/llama.llamaquantizer/#parameters","title":"Parameters","text":"

    srcFileName String The model file to be quantized.

    dstFilename String The path to save the quantized model.

    ftype LLamaFtype The type of quantization.

    nthread Int32 The number of threads to use during quantization. Defaults to the number of physical cores.

    "},{"location":"xmldocs/llama.llamaquantizer/#returns","title":"Returns","text":"

    Boolean Whether the quantization is successful.

    "},{"location":"xmldocs/llama.llamaquantizer/#exceptions","title":"Exceptions","text":"

    ArgumentException

    "},{"location":"xmldocs/llama.llamaquantizer/#quantizestring-string-string-int32","title":"Quantize(String, String, String, Int32)","text":"

    Quantize the model.

    public static bool Quantize(string srcFileName, string dstFilename, string ftype, int nthread)\n
    "},{"location":"xmldocs/llama.llamaquantizer/#parameters_1","title":"Parameters","text":"

    srcFileName String The model file to be quantized.

    dstFilename String The path to save the quantized model.

    ftype String The type of quantization.

    nthread Int32 The number of threads to use during quantization. Defaults to the number of physical cores.

    "},{"location":"xmldocs/llama.llamaquantizer/#returns_1","title":"Returns","text":"

    Boolean Whether the quantization is successful.

    "},{"location":"xmldocs/llama.llamaquantizer/#exceptions_1","title":"Exceptions","text":"

    ArgumentException
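    A sketch with the string overload. The file names are illustrative and "q4_0" is assumed to be one of the accepted quantization type names:

```csharp
bool ok = LLamaQuantizer.Quantize(
    "ggml-model-f16.bin",  // source model (illustrative)
    "ggml-model-q4_0.bin", // destination (illustrative)
    "q4_0",                // assumed to be a recognised type string
    4);                    // thread count (illustrative)
if (!ok)
{
    // quantization failed
}
```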

    "},{"location":"xmldocs/llama.llamatransforms/","title":"LLamaTransforms","text":"

    Namespace: LLama

    A class that contains all the transforms provided internally by LLama.

    public class LLamaTransforms\n

    Inheritance Object \u2192 LLamaTransforms

    "},{"location":"xmldocs/llama.llamatransforms/#constructors","title":"Constructors","text":""},{"location":"xmldocs/llama.llamatransforms/#llamatransforms_1","title":"LLamaTransforms()","text":"
    public LLamaTransforms()\n
    "},{"location":"xmldocs/llama.native.llamacontextparams/","title":"LLamaContextParams","text":"

    Namespace: LLama.Native

    public struct LLamaContextParams\n

    Inheritance Object \u2192 ValueType \u2192 LLamaContextParams

    "},{"location":"xmldocs/llama.native.llamacontextparams/#fields","title":"Fields","text":""},{"location":"xmldocs/llama.native.llamacontextparams/#n_ctx","title":"n_ctx","text":"

    text context (the context window size, in tokens)

    public int n_ctx;\n
    "},{"location":"xmldocs/llama.native.llamacontextparams/#n_gpu_layers","title":"n_gpu_layers","text":"

    number of layers to store in VRAM

    public int n_gpu_layers;\n
    "},{"location":"xmldocs/llama.native.llamacontextparams/#seed","title":"seed","text":"

    RNG seed, -1 for random

    public int seed;\n
    "},{"location":"xmldocs/llama.native.llamacontextparams/#f16_kv","title":"f16_kv","text":"

    use fp16 for KV cache

    public bool f16_kv;\n
    "},{"location":"xmldocs/llama.native.llamacontextparams/#logits_all","title":"logits_all","text":"

    the llama_eval() call computes all logits, not just the last one

    public bool logits_all;\n
    "},{"location":"xmldocs/llama.native.llamacontextparams/#vocab_only","title":"vocab_only","text":"

    only load the vocabulary, no weights

    public bool vocab_only;\n
    "},{"location":"xmldocs/llama.native.llamacontextparams/#use_mmap","title":"use_mmap","text":"

    use mmap if possible

    public bool use_mmap;\n
    "},{"location":"xmldocs/llama.native.llamacontextparams/#use_mlock","title":"use_mlock","text":"

    force system to keep model in RAM

    public bool use_mlock;\n
    "},{"location":"xmldocs/llama.native.llamacontextparams/#embedding","title":"embedding","text":"

    embedding mode only

    public bool embedding;\n
    "},{"location":"xmldocs/llama.native.llamacontextparams/#progress_callback","title":"progress_callback","text":"

    called with a progress value between 0 and 1, pass NULL to disable

    public IntPtr progress_callback;\n
    "},{"location":"xmldocs/llama.native.llamacontextparams/#progress_callback_user_data","title":"progress_callback_user_data","text":"

    context pointer passed to the progress callback

    public IntPtr progress_callback_user_data;\n
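    A sketch of obtaining the defaults through NativeApi.llama_context_default_params() and adjusting a few fields; the values are illustrative:

```csharp
using LLama.Native;

LLamaContextParams p = NativeApi.llama_context_default_params();
p.n_ctx = 1024;    // context size in tokens
p.seed = -1;       // -1 = random seed
p.use_mmap = true; // use mmap if possible
```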
    "},{"location":"xmldocs/llama.native.llamaftype/","title":"LLamaFtype","text":"

    Namespace: LLama.Native

    public enum LLamaFtype\n

    Inheritance Object \u2192 ValueType \u2192 Enum \u2192 LLamaFtype Implements IComparable, IFormattable, IConvertible

    "},{"location":"xmldocs/llama.native.llamaftype/#fields","title":"Fields","text":"Name Value Description"},{"location":"xmldocs/llama.native.llamatokendata/","title":"LLamaTokenData","text":"

    Namespace: LLama.Native

    public struct LLamaTokenData\n

    Inheritance Object \u2192 ValueType \u2192 LLamaTokenData

    "},{"location":"xmldocs/llama.native.llamatokendata/#fields","title":"Fields","text":""},{"location":"xmldocs/llama.native.llamatokendata/#id","title":"id","text":"

    token id

    public int id;\n
    "},{"location":"xmldocs/llama.native.llamatokendata/#logit","title":"logit","text":"

    log-odds of the token

    public float logit;\n
    "},{"location":"xmldocs/llama.native.llamatokendata/#p","title":"p","text":"

    probability of the token

    public float p;\n
    "},{"location":"xmldocs/llama.native.llamatokendata/#constructors","title":"Constructors","text":""},{"location":"xmldocs/llama.native.llamatokendata/#llamatokendataint32-single-single","title":"LLamaTokenData(Int32, Single, Single)","text":"
    LLamaTokenData(int id, float logit, float p)\n
    "},{"location":"xmldocs/llama.native.llamatokendata/#parameters","title":"Parameters","text":"

    id Int32

    logit Single

    p Single

    "},{"location":"xmldocs/llama.native.llamatokendataarray/","title":"LLamaTokenDataArray","text":"

    Namespace: LLama.Native

    public struct LLamaTokenDataArray\n

    Inheritance Object \u2192 ValueType \u2192 LLamaTokenDataArray

    "},{"location":"xmldocs/llama.native.llamatokendataarray/#fields","title":"Fields","text":""},{"location":"xmldocs/llama.native.llamatokendataarray/#data","title":"data","text":"
    public Memory<LLamaTokenData> data;\n
    "},{"location":"xmldocs/llama.native.llamatokendataarray/#size","title":"size","text":"
    public ulong size;\n
    "},{"location":"xmldocs/llama.native.llamatokendataarray/#sorted","title":"sorted","text":"
    public bool sorted;\n
    "},{"location":"xmldocs/llama.native.llamatokendataarray/#constructors","title":"Constructors","text":""},{"location":"xmldocs/llama.native.llamatokendataarray/#llamatokendataarrayllamatokendata-uint64-boolean","title":"LLamaTokenDataArray(LLamaTokenData[], UInt64, Boolean)","text":"
    LLamaTokenDataArray(LLamaTokenData[] data, ulong size, bool sorted)\n
    "},{"location":"xmldocs/llama.native.llamatokendataarray/#parameters","title":"Parameters","text":"

    data LLamaTokenData[]

    size UInt64

    sorted Boolean
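    A construction sketch from the fields and constructor above; in practice the logits would come from the model:

```csharp
var data = new LLamaTokenData[]
{
    new LLamaTokenData(id: 0, logit: 0.0f, p: 0.0f),
};
var candidates = new LLamaTokenDataArray(data, (ulong)data.Length, sorted: false);
```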

    "},{"location":"xmldocs/llama.native.llamatokendataarraynative/","title":"LLamaTokenDataArrayNative","text":"

    Namespace: LLama.Native

    public struct LLamaTokenDataArrayNative\n

    Inheritance Object \u2192 ValueType \u2192 LLamaTokenDataArrayNative

    "},{"location":"xmldocs/llama.native.llamatokendataarraynative/#fields","title":"Fields","text":""},{"location":"xmldocs/llama.native.llamatokendataarraynative/#data","title":"data","text":"
    public IntPtr data;\n
    "},{"location":"xmldocs/llama.native.llamatokendataarraynative/#size","title":"size","text":"
    public ulong size;\n
    "},{"location":"xmldocs/llama.native.llamatokendataarraynative/#sorted","title":"sorted","text":"
    public bool sorted;\n
    "},{"location":"xmldocs/llama.native.nativeapi/","title":"NativeApi","text":"

    Namespace: LLama.Native

    public class NativeApi\n

    Inheritance Object \u2192 NativeApi

    "},{"location":"xmldocs/llama.native.nativeapi/#constructors","title":"Constructors","text":""},{"location":"xmldocs/llama.native.nativeapi/#nativeapi_1","title":"NativeApi()","text":"
    public NativeApi()\n
    "},{"location":"xmldocs/llama.native.nativeapi/#methods","title":"Methods","text":""},{"location":"xmldocs/llama.native.nativeapi/#llama_print_timingssafellamacontexthandle","title":"llama_print_timings(SafeLLamaContextHandle)","text":"
    public static void llama_print_timings(SafeLLamaContextHandle ctx)\n
    "},{"location":"xmldocs/llama.native.nativeapi/#parameters","title":"Parameters","text":"

    ctx SafeLLamaContextHandle

    "},{"location":"xmldocs/llama.native.nativeapi/#llama_reset_timingssafellamacontexthandle","title":"llama_reset_timings(SafeLLamaContextHandle)","text":"
    public static void llama_reset_timings(SafeLLamaContextHandle ctx)\n
    "},{"location":"xmldocs/llama.native.nativeapi/#parameters_1","title":"Parameters","text":"

    ctx SafeLLamaContextHandle

    "},{"location":"xmldocs/llama.native.nativeapi/#llama_print_system_info","title":"llama_print_system_info()","text":"

    Print system information

    public static IntPtr llama_print_system_info()\n
    "},{"location":"xmldocs/llama.native.nativeapi/#returns","title":"Returns","text":"

    IntPtr
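    The returned pointer refers to a native string; a reading sketch (marshalling it as ANSI is an assumption about its encoding):

```csharp
using System.Runtime.InteropServices;

IntPtr info = NativeApi.llama_print_system_info();
Console.WriteLine(Marshal.PtrToStringAnsi(info));
```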

    "},{"location":"xmldocs/llama.native.nativeapi/#llama_model_quantizestring-string-llamaftype-int32","title":"llama_model_quantize(String, String, LLamaFtype, Int32)","text":"
    public static int llama_model_quantize(string fname_inp, string fname_out, LLamaFtype ftype, int nthread)\n
    "},{"location":"xmldocs/llama.native.nativeapi/#parameters_2","title":"Parameters","text":"

    fname_inp String

    fname_out String

    ftype LLamaFtype

    nthread Int32

    "},{"location":"xmldocs/llama.native.nativeapi/#returns_1","title":"Returns","text":"

    Int32

    "},{"location":"xmldocs/llama.native.nativeapi/#llama_sample_repetition_penaltysafellamacontexthandle-intptr-int32-uint64-single","title":"llama_sample_repetition_penalty(SafeLLamaContextHandle, IntPtr, Int32[], UInt64, Single)","text":"

    Repetition penalty described in CTRL academic paper https://arxiv.org/abs/1909.05858, with negative logit fix.

    public static void llama_sample_repetition_penalty(SafeLLamaContextHandle ctx, IntPtr candidates, Int32[] last_tokens, ulong last_tokens_size, float penalty)\n
    "},{"location":"xmldocs/llama.native.nativeapi/#parameters_3","title":"Parameters","text":"

    ctx SafeLLamaContextHandle

    candidates IntPtr Pointer to LLamaTokenDataArray

    last_tokens Int32[]

    last_tokens_size UInt64

    penalty Single

    "},{"location":"xmldocs/llama.native.nativeapi/#llama_sample_frequency_and_presence_penaltiessafellamacontexthandle-intptr-int32-uint64-single-single","title":"llama_sample_frequency_and_presence_penalties(SafeLLamaContextHandle, IntPtr, Int32[], UInt64, Single, Single)","text":"

    Frequency and presence penalties described in OpenAI API https://platform.openai.com/docs/api-reference/parameter-details.

    public static void llama_sample_frequency_and_presence_penalties(SafeLLamaContextHandle ctx, IntPtr candidates, Int32[] last_tokens, ulong last_tokens_size, float alpha_frequency, float alpha_presence)\n
    "},{"location":"xmldocs/llama.native.nativeapi/#parameters_4","title":"Parameters","text":"

    ctx SafeLLamaContextHandle

    candidates IntPtr Pointer to LLamaTokenDataArray

    last_tokens Int32[]

    last_tokens_size UInt64

    alpha_frequency Single

    alpha_presence Single

    "},{"location":"xmldocs/llama.native.nativeapi/#llama_sample_softmaxsafellamacontexthandle-intptr","title":"llama_sample_softmax(SafeLLamaContextHandle, IntPtr)","text":"

    Sorts candidate tokens by their logits in descending order and calculates probabilities based on the logits.

    public static void llama_sample_softmax(SafeLLamaContextHandle ctx, IntPtr candidates)\n
    "},{"location":"xmldocs/llama.native.nativeapi/#parameters_5","title":"Parameters","text":"

    ctx SafeLLamaContextHandle

    candidates IntPtr Pointer to LLamaTokenDataArray

    "},{"location":"xmldocs/llama.native.nativeapi/#llama_sample_top_ksafellamacontexthandle-intptr-int32-uint64","title":"llama_sample_top_k(SafeLLamaContextHandle, IntPtr, Int32, UInt64)","text":"

    Top-K sampling described in academic paper \"The Curious Case of Neural Text Degeneration\" https://arxiv.org/abs/1904.09751

    public static void llama_sample_top_k(SafeLLamaContextHandle ctx, IntPtr candidates, int k, ulong min_keep)\n
    "},{"location":"xmldocs/llama.native.nativeapi/#parameters_6","title":"Parameters","text":"

    ctx SafeLLamaContextHandle

    candidates IntPtr Pointer to LLamaTokenDataArray

    k Int32

    min_keep UInt64

    "},{"location":"xmldocs/llama.native.nativeapi/#llama_sample_top_psafellamacontexthandle-intptr-single-uint64","title":"llama_sample_top_p(SafeLLamaContextHandle, IntPtr, Single, UInt64)","text":"

    Nucleus sampling described in academic paper \"The Curious Case of Neural Text Degeneration\" https://arxiv.org/abs/1904.09751

    public static void llama_sample_top_p(SafeLLamaContextHandle ctx, IntPtr candidates, float p, ulong min_keep)\n
    "},{"location":"xmldocs/llama.native.nativeapi/#parameters_7","title":"Parameters","text":"

    ctx SafeLLamaContextHandle

    candidates IntPtr Pointer to LLamaTokenDataArray

    p Single

    min_keep UInt64

    "},{"location":"xmldocs/llama.native.nativeapi/#llama_sample_tail_freesafellamacontexthandle-intptr-single-uint64","title":"llama_sample_tail_free(SafeLLamaContextHandle, IntPtr, Single, UInt64)","text":"

    Tail Free Sampling described in https://www.trentonbricken.com/Tail-Free-Sampling/.

    public static void llama_sample_tail_free(SafeLLamaContextHandle ctx, IntPtr candidates, float z, ulong min_keep)\n
    "},{"location":"xmldocs/llama.native.nativeapi/#parameters_8","title":"Parameters","text":"

    ctx SafeLLamaContextHandle

    candidates IntPtr Pointer to LLamaTokenDataArray

    z Single

    min_keep UInt64

    "},{"location":"xmldocs/llama.native.nativeapi/#llama_sample_typicalsafellamacontexthandle-intptr-single-uint64","title":"llama_sample_typical(SafeLLamaContextHandle, IntPtr, Single, UInt64)","text":"

    Locally Typical Sampling implementation described in the paper https://arxiv.org/abs/2202.00666.

    public static void llama_sample_typical(SafeLLamaContextHandle ctx, IntPtr candidates, float p, ulong min_keep)\n
    "},{"location":"xmldocs/llama.native.nativeapi/#parameters_9","title":"Parameters","text":"

    ctx SafeLLamaContextHandle

    candidates IntPtr Pointer to LLamaTokenDataArray

    p Single

    min_keep UInt64

    "},{"location":"xmldocs/llama.native.nativeapi/#llama_sample_temperaturesafellamacontexthandle-intptr-single","title":"llama_sample_temperature(SafeLLamaContextHandle, IntPtr, Single)","text":"
    public static void llama_sample_temperature(SafeLLamaContextHandle ctx, IntPtr candidates, float temp)\n
    "},{"location":"xmldocs/llama.native.nativeapi/#parameters_10","title":"Parameters","text":"

    ctx SafeLLamaContextHandle

    candidates IntPtr

    temp Single

    "},{"location":"xmldocs/llama.native.nativeapi/#llama_sample_token_mirostatsafellamacontexthandle-intptr-single-single-int32-single","title":"llama_sample_token_mirostat(SafeLLamaContextHandle, IntPtr, Single, Single, Int32, Single*)","text":"

    Mirostat 1.0 algorithm described in the paper https://arxiv.org/abs/2007.14966. Uses tokens instead of words.

    public static int llama_sample_token_mirostat(SafeLLamaContextHandle ctx, IntPtr candidates, float tau, float eta, int m, Single* mu)\n
    "},{"location":"xmldocs/llama.native.nativeapi/#parameters_11","title":"Parameters","text":"

    ctx SafeLLamaContextHandle

    candidates IntPtr A vector of llama_token_data containing the candidate tokens, their probabilities (p), and log-odds (logit) for the current position in the generated text.

    tau Single The target cross-entropy (or surprise) value you want to achieve for the generated text. A higher value corresponds to more surprising or less predictable text, while a lower value corresponds to less surprising or more predictable text.

    eta Single The learning rate used to update mu based on the error between the target and observed surprisal of the sampled word. A larger learning rate will cause mu to be updated more quickly, while a smaller learning rate will result in slower updates.

    m Int32 The number of tokens considered in the estimation of s_hat. This is an arbitrary value that is used to calculate s_hat, which in turn helps to calculate the value of k. In the paper, they use m = 100, but you can experiment with different values to see how it affects the performance of the algorithm.

    mu Single* Maximum cross-entropy. This value is initialized to be twice the target cross-entropy (2 * tau) and is updated in the algorithm based on the error between the target and observed surprisal.

    "},{"location":"xmldocs/llama.native.nativeapi/#returns_2","title":"Returns","text":"

    Int32

    "},{"location":"xmldocs/llama.native.nativeapi/#llama_sample_token_mirostat_v2safellamacontexthandle-intptr-single-single-single","title":"llama_sample_token_mirostat_v2(SafeLLamaContextHandle, IntPtr, Single, Single, Single*)","text":"

    Mirostat 2.0 algorithm described in the paper https://arxiv.org/abs/2007.14966. Uses tokens instead of words.

    public static int llama_sample_token_mirostat_v2(SafeLLamaContextHandle ctx, IntPtr candidates, float tau, float eta, Single* mu)\n
    "},{"location":"xmldocs/llama.native.nativeapi/#parameters_12","title":"Parameters","text":"

    ctx SafeLLamaContextHandle

    candidates IntPtr A vector of llama_token_data containing the candidate tokens, their probabilities (p), and log-odds (logit) for the current position in the generated text.

    tau Single The target cross-entropy (or surprise) value you want to achieve for the generated text. A higher value corresponds to more surprising or less predictable text, while a lower value corresponds to less surprising or more predictable text.

    eta Single The learning rate used to update mu based on the error between the target and observed surprisal of the sampled word. A larger learning rate will cause mu to be updated more quickly, while a smaller learning rate will result in slower updates.

    mu Single* Maximum cross-entropy. This value is initialized to be twice the target cross-entropy (2 * tau) and is updated in the algorithm based on the error between the target and observed surprisal.

    "},{"location":"xmldocs/llama.native.nativeapi/#returns_3","title":"Returns","text":"

    Int32
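    A sketch of the 2.0 variant. Per the description above, mu starts at twice the target cross-entropy; `ctx` is assumed valid and `candidatesPtr` is assumed to point at a prepared LLamaTokenDataArrayNative, as in the sampling sketch further down:

```csharp
unsafe
{
    float tau = 5.0f, eta = 0.1f; // illustrative values
    float mu = 2.0f * tau;        // initialized to twice the target cross-entropy
    int token = NativeApi.llama_sample_token_mirostat_v2(ctx, candidatesPtr, tau, eta, &mu);
}
```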

    "},{"location":"xmldocs/llama.native.nativeapi/#llama_sample_token_greedysafellamacontexthandle-intptr","title":"llama_sample_token_greedy(SafeLLamaContextHandle, IntPtr)","text":"

    Selects the token with the highest probability.

    public static int llama_sample_token_greedy(SafeLLamaContextHandle ctx, IntPtr candidates)\n
    "},{"location":"xmldocs/llama.native.nativeapi/#parameters_13","title":"Parameters","text":"

    ctx SafeLLamaContextHandle

    candidates IntPtr Pointer to LLamaTokenDataArray

    "},{"location":"xmldocs/llama.native.nativeapi/#returns_4","title":"Returns","text":"

    Int32

    "},{"location":"xmldocs/llama.native.nativeapi/#llama_sample_tokensafellamacontexthandle-intptr","title":"llama_sample_token(SafeLLamaContextHandle, IntPtr)","text":"

    Randomly selects a token from the candidates based on their probabilities.

    public static int llama_sample_token(SafeLLamaContextHandle ctx, IntPtr candidates)\n
    "},{"location":"xmldocs/llama.native.nativeapi/#parameters_14","title":"Parameters","text":"

    ctx SafeLLamaContextHandle

    candidates IntPtr Pointer to LLamaTokenDataArray

    "},{"location":"xmldocs/llama.native.nativeapi/#returns_5","title":"Returns","text":"

    Int32
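    Putting the sampling entries together: a minimal unsafe sketch that builds the candidate array from the raw logits and applies top-k, top-p and temperature before sampling. All threshold values are illustrative and `ctx` is assumed to be a valid SafeLLamaContextHandle:

```csharp
unsafe
{
    int n_vocab = NativeApi.llama_n_vocab(ctx);
    float* logits = NativeApi.llama_get_logits(ctx);

    // One candidate per vocabulary entry, seeded with the current logits.
    var tokenData = new LLamaTokenData[n_vocab];
    for (int i = 0; i < n_vocab; i++)
        tokenData[i] = new LLamaTokenData(i, logits[i], 0.0f);

    fixed (LLamaTokenData* pData = tokenData)
    {
        var native = new LLamaTokenDataArrayNative
        {
            data = (IntPtr)pData,
            size = (ulong)n_vocab,
            sorted = false,
        };
        IntPtr candidatesPtr = (IntPtr)(&native);

        NativeApi.llama_sample_top_k(ctx, candidatesPtr, 40, 1);
        NativeApi.llama_sample_top_p(ctx, candidatesPtr, 0.95f, 1);
        NativeApi.llama_sample_temperature(ctx, candidatesPtr, 0.8f);
        int token = NativeApi.llama_sample_token(ctx, candidatesPtr);
    }
}
```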

    "},{"location":"xmldocs/llama.native.nativeapi/#llama_empty_call","title":"llama_empty_call()","text":"
    public static bool llama_empty_call()\n
    "},{"location":"xmldocs/llama.native.nativeapi/#returns_6","title":"Returns","text":"

    Boolean

    "},{"location":"xmldocs/llama.native.nativeapi/#llama_context_default_params","title":"llama_context_default_params()","text":"
    public static LLamaContextParams llama_context_default_params()\n
    "},{"location":"xmldocs/llama.native.nativeapi/#returns_7","title":"Returns","text":"

    LLamaContextParams

    "},{"location":"xmldocs/llama.native.nativeapi/#llama_mmap_supported","title":"llama_mmap_supported()","text":"
    public static bool llama_mmap_supported()\n
    "},{"location":"xmldocs/llama.native.nativeapi/#returns_8","title":"Returns","text":"

    Boolean

    "},{"location":"xmldocs/llama.native.nativeapi/#llama_mlock_supported","title":"llama_mlock_supported()","text":"
    public static bool llama_mlock_supported()\n
    "},{"location":"xmldocs/llama.native.nativeapi/#returns_9","title":"Returns","text":"

    Boolean

    "},{"location":"xmldocs/llama.native.nativeapi/#llama_init_from_filestring-llamacontextparams","title":"llama_init_from_file(String, LLamaContextParams)","text":"

    Loads a ggml llama model. Allocates (almost) all memory needed for the model. Returns NULL on failure.

    public static IntPtr llama_init_from_file(string path_model, LLamaContextParams params_)\n
    "},{"location":"xmldocs/llama.native.nativeapi/#parameters_15","title":"Parameters","text":"

    path_model String

    params_ LLamaContextParams

    "},{"location":"xmldocs/llama.native.nativeapi/#returns_10","title":"Returns","text":"

    IntPtr

    "},{"location":"xmldocs/llama.native.nativeapi/#llama_init_backend","title":"llama_init_backend()","text":"

    Initializes the llama + ggml backend. Call it once at the start of the program. (Not a great API - very likely to change.)

    public static void llama_init_backend()\n
    "},{"location":"xmldocs/llama.native.nativeapi/#llama_freeintptr","title":"llama_free(IntPtr)","text":"

    Frees all allocated memory

    public static void llama_free(IntPtr ctx)\n
    "},{"location":"xmldocs/llama.native.nativeapi/#parameters_16","title":"Parameters","text":"

    ctx IntPtr
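    A lifecycle sketch combining the default parameters, the loader and llama_free; the model path is a placeholder:

```csharp
LLamaContextParams p = NativeApi.llama_context_default_params();
IntPtr ctx = NativeApi.llama_init_from_file("<path-to-model>", p);
if (ctx == IntPtr.Zero)
    throw new RuntimeError("Failed to load the model."); // RuntimeError lives in LLama.Exceptions

// ... use the context ...

NativeApi.llama_free(ctx);
```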

    "},{"location":"xmldocs/llama.native.nativeapi/#llama_apply_lora_from_filesafellamacontexthandle-string-string-int32","title":"llama_apply_lora_from_file(SafeLLamaContextHandle, String, String, Int32)","text":"

    Apply a LoRA adapter to a loaded model. path_base_model is the path to a higher-quality model to use as a base for the layers modified by the adapter; it can be NULL to use the currently loaded model. The model needs to be reloaded before applying a new adapter, otherwise the adapter will be applied on top of the previous one.

    public static int llama_apply_lora_from_file(SafeLLamaContextHandle ctx, string path_lora, string path_base_model, int n_threads)\n
    "},{"location":"xmldocs/llama.native.nativeapi/#parameters_17","title":"Parameters","text":"

    ctx SafeLLamaContextHandle

    path_lora String

    path_base_model String

    n_threads Int32

    "},{"location":"xmldocs/llama.native.nativeapi/#returns_11","title":"Returns","text":"

    Int32 Returns 0 on success
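    A sketch; the adapter path is a placeholder, and null for path_base_model uses the currently loaded model as noted above:

```csharp
int rc = NativeApi.llama_apply_lora_from_file(ctx, "<path-to-lora>", null, 4);
if (rc != 0)
{
    // non-zero means the adapter could not be applied
}
```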

    "},{"location":"xmldocs/llama.native.nativeapi/#llama_get_kv_cache_token_countsafellamacontexthandle","title":"llama_get_kv_cache_token_count(SafeLLamaContextHandle)","text":"

    Returns the number of tokens in the KV cache

    public static int llama_get_kv_cache_token_count(SafeLLamaContextHandle ctx)\n
    "},{"location":"xmldocs/llama.native.nativeapi/#parameters_18","title":"Parameters","text":"

    ctx SafeLLamaContextHandle

    "},{"location":"xmldocs/llama.native.nativeapi/#returns_12","title":"Returns","text":"

    Int32

    "},{"location":"xmldocs/llama.native.nativeapi/#llama_set_rng_seedsafellamacontexthandle-int32","title":"llama_set_rng_seed(SafeLLamaContextHandle, Int32)","text":"

    Sets the current rng seed.

    public static void llama_set_rng_seed(SafeLLamaContextHandle ctx, int seed)\n
    "},{"location":"xmldocs/llama.native.nativeapi/#parameters_19","title":"Parameters","text":"

    ctx SafeLLamaContextHandle

    seed Int32

    "},{"location":"xmldocs/llama.native.nativeapi/#llama_get_state_sizesafellamacontexthandle","title":"llama_get_state_size(SafeLLamaContextHandle)","text":"

    Returns the maximum size in bytes of the state (rng, logits, embedding and kv_cache) - will often be smaller after compacting tokens

    public static ulong llama_get_state_size(SafeLLamaContextHandle ctx)\n
    "},{"location":"xmldocs/llama.native.nativeapi/#parameters_20","title":"Parameters","text":"

    ctx SafeLLamaContextHandle

    "},{"location":"xmldocs/llama.native.nativeapi/#returns_13","title":"Returns","text":"

    UInt64

    "},{"location":"xmldocs/llama.native.nativeapi/#llama_copy_state_datasafellamacontexthandle-byte","title":"llama_copy_state_data(SafeLLamaContextHandle, Byte[])","text":"

    Copies the state to the specified destination address. The destination needs to have enough memory allocated. Returns the number of bytes copied.

    public static ulong llama_copy_state_data(SafeLLamaContextHandle ctx, Byte[] dest)\n
    "},{"location":"xmldocs/llama.native.nativeapi/#parameters_21","title":"Parameters","text":"

    ctx SafeLLamaContextHandle

    dest Byte[]

    "},{"location":"xmldocs/llama.native.nativeapi/#returns_14","title":"Returns","text":"

    UInt64

    "},{"location":"xmldocs/llama.native.nativeapi/#llama_set_state_datasafellamacontexthandle-byte","title":"llama_set_state_data(SafeLLamaContextHandle, Byte[])","text":"

    Sets the state by reading from the specified address. Returns the number of bytes read.

    public static ulong llama_set_state_data(SafeLLamaContextHandle ctx, Byte[] src)\n
    "},{"location":"xmldocs/llama.native.nativeapi/#parameters_22","title":"Parameters","text":"

    ctx SafeLLamaContextHandle

    src Byte[]

    "},{"location":"xmldocs/llama.native.nativeapi/#returns_15","title":"Returns","text":"

    UInt64
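    A sketch pairing the three state functions above; `ctx` is assumed to be a valid SafeLLamaContextHandle:

```csharp
ulong size = NativeApi.llama_get_state_size(ctx);
var state = new byte[size];

ulong written = NativeApi.llama_copy_state_data(ctx, state);
// ... later, restore ...
ulong read = NativeApi.llama_set_state_data(ctx, state);
```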

    "},{"location":"xmldocs/llama.native.nativeapi/#llama_load_session_filesafellamacontexthandle-string-int32-uint64-uint64","title":"llama_load_session_file(SafeLLamaContextHandle, String, Int32[], UInt64, UInt64*)","text":"

    Load session file

    public static bool llama_load_session_file(SafeLLamaContextHandle ctx, string path_session, Int32[] tokens_out, ulong n_token_capacity, UInt64* n_token_count_out)\n
    "},{"location":"xmldocs/llama.native.nativeapi/#parameters_23","title":"Parameters","text":"

    ctx SafeLLamaContextHandle

    path_session String

    tokens_out Int32[]

    n_token_capacity UInt64

    n_token_count_out UInt64*

    "},{"location":"xmldocs/llama.native.nativeapi/#returns_16","title":"Returns","text":"

    Boolean

    "},{"location":"xmldocs/llama.native.nativeapi/#llama_save_session_filesafellamacontexthandle-string-int32-uint64","title":"llama_save_session_file(SafeLLamaContextHandle, String, Int32[], UInt64)","text":"

    Save session file

    public static bool llama_save_session_file(SafeLLamaContextHandle ctx, string path_session, Int32[] tokens, ulong n_token_count)\n
    "},{"location":"xmldocs/llama.native.nativeapi/#parameters_24","title":"Parameters","text":"

    ctx SafeLLamaContextHandle

    path_session String

    tokens Int32[]

    n_token_count UInt64

    "},{"location":"xmldocs/llama.native.nativeapi/#returns_17","title":"Returns","text":"

    Boolean
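    A save/load sketch. The file name and buffer capacity are illustrative; `tokens` and `count` are assumed to describe the tokens of the current session:

```csharp
unsafe
{
    bool saved = NativeApi.llama_save_session_file(ctx, "<session-file>", tokens, count);

    var buffer = new Int32[2048]; // capacity is illustrative
    ulong loadedCount = 0;
    bool loaded = NativeApi.llama_load_session_file(
        ctx, "<session-file>", buffer, (ulong)buffer.Length, &loadedCount);
}
```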

    "},{"location":"xmldocs/llama.native.nativeapi/#llama_evalsafellamacontexthandle-int32-int32-int32-int32","title":"llama_eval(SafeLLamaContextHandle, Int32[], Int32, Int32, Int32)","text":"

    Runs the llama inference to obtain the logits and probabilities for the next token. tokens + n_tokens is the provided batch of new tokens to process; n_past is the number of tokens to reuse from previous eval calls.

    public static int llama_eval(SafeLLamaContextHandle ctx, Int32[] tokens, int n_tokens, int n_past, int n_threads)\n
    "},{"location":"xmldocs/llama.native.nativeapi/#parameters_25","title":"Parameters","text":"

    ctx SafeLLamaContextHandle

    tokens Int32[]

    n_tokens Int32

    n_past Int32

    n_threads Int32

    "},{"location":"xmldocs/llama.native.nativeapi/#returns_18","title":"Returns","text":"

    Int32 Returns 0 on success
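    An evaluation sketch that keeps n_past in step with the tokens already processed; the thread count is illustrative and `tokens` is assumed to come from llama_tokenize:

```csharp
int n_past = 0;
if (NativeApi.llama_eval(ctx, tokens, tokens.Length, n_past, 4) != 0)
    throw new RuntimeError("llama_eval failed.");
n_past += tokens.Length;
```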

    "},{"location":"xmldocs/llama.native.nativeapi/#llama_eval_with_pointersafellamacontexthandle-int32-int32-int32-int32","title":"llama_eval_with_pointer(SafeLLamaContextHandle, Int32*, Int32, Int32, Int32)","text":"
    public static int llama_eval_with_pointer(SafeLLamaContextHandle ctx, Int32* tokens, int n_tokens, int n_past, int n_threads)\n
    "},{"location":"xmldocs/llama.native.nativeapi/#parameters_26","title":"Parameters","text":"

    ctx SafeLLamaContextHandle

    tokens Int32*

    n_tokens Int32

    n_past Int32

    n_threads Int32

    "},{"location":"xmldocs/llama.native.nativeapi/#returns_19","title":"Returns","text":"

    Int32

    "},{"location":"xmldocs/llama.native.nativeapi/#llama_tokenizesafellamacontexthandle-string-encoding-int32-int32-boolean","title":"llama_tokenize(SafeLLamaContextHandle, String, Encoding, Int32[], Int32, Boolean)","text":"

    Converts the provided text into tokens. The tokens pointer must be large enough to hold the resulting tokens. Returns the number of tokens on success (no more than n_max_tokens), or a negative number on failure, whose absolute value is the number of tokens that would have been returned.

    public static int llama_tokenize(SafeLLamaContextHandle ctx, string text, Encoding encoding, Int32[] tokens, int n_max_tokens, bool add_bos)\n
    "},{"location":"xmldocs/llama.native.nativeapi/#parameters_27","title":"Parameters","text":"

    ctx SafeLLamaContextHandle

    text String

    encoding Encoding

    tokens Int32[]

    n_max_tokens Int32

    add_bos Boolean

    "},{"location":"xmldocs/llama.native.nativeapi/#returns_20","title":"Returns","text":"

    Int32
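    The negative-return convention above suggests a resize-and-retry pattern; a sketch:

```csharp
using System.Text;

var tokens = new Int32[64];
int n = NativeApi.llama_tokenize(ctx, "Hello, world!", Encoding.UTF8, tokens, tokens.Length, true);
if (n < 0)
{
    // -n is the number of tokens that would have been returned.
    tokens = new Int32[-n];
    n = NativeApi.llama_tokenize(ctx, "Hello, world!", Encoding.UTF8, tokens, tokens.Length, true);
}
```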

    "},{"location":"xmldocs/llama.native.nativeapi/#llama_tokenize_nativesafellamacontexthandle-sbyte-int32-int32-boolean","title":"llama_tokenize_native(SafeLLamaContextHandle, SByte[], Int32[], Int32, Boolean)","text":"
    public static int llama_tokenize_native(SafeLLamaContextHandle ctx, SByte[] text, Int32[] tokens, int n_max_tokens, bool add_bos)\n
    "},{"location":"xmldocs/llama.native.nativeapi/#parameters_28","title":"Parameters","text":"

    ctx SafeLLamaContextHandle

    text SByte[]

    tokens Int32[]

    n_max_tokens Int32

    add_bos Boolean

    "},{"location":"xmldocs/llama.native.nativeapi/#returns_21","title":"Returns","text":"

    Int32

    "},{"location":"xmldocs/llama.native.nativeapi/#llama_n_vocabsafellamacontexthandle","title":"llama_n_vocab(SafeLLamaContextHandle)","text":"
    public static int llama_n_vocab(SafeLLamaContextHandle ctx)\n
    "},{"location":"xmldocs/llama.native.nativeapi/#parameters_29","title":"Parameters","text":"

    ctx SafeLLamaContextHandle

    "},{"location":"xmldocs/llama.native.nativeapi/#returns_22","title":"Returns","text":"

    Int32

    "},{"location":"xmldocs/llama.native.nativeapi/#llama_n_ctxsafellamacontexthandle","title":"llama_n_ctx(SafeLLamaContextHandle)","text":"
    public static int llama_n_ctx(SafeLLamaContextHandle ctx)\n
    "},{"location":"xmldocs/llama.native.nativeapi/#parameters_30","title":"Parameters","text":"

    ctx SafeLLamaContextHandle

    "},{"location":"xmldocs/llama.native.nativeapi/#returns_23","title":"Returns","text":"

    Int32

    "},{"location":"xmldocs/llama.native.nativeapi/#llama_n_embdsafellamacontexthandle","title":"llama_n_embd(SafeLLamaContextHandle)","text":"
    public static int llama_n_embd(SafeLLamaContextHandle ctx)\n
    "},{"location":"xmldocs/llama.native.nativeapi/#parameters_31","title":"Parameters","text":"

    ctx SafeLLamaContextHandle

    "},{"location":"xmldocs/llama.native.nativeapi/#returns_24","title":"Returns","text":"

    Int32

    "},{"location":"xmldocs/llama.native.nativeapi/#llama_get_logitssafellamacontexthandle","title":"llama_get_logits(SafeLLamaContextHandle)","text":"

    Token logits obtained from the last call to llama_eval(). The logits for the last token are stored in the last row. They can be mutated in order to change the probabilities of the next token. Rows: n_tokens; Cols: n_vocab.

    public static Single* llama_get_logits(SafeLLamaContextHandle ctx)\n
    "},{"location":"xmldocs/llama.native.nativeapi/#parameters_32","title":"Parameters","text":"

    ctx SafeLLamaContextHandle

    "},{"location":"xmldocs/llama.native.nativeapi/#returns_25","title":"Returns","text":"

    Single*

    "},{"location":"xmldocs/llama.native.nativeapi/#llama_get_embeddingssafellamacontexthandle","title":"llama_get_embeddings(SafeLLamaContextHandle)","text":"

    Gets the embeddings for the input. Shape: [n_embd] (1-dimensional).

    public static Single* llama_get_embeddings(SafeLLamaContextHandle ctx)\n
    "},{"location":"xmldocs/llama.native.nativeapi/#parameters_33","title":"Parameters","text":"

    ctx SafeLLamaContextHandle

    "},{"location":"xmldocs/llama.native.nativeapi/#returns_26","title":"Returns","text":"

    Single*

    "},{"location":"xmldocs/llama.native.nativeapi/#llama_token_to_strsafellamacontexthandle-int32","title":"llama_token_to_str(SafeLLamaContextHandle, Int32)","text":"

    Token Id -> String. Uses the vocabulary in the provided context

    public static IntPtr llama_token_to_str(SafeLLamaContextHandle ctx, int token)\n
    "},{"location":"xmldocs/llama.native.nativeapi/#parameters_34","title":"Parameters","text":"

    ctx SafeLLamaContextHandle

    token Int32

    "},{"location":"xmldocs/llama.native.nativeapi/#returns_27","title":"Returns","text":"

    IntPtr Pointer to a string.
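    The returned pointer refers to a native string owned by the context; a reading sketch (ANSI marshalling is an assumption and may mangle multi-byte pieces):

```csharp
IntPtr p = NativeApi.llama_token_to_str(ctx, token);
string piece = Marshal.PtrToStringAnsi(p); // encoding assumption; see note above
```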

    "},{"location":"xmldocs/llama.native.nativeapi/#llama_token_bos","title":"llama_token_bos()","text":"
    public static int llama_token_bos()\n
    "},{"location":"xmldocs/llama.native.nativeapi/#returns_28","title":"Returns","text":"

    Int32

    "},{"location":"xmldocs/llama.native.nativeapi/#llama_token_eos","title":"llama_token_eos()","text":"
    public static int llama_token_eos()\n
    "},{"location":"xmldocs/llama.native.nativeapi/#returns_29","title":"Returns","text":"

    Int32

    "},{"location":"xmldocs/llama.native.nativeapi/#llama_token_nl","title":"llama_token_nl()","text":"
    public static int llama_token_nl()\n
    "},{"location":"xmldocs/llama.native.nativeapi/#returns_30","title":"Returns","text":"

    Int32

    "},{"location":"xmldocs/llama.native.safellamacontexthandle/","title":"SafeLLamaContextHandle","text":"

    Namespace: LLama.Native

    public class SafeLLamaContextHandle : SafeLLamaHandleBase, System.IDisposable\n

    Inheritance Object \u2192 CriticalFinalizerObject \u2192 SafeHandle \u2192 SafeLLamaHandleBase \u2192 SafeLLamaContextHandle Implements IDisposable

    "},{"location":"xmldocs/llama.native.safellamacontexthandle/#properties","title":"Properties","text":""},{"location":"xmldocs/llama.native.safellamacontexthandle/#isinvalid","title":"IsInvalid","text":"
    public bool IsInvalid { get; }\n
    "},{"location":"xmldocs/llama.native.safellamacontexthandle/#property-value","title":"Property Value","text":"

    Boolean

    "},{"location":"xmldocs/llama.native.safellamacontexthandle/#isclosed","title":"IsClosed","text":"
    public bool IsClosed { get; }\n
    "},{"location":"xmldocs/llama.native.safellamacontexthandle/#property-value_1","title":"Property Value","text":"

    Boolean

    "},{"location":"xmldocs/llama.native.safellamacontexthandle/#constructors","title":"Constructors","text":""},{"location":"xmldocs/llama.native.safellamacontexthandle/#safellamacontexthandleintptr","title":"SafeLLamaContextHandle(IntPtr)","text":"
    public SafeLLamaContextHandle(IntPtr handle)\n
    "},{"location":"xmldocs/llama.native.safellamacontexthandle/#parameters","title":"Parameters","text":"

    handle IntPtr

    "},{"location":"xmldocs/llama.native.safellamacontexthandle/#methods","title":"Methods","text":""},{"location":"xmldocs/llama.native.safellamacontexthandle/#releasehandle","title":"ReleaseHandle()","text":"
    protected bool ReleaseHandle()\n
    "},{"location":"xmldocs/llama.native.safellamacontexthandle/#returns","title":"Returns","text":"

    Boolean

    "},{"location":"xmldocs/llama.native.safellamahandlebase/","title":"SafeLLamaHandleBase","text":"

    Namespace: LLama.Native

    public abstract class SafeLLamaHandleBase : System.Runtime.InteropServices.SafeHandle, System.IDisposable\n

    Inheritance Object \u2192 CriticalFinalizerObject \u2192 SafeHandle \u2192 SafeLLamaHandleBase Implements IDisposable

    "},{"location":"xmldocs/llama.native.safellamahandlebase/#properties","title":"Properties","text":""},{"location":"xmldocs/llama.native.safellamahandlebase/#isinvalid","title":"IsInvalid","text":"
    public bool IsInvalid { get; }\n
    "},{"location":"xmldocs/llama.native.safellamahandlebase/#property-value","title":"Property Value","text":"

    Boolean

    "},{"location":"xmldocs/llama.native.safellamahandlebase/#isclosed","title":"IsClosed","text":"
    public bool IsClosed { get; }\n
    "},{"location":"xmldocs/llama.native.safellamahandlebase/#property-value_1","title":"Property Value","text":"

    Boolean

    "},{"location":"xmldocs/llama.native.safellamahandlebase/#methods","title":"Methods","text":""},{"location":"xmldocs/llama.native.safellamahandlebase/#tostring","title":"ToString()","text":"
    public string ToString()\n
    "},{"location":"xmldocs/llama.native.safellamahandlebase/#returns","title":"Returns","text":"

    String

    "},{"location":"xmldocs/llama.oldversion.chatcompletion/","title":"ChatCompletion","text":"

    Namespace: LLama.OldVersion

    public class ChatCompletion : System.IEquatable`1[[LLama.OldVersion.ChatCompletion, LLamaSharp, Version=0.4.0.0, Culture=neutral, PublicKeyToken=null]]\n

    Inheritance Object \u2192 ChatCompletion Implements IEquatable<ChatCompletion>

    "},{"location":"xmldocs/llama.oldversion.chatcompletion/#properties","title":"Properties","text":""},{"location":"xmldocs/llama.oldversion.chatcompletion/#id","title":"Id","text":"
    public string Id { get; set; }\n
    "},{"location":"xmldocs/llama.oldversion.chatcompletion/#property-value","title":"Property Value","text":"

    String

    "},{"location":"xmldocs/llama.oldversion.chatcompletion/#object","title":"Object","text":"
    public string Object { get; set; }\n
    "},{"location":"xmldocs/llama.oldversion.chatcompletion/#property-value_1","title":"Property Value","text":"

    String

    "},{"location":"xmldocs/llama.oldversion.chatcompletion/#created","title":"Created","text":"
    public int Created { get; set; }\n
    "},{"location":"xmldocs/llama.oldversion.chatcompletion/#property-value_2","title":"Property Value","text":"

    Int32

    "},{"location":"xmldocs/llama.oldversion.chatcompletion/#model","title":"Model","text":"
    public string Model { get; set; }\n
    "},{"location":"xmldocs/llama.oldversion.chatcompletion/#property-value_3","title":"Property Value","text":"

    String

    "},{"location":"xmldocs/llama.oldversion.chatcompletion/#choices","title":"Choices","text":"
    public ChatCompletionChoice[] Choices { get; set; }\n
    "},{"location":"xmldocs/llama.oldversion.chatcompletion/#property-value_4","title":"Property Value","text":"

    ChatCompletionChoice[]

    "},{"location":"xmldocs/llama.oldversion.chatcompletion/#usage","title":"Usage","text":"
    public CompletionUsage Usage { get; set; }\n
    "},{"location":"xmldocs/llama.oldversion.chatcompletion/#property-value_5","title":"Property Value","text":"

    CompletionUsage

    "},{"location":"xmldocs/llama.oldversion.chatcompletion/#constructors","title":"Constructors","text":""},{"location":"xmldocs/llama.oldversion.chatcompletion/#chatcompletionstring-string-int32-string-chatcompletionchoice-completionusage","title":"ChatCompletion(String, String, Int32, String, ChatCompletionChoice[], CompletionUsage)","text":"
    public ChatCompletion(string Id, string Object, int Created, string Model, ChatCompletionChoice[] Choices, CompletionUsage Usage)\n
    "},{"location":"xmldocs/llama.oldversion.chatcompletion/#parameters","title":"Parameters","text":"

    Id String

    Object String

    Created Int32

    Model String

    Choices ChatCompletionChoice[]

    Usage CompletionUsage

    "},{"location":"xmldocs/llama.oldversion.chatcompletion/#methods","title":"Methods","text":""},{"location":"xmldocs/llama.oldversion.chatcompletion/#tostring","title":"ToString()","text":"
    public string ToString()\n
    "},{"location":"xmldocs/llama.oldversion.chatcompletion/#returns","title":"Returns","text":"

    String

    "},{"location":"xmldocs/llama.oldversion.chatcompletion/#printmembersstringbuilder","title":"PrintMembers(StringBuilder)","text":"
    protected bool PrintMembers(StringBuilder builder)\n
    "},{"location":"xmldocs/llama.oldversion.chatcompletion/#parameters_1","title":"Parameters","text":"

    builder StringBuilder

    "},{"location":"xmldocs/llama.oldversion.chatcompletion/#returns_1","title":"Returns","text":"

    Boolean

    "},{"location":"xmldocs/llama.oldversion.chatcompletion/#gethashcode","title":"GetHashCode()","text":"
    public int GetHashCode()\n
    "},{"location":"xmldocs/llama.oldversion.chatcompletion/#returns_2","title":"Returns","text":"

    Int32

    "},{"location":"xmldocs/llama.oldversion.chatcompletion/#equalsobject","title":"Equals(Object)","text":"
    public bool Equals(object obj)\n
    "},{"location":"xmldocs/llama.oldversion.chatcompletion/#parameters_2","title":"Parameters","text":"

    obj Object

    "},{"location":"xmldocs/llama.oldversion.chatcompletion/#returns_3","title":"Returns","text":"

    Boolean

    "},{"location":"xmldocs/llama.oldversion.chatcompletion/#equalschatcompletion","title":"Equals(ChatCompletion)","text":"
    public bool Equals(ChatCompletion other)\n
    "},{"location":"xmldocs/llama.oldversion.chatcompletion/#parameters_3","title":"Parameters","text":"

    other ChatCompletion

    "},{"location":"xmldocs/llama.oldversion.chatcompletion/#returns_4","title":"Returns","text":"

    Boolean

    "},{"location":"xmldocs/llama.oldversion.chatcompletion/#clone","title":"<Clone>$()","text":"
    public ChatCompletion <Clone>$()\n
    "},{"location":"xmldocs/llama.oldversion.chatcompletion/#returns_5","title":"Returns","text":"

    ChatCompletion

    "},{"location":"xmldocs/llama.oldversion.chatcompletion/#deconstructstring-string-int32-string-chatcompletionchoice-completionusage","title":"Deconstruct(String&, String&, Int32&, String&, ChatCompletionChoice[]&, CompletionUsage&)","text":"
    public void Deconstruct(String& Id, String& Object, Int32& Created, String& Model, ChatCompletionChoice[]& Choices, CompletionUsage& Usage)\n
    "},{"location":"xmldocs/llama.oldversion.chatcompletion/#parameters_4","title":"Parameters","text":"

    Id String&

    Object String&

    Created Int32&

    Model String&

    Choices ChatCompletionChoice[]&

    Usage CompletionUsage&

    "},{"location":"xmldocs/llama.oldversion.chatcompletionchoice/","title":"ChatCompletionChoice","text":"

    Namespace: LLama.OldVersion

    public class ChatCompletionChoice : System.IEquatable`1[[LLama.OldVersion.ChatCompletionChoice, LLamaSharp, Version=0.4.0.0, Culture=neutral, PublicKeyToken=null]]\n

    Inheritance Object \u2192 ChatCompletionChoice Implements IEquatable<ChatCompletionChoice>

    "},{"location":"xmldocs/llama.oldversion.chatcompletionchoice/#properties","title":"Properties","text":""},{"location":"xmldocs/llama.oldversion.chatcompletionchoice/#index","title":"Index","text":"
    public int Index { get; set; }\n
    "},{"location":"xmldocs/llama.oldversion.chatcompletionchoice/#property-value","title":"Property Value","text":"

    Int32

    "},{"location":"xmldocs/llama.oldversion.chatcompletionchoice/#message","title":"Message","text":"
    public ChatCompletionMessage Message { get; set; }\n
    "},{"location":"xmldocs/llama.oldversion.chatcompletionchoice/#property-value_1","title":"Property Value","text":"

    ChatCompletionMessage

    "},{"location":"xmldocs/llama.oldversion.chatcompletionchoice/#finishreason","title":"FinishReason","text":"
    public string FinishReason { get; set; }\n
    "},{"location":"xmldocs/llama.oldversion.chatcompletionchoice/#property-value_2","title":"Property Value","text":"

    String

    "},{"location":"xmldocs/llama.oldversion.chatcompletionchoice/#constructors","title":"Constructors","text":""},{"location":"xmldocs/llama.oldversion.chatcompletionchoice/#chatcompletionchoiceint32-chatcompletionmessage-string","title":"ChatCompletionChoice(Int32, ChatCompletionMessage, String)","text":"
    public ChatCompletionChoice(int Index, ChatCompletionMessage Message, string FinishReason)\n
    "},{"location":"xmldocs/llama.oldversion.chatcompletionchoice/#parameters","title":"Parameters","text":"

    Index Int32

    Message ChatCompletionMessage

    FinishReason String

    "},{"location":"xmldocs/llama.oldversion.chatcompletionchoice/#methods","title":"Methods","text":""},{"location":"xmldocs/llama.oldversion.chatcompletionchoice/#tostring","title":"ToString()","text":"
    public string ToString()\n
    "},{"location":"xmldocs/llama.oldversion.chatcompletionchoice/#returns","title":"Returns","text":"

    String

    "},{"location":"xmldocs/llama.oldversion.chatcompletionchoice/#printmembersstringbuilder","title":"PrintMembers(StringBuilder)","text":"
    protected bool PrintMembers(StringBuilder builder)\n
    "},{"location":"xmldocs/llama.oldversion.chatcompletionchoice/#parameters_1","title":"Parameters","text":"

    builder StringBuilder

    "},{"location":"xmldocs/llama.oldversion.chatcompletionchoice/#returns_1","title":"Returns","text":"

    Boolean

    "},{"location":"xmldocs/llama.oldversion.chatcompletionchoice/#gethashcode","title":"GetHashCode()","text":"
    public int GetHashCode()\n
    "},{"location":"xmldocs/llama.oldversion.chatcompletionchoice/#returns_2","title":"Returns","text":"

    Int32

    "},{"location":"xmldocs/llama.oldversion.chatcompletionchoice/#equalsobject","title":"Equals(Object)","text":"
    public bool Equals(object obj)\n
    "},{"location":"xmldocs/llama.oldversion.chatcompletionchoice/#parameters_2","title":"Parameters","text":"

    obj Object

    "},{"location":"xmldocs/llama.oldversion.chatcompletionchoice/#returns_3","title":"Returns","text":"

    Boolean

    "},{"location":"xmldocs/llama.oldversion.chatcompletionchoice/#equalschatcompletionchoice","title":"Equals(ChatCompletionChoice)","text":"
    public bool Equals(ChatCompletionChoice other)\n
    "},{"location":"xmldocs/llama.oldversion.chatcompletionchoice/#parameters_3","title":"Parameters","text":"

    other ChatCompletionChoice

    "},{"location":"xmldocs/llama.oldversion.chatcompletionchoice/#returns_4","title":"Returns","text":"

    Boolean

    "},{"location":"xmldocs/llama.oldversion.chatcompletionchoice/#clone","title":"<Clone>$()","text":"
    public ChatCompletionChoice <Clone>$()\n
    "},{"location":"xmldocs/llama.oldversion.chatcompletionchoice/#returns_5","title":"Returns","text":"

    ChatCompletionChoice

    "},{"location":"xmldocs/llama.oldversion.chatcompletionchoice/#deconstructint32-chatcompletionmessage-string","title":"Deconstruct(Int32&, ChatCompletionMessage&, String&)","text":"
    public void Deconstruct(Int32& Index, ChatCompletionMessage& Message, String& FinishReason)\n
    "},{"location":"xmldocs/llama.oldversion.chatcompletionchoice/#parameters_4","title":"Parameters","text":"

    Index Int32&

    Message ChatCompletionMessage&

    FinishReason String&

    "},{"location":"xmldocs/llama.oldversion.chatcompletionchunk/","title":"ChatCompletionChunk","text":"

    Namespace: LLama.OldVersion

    public class ChatCompletionChunk : System.IEquatable`1[[LLama.OldVersion.ChatCompletionChunk, LLamaSharp, Version=0.4.0.0, Culture=neutral, PublicKeyToken=null]]\n

    Inheritance Object \u2192 ChatCompletionChunk Implements IEquatable<ChatCompletionChunk>

    "},{"location":"xmldocs/llama.oldversion.chatcompletionchunk/#properties","title":"Properties","text":""},{"location":"xmldocs/llama.oldversion.chatcompletionchunk/#id","title":"Id","text":"
    public string Id { get; set; }\n
    "},{"location":"xmldocs/llama.oldversion.chatcompletionchunk/#property-value","title":"Property Value","text":"

    String

    "},{"location":"xmldocs/llama.oldversion.chatcompletionchunk/#model","title":"Model","text":"
    public string Model { get; set; }\n
    "},{"location":"xmldocs/llama.oldversion.chatcompletionchunk/#property-value_1","title":"Property Value","text":"

    String

    "},{"location":"xmldocs/llama.oldversion.chatcompletionchunk/#object","title":"Object","text":"
    public string Object { get; set; }\n
    "},{"location":"xmldocs/llama.oldversion.chatcompletionchunk/#property-value_2","title":"Property Value","text":"

    String

    "},{"location":"xmldocs/llama.oldversion.chatcompletionchunk/#created","title":"Created","text":"
    public int Created { get; set; }\n
    "},{"location":"xmldocs/llama.oldversion.chatcompletionchunk/#property-value_3","title":"Property Value","text":"

    Int32

    "},{"location":"xmldocs/llama.oldversion.chatcompletionchunk/#choices","title":"Choices","text":"
    public ChatCompletionChunkChoice[] Choices { get; set; }\n
    "},{"location":"xmldocs/llama.oldversion.chatcompletionchunk/#property-value_4","title":"Property Value","text":"

    ChatCompletionChunkChoice[]

    "},{"location":"xmldocs/llama.oldversion.chatcompletionchunk/#constructors","title":"Constructors","text":""},{"location":"xmldocs/llama.oldversion.chatcompletionchunk/#chatcompletionchunkstring-string-string-int32-chatcompletionchunkchoice","title":"ChatCompletionChunk(String, String, String, Int32, ChatCompletionChunkChoice[])","text":"
    public ChatCompletionChunk(string Id, string Model, string Object, int Created, ChatCompletionChunkChoice[] Choices)\n
    "},{"location":"xmldocs/llama.oldversion.chatcompletionchunk/#parameters","title":"Parameters","text":"

    Id String

    Model String

    Object String

    Created Int32

    Choices ChatCompletionChunkChoice[]

    "},{"location":"xmldocs/llama.oldversion.chatcompletionchunk/#methods","title":"Methods","text":""},{"location":"xmldocs/llama.oldversion.chatcompletionchunk/#tostring","title":"ToString()","text":"
    public string ToString()\n
    "},{"location":"xmldocs/llama.oldversion.chatcompletionchunk/#returns","title":"Returns","text":"

    String

    "},{"location":"xmldocs/llama.oldversion.chatcompletionchunk/#printmembersstringbuilder","title":"PrintMembers(StringBuilder)","text":"
    protected bool PrintMembers(StringBuilder builder)\n
    "},{"location":"xmldocs/llama.oldversion.chatcompletionchunk/#parameters_1","title":"Parameters","text":"

    builder StringBuilder

    "},{"location":"xmldocs/llama.oldversion.chatcompletionchunk/#returns_1","title":"Returns","text":"

    Boolean

    "},{"location":"xmldocs/llama.oldversion.chatcompletionchunk/#gethashcode","title":"GetHashCode()","text":"
    public int GetHashCode()\n
    "},{"location":"xmldocs/llama.oldversion.chatcompletionchunk/#returns_2","title":"Returns","text":"

    Int32

    "},{"location":"xmldocs/llama.oldversion.chatcompletionchunk/#equalsobject","title":"Equals(Object)","text":"
    public bool Equals(object obj)\n
    "},{"location":"xmldocs/llama.oldversion.chatcompletionchunk/#parameters_2","title":"Parameters","text":"

    obj Object

    "},{"location":"xmldocs/llama.oldversion.chatcompletionchunk/#returns_3","title":"Returns","text":"

    Boolean

    "},{"location":"xmldocs/llama.oldversion.chatcompletionchunk/#equalschatcompletionchunk","title":"Equals(ChatCompletionChunk)","text":"
    public bool Equals(ChatCompletionChunk other)\n
    "},{"location":"xmldocs/llama.oldversion.chatcompletionchunk/#parameters_3","title":"Parameters","text":"

    other ChatCompletionChunk

    "},{"location":"xmldocs/llama.oldversion.chatcompletionchunk/#returns_4","title":"Returns","text":"

    Boolean

    "},{"location":"xmldocs/llama.oldversion.chatcompletionchunk/#clone","title":"<Clone>$()","text":"
    public ChatCompletionChunk <Clone>$()\n
    "},{"location":"xmldocs/llama.oldversion.chatcompletionchunk/#returns_5","title":"Returns","text":"

    ChatCompletionChunk

    "},{"location":"xmldocs/llama.oldversion.chatcompletionchunk/#deconstructstring-string-string-int32-chatcompletionchunkchoice","title":"Deconstruct(String&, String&, String&, Int32&, ChatCompletionChunkChoice[]&)","text":"
    public void Deconstruct(String& Id, String& Model, String& Object, Int32& Created, ChatCompletionChunkChoice[]& Choices)\n
    "},{"location":"xmldocs/llama.oldversion.chatcompletionchunk/#parameters_4","title":"Parameters","text":"

    Id String&

    Model String&

    Object String&

    Created Int32&

    Choices ChatCompletionChunkChoice[]&
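
    ChatCompletionChunk is a C# record, which is why the compiler-generated `<Clone>$`, `PrintMembers`, and `Deconstruct` members appear above. Below is a minimal sketch of building and deconstructing one, assuming a project that references LLamaSharp 0.4; the ChatCompletionChunkChoice and ChatCompletionChunkDelta types used here are documented just below, and all sample values are made up for illustration.

```csharp
using System;
using LLama.OldVersion;

class ChunkDemo
{
    static void Main()
    {
        // Build a chunk via the documented positional constructor.
        var chunk = new ChatCompletionChunk(
            Id: "chunk-1",
            Model: "llama-7b",
            Object: "chat.completion.chunk",
            Created: 1687219200,
            Choices: new[]
            {
                new ChatCompletionChunkChoice(
                    Index: 0,
                    Delta: new ChatCompletionChunkDelta(Role: "assistant", Content: "Hello"),
                    FinishReason: null)
            });

        // Records deconstruct positionally (the Deconstruct overload above).
        var (id, model, _, created, choices) = chunk;
        Console.WriteLine($"{id} ({model}, created {created}): {choices[0].Delta.Content}");
    }
}
```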

    "},{"location":"xmldocs/llama.oldversion.chatcompletionchunkchoice/","title":"ChatCompletionChunkChoice","text":"

    Namespace: LLama.OldVersion

    public class ChatCompletionChunkChoice : System.IEquatable`1[[LLama.OldVersion.ChatCompletionChunkChoice, LLamaSharp, Version=0.4.0.0, Culture=neutral, PublicKeyToken=null]]\n

    Inheritance Object \u2192 ChatCompletionChunkChoice Implements IEquatable<ChatCompletionChunkChoice>

    "},{"location":"xmldocs/llama.oldversion.chatcompletionchunkchoice/#properties","title":"Properties","text":""},{"location":"xmldocs/llama.oldversion.chatcompletionchunkchoice/#index","title":"Index","text":"
    public int Index { get; set; }\n
    "},{"location":"xmldocs/llama.oldversion.chatcompletionchunkchoice/#property-value","title":"Property Value","text":"

    Int32

    "},{"location":"xmldocs/llama.oldversion.chatcompletionchunkchoice/#delta","title":"Delta","text":"
    public ChatCompletionChunkDelta Delta { get; set; }\n
    "},{"location":"xmldocs/llama.oldversion.chatcompletionchunkchoice/#property-value_1","title":"Property Value","text":"

    ChatCompletionChunkDelta

    "},{"location":"xmldocs/llama.oldversion.chatcompletionchunkchoice/#finishreason","title":"FinishReason","text":"
    public string FinishReason { get; set; }\n
    "},{"location":"xmldocs/llama.oldversion.chatcompletionchunkchoice/#property-value_2","title":"Property Value","text":"

    String

    "},{"location":"xmldocs/llama.oldversion.chatcompletionchunkchoice/#constructors","title":"Constructors","text":""},{"location":"xmldocs/llama.oldversion.chatcompletionchunkchoice/#chatcompletionchunkchoiceint32-chatcompletionchunkdelta-string","title":"ChatCompletionChunkChoice(Int32, ChatCompletionChunkDelta, String)","text":"
    public ChatCompletionChunkChoice(int Index, ChatCompletionChunkDelta Delta, string FinishReason)\n
    "},{"location":"xmldocs/llama.oldversion.chatcompletionchunkchoice/#parameters","title":"Parameters","text":"

    Index Int32

    Delta ChatCompletionChunkDelta

    FinishReason String

    "},{"location":"xmldocs/llama.oldversion.chatcompletionchunkchoice/#methods","title":"Methods","text":""},{"location":"xmldocs/llama.oldversion.chatcompletionchunkchoice/#tostring","title":"ToString()","text":"
    public string ToString()\n
    "},{"location":"xmldocs/llama.oldversion.chatcompletionchunkchoice/#returns","title":"Returns","text":"

    String

    "},{"location":"xmldocs/llama.oldversion.chatcompletionchunkchoice/#printmembersstringbuilder","title":"PrintMembers(StringBuilder)","text":"
    protected bool PrintMembers(StringBuilder builder)\n
    "},{"location":"xmldocs/llama.oldversion.chatcompletionchunkchoice/#parameters_1","title":"Parameters","text":"

    builder StringBuilder

    "},{"location":"xmldocs/llama.oldversion.chatcompletionchunkchoice/#returns_1","title":"Returns","text":"

    Boolean

    "},{"location":"xmldocs/llama.oldversion.chatcompletionchunkchoice/#gethashcode","title":"GetHashCode()","text":"
    public int GetHashCode()\n
    "},{"location":"xmldocs/llama.oldversion.chatcompletionchunkchoice/#returns_2","title":"Returns","text":"

    Int32

    "},{"location":"xmldocs/llama.oldversion.chatcompletionchunkchoice/#equalsobject","title":"Equals(Object)","text":"
    public bool Equals(object obj)\n
    "},{"location":"xmldocs/llama.oldversion.chatcompletionchunkchoice/#parameters_2","title":"Parameters","text":"

    obj Object

    "},{"location":"xmldocs/llama.oldversion.chatcompletionchunkchoice/#returns_3","title":"Returns","text":"

    Boolean

    "},{"location":"xmldocs/llama.oldversion.chatcompletionchunkchoice/#equalschatcompletionchunkchoice","title":"Equals(ChatCompletionChunkChoice)","text":"
    public bool Equals(ChatCompletionChunkChoice other)\n
    "},{"location":"xmldocs/llama.oldversion.chatcompletionchunkchoice/#parameters_3","title":"Parameters","text":"

    other ChatCompletionChunkChoice

    "},{"location":"xmldocs/llama.oldversion.chatcompletionchunkchoice/#returns_4","title":"Returns","text":"

    Boolean

    "},{"location":"xmldocs/llama.oldversion.chatcompletionchunkchoice/#clone","title":"<Clone>$()","text":"
    public ChatCompletionChunkChoice <Clone>$()\n
    "},{"location":"xmldocs/llama.oldversion.chatcompletionchunkchoice/#returns_5","title":"Returns","text":"

    ChatCompletionChunkChoice

    "},{"location":"xmldocs/llama.oldversion.chatcompletionchunkchoice/#deconstructint32-chatcompletionchunkdelta-string","title":"Deconstruct(Int32&, ChatCompletionChunkDelta&, String&)","text":"
    public void Deconstruct(Int32& Index, ChatCompletionChunkDelta& Delta, String& FinishReason)\n
    "},{"location":"xmldocs/llama.oldversion.chatcompletionchunkchoice/#parameters_4","title":"Parameters","text":"

    Index Int32&

    Delta ChatCompletionChunkDelta&

    FinishReason String&

    "},{"location":"xmldocs/llama.oldversion.chatcompletionchunkdelta/","title":"ChatCompletionChunkDelta","text":"

    Namespace: LLama.OldVersion

    public class ChatCompletionChunkDelta : System.IEquatable`1[[LLama.OldVersion.ChatCompletionChunkDelta, LLamaSharp, Version=0.4.0.0, Culture=neutral, PublicKeyToken=null]]\n

    Inheritance Object \u2192 ChatCompletionChunkDelta Implements IEquatable<ChatCompletionChunkDelta>

    "},{"location":"xmldocs/llama.oldversion.chatcompletionchunkdelta/#properties","title":"Properties","text":""},{"location":"xmldocs/llama.oldversion.chatcompletionchunkdelta/#role","title":"Role","text":"
    public string Role { get; set; }\n
    "},{"location":"xmldocs/llama.oldversion.chatcompletionchunkdelta/#property-value","title":"Property Value","text":"

    String

    "},{"location":"xmldocs/llama.oldversion.chatcompletionchunkdelta/#content","title":"Content","text":"
    public string Content { get; set; }\n
    "},{"location":"xmldocs/llama.oldversion.chatcompletionchunkdelta/#property-value_1","title":"Property Value","text":"

    String

    "},{"location":"xmldocs/llama.oldversion.chatcompletionchunkdelta/#constructors","title":"Constructors","text":""},{"location":"xmldocs/llama.oldversion.chatcompletionchunkdelta/#chatcompletionchunkdeltastring-string","title":"ChatCompletionChunkDelta(String, String)","text":"
    public ChatCompletionChunkDelta(string Role, string Content)\n
    "},{"location":"xmldocs/llama.oldversion.chatcompletionchunkdelta/#parameters","title":"Parameters","text":"

    Role String

    Content String

    "},{"location":"xmldocs/llama.oldversion.chatcompletionchunkdelta/#methods","title":"Methods","text":""},{"location":"xmldocs/llama.oldversion.chatcompletionchunkdelta/#tostring","title":"ToString()","text":"
    public string ToString()\n
    "},{"location":"xmldocs/llama.oldversion.chatcompletionchunkdelta/#returns","title":"Returns","text":"

    String

    "},{"location":"xmldocs/llama.oldversion.chatcompletionchunkdelta/#printmembersstringbuilder","title":"PrintMembers(StringBuilder)","text":"
    protected bool PrintMembers(StringBuilder builder)\n
    "},{"location":"xmldocs/llama.oldversion.chatcompletionchunkdelta/#parameters_1","title":"Parameters","text":"

    builder StringBuilder

    "},{"location":"xmldocs/llama.oldversion.chatcompletionchunkdelta/#returns_1","title":"Returns","text":"

    Boolean

    "},{"location":"xmldocs/llama.oldversion.chatcompletionchunkdelta/#gethashcode","title":"GetHashCode()","text":"
    public int GetHashCode()\n
    "},{"location":"xmldocs/llama.oldversion.chatcompletionchunkdelta/#returns_2","title":"Returns","text":"

    Int32

    "},{"location":"xmldocs/llama.oldversion.chatcompletionchunkdelta/#equalsobject","title":"Equals(Object)","text":"
    public bool Equals(object obj)\n
    "},{"location":"xmldocs/llama.oldversion.chatcompletionchunkdelta/#parameters_2","title":"Parameters","text":"

    obj Object

    "},{"location":"xmldocs/llama.oldversion.chatcompletionchunkdelta/#returns_3","title":"Returns","text":"

    Boolean

    "},{"location":"xmldocs/llama.oldversion.chatcompletionchunkdelta/#equalschatcompletionchunkdelta","title":"Equals(ChatCompletionChunkDelta)","text":"
    public bool Equals(ChatCompletionChunkDelta other)\n
    "},{"location":"xmldocs/llama.oldversion.chatcompletionchunkdelta/#parameters_3","title":"Parameters","text":"

    other ChatCompletionChunkDelta

    "},{"location":"xmldocs/llama.oldversion.chatcompletionchunkdelta/#returns_4","title":"Returns","text":"

    Boolean

    "},{"location":"xmldocs/llama.oldversion.chatcompletionchunkdelta/#clone","title":"<Clone>$()","text":"
    public ChatCompletionChunkDelta <Clone>$()\n
    "},{"location":"xmldocs/llama.oldversion.chatcompletionchunkdelta/#returns_5","title":"Returns","text":"

    ChatCompletionChunkDelta

    "},{"location":"xmldocs/llama.oldversion.chatcompletionchunkdelta/#deconstructstring-string","title":"Deconstruct(String&, String&)","text":"
    public void Deconstruct(String& Role, String& Content)\n
    "},{"location":"xmldocs/llama.oldversion.chatcompletionchunkdelta/#parameters_4","title":"Parameters","text":"

    Role String&

    Content String&
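
    ChatCompletionChunkDelta carries the incremental piece of a streamed reply, so a full message is recovered by concatenating Content across chunks. Here is a sketch under that assumption; the chunk source is hypothetical and would come from whatever streaming call produced the chunks.

```csharp
using System.Collections.Generic;
using System.Text;
using LLama.OldVersion;

static class DeltaAccumulator
{
    // Collapse a stream of chunks into the complete reply text by
    // appending each choice's Delta.Content in arrival order.
    public static string Collect(IEnumerable<ChatCompletionChunk> chunks)
    {
        var sb = new StringBuilder();
        foreach (var chunk in chunks)
            foreach (var choice in chunk.Choices)
                if (choice.Delta?.Content is string piece)
                    sb.Append(piece);
        return sb.ToString();
    }
}
```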

    "},{"location":"xmldocs/llama.oldversion.chatcompletionmessage/","title":"ChatCompletionMessage","text":"

    Namespace: LLama.OldVersion

    public class ChatCompletionMessage : System.IEquatable`1[[LLama.OldVersion.ChatCompletionMessage, LLamaSharp, Version=0.4.0.0, Culture=neutral, PublicKeyToken=null]]\n

    Inheritance Object \u2192 ChatCompletionMessage Implements IEquatable<ChatCompletionMessage>

    "},{"location":"xmldocs/llama.oldversion.chatcompletionmessage/#properties","title":"Properties","text":""},{"location":"xmldocs/llama.oldversion.chatcompletionmessage/#role","title":"Role","text":"
    public ChatRole Role { get; set; }\n
    "},{"location":"xmldocs/llama.oldversion.chatcompletionmessage/#property-value","title":"Property Value","text":"

    ChatRole

    "},{"location":"xmldocs/llama.oldversion.chatcompletionmessage/#content","title":"Content","text":"
    public string Content { get; set; }\n
    "},{"location":"xmldocs/llama.oldversion.chatcompletionmessage/#property-value_1","title":"Property Value","text":"

    String

    "},{"location":"xmldocs/llama.oldversion.chatcompletionmessage/#name","title":"Name","text":"
    public string Name { get; set; }\n
    "},{"location":"xmldocs/llama.oldversion.chatcompletionmessage/#property-value_2","title":"Property Value","text":"

    String

    "},{"location":"xmldocs/llama.oldversion.chatcompletionmessage/#constructors","title":"Constructors","text":""},{"location":"xmldocs/llama.oldversion.chatcompletionmessage/#chatcompletionmessagechatrole-string-string","title":"ChatCompletionMessage(ChatRole, String, String)","text":"
    public ChatCompletionMessage(ChatRole Role, string Content, string Name)\n
    "},{"location":"xmldocs/llama.oldversion.chatcompletionmessage/#parameters","title":"Parameters","text":"

    Role ChatRole

    Content String

    Name String

    "},{"location":"xmldocs/llama.oldversion.chatcompletionmessage/#methods","title":"Methods","text":""},{"location":"xmldocs/llama.oldversion.chatcompletionmessage/#tostring","title":"ToString()","text":"
    public string ToString()\n
    "},{"location":"xmldocs/llama.oldversion.chatcompletionmessage/#returns","title":"Returns","text":"

    String

    "},{"location":"xmldocs/llama.oldversion.chatcompletionmessage/#printmembersstringbuilder","title":"PrintMembers(StringBuilder)","text":"
    protected bool PrintMembers(StringBuilder builder)\n
    "},{"location":"xmldocs/llama.oldversion.chatcompletionmessage/#parameters_1","title":"Parameters","text":"

    builder StringBuilder

    "},{"location":"xmldocs/llama.oldversion.chatcompletionmessage/#returns_1","title":"Returns","text":"

    Boolean

    "},{"location":"xmldocs/llama.oldversion.chatcompletionmessage/#gethashcode","title":"GetHashCode()","text":"
    public int GetHashCode()\n
    "},{"location":"xmldocs/llama.oldversion.chatcompletionmessage/#returns_2","title":"Returns","text":"

    Int32

    "},{"location":"xmldocs/llama.oldversion.chatcompletionmessage/#equalsobject","title":"Equals(Object)","text":"
    public bool Equals(object obj)\n
    "},{"location":"xmldocs/llama.oldversion.chatcompletionmessage/#parameters_2","title":"Parameters","text":"

    obj Object

    "},{"location":"xmldocs/llama.oldversion.chatcompletionmessage/#returns_3","title":"Returns","text":"

    Boolean

    "},{"location":"xmldocs/llama.oldversion.chatcompletionmessage/#equalschatcompletionmessage","title":"Equals(ChatCompletionMessage)","text":"
    public bool Equals(ChatCompletionMessage other)\n
    "},{"location":"xmldocs/llama.oldversion.chatcompletionmessage/#parameters_3","title":"Parameters","text":"

    other ChatCompletionMessage

    "},{"location":"xmldocs/llama.oldversion.chatcompletionmessage/#returns_4","title":"Returns","text":"

    Boolean

    "},{"location":"xmldocs/llama.oldversion.chatcompletionmessage/#clone","title":"<Clone>$()","text":"
    public ChatCompletionMessage <Clone>$()\n
    "},{"location":"xmldocs/llama.oldversion.chatcompletionmessage/#returns_5","title":"Returns","text":"

    ChatCompletionMessage

    "},{"location":"xmldocs/llama.oldversion.chatcompletionmessage/#deconstructchatrole-string-string","title":"Deconstruct(ChatRole&, String&, String&)","text":"
    public void Deconstruct(ChatRole& Role, String& Content, String& Name)\n
    "},{"location":"xmldocs/llama.oldversion.chatcompletionmessage/#parameters_4","title":"Parameters","text":"

    Role ChatRole&

    Content String&

    Name String&

    "},{"location":"xmldocs/llama.oldversion.chatmessagerecord/","title":"ChatMessageRecord","text":"

    Namespace: LLama.OldVersion

    public class ChatMessageRecord : System.IEquatable`1[[LLama.OldVersion.ChatMessageRecord, LLamaSharp, Version=0.4.0.0, Culture=neutral, PublicKeyToken=null]]\n

    Inheritance Object \u2192 ChatMessageRecord Implements IEquatable<ChatMessageRecord>

    "},{"location":"xmldocs/llama.oldversion.chatmessagerecord/#properties","title":"Properties","text":""},{"location":"xmldocs/llama.oldversion.chatmessagerecord/#message","title":"Message","text":"
    public ChatCompletionMessage Message { get; set; }\n
    "},{"location":"xmldocs/llama.oldversion.chatmessagerecord/#property-value","title":"Property Value","text":"

    ChatCompletionMessage

    "},{"location":"xmldocs/llama.oldversion.chatmessagerecord/#time","title":"Time","text":"
    public DateTime Time { get; set; }\n
    "},{"location":"xmldocs/llama.oldversion.chatmessagerecord/#property-value_1","title":"Property Value","text":"

    DateTime

    "},{"location":"xmldocs/llama.oldversion.chatmessagerecord/#constructors","title":"Constructors","text":""},{"location":"xmldocs/llama.oldversion.chatmessagerecord/#chatmessagerecordchatcompletionmessage-datetime","title":"ChatMessageRecord(ChatCompletionMessage, DateTime)","text":"
    public ChatMessageRecord(ChatCompletionMessage Message, DateTime Time)\n
    "},{"location":"xmldocs/llama.oldversion.chatmessagerecord/#parameters","title":"Parameters","text":"

    Message ChatCompletionMessage

    Time DateTime

    "},{"location":"xmldocs/llama.oldversion.chatmessagerecord/#methods","title":"Methods","text":""},{"location":"xmldocs/llama.oldversion.chatmessagerecord/#tostring","title":"ToString()","text":"
    public string ToString()\n
    "},{"location":"xmldocs/llama.oldversion.chatmessagerecord/#returns","title":"Returns","text":"

    String

    "},{"location":"xmldocs/llama.oldversion.chatmessagerecord/#printmembersstringbuilder","title":"PrintMembers(StringBuilder)","text":"
    protected bool PrintMembers(StringBuilder builder)\n
    "},{"location":"xmldocs/llama.oldversion.chatmessagerecord/#parameters_1","title":"Parameters","text":"

    builder StringBuilder

    "},{"location":"xmldocs/llama.oldversion.chatmessagerecord/#returns_1","title":"Returns","text":"

    Boolean

    "},{"location":"xmldocs/llama.oldversion.chatmessagerecord/#gethashcode","title":"GetHashCode()","text":"
    public int GetHashCode()\n
    "},{"location":"xmldocs/llama.oldversion.chatmessagerecord/#returns_2","title":"Returns","text":"

    Int32

    "},{"location":"xmldocs/llama.oldversion.chatmessagerecord/#equalsobject","title":"Equals(Object)","text":"
    public bool Equals(object obj)\n
    "},{"location":"xmldocs/llama.oldversion.chatmessagerecord/#parameters_2","title":"Parameters","text":"

    obj Object

    "},{"location":"xmldocs/llama.oldversion.chatmessagerecord/#returns_3","title":"Returns","text":"

    Boolean

    "},{"location":"xmldocs/llama.oldversion.chatmessagerecord/#equalschatmessagerecord","title":"Equals(ChatMessageRecord)","text":"
    public bool Equals(ChatMessageRecord other)\n
    "},{"location":"xmldocs/llama.oldversion.chatmessagerecord/#parameters_3","title":"Parameters","text":"

    other ChatMessageRecord

    "},{"location":"xmldocs/llama.oldversion.chatmessagerecord/#returns_4","title":"Returns","text":"

    Boolean

    "},{"location":"xmldocs/llama.oldversion.chatmessagerecord/#clone","title":"<Clone>$()","text":"
    public ChatMessageRecord <Clone>$()\n
    "},{"location":"xmldocs/llama.oldversion.chatmessagerecord/#returns_5","title":"Returns","text":"

    ChatMessageRecord

    "},{"location":"xmldocs/llama.oldversion.chatmessagerecord/#deconstructchatcompletionmessage-datetime","title":"Deconstruct(ChatCompletionMessage&, DateTime&)","text":"
    public void Deconstruct(ChatCompletionMessage& Message, DateTime& Time)\n
    "},{"location":"xmldocs/llama.oldversion.chatmessagerecord/#parameters_4","title":"Parameters","text":"

    Message ChatCompletionMessage&

    Time DateTime&
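
    ChatMessageRecord simply pairs a ChatCompletionMessage with a DateTime, which makes it a natural element for a timestamped chat log. A sketch of appending one follows; because the ChatRole members are not listed in these docs, the role is taken as a parameter rather than hard-coded.

```csharp
using System;
using System.Collections.Generic;
using LLama.OldVersion;

static class HistoryDemo
{
    // Append a timestamped entry to a chat log.
    public static void Append(List<ChatMessageRecord> history, ChatRole role, string content)
    {
        var message = new ChatCompletionMessage(role, content, Name: null);
        history.Add(new ChatMessageRecord(message, DateTime.UtcNow));
    }
}
```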

    "},{"location":"xmldocs/llama.oldversion.chatrole/","title":"ChatRole","text":"

    Namespace: LLama.OldVersion

    public enum ChatRole\n

    Inheritance Object \u2192 ValueType \u2192 Enum \u2192 ChatRole Implements IComparable, IFormattable, IConvertible

    "},{"location":"xmldocs/llama.oldversion.chatrole/#fields","title":"Fields","text":"Name Value Description"},{"location":"xmldocs/llama.oldversion.chatsession-1/","title":"ChatSession<T>","text":"

    Namespace: LLama.OldVersion

    public class ChatSession<T>\n
    "},{"location":"xmldocs/llama.oldversion.chatsession-1/#type-parameters","title":"Type Parameters","text":"

    T

    Inheritance Object \u2192 ChatSession<T>

    "},{"location":"xmldocs/llama.oldversion.chatsession-1/#constructors","title":"Constructors","text":""},{"location":"xmldocs/llama.oldversion.chatsession-1/#chatsessiont_1","title":"ChatSession(T)","text":"
    public ChatSession(T model)\n
    "},{"location":"xmldocs/llama.oldversion.chatsession-1/#parameters","title":"Parameters","text":"

    model T

    "},{"location":"xmldocs/llama.oldversion.chatsession-1/#methods","title":"Methods","text":""},{"location":"xmldocs/llama.oldversion.chatsession-1/#chatstring-string-string","title":"Chat(String, String, String)","text":"
    public IEnumerable<string> Chat(string text, string prompt, string encoding)\n
    "},{"location":"xmldocs/llama.oldversion.chatsession-1/#parameters_1","title":"Parameters","text":"

    text String

    prompt String

    encoding String

    "},{"location":"xmldocs/llama.oldversion.chatsession-1/#returns","title":"Returns","text":"

    IEnumerable<String>

    "},{"location":"xmldocs/llama.oldversion.chatsession-1/#withpromptstring-string","title":"WithPrompt(String, String)","text":"
    public ChatSession<T> WithPrompt(string prompt, string encoding)\n
    "},{"location":"xmldocs/llama.oldversion.chatsession-1/#parameters_2","title":"Parameters","text":"

    prompt String

    encoding String

    "},{"location":"xmldocs/llama.oldversion.chatsession-1/#returns_1","title":"Returns","text":"

    ChatSession<T>

    "},{"location":"xmldocs/llama.oldversion.chatsession-1/#withpromptfilestring-string","title":"WithPromptFile(String, String)","text":"
    public ChatSession<T> WithPromptFile(string promptFilename, string encoding)\n
    "},{"location":"xmldocs/llama.oldversion.chatsession-1/#parameters_3","title":"Parameters","text":"

    promptFilename String

    encoding String

    "},{"location":"xmldocs/llama.oldversion.chatsession-1/#returns_2","title":"Returns","text":"

    ChatSession<T>

    "},{"location":"xmldocs/llama.oldversion.chatsession-1/#withantipromptstring","title":"WithAntiprompt(String[])","text":"

    Sets the keywords used to split the return value of the chat AI.

    public ChatSession<T> WithAntiprompt(String[] antiprompt)\n
    "},{"location":"xmldocs/llama.oldversion.chatsession-1/#parameters_4","title":"Parameters","text":"

    antiprompt String[]

    "},{"location":"xmldocs/llama.oldversion.chatsession-1/#returns_3","title":"Returns","text":"

    ChatSession<T>
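
    Because each With* method returns the session itself, configuration chains fluently before Chat streams the reply piece by piece. A sketch follows, assuming T is constrained to IChatModel (documented later in this section), that "UTF-8" is an accepted encoding name, and that a null prompt is allowed on Chat once a prompt has been set; none of these details is stated in the listing above.

```csharp
using System;
using LLama.OldVersion;

static class SessionDemo
{
    // Model construction is elided; any IChatModel implementation works.
    public static void Run<T>(T model) where T : IChatModel
    {
        var session = new ChatSession<T>(model)
            .WithPrompt("You are a helpful assistant.", "UTF-8")
            .WithAntiprompt(new[] { "User:" });

        // Chat yields the answer piece by piece; passing null for the
        // prompt (already set above) is an assumption.
        foreach (var piece in session.Chat("Hello!", prompt: null, encoding: "UTF-8"))
            Console.Write(piece);
    }
}
```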

    "},{"location":"xmldocs/llama.oldversion.completion/","title":"Completion","text":"

    Namespace: LLama.OldVersion

    public class Completion : System.IEquatable`1[[LLama.OldVersion.Completion, LLamaSharp, Version=0.4.0.0, Culture=neutral, PublicKeyToken=null]]\n

    Inheritance Object \u2192 Completion Implements IEquatable<Completion>

    "},{"location":"xmldocs/llama.oldversion.completion/#properties","title":"Properties","text":""},{"location":"xmldocs/llama.oldversion.completion/#id","title":"Id","text":"
    public string Id { get; set; }\n
    "},{"location":"xmldocs/llama.oldversion.completion/#property-value","title":"Property Value","text":"

    String

    "},{"location":"xmldocs/llama.oldversion.completion/#object","title":"Object","text":"
    public string Object { get; set; }\n
    "},{"location":"xmldocs/llama.oldversion.completion/#property-value_1","title":"Property Value","text":"

    String

    "},{"location":"xmldocs/llama.oldversion.completion/#created","title":"Created","text":"
    public int Created { get; set; }\n
    "},{"location":"xmldocs/llama.oldversion.completion/#property-value_2","title":"Property Value","text":"

    Int32

    "},{"location":"xmldocs/llama.oldversion.completion/#model","title":"Model","text":"
    public string Model { get; set; }\n
    "},{"location":"xmldocs/llama.oldversion.completion/#property-value_3","title":"Property Value","text":"

    String

    "},{"location":"xmldocs/llama.oldversion.completion/#choices","title":"Choices","text":"
    public CompletionChoice[] Choices { get; set; }\n
    "},{"location":"xmldocs/llama.oldversion.completion/#property-value_4","title":"Property Value","text":"

    CompletionChoice[]

    "},{"location":"xmldocs/llama.oldversion.completion/#usage","title":"Usage","text":"
    public CompletionUsage Usage { get; set; }\n
    "},{"location":"xmldocs/llama.oldversion.completion/#property-value_5","title":"Property Value","text":"

    CompletionUsage

    "},{"location":"xmldocs/llama.oldversion.completion/#constructors","title":"Constructors","text":""},{"location":"xmldocs/llama.oldversion.completion/#completionstring-string-int32-string-completionchoice-completionusage","title":"Completion(String, String, Int32, String, CompletionChoice[], CompletionUsage)","text":"
    public Completion(string Id, string Object, int Created, string Model, CompletionChoice[] Choices, CompletionUsage Usage)\n
    "},{"location":"xmldocs/llama.oldversion.completion/#parameters","title":"Parameters","text":"

    Id String

    Object String

    Created Int32

    Model String

    Choices CompletionChoice[]

    Usage CompletionUsage

    "},{"location":"xmldocs/llama.oldversion.completion/#methods","title":"Methods","text":""},{"location":"xmldocs/llama.oldversion.completion/#tostring","title":"ToString()","text":"
    public string ToString()\n
    "},{"location":"xmldocs/llama.oldversion.completion/#returns","title":"Returns","text":"

    String

    "},{"location":"xmldocs/llama.oldversion.completion/#printmembersstringbuilder","title":"PrintMembers(StringBuilder)","text":"
    protected bool PrintMembers(StringBuilder builder)\n
    "},{"location":"xmldocs/llama.oldversion.completion/#parameters_1","title":"Parameters","text":"

    builder StringBuilder

    "},{"location":"xmldocs/llama.oldversion.completion/#returns_1","title":"Returns","text":"

    Boolean

    "},{"location":"xmldocs/llama.oldversion.completion/#gethashcode","title":"GetHashCode()","text":"
    public int GetHashCode()\n
    "},{"location":"xmldocs/llama.oldversion.completion/#returns_2","title":"Returns","text":"

    Int32

    "},{"location":"xmldocs/llama.oldversion.completion/#equalsobject","title":"Equals(Object)","text":"
    public bool Equals(object obj)\n
    "},{"location":"xmldocs/llama.oldversion.completion/#parameters_2","title":"Parameters","text":"

    obj Object

    "},{"location":"xmldocs/llama.oldversion.completion/#returns_3","title":"Returns","text":"

    Boolean

    "},{"location":"xmldocs/llama.oldversion.completion/#equalscompletion","title":"Equals(Completion)","text":"
    public bool Equals(Completion other)\n
    "},{"location":"xmldocs/llama.oldversion.completion/#parameters_3","title":"Parameters","text":"

    other Completion

    "},{"location":"xmldocs/llama.oldversion.completion/#returns_4","title":"Returns","text":"

    Boolean

    "},{"location":"xmldocs/llama.oldversion.completion/#clone","title":"<Clone>$()","text":"
    public Completion <Clone>$()\n
    "},{"location":"xmldocs/llama.oldversion.completion/#returns_5","title":"Returns","text":"

    Completion

    "},{"location":"xmldocs/llama.oldversion.completion/#deconstructstring-string-int32-string-completionchoice-completionusage","title":"Deconstruct(String&, String&, Int32&, String&, CompletionChoice[]&, CompletionUsage&)","text":"
    public void Deconstruct(String& Id, String& Object, Int32& Created, String& Model, CompletionChoice[]& Choices, CompletionUsage& Usage)\n
    "},{"location":"xmldocs/llama.oldversion.completion/#parameters_4","title":"Parameters","text":"

    Id String&

    Object String&

    Created Int32&

    Model String&

    Choices CompletionChoice[]&

    Usage CompletionUsage&

    "},{"location":"xmldocs/llama.oldversion.completionchoice/","title":"CompletionChoice","text":"

    Namespace: LLama.OldVersion

    public class CompletionChoice : System.IEquatable`1[[LLama.OldVersion.CompletionChoice, LLamaSharp, Version=0.4.0.0, Culture=neutral, PublicKeyToken=null]]\n

    Inheritance Object \u2192 CompletionChoice Implements IEquatable<CompletionChoice>

    "},{"location":"xmldocs/llama.oldversion.completionchoice/#properties","title":"Properties","text":""},{"location":"xmldocs/llama.oldversion.completionchoice/#text","title":"Text","text":"
    public string Text { get; set; }\n
    "},{"location":"xmldocs/llama.oldversion.completionchoice/#property-value","title":"Property Value","text":"

    String

    "},{"location":"xmldocs/llama.oldversion.completionchoice/#index","title":"Index","text":"
    public int Index { get; set; }\n
    "},{"location":"xmldocs/llama.oldversion.completionchoice/#property-value_1","title":"Property Value","text":"

    Int32

    "},{"location":"xmldocs/llama.oldversion.completionchoice/#logprobs","title":"Logprobs","text":"
    public CompletionLogprobs Logprobs { get; set; }\n
    "},{"location":"xmldocs/llama.oldversion.completionchoice/#property-value_2","title":"Property Value","text":"

    CompletionLogprobs

    "},{"location":"xmldocs/llama.oldversion.completionchoice/#finishreason","title":"FinishReason","text":"
    public string FinishReason { get; set; }\n
    "},{"location":"xmldocs/llama.oldversion.completionchoice/#property-value_3","title":"Property Value","text":"

    String

    "},{"location":"xmldocs/llama.oldversion.completionchoice/#constructors","title":"Constructors","text":""},{"location":"xmldocs/llama.oldversion.completionchoice/#completionchoicestring-int32-completionlogprobs-string","title":"CompletionChoice(String, Int32, CompletionLogprobs, String)","text":"
    public CompletionChoice(string Text, int Index, CompletionLogprobs Logprobs, string FinishReason)\n
    "},{"location":"xmldocs/llama.oldversion.completionchoice/#parameters","title":"Parameters","text":"

    Text String

    Index Int32

    Logprobs CompletionLogprobs

    FinishReason String

    "},{"location":"xmldocs/llama.oldversion.completionchoice/#methods","title":"Methods","text":""},{"location":"xmldocs/llama.oldversion.completionchoice/#tostring","title":"ToString()","text":"
    public string ToString()\n
    "},{"location":"xmldocs/llama.oldversion.completionchoice/#returns","title":"Returns","text":"

    String

    "},{"location":"xmldocs/llama.oldversion.completionchoice/#printmembersstringbuilder","title":"PrintMembers(StringBuilder)","text":"
    protected bool PrintMembers(StringBuilder builder)\n
    "},{"location":"xmldocs/llama.oldversion.completionchoice/#parameters_1","title":"Parameters","text":"

    builder StringBuilder

    "},{"location":"xmldocs/llama.oldversion.completionchoice/#returns_1","title":"Returns","text":"

    Boolean

    "},{"location":"xmldocs/llama.oldversion.completionchoice/#gethashcode","title":"GetHashCode()","text":"
    public int GetHashCode()\n
    "},{"location":"xmldocs/llama.oldversion.completionchoice/#returns_2","title":"Returns","text":"

    Int32

    "},{"location":"xmldocs/llama.oldversion.completionchoice/#equalsobject","title":"Equals(Object)","text":"
    public bool Equals(object obj)\n
    "},{"location":"xmldocs/llama.oldversion.completionchoice/#parameters_2","title":"Parameters","text":"

    obj Object

    "},{"location":"xmldocs/llama.oldversion.completionchoice/#returns_3","title":"Returns","text":"

    Boolean

    "},{"location":"xmldocs/llama.oldversion.completionchoice/#equalscompletionchoice","title":"Equals(CompletionChoice)","text":"
    public bool Equals(CompletionChoice other)\n
    "},{"location":"xmldocs/llama.oldversion.completionchoice/#parameters_3","title":"Parameters","text":"

    other CompletionChoice

    "},{"location":"xmldocs/llama.oldversion.completionchoice/#returns_4","title":"Returns","text":"

    Boolean

    "},{"location":"xmldocs/llama.oldversion.completionchoice/#clone","title":"<Clone>$()","text":"
    public CompletionChoice <Clone>$()\n
    "},{"location":"xmldocs/llama.oldversion.completionchoice/#returns_5","title":"Returns","text":"

    CompletionChoice

    "},{"location":"xmldocs/llama.oldversion.completionchoice/#deconstructstring-int32-completionlogprobs-string","title":"Deconstruct(String&, Int32&, CompletionLogprobs&, String&)","text":"
    public void Deconstruct(String& Text, Int32& Index, CompletionLogprobs& Logprobs, String& FinishReason)\n
    "},{"location":"xmldocs/llama.oldversion.completionchoice/#parameters_4","title":"Parameters","text":"

    Text String&

    Index Int32&

    Logprobs CompletionLogprobs&

    FinishReason String&
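
    A Completion bundles its metadata with one CompletionChoice per returned candidate, so consuming a result is a matter of walking Choices. The sketch below only touches members documented above; the completion instance would come from an API call that is out of scope here.

```csharp
using System;
using LLama.OldVersion;

static class CompletionDemo
{
    // Print the completion's metadata and every returned choice.
    public static void Print(Completion completion)
    {
        Console.WriteLine($"{completion.Model} ({completion.Object}, id {completion.Id})");
        foreach (var choice in completion.Choices)
            Console.WriteLine($"[{choice.Index}] {choice.Text} (finish: {choice.FinishReason})");
    }
}
```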

    "},{"location":"xmldocs/llama.oldversion.completionchunk/","title":"CompletionChunk","text":"

    Namespace: LLama.OldVersion

    public class CompletionChunk : System.IEquatable`1[[LLama.OldVersion.CompletionChunk, LLamaSharp, Version=0.4.0.0, Culture=neutral, PublicKeyToken=null]]\n

    Inheritance Object \u2192 CompletionChunk Implements IEquatable<CompletionChunk>

    "},{"location":"xmldocs/llama.oldversion.completionchunk/#properties","title":"Properties","text":""},{"location":"xmldocs/llama.oldversion.completionchunk/#id","title":"Id","text":"
    public string Id { get; set; }\n
    "},{"location":"xmldocs/llama.oldversion.completionchunk/#property-value","title":"Property Value","text":"

    String

    "},{"location":"xmldocs/llama.oldversion.completionchunk/#object","title":"Object","text":"
    public string Object { get; set; }\n
    "},{"location":"xmldocs/llama.oldversion.completionchunk/#property-value_1","title":"Property Value","text":"

    String

    "},{"location":"xmldocs/llama.oldversion.completionchunk/#created","title":"Created","text":"
    public int Created { get; set; }\n
    "},{"location":"xmldocs/llama.oldversion.completionchunk/#property-value_2","title":"Property Value","text":"

    Int32

    "},{"location":"xmldocs/llama.oldversion.completionchunk/#model","title":"Model","text":"
    public string Model { get; set; }\n
    "},{"location":"xmldocs/llama.oldversion.completionchunk/#property-value_3","title":"Property Value","text":"

    String

    "},{"location":"xmldocs/llama.oldversion.completionchunk/#choices","title":"Choices","text":"
    public CompletionChoice[] Choices { get; set; }\n
    "},{"location":"xmldocs/llama.oldversion.completionchunk/#property-value_4","title":"Property Value","text":"

    CompletionChoice[]

    "},{"location":"xmldocs/llama.oldversion.completionchunk/#constructors","title":"Constructors","text":""},{"location":"xmldocs/llama.oldversion.completionchunk/#completionchunkstring-string-int32-string-completionchoice","title":"CompletionChunk(String, String, Int32, String, CompletionChoice[])","text":"
    public CompletionChunk(string Id, string Object, int Created, string Model, CompletionChoice[] Choices)\n
    "},{"location":"xmldocs/llama.oldversion.completionchunk/#parameters","title":"Parameters","text":"

    Id String

    Object String

    Created Int32

    Model String

    Choices CompletionChoice[]

    "},{"location":"xmldocs/llama.oldversion.completionchunk/#methods","title":"Methods","text":""},{"location":"xmldocs/llama.oldversion.completionchunk/#tostring","title":"ToString()","text":"
    public string ToString()\n
    "},{"location":"xmldocs/llama.oldversion.completionchunk/#returns","title":"Returns","text":"

    String

    "},{"location":"xmldocs/llama.oldversion.completionchunk/#printmembersstringbuilder","title":"PrintMembers(StringBuilder)","text":"
    protected bool PrintMembers(StringBuilder builder)\n
    "},{"location":"xmldocs/llama.oldversion.completionchunk/#parameters_1","title":"Parameters","text":"

    builder StringBuilder

    "},{"location":"xmldocs/llama.oldversion.completionchunk/#returns_1","title":"Returns","text":"

    Boolean

    "},{"location":"xmldocs/llama.oldversion.completionchunk/#gethashcode","title":"GetHashCode()","text":"
    public int GetHashCode()\n
    "},{"location":"xmldocs/llama.oldversion.completionchunk/#returns_2","title":"Returns","text":"

    Int32

    "},{"location":"xmldocs/llama.oldversion.completionchunk/#equalsobject","title":"Equals(Object)","text":"
    public bool Equals(object obj)\n
    "},{"location":"xmldocs/llama.oldversion.completionchunk/#parameters_2","title":"Parameters","text":"

    obj Object

    "},{"location":"xmldocs/llama.oldversion.completionchunk/#returns_3","title":"Returns","text":"

    Boolean

    "},{"location":"xmldocs/llama.oldversion.completionchunk/#equalscompletionchunk","title":"Equals(CompletionChunk)","text":"
    public bool Equals(CompletionChunk other)\n
    "},{"location":"xmldocs/llama.oldversion.completionchunk/#parameters_3","title":"Parameters","text":"

    other CompletionChunk

    "},{"location":"xmldocs/llama.oldversion.completionchunk/#returns_4","title":"Returns","text":"

    Boolean

    "},{"location":"xmldocs/llama.oldversion.completionchunk/#clone","title":"<Clone>$()","text":"
    public CompletionChunk <Clone>$()\n
    "},{"location":"xmldocs/llama.oldversion.completionchunk/#returns_5","title":"Returns","text":"

    CompletionChunk

    "},{"location":"xmldocs/llama.oldversion.completionchunk/#deconstructstring-string-int32-string-completionchoice","title":"Deconstruct(String&, String&, Int32&, String&, CompletionChoice[]&)","text":"
    public void Deconstruct(String& Id, String& Object, Int32& Created, String& Model, CompletionChoice[]& Choices)\n
    "},{"location":"xmldocs/llama.oldversion.completionchunk/#parameters_4","title":"Parameters","text":"

    Id String&

    Object String&

    Created Int32&

    Model String&

    Choices CompletionChoice[]&

    "},{"location":"xmldocs/llama.oldversion.completionlogprobs/","title":"CompletionLogprobs","text":"

    Namespace: LLama.OldVersion

    public class CompletionLogprobs : System.IEquatable`1[[LLama.OldVersion.CompletionLogprobs, LLamaSharp, Version=0.4.0.0, Culture=neutral, PublicKeyToken=null]]\n

    Inheritance Object \u2192 CompletionLogprobs Implements IEquatable<CompletionLogprobs>

    "},{"location":"xmldocs/llama.oldversion.completionlogprobs/#properties","title":"Properties","text":""},{"location":"xmldocs/llama.oldversion.completionlogprobs/#textoffset","title":"TextOffset","text":"
    public Int32[] TextOffset { get; set; }\n
    "},{"location":"xmldocs/llama.oldversion.completionlogprobs/#property-value","title":"Property Value","text":"

    Int32[]

    "},{"location":"xmldocs/llama.oldversion.completionlogprobs/#tokenlogprobs","title":"TokenLogProbs","text":"
    public Single[] TokenLogProbs { get; set; }\n
    "},{"location":"xmldocs/llama.oldversion.completionlogprobs/#property-value_1","title":"Property Value","text":"

    Single[]

    "},{"location":"xmldocs/llama.oldversion.completionlogprobs/#tokens","title":"Tokens","text":"
    public String[] Tokens { get; set; }\n
    "},{"location":"xmldocs/llama.oldversion.completionlogprobs/#property-value_2","title":"Property Value","text":"

    String[]

    "},{"location":"xmldocs/llama.oldversion.completionlogprobs/#toplogprobs","title":"TopLogprobs","text":"
    public Dictionary`2[] TopLogprobs { get; set; }\n
    "},{"location":"xmldocs/llama.oldversion.completionlogprobs/#property-value_3","title":"Property Value","text":"

    Dictionary`2[]

    "},{"location":"xmldocs/llama.oldversion.completionlogprobs/#constructors","title":"Constructors","text":""},{"location":"xmldocs/llama.oldversion.completionlogprobs/#completionlogprobsint32-single-string-dictionary2","title":"CompletionLogprobs(Int32[], Single[], String[], Dictionary`2[])","text":"
    public CompletionLogprobs(Int32[] TextOffset, Single[] TokenLogProbs, String[] Tokens, Dictionary`2[] TopLogprobs)\n
    "},{"location":"xmldocs/llama.oldversion.completionlogprobs/#parameters","title":"Parameters","text":"

    TextOffset Int32[]

    TokenLogProbs Single[]

    Tokens String[]

    TopLogprobs Dictionary`2[]

    "},{"location":"xmldocs/llama.oldversion.completionlogprobs/#methods","title":"Methods","text":""},{"location":"xmldocs/llama.oldversion.completionlogprobs/#tostring","title":"ToString()","text":"
    public string ToString()\n
    "},{"location":"xmldocs/llama.oldversion.completionlogprobs/#returns","title":"Returns","text":"

    String

    "},{"location":"xmldocs/llama.oldversion.completionlogprobs/#printmembersstringbuilder","title":"PrintMembers(StringBuilder)","text":"
    protected bool PrintMembers(StringBuilder builder)\n
    "},{"location":"xmldocs/llama.oldversion.completionlogprobs/#parameters_1","title":"Parameters","text":"

    builder StringBuilder

    "},{"location":"xmldocs/llama.oldversion.completionlogprobs/#returns_1","title":"Returns","text":"

    Boolean

    "},{"location":"xmldocs/llama.oldversion.completionlogprobs/#gethashcode","title":"GetHashCode()","text":"
    public int GetHashCode()\n
    "},{"location":"xmldocs/llama.oldversion.completionlogprobs/#returns_2","title":"Returns","text":"

    Int32

    "},{"location":"xmldocs/llama.oldversion.completionlogprobs/#equalsobject","title":"Equals(Object)","text":"
    public bool Equals(object obj)\n
    "},{"location":"xmldocs/llama.oldversion.completionlogprobs/#parameters_2","title":"Parameters","text":"

    obj Object

    "},{"location":"xmldocs/llama.oldversion.completionlogprobs/#returns_3","title":"Returns","text":"

    Boolean

    "},{"location":"xmldocs/llama.oldversion.completionlogprobs/#equalscompletionlogprobs","title":"Equals(CompletionLogprobs)","text":"
    public bool Equals(CompletionLogprobs other)\n
    "},{"location":"xmldocs/llama.oldversion.completionlogprobs/#parameters_3","title":"Parameters","text":"

    other CompletionLogprobs

    "},{"location":"xmldocs/llama.oldversion.completionlogprobs/#returns_4","title":"Returns","text":"

    Boolean

    "},{"location":"xmldocs/llama.oldversion.completionlogprobs/#clone","title":"<Clone>$()","text":"
    public CompletionLogprobs <Clone>$()\n
    "},{"location":"xmldocs/llama.oldversion.completionlogprobs/#returns_5","title":"Returns","text":"

    CompletionLogprobs

    "},{"location":"xmldocs/llama.oldversion.completionlogprobs/#deconstructint32-single-string-dictionary2","title":"Deconstruct(Int32[]&, Single[]&, String[]&, Dictionary`2[]&)","text":"
    public void Deconstruct(Int32[]& TextOffset, Single[]& TokenLogProbs, String[]& Tokens, Dictionary`2[]& TopLogprobs)\n
    "},{"location":"xmldocs/llama.oldversion.completionlogprobs/#parameters_4","title":"Parameters","text":"

    TextOffset Int32[]&

    TokenLogProbs Single[]&

    Tokens String[]&

    TopLogprobs Dictionary`2[]&
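
    TextOffset, TokenLogProbs, and Tokens read most naturally as parallel arrays, with entry i of each describing the same generated token; that indexing relationship matches OpenAI-style logprob payloads but is an assumption, not something these docs state.

```csharp
using System;
using LLama.OldVersion;

static class LogprobsDemo
{
    // Walk the (assumed) parallel arrays token by token.
    public static void Dump(CompletionLogprobs lp)
    {
        for (int i = 0; i < lp.Tokens.Length; i++)
            Console.WriteLine($"@{lp.TextOffset[i]} '{lp.Tokens[i]}' logprob={lp.TokenLogProbs[i]}");
    }
}
```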

    "},{"location":"xmldocs/llama.oldversion.completionusage/","title":"CompletionUsage","text":"

    Namespace: LLama.OldVersion

    public class CompletionUsage : System.IEquatable`1[[LLama.OldVersion.CompletionUsage, LLamaSharp, Version=0.4.0.0, Culture=neutral, PublicKeyToken=null]]\n

    Inheritance Object \u2192 CompletionUsage Implements IEquatable<CompletionUsage>

    "},{"location":"xmldocs/llama.oldversion.completionusage/#properties","title":"Properties","text":""},{"location":"xmldocs/llama.oldversion.completionusage/#prompttokens","title":"PromptTokens","text":"
    public int PromptTokens { get; set; }\n
    "},{"location":"xmldocs/llama.oldversion.completionusage/#property-value","title":"Property Value","text":"

    Int32

    "},{"location":"xmldocs/llama.oldversion.completionusage/#completiontokens","title":"CompletionTokens","text":"
    public int CompletionTokens { get; set; }\n
    "},{"location":"xmldocs/llama.oldversion.completionusage/#property-value_1","title":"Property Value","text":"

    Int32

    "},{"location":"xmldocs/llama.oldversion.completionusage/#totaltokens","title":"TotalTokens","text":"
    public int TotalTokens { get; set; }\n
    "},{"location":"xmldocs/llama.oldversion.completionusage/#property-value_2","title":"Property Value","text":"

    Int32

    "},{"location":"xmldocs/llama.oldversion.completionusage/#constructors","title":"Constructors","text":""},{"location":"xmldocs/llama.oldversion.completionusage/#completionusageint32-int32-int32","title":"CompletionUsage(Int32, Int32, Int32)","text":"
    public CompletionUsage(int PromptTokens, int CompletionTokens, int TotalTokens)\n
    "},{"location":"xmldocs/llama.oldversion.completionusage/#parameters","title":"Parameters","text":"

    PromptTokens Int32

    CompletionTokens Int32

    TotalTokens Int32

    "},{"location":"xmldocs/llama.oldversion.completionusage/#methods","title":"Methods","text":""},{"location":"xmldocs/llama.oldversion.completionusage/#tostring","title":"ToString()","text":"
    public string ToString()\n
    "},{"location":"xmldocs/llama.oldversion.completionusage/#returns","title":"Returns","text":"

    String

    "},{"location":"xmldocs/llama.oldversion.completionusage/#printmembersstringbuilder","title":"PrintMembers(StringBuilder)","text":"
    protected bool PrintMembers(StringBuilder builder)\n
    "},{"location":"xmldocs/llama.oldversion.completionusage/#parameters_1","title":"Parameters","text":"

    builder StringBuilder

    "},{"location":"xmldocs/llama.oldversion.completionusage/#returns_1","title":"Returns","text":"

    Boolean

    "},{"location":"xmldocs/llama.oldversion.completionusage/#gethashcode","title":"GetHashCode()","text":"
    public int GetHashCode()\n
    "},{"location":"xmldocs/llama.oldversion.completionusage/#returns_2","title":"Returns","text":"

    Int32

    "},{"location":"xmldocs/llama.oldversion.completionusage/#equalsobject","title":"Equals(Object)","text":"
    public bool Equals(object obj)\n
    "},{"location":"xmldocs/llama.oldversion.completionusage/#parameters_2","title":"Parameters","text":"

    obj Object

    "},{"location":"xmldocs/llama.oldversion.completionusage/#returns_3","title":"Returns","text":"

    Boolean

    "},{"location":"xmldocs/llama.oldversion.completionusage/#equalscompletionusage","title":"Equals(CompletionUsage)","text":"
    public bool Equals(CompletionUsage other)\n
    "},{"location":"xmldocs/llama.oldversion.completionusage/#parameters_3","title":"Parameters","text":"

    other CompletionUsage

    "},{"location":"xmldocs/llama.oldversion.completionusage/#returns_4","title":"Returns","text":"

    Boolean

    "},{"location":"xmldocs/llama.oldversion.completionusage/#clone","title":"<Clone>$()","text":"
    public CompletionUsage <Clone>$()\n
    "},{"location":"xmldocs/llama.oldversion.completionusage/#returns_5","title":"Returns","text":"

    CompletionUsage

    "},{"location":"xmldocs/llama.oldversion.completionusage/#deconstructint32-int32-int32","title":"Deconstruct(Int32&, Int32&, Int32&)","text":"
    public void Deconstruct(Int32& PromptTokens, Int32& CompletionTokens, Int32& TotalTokens)\n
    "},{"location":"xmldocs/llama.oldversion.completionusage/#parameters_4","title":"Parameters","text":"

    PromptTokens Int32&

    CompletionTokens Int32&

    TotalTokens Int32&
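
    In OpenAI-style usage objects, TotalTokens is normally the sum of the prompt and completion counts; treating that as an invariant is an assumption rather than a documented guarantee, but it makes a compact sanity check and shows the record's Deconstruct in use.

```csharp
using System;
using LLama.OldVersion;

static class UsageDemo
{
    public static void Check(CompletionUsage usage)
    {
        // Positional deconstruction via the Deconstruct overload above.
        var (prompt, completion, total) = usage;
        if (prompt + completion != total)
            Console.WriteLine($"Unexpected usage: {prompt} + {completion} != {total}");
    }
}
```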

    "},{"location":"xmldocs/llama.oldversion.embedding/","title":"Embedding","text":"

    Namespace: LLama.OldVersion

    public class Embedding : System.IEquatable`1[[LLama.OldVersion.Embedding, LLamaSharp, Version=0.4.0.0, Culture=neutral, PublicKeyToken=null]]\n

    Inheritance Object \u2192 Embedding Implements IEquatable<Embedding>

    "},{"location":"xmldocs/llama.oldversion.embedding/#properties","title":"Properties","text":""},{"location":"xmldocs/llama.oldversion.embedding/#object","title":"Object","text":"
    public string Object { get; set; }\n
    "},{"location":"xmldocs/llama.oldversion.embedding/#property-value","title":"Property Value","text":"

    String

    "},{"location":"xmldocs/llama.oldversion.embedding/#model","title":"Model","text":"
    public string Model { get; set; }\n
    "},{"location":"xmldocs/llama.oldversion.embedding/#property-value_1","title":"Property Value","text":"

    String

    "},{"location":"xmldocs/llama.oldversion.embedding/#data","title":"Data","text":"
    public EmbeddingData[] Data { get; set; }\n
    "},{"location":"xmldocs/llama.oldversion.embedding/#property-value_2","title":"Property Value","text":"

    EmbeddingData[]

    "},{"location":"xmldocs/llama.oldversion.embedding/#usage","title":"Usage","text":"
    public EmbeddingUsage Usage { get; set; }\n
    "},{"location":"xmldocs/llama.oldversion.embedding/#property-value_3","title":"Property Value","text":"

    EmbeddingUsage

    "},{"location":"xmldocs/llama.oldversion.embedding/#constructors","title":"Constructors","text":""},{"location":"xmldocs/llama.oldversion.embedding/#embeddingstring-string-embeddingdata-embeddingusage","title":"Embedding(String, String, EmbeddingData[], EmbeddingUsage)","text":"
    public Embedding(string Object, string Model, EmbeddingData[] Data, EmbeddingUsage Usage)\n
    "},{"location":"xmldocs/llama.oldversion.embedding/#parameters","title":"Parameters","text":"

    Object String

    Model String

    Data EmbeddingData[]

    Usage EmbeddingUsage

    "},{"location":"xmldocs/llama.oldversion.embedding/#methods","title":"Methods","text":""},{"location":"xmldocs/llama.oldversion.embedding/#tostring","title":"ToString()","text":"
    public string ToString()\n
    "},{"location":"xmldocs/llama.oldversion.embedding/#returns","title":"Returns","text":"

    String

    "},{"location":"xmldocs/llama.oldversion.embedding/#printmembersstringbuilder","title":"PrintMembers(StringBuilder)","text":"
    protected bool PrintMembers(StringBuilder builder)\n
    "},{"location":"xmldocs/llama.oldversion.embedding/#parameters_1","title":"Parameters","text":"

    builder StringBuilder

    "},{"location":"xmldocs/llama.oldversion.embedding/#returns_1","title":"Returns","text":"

    Boolean

    "},{"location":"xmldocs/llama.oldversion.embedding/#gethashcode","title":"GetHashCode()","text":"
    public int GetHashCode()\n
    "},{"location":"xmldocs/llama.oldversion.embedding/#returns_2","title":"Returns","text":"

    Int32

    "},{"location":"xmldocs/llama.oldversion.embedding/#equalsobject","title":"Equals(Object)","text":"
    public bool Equals(object obj)\n
    "},{"location":"xmldocs/llama.oldversion.embedding/#parameters_2","title":"Parameters","text":"

    obj Object

    "},{"location":"xmldocs/llama.oldversion.embedding/#returns_3","title":"Returns","text":"

    Boolean

    "},{"location":"xmldocs/llama.oldversion.embedding/#equalsembedding","title":"Equals(Embedding)","text":"
    public bool Equals(Embedding other)\n
    "},{"location":"xmldocs/llama.oldversion.embedding/#parameters_3","title":"Parameters","text":"

    other Embedding

    "},{"location":"xmldocs/llama.oldversion.embedding/#returns_4","title":"Returns","text":"

    Boolean

    "},{"location":"xmldocs/llama.oldversion.embedding/#clone","title":"<Clone>$()","text":"
    public Embedding <Clone>$()\n
    "},{"location":"xmldocs/llama.oldversion.embedding/#returns_5","title":"Returns","text":"

    Embedding

    "},{"location":"xmldocs/llama.oldversion.embedding/#deconstructstring-string-embeddingdata-embeddingusage","title":"Deconstruct(String&, String&, EmbeddingData[]&, EmbeddingUsage&)","text":"
    public void Deconstruct(String& Object, String& Model, EmbeddingData[]& Data, EmbeddingUsage& Usage)\n
    "},{"location":"xmldocs/llama.oldversion.embedding/#parameters_4","title":"Parameters","text":"

    Object String&

    Model String&

    Data EmbeddingData[]&

    Usage EmbeddingUsage&

    "},{"location":"xmldocs/llama.oldversion.embeddingdata/","title":"EmbeddingData","text":"

    Namespace: LLama.OldVersion

    public class EmbeddingData : System.IEquatable`1[[LLama.OldVersion.EmbeddingData, LLamaSharp, Version=0.4.0.0, Culture=neutral, PublicKeyToken=null]]\n

    Inheritance Object \u2192 EmbeddingData Implements IEquatable<EmbeddingData>

    "},{"location":"xmldocs/llama.oldversion.embeddingdata/#properties","title":"Properties","text":""},{"location":"xmldocs/llama.oldversion.embeddingdata/#index","title":"Index","text":"
    public int Index { get; set; }\n
    "},{"location":"xmldocs/llama.oldversion.embeddingdata/#property-value","title":"Property Value","text":"

    Int32

    "},{"location":"xmldocs/llama.oldversion.embeddingdata/#object","title":"Object","text":"
    public string Object { get; set; }\n
    "},{"location":"xmldocs/llama.oldversion.embeddingdata/#property-value_1","title":"Property Value","text":"

    String

    "},{"location":"xmldocs/llama.oldversion.embeddingdata/#embedding","title":"Embedding","text":"
    public Single[] Embedding { get; set; }\n
    "},{"location":"xmldocs/llama.oldversion.embeddingdata/#property-value_2","title":"Property Value","text":"

    Single[]

    "},{"location":"xmldocs/llama.oldversion.embeddingdata/#constructors","title":"Constructors","text":""},{"location":"xmldocs/llama.oldversion.embeddingdata/#embeddingdataint32-string-single","title":"EmbeddingData(Int32, String, Single[])","text":"
    public EmbeddingData(int Index, string Object, Single[] Embedding)\n
    "},{"location":"xmldocs/llama.oldversion.embeddingdata/#parameters","title":"Parameters","text":"

    Index Int32

    Object String

    Embedding Single[]

    "},{"location":"xmldocs/llama.oldversion.embeddingdata/#methods","title":"Methods","text":""},{"location":"xmldocs/llama.oldversion.embeddingdata/#tostring","title":"ToString()","text":"
    public string ToString()\n
    "},{"location":"xmldocs/llama.oldversion.embeddingdata/#returns","title":"Returns","text":"

    String

    "},{"location":"xmldocs/llama.oldversion.embeddingdata/#printmembersstringbuilder","title":"PrintMembers(StringBuilder)","text":"
    protected bool PrintMembers(StringBuilder builder)\n
    "},{"location":"xmldocs/llama.oldversion.embeddingdata/#parameters_1","title":"Parameters","text":"

    builder StringBuilder

    "},{"location":"xmldocs/llama.oldversion.embeddingdata/#returns_1","title":"Returns","text":"

    Boolean

    "},{"location":"xmldocs/llama.oldversion.embeddingdata/#gethashcode","title":"GetHashCode()","text":"
    public int GetHashCode()\n
    "},{"location":"xmldocs/llama.oldversion.embeddingdata/#returns_2","title":"Returns","text":"

    Int32

    "},{"location":"xmldocs/llama.oldversion.embeddingdata/#equalsobject","title":"Equals(Object)","text":"
    public bool Equals(object obj)\n
    "},{"location":"xmldocs/llama.oldversion.embeddingdata/#parameters_2","title":"Parameters","text":"

    obj Object

    "},{"location":"xmldocs/llama.oldversion.embeddingdata/#returns_3","title":"Returns","text":"

    Boolean

    "},{"location":"xmldocs/llama.oldversion.embeddingdata/#equalsembeddingdata","title":"Equals(EmbeddingData)","text":"
    public bool Equals(EmbeddingData other)\n
    "},{"location":"xmldocs/llama.oldversion.embeddingdata/#parameters_3","title":"Parameters","text":"

    other EmbeddingData

    "},{"location":"xmldocs/llama.oldversion.embeddingdata/#returns_4","title":"Returns","text":"

    Boolean

    "},{"location":"xmldocs/llama.oldversion.embeddingdata/#clone","title":"<Clone>$()","text":"
    public EmbeddingData <Clone>$()\n
    "},{"location":"xmldocs/llama.oldversion.embeddingdata/#returns_5","title":"Returns","text":"

    EmbeddingData

    "},{"location":"xmldocs/llama.oldversion.embeddingdata/#deconstructint32-string-single","title":"Deconstruct(Int32&, String&, Single[]&)","text":"
    public void Deconstruct(Int32& Index, String& Object, Single[]& Embedding)\n
    "},{"location":"xmldocs/llama.oldversion.embeddingdata/#parameters_4","title":"Parameters","text":"

    Index Int32&

    Object String&

    Embedding Single[]&
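
    Each EmbeddingData entry carries one float vector, with Index identifying which input it belongs to, and the enclosing Embedding adds the model name and token usage. The sketch below only reads documented members; the embedding instance would come from a producer such as an embedder, which is out of scope here.

```csharp
using System;
using LLama.OldVersion;

static class EmbeddingDemo
{
    // Report the dimensionality of each returned vector plus token usage.
    public static void Summarize(Embedding embedding)
    {
        foreach (EmbeddingData data in embedding.Data)
            Console.WriteLine($"input {data.Index}: {data.Embedding.Length} dimensions");
        Console.WriteLine($"prompt tokens used: {embedding.Usage.PromptTokens}");
    }
}
```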

    "},{"location":"xmldocs/llama.oldversion.embeddingusage/","title":"EmbeddingUsage","text":"

    Namespace: LLama.OldVersion

    public class EmbeddingUsage : System.IEquatable`1[[LLama.OldVersion.EmbeddingUsage, LLamaSharp, Version=0.4.0.0, Culture=neutral, PublicKeyToken=null]]\n

    Inheritance Object \u2192 EmbeddingUsage Implements IEquatable<EmbeddingUsage>

    "},{"location":"xmldocs/llama.oldversion.embeddingusage/#properties","title":"Properties","text":""},{"location":"xmldocs/llama.oldversion.embeddingusage/#prompttokens","title":"PromptTokens","text":"
    public int PromptTokens { get; set; }\n
    "},{"location":"xmldocs/llama.oldversion.embeddingusage/#property-value","title":"Property Value","text":"

    Int32

    "},{"location":"xmldocs/llama.oldversion.embeddingusage/#totaltokens","title":"TotalTokens","text":"
    public int TotalTokens { get; set; }\n
    "},{"location":"xmldocs/llama.oldversion.embeddingusage/#property-value_1","title":"Property Value","text":"

    Int32

    "},{"location":"xmldocs/llama.oldversion.embeddingusage/#constructors","title":"Constructors","text":""},{"location":"xmldocs/llama.oldversion.embeddingusage/#embeddingusageint32-int32","title":"EmbeddingUsage(Int32, Int32)","text":"
    public EmbeddingUsage(int PromptTokens, int TotalTokens)\n
    "},{"location":"xmldocs/llama.oldversion.embeddingusage/#parameters","title":"Parameters","text":"

    PromptTokens Int32

    TotalTokens Int32

    "},{"location":"xmldocs/llama.oldversion.embeddingusage/#methods","title":"Methods","text":""},{"location":"xmldocs/llama.oldversion.embeddingusage/#tostring","title":"ToString()","text":"
    public string ToString()\n
    "},{"location":"xmldocs/llama.oldversion.embeddingusage/#returns","title":"Returns","text":"

    String

    "},{"location":"xmldocs/llama.oldversion.embeddingusage/#printmembersstringbuilder","title":"PrintMembers(StringBuilder)","text":"
    protected bool PrintMembers(StringBuilder builder)\n
    "},{"location":"xmldocs/llama.oldversion.embeddingusage/#parameters_1","title":"Parameters","text":"

    builder StringBuilder

    "},{"location":"xmldocs/llama.oldversion.embeddingusage/#returns_1","title":"Returns","text":"

    Boolean

    "},{"location":"xmldocs/llama.oldversion.embeddingusage/#gethashcode","title":"GetHashCode()","text":"
    public int GetHashCode()\n
    "},{"location":"xmldocs/llama.oldversion.embeddingusage/#returns_2","title":"Returns","text":"

    Int32

    "},{"location":"xmldocs/llama.oldversion.embeddingusage/#equalsobject","title":"Equals(Object)","text":"
    public bool Equals(object obj)\n
    "},{"location":"xmldocs/llama.oldversion.embeddingusage/#parameters_2","title":"Parameters","text":"

    obj Object

    "},{"location":"xmldocs/llama.oldversion.embeddingusage/#returns_3","title":"Returns","text":"

    Boolean

    "},{"location":"xmldocs/llama.oldversion.embeddingusage/#equalsembeddingusage","title":"Equals(EmbeddingUsage)","text":"
    public bool Equals(EmbeddingUsage other)\n
    "},{"location":"xmldocs/llama.oldversion.embeddingusage/#parameters_3","title":"Parameters","text":"

    other EmbeddingUsage

    "},{"location":"xmldocs/llama.oldversion.embeddingusage/#returns_4","title":"Returns","text":"

    Boolean

    "},{"location":"xmldocs/llama.oldversion.embeddingusage/#clone","title":"<Clone>$()","text":"
    public EmbeddingUsage <Clone>$()\n
    "},{"location":"xmldocs/llama.oldversion.embeddingusage/#returns_5","title":"Returns","text":"

    EmbeddingUsage

    "},{"location":"xmldocs/llama.oldversion.embeddingusage/#deconstructint32-int32","title":"Deconstruct(Int32&, Int32&)","text":"
    public void Deconstruct(Int32& PromptTokens, Int32& TotalTokens)\n
    "},{"location":"xmldocs/llama.oldversion.embeddingusage/#parameters_4","title":"Parameters","text":"

    PromptTokens Int32&

    TotalTokens Int32&

    "},{"location":"xmldocs/llama.oldversion.ichatmodel/","title":"IChatModel","text":"

    Namespace: LLama.OldVersion

    public interface IChatModel\n
    "},{"location":"xmldocs/llama.oldversion.ichatmodel/#properties","title":"Properties","text":""},{"location":"xmldocs/llama.oldversion.ichatmodel/#name","title":"Name","text":"
    public abstract string Name { get; }\n
    "},{"location":"xmldocs/llama.oldversion.ichatmodel/#property-value","title":"Property Value","text":"

    String

    "},{"location":"xmldocs/llama.oldversion.ichatmodel/#methods","title":"Methods","text":""},{"location":"xmldocs/llama.oldversion.ichatmodel/#chatstring-string-string","title":"Chat(String, String, String)","text":"
    IEnumerable<string> Chat(string text, string prompt, string encoding)\n
    "},{"location":"xmldocs/llama.oldversion.ichatmodel/#parameters","title":"Parameters","text":"

    text String

    prompt String

    encoding String

    "},{"location":"xmldocs/llama.oldversion.ichatmodel/#returns","title":"Returns","text":"

    IEnumerable<String>

    "},{"location":"xmldocs/llama.oldversion.ichatmodel/#initchatpromptstring-string","title":"InitChatPrompt(String, String)","text":"

    Initialize a prompt for chat, so that the next prompt can be produced automatically during the chat.

    void InitChatPrompt(string prompt, string encoding)\n
    "},{"location":"xmldocs/llama.oldversion.ichatmodel/#parameters_1","title":"Parameters","text":"

    prompt String

    encoding String

    "},{"location":"xmldocs/llama.oldversion.ichatmodel/#initchatantipromptstring","title":"InitChatAntiprompt(String[])","text":"
    void InitChatAntiprompt(String[] antiprompt)\n
    "},{"location":"xmldocs/llama.oldversion.ichatmodel/#parameters_2","title":"Parameters","text":"

    antiprompt String[]
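
    Example of a minimal implementation (a hypothetical echo model, purely to make the contract concrete; the class name and behavior are not part of the library):

    ```csharp
    using System;
    using System.Collections.Generic;
    using LLama.OldVersion;

    // A hypothetical IChatModel that simply echoes the user's input.
    public class EchoChatModel : IChatModel
    {
        private string _prompt = string.Empty;
        private string[] _antiprompt = Array.Empty<string>();

        public string Name => "EchoChatModel";

        public IEnumerable<string> Chat(string text, string prompt, string encoding)
        {
            // A real model would run inference here; we just yield the input back.
            yield return text;
        }

        public void InitChatPrompt(string prompt, string encoding) => _prompt = prompt;

        public void InitChatAntiprompt(string[] antiprompt) => _antiprompt = antiprompt;
    }
    ```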

    "},{"location":"xmldocs/llama.oldversion.llamaembedder/","title":"LLamaEmbedder","text":"

    Namespace: LLama.OldVersion

    public class LLamaEmbedder : System.IDisposable\n

    Inheritance Object \u2192 LLamaEmbedder Implements IDisposable

    "},{"location":"xmldocs/llama.oldversion.llamaembedder/#constructors","title":"Constructors","text":""},{"location":"xmldocs/llama.oldversion.llamaembedder/#llamaembedderllamaparams","title":"LLamaEmbedder(LLamaParams)","text":"
    public LLamaEmbedder(LLamaParams params)\n
    "},{"location":"xmldocs/llama.oldversion.llamaembedder/#parameters","title":"Parameters","text":"

    params LLamaParams

    "},{"location":"xmldocs/llama.oldversion.llamaembedder/#methods","title":"Methods","text":""},{"location":"xmldocs/llama.oldversion.llamaembedder/#getembeddingsstring-int32-boolean-string","title":"GetEmbeddings(String, Int32, Boolean, String)","text":"
    public Single[] GetEmbeddings(string text, int n_thread, bool add_bos, string encoding)\n
    "},{"location":"xmldocs/llama.oldversion.llamaembedder/#parameters_1","title":"Parameters","text":"

    text String

    n_thread Int32

    add_bos Boolean

    encoding String

    "},{"location":"xmldocs/llama.oldversion.llamaembedder/#returns","title":"Returns","text":"

    Single[]

    "},{"location":"xmldocs/llama.oldversion.llamaembedder/#dispose","title":"Dispose()","text":"
    public void Dispose()\n
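
    Example (a sketch: the model path is a placeholder, the LLamaParams fields are set via a struct initializer, and enabling embedding mode for GetEmbeddings is an assumption):

    ```csharp
    using System;
    using LLama.OldVersion;

    var llamaParams = new LLamaParams
    {
        model = "path/to/model.bin", // placeholder path
        n_ctx = 512,
        embedding = true             // assumption: embedding mode must be on for GetEmbeddings
    };

    using (var embedder = new LLamaEmbedder(llamaParams))
    {
        float[] vector = embedder.GetEmbeddings("Hello, world!", 4, true, "UTF-8");
        Console.WriteLine($"Embedding dimension: {vector.Length}");
    }
    ```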
    "},{"location":"xmldocs/llama.oldversion.llamamodel/","title":"LLamaModel","text":"

    Namespace: LLama.OldVersion

    public class LLamaModel : IChatModel, System.IDisposable\n

    Inheritance Object \u2192 LLamaModel Implements IChatModel, IDisposable

    "},{"location":"xmldocs/llama.oldversion.llamamodel/#properties","title":"Properties","text":""},{"location":"xmldocs/llama.oldversion.llamamodel/#name","title":"Name","text":"
    public string Name { get; set; }\n
    "},{"location":"xmldocs/llama.oldversion.llamamodel/#property-value","title":"Property Value","text":"

    String

    "},{"location":"xmldocs/llama.oldversion.llamamodel/#verbose","title":"Verbose","text":"
    public bool Verbose { get; set; }\n
    "},{"location":"xmldocs/llama.oldversion.llamamodel/#property-value_1","title":"Property Value","text":"

    Boolean

    "},{"location":"xmldocs/llama.oldversion.llamamodel/#nativehandle","title":"NativeHandle","text":"
    public SafeLLamaContextHandle NativeHandle { get; }\n
    "},{"location":"xmldocs/llama.oldversion.llamamodel/#property-value_2","title":"Property Value","text":"

    SafeLLamaContextHandle

    "},{"location":"xmldocs/llama.oldversion.llamamodel/#constructors","title":"Constructors","text":""},{"location":"xmldocs/llama.oldversion.llamamodel/#llamamodelstring-string-boolean-int32-int32-int32-int32-int32-int32-int32-dictionaryint32-single-int32-single-single-single-single-single-int32-single-single-int32-single-single-string-string-string-string-liststring-string-string-boolean-boolean-boolean-boolean-boolean-boolean-boolean-boolean-boolean-boolean-boolean-boolean-boolean-boolean-string","title":"LLamaModel(String, String, Boolean, Int32, Int32, Int32, Int32, Int32, Int32, Int32, Dictionary<Int32, Single>, Int32, Single, Single, Single, Single, Single, Int32, Single, Single, Int32, Single, Single, String, String, String, String, List<String>, String, String, Boolean, Boolean, Boolean, Boolean, Boolean, Boolean, Boolean, Boolean, Boolean, Boolean, Boolean, Boolean, Boolean, Boolean, String)","text":"

    Please refer to LLamaParams for the meaning of each argument. Be sure to set n_gpu_layers; otherwise 20 layers will be loaded to the GPU by default.

    public LLamaModel(string model_path, string model_name, bool verbose, int seed, int n_threads, int n_predict, int n_ctx, int n_batch, int n_keep, int n_gpu_layers, Dictionary<int, float> logit_bias, int top_k, float top_p, float tfs_z, float typical_p, float temp, float repeat_penalty, int repeat_last_n, float frequency_penalty, float presence_penalty, int mirostat, float mirostat_tau, float mirostat_eta, string prompt, string path_session, string input_prefix, string input_suffix, List<string> antiprompt, string lora_adapter, string lora_base, bool memory_f16, bool random_prompt, bool use_color, bool interactive, bool embedding, bool interactive_first, bool prompt_cache_all, bool instruct, bool penalize_nl, bool perplexity, bool use_mmap, bool use_mlock, bool mem_test, bool verbose_prompt, string encoding)\n
    "},{"location":"xmldocs/llama.oldversion.llamamodel/#parameters","title":"Parameters","text":"

    model_path String The model file path.

    model_name String The model name.

    verbose Boolean Whether to print details when running the model.

    seed Int32

    n_threads Int32

    n_predict Int32

    n_ctx Int32

    n_batch Int32

    n_keep Int32

    n_gpu_layers Int32

    logit_bias Dictionary<Int32, Single>

    top_k Int32

    top_p Single

    tfs_z Single

    typical_p Single

    temp Single

    repeat_penalty Single

    repeat_last_n Int32

    frequency_penalty Single

    presence_penalty Single

    mirostat Int32

    mirostat_tau Single

    mirostat_eta Single

    prompt String

    path_session String

    input_prefix String

    input_suffix String

    antiprompt List<String>

    lora_adapter String

    lora_base String

    memory_f16 Boolean

    random_prompt Boolean

    use_color Boolean

    interactive Boolean

    embedding Boolean

    interactive_first Boolean

    prompt_cache_all Boolean

    instruct Boolean

    penalize_nl Boolean

    perplexity Boolean

    use_mmap Boolean

    use_mlock Boolean

    mem_test Boolean

    verbose_prompt Boolean

    encoding String

    "},{"location":"xmldocs/llama.oldversion.llamamodel/#llamamodelllamaparams-string-boolean-string","title":"LLamaModel(LLamaParams, String, Boolean, String)","text":"

    Please refer to LLamaParams for the meaning of each argument. Be sure to set n_gpu_layers; otherwise 20 layers will be loaded to the GPU by default.

    public LLamaModel(LLamaParams params, string name, bool verbose, string encoding)\n
    "},{"location":"xmldocs/llama.oldversion.llamamodel/#parameters_1","title":"Parameters","text":"

    params LLamaParams The LLamaModel params

    name String Model name

    verbose Boolean Whether to output detailed info.

    encoding String

    "},{"location":"xmldocs/llama.oldversion.llamamodel/#exceptions","title":"Exceptions","text":"

    RuntimeError

    "},{"location":"xmldocs/llama.oldversion.llamamodel/#methods","title":"Methods","text":""},{"location":"xmldocs/llama.oldversion.llamamodel/#withpromptstring-string","title":"WithPrompt(String, String)","text":"

    Apply a prompt to the model.

    public LLamaModel WithPrompt(string prompt, string encoding)\n
    "},{"location":"xmldocs/llama.oldversion.llamamodel/#parameters_2","title":"Parameters","text":"

    prompt String

    encoding String

    "},{"location":"xmldocs/llama.oldversion.llamamodel/#returns","title":"Returns","text":"

    LLamaModel

    "},{"location":"xmldocs/llama.oldversion.llamamodel/#exceptions_1","title":"Exceptions","text":"

    ArgumentException

    "},{"location":"xmldocs/llama.oldversion.llamamodel/#withpromptfilestring","title":"WithPromptFile(String)","text":"

    Apply the prompt file to the model.

    public LLamaModel WithPromptFile(string promptFileName)\n
    "},{"location":"xmldocs/llama.oldversion.llamamodel/#parameters_3","title":"Parameters","text":"

    promptFileName String

    "},{"location":"xmldocs/llama.oldversion.llamamodel/#returns_1","title":"Returns","text":"

    LLamaModel

    "},{"location":"xmldocs/llama.oldversion.llamamodel/#initchatpromptstring-string","title":"InitChatPrompt(String, String)","text":"
    public void InitChatPrompt(string prompt, string encoding)\n
    "},{"location":"xmldocs/llama.oldversion.llamamodel/#parameters_4","title":"Parameters","text":"

    prompt String

    encoding String

    "},{"location":"xmldocs/llama.oldversion.llamamodel/#initchatantipromptstring","title":"InitChatAntiprompt(String[])","text":"
    public void InitChatAntiprompt(String[] antiprompt)\n
    "},{"location":"xmldocs/llama.oldversion.llamamodel/#parameters_5","title":"Parameters","text":"

    antiprompt String[]

    "},{"location":"xmldocs/llama.oldversion.llamamodel/#chatstring-string-string","title":"Chat(String, String, String)","text":"

    Chat with the LLama model in interactive mode.

    public IEnumerable<string> Chat(string text, string prompt, string encoding)\n
    "},{"location":"xmldocs/llama.oldversion.llamamodel/#parameters_6","title":"Parameters","text":"

    text String

    prompt String

    encoding String

    "},{"location":"xmldocs/llama.oldversion.llamamodel/#returns_2","title":"Returns","text":"

    IEnumerable<String>

    "},{"location":"xmldocs/llama.oldversion.llamamodel/#exceptions_2","title":"Exceptions","text":"

    ArgumentException

    "},{"location":"xmldocs/llama.oldversion.llamamodel/#savestatestring","title":"SaveState(String)","text":"

    Save the state to the specified path.

    public void SaveState(string filename)\n
    "},{"location":"xmldocs/llama.oldversion.llamamodel/#parameters_7","title":"Parameters","text":"

    filename String

    "},{"location":"xmldocs/llama.oldversion.llamamodel/#loadstatestring-boolean","title":"LoadState(String, Boolean)","text":"

    Load the state from the specified path.

    public void LoadState(string filename, bool clearPreviousEmbed)\n
    "},{"location":"xmldocs/llama.oldversion.llamamodel/#parameters_8","title":"Parameters","text":"

    filename String

    clearPreviousEmbed Boolean Whether to clear the previously embedded inputs of this model before loading.

    "},{"location":"xmldocs/llama.oldversion.llamamodel/#exceptions_3","title":"Exceptions","text":"

    RuntimeError

    "},{"location":"xmldocs/llama.oldversion.llamamodel/#tokenizestring-string","title":"Tokenize(String, String)","text":"

    Tokenize a string.

    public List<int> Tokenize(string text, string encoding)\n
    "},{"location":"xmldocs/llama.oldversion.llamamodel/#parameters_9","title":"Parameters","text":"

    text String The UTF-8 encoded string to tokenize.

    encoding String

    "},{"location":"xmldocs/llama.oldversion.llamamodel/#returns_3","title":"Returns","text":"

    List<Int32> A list of tokens.

    "},{"location":"xmldocs/llama.oldversion.llamamodel/#exceptions_4","title":"Exceptions","text":"

    RuntimeError If the tokenization failed.

    "},{"location":"xmldocs/llama.oldversion.llamamodel/#detokenizeienumerableint32","title":"DeTokenize(IEnumerable<Int32>)","text":"

    Detokenize a list of tokens.

    public string DeTokenize(IEnumerable<int> tokens)\n
    "},{"location":"xmldocs/llama.oldversion.llamamodel/#parameters_10","title":"Parameters","text":"

    tokens IEnumerable<Int32> The list of tokens to detokenize.

    "},{"location":"xmldocs/llama.oldversion.llamamodel/#returns_4","title":"Returns","text":"

    String The detokenized string.
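
    Example round trip (a sketch; `model` is assumed to be a LLama.OldVersion.LLamaModel constructed as shown above):

    ```csharp
    using System;
    using System.Collections.Generic;

    // Tokenize a string, then detokenize the tokens; the round trip should
    // approximately reproduce the original text.
    List<int> tokens = model.Tokenize("Hello, world!", "UTF-8");
    string roundTripped = model.DeTokenize(tokens);
    Console.WriteLine($"{tokens.Count} tokens -> \"{roundTripped}\"");
    ```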

    "},{"location":"xmldocs/llama.oldversion.llamamodel/#callstring-string","title":"Call(String, String)","text":"

    Call the model to run inference.

    public IEnumerable<string> Call(string text, string encoding)\n
    "},{"location":"xmldocs/llama.oldversion.llamamodel/#parameters_11","title":"Parameters","text":"

    text String

    encoding String

    "},{"location":"xmldocs/llama.oldversion.llamamodel/#returns_5","title":"Returns","text":"

    IEnumerable<String>

    "},{"location":"xmldocs/llama.oldversion.llamamodel/#exceptions_5","title":"Exceptions","text":"

    RuntimeError

    "},{"location":"xmldocs/llama.oldversion.llamamodel/#dispose","title":"Dispose()","text":"
    public void Dispose()\n
    "},{"location":"xmldocs/llama.oldversion.llamaparams/","title":"LLamaParams","text":"

    Namespace: LLama.OldVersion

    public struct LLamaParams\n

    Inheritance Object \u2192 ValueType \u2192 LLamaParams

    "},{"location":"xmldocs/llama.oldversion.llamaparams/#fields","title":"Fields","text":""},{"location":"xmldocs/llama.oldversion.llamaparams/#seed","title":"seed","text":"
    public int seed;\n
    "},{"location":"xmldocs/llama.oldversion.llamaparams/#n_threads","title":"n_threads","text":"
    public int n_threads;\n
    "},{"location":"xmldocs/llama.oldversion.llamaparams/#n_predict","title":"n_predict","text":"
    public int n_predict;\n
    "},{"location":"xmldocs/llama.oldversion.llamaparams/#n_ctx","title":"n_ctx","text":"
    public int n_ctx;\n
    "},{"location":"xmldocs/llama.oldversion.llamaparams/#n_batch","title":"n_batch","text":"
    public int n_batch;\n
    "},{"location":"xmldocs/llama.oldversion.llamaparams/#n_keep","title":"n_keep","text":"
    public int n_keep;\n
    "},{"location":"xmldocs/llama.oldversion.llamaparams/#n_gpu_layers","title":"n_gpu_layers","text":"
    public int n_gpu_layers;\n
    "},{"location":"xmldocs/llama.oldversion.llamaparams/#logit_bias","title":"logit_bias","text":"
    public Dictionary<int, float> logit_bias;\n
    "},{"location":"xmldocs/llama.oldversion.llamaparams/#top_k","title":"top_k","text":"
    public int top_k;\n
    "},{"location":"xmldocs/llama.oldversion.llamaparams/#top_p","title":"top_p","text":"
    public float top_p;\n
    "},{"location":"xmldocs/llama.oldversion.llamaparams/#tfs_z","title":"tfs_z","text":"
    public float tfs_z;\n
    "},{"location":"xmldocs/llama.oldversion.llamaparams/#typical_p","title":"typical_p","text":"
    public float typical_p;\n
    "},{"location":"xmldocs/llama.oldversion.llamaparams/#temp","title":"temp","text":"
    public float temp;\n
    "},{"location":"xmldocs/llama.oldversion.llamaparams/#repeat_penalty","title":"repeat_penalty","text":"
    public float repeat_penalty;\n
    "},{"location":"xmldocs/llama.oldversion.llamaparams/#repeat_last_n","title":"repeat_last_n","text":"
    public int repeat_last_n;\n
    "},{"location":"xmldocs/llama.oldversion.llamaparams/#frequency_penalty","title":"frequency_penalty","text":"
    public float frequency_penalty;\n
    "},{"location":"xmldocs/llama.oldversion.llamaparams/#presence_penalty","title":"presence_penalty","text":"
    public float presence_penalty;\n
    "},{"location":"xmldocs/llama.oldversion.llamaparams/#mirostat","title":"mirostat","text":"
    public int mirostat;\n
    "},{"location":"xmldocs/llama.oldversion.llamaparams/#mirostat_tau","title":"mirostat_tau","text":"
    public float mirostat_tau;\n
    "},{"location":"xmldocs/llama.oldversion.llamaparams/#mirostat_eta","title":"mirostat_eta","text":"
    public float mirostat_eta;\n
    "},{"location":"xmldocs/llama.oldversion.llamaparams/#model","title":"model","text":"
    public string model;\n
    "},{"location":"xmldocs/llama.oldversion.llamaparams/#prompt","title":"prompt","text":"
    public string prompt;\n
    "},{"location":"xmldocs/llama.oldversion.llamaparams/#path_session","title":"path_session","text":"
    public string path_session;\n
    "},{"location":"xmldocs/llama.oldversion.llamaparams/#input_prefix","title":"input_prefix","text":"
    public string input_prefix;\n
    "},{"location":"xmldocs/llama.oldversion.llamaparams/#input_suffix","title":"input_suffix","text":"
    public string input_suffix;\n
    "},{"location":"xmldocs/llama.oldversion.llamaparams/#antiprompt","title":"antiprompt","text":"
    public List<string> antiprompt;\n
    "},{"location":"xmldocs/llama.oldversion.llamaparams/#lora_adapter","title":"lora_adapter","text":"
    public string lora_adapter;\n
    "},{"location":"xmldocs/llama.oldversion.llamaparams/#lora_base","title":"lora_base","text":"
    public string lora_base;\n
    "},{"location":"xmldocs/llama.oldversion.llamaparams/#memory_f16","title":"memory_f16","text":"
    public bool memory_f16;\n
    "},{"location":"xmldocs/llama.oldversion.llamaparams/#random_prompt","title":"random_prompt","text":"
    public bool random_prompt;\n
    "},{"location":"xmldocs/llama.oldversion.llamaparams/#use_color","title":"use_color","text":"
    public bool use_color;\n
    "},{"location":"xmldocs/llama.oldversion.llamaparams/#interactive","title":"interactive","text":"
    public bool interactive;\n
    "},{"location":"xmldocs/llama.oldversion.llamaparams/#prompt_cache_all","title":"prompt_cache_all","text":"
    public bool prompt_cache_all;\n
    "},{"location":"xmldocs/llama.oldversion.llamaparams/#embedding","title":"embedding","text":"
    public bool embedding;\n
    "},{"location":"xmldocs/llama.oldversion.llamaparams/#interactive_first","title":"interactive_first","text":"
    public bool interactive_first;\n
    "},{"location":"xmldocs/llama.oldversion.llamaparams/#instruct","title":"instruct","text":"
    public bool instruct;\n
    "},{"location":"xmldocs/llama.oldversion.llamaparams/#penalize_nl","title":"penalize_nl","text":"
    public bool penalize_nl;\n
    "},{"location":"xmldocs/llama.oldversion.llamaparams/#perplexity","title":"perplexity","text":"
    public bool perplexity;\n
    "},{"location":"xmldocs/llama.oldversion.llamaparams/#use_mmap","title":"use_mmap","text":"
    public bool use_mmap;\n
    "},{"location":"xmldocs/llama.oldversion.llamaparams/#use_mlock","title":"use_mlock","text":"
    public bool use_mlock;\n
    "},{"location":"xmldocs/llama.oldversion.llamaparams/#mem_test","title":"mem_test","text":"
    public bool mem_test;\n
    "},{"location":"xmldocs/llama.oldversion.llamaparams/#verbose_prompt","title":"verbose_prompt","text":"
    public bool verbose_prompt;\n
    "},{"location":"xmldocs/llama.oldversion.llamaparams/#constructors","title":"Constructors","text":""},{"location":"xmldocs/llama.oldversion.llamaparams/#llamaparamsint32-int32-int32-int32-int32-int32-int32-dictionaryint32-single-int32-single-single-single-single-single-int32-single-single-int32-single-single-string-string-string-string-string-liststring-string-string-boolean-boolean-boolean-boolean-boolean-boolean-boolean-boolean-boolean-boolean-boolean-boolean-boolean-boolean","title":"LLamaParams(Int32, Int32, Int32, Int32, Int32, Int32, Int32, Dictionary<Int32, Single>, Int32, Single, Single, Single, Single, Single, Int32, Single, Single, Int32, Single, Single, String, String, String, String, String, List<String>, String, String, Boolean, Boolean, Boolean, Boolean, Boolean, Boolean, Boolean, Boolean, Boolean, Boolean, Boolean, Boolean, Boolean, Boolean)","text":"
    LLamaParams(int seed, int n_threads, int n_predict, int n_ctx, int n_batch, int n_keep, int n_gpu_layers, Dictionary<int, float> logit_bias, int top_k, float top_p, float tfs_z, float typical_p, float temp, float repeat_penalty, int repeat_last_n, float frequency_penalty, float presence_penalty, int mirostat, float mirostat_tau, float mirostat_eta, string model, string prompt, string path_session, string input_prefix, string input_suffix, List<string> antiprompt, string lora_adapter, string lora_base, bool memory_f16, bool random_prompt, bool use_color, bool interactive, bool prompt_cache_all, bool embedding, bool interactive_first, bool instruct, bool penalize_nl, bool perplexity, bool use_mmap, bool use_mlock, bool mem_test, bool verbose_prompt)\n
    "},{"location":"xmldocs/llama.oldversion.llamaparams/#parameters","title":"Parameters","text":"

    seed Int32

    n_threads Int32

    n_predict Int32

    n_ctx Int32

    n_batch Int32

    n_keep Int32

    n_gpu_layers Int32

    logit_bias Dictionary<Int32, Single>

    top_k Int32

    top_p Single

    tfs_z Single

    typical_p Single

    temp Single

    repeat_penalty Single

    repeat_last_n Int32

    frequency_penalty Single

    presence_penalty Single

    mirostat Int32

    mirostat_tau Single

    mirostat_eta Single

    model String

    prompt String

    path_session String

    input_prefix String

    input_suffix String

    antiprompt List<String>

    lora_adapter String

    lora_base String

    memory_f16 Boolean

    random_prompt Boolean

    use_color Boolean

    interactive Boolean

    prompt_cache_all Boolean

    embedding Boolean

    interactive_first Boolean

    instruct Boolean

    penalize_nl Boolean

    perplexity Boolean

    use_mmap Boolean

    use_mlock Boolean

    mem_test Boolean

    verbose_prompt Boolean

    "},{"location":"xmldocs/llama.resettablellamamodel/","title":"ResettableLLamaModel","text":"

    Namespace: LLama

    A LLamaModel that can be reset to its initial state. Note that using this class consumes about 10% more memory.

    public class ResettableLLamaModel : LLamaModel, System.IDisposable\n

    Inheritance Object \u2192 LLamaModel \u2192 ResettableLLamaModel Implements IDisposable

    "},{"location":"xmldocs/llama.resettablellamamodel/#properties","title":"Properties","text":""},{"location":"xmldocs/llama.resettablellamamodel/#originalstate","title":"OriginalState","text":"

    The initial state of the model

    public Byte[] OriginalState { get; set; }\n
    "},{"location":"xmldocs/llama.resettablellamamodel/#property-value","title":"Property Value","text":"

    Byte[]

    "},{"location":"xmldocs/llama.resettablellamamodel/#contextsize","title":"ContextSize","text":"

    The context size.

    public int ContextSize { get; }\n
    "},{"location":"xmldocs/llama.resettablellamamodel/#property-value_1","title":"Property Value","text":"

    Int32

    "},{"location":"xmldocs/llama.resettablellamamodel/#params","title":"Params","text":"

    The model params set for this model.

    public ModelParams Params { get; set; }\n
    "},{"location":"xmldocs/llama.resettablellamamodel/#property-value_2","title":"Property Value","text":"

    ModelParams

    "},{"location":"xmldocs/llama.resettablellamamodel/#nativehandle","title":"NativeHandle","text":"

    The native handle, which is passed to the native APIs. Avoid using it unless you know how the native API is used.

    public SafeLLamaContextHandle NativeHandle { get; }\n
    "},{"location":"xmldocs/llama.resettablellamamodel/#property-value_3","title":"Property Value","text":"

    SafeLLamaContextHandle

    "},{"location":"xmldocs/llama.resettablellamamodel/#encoding","title":"Encoding","text":"

    The encoding this model uses to handle text input.

    public Encoding Encoding { get; }\n
    "},{"location":"xmldocs/llama.resettablellamamodel/#property-value_4","title":"Property Value","text":"

    Encoding

    "},{"location":"xmldocs/llama.resettablellamamodel/#constructors","title":"Constructors","text":""},{"location":"xmldocs/llama.resettablellamamodel/#resettablellamamodelmodelparams-string","title":"ResettableLLamaModel(ModelParams, String)","text":"
    public ResettableLLamaModel(ModelParams Params, string encoding)\n
    "},{"location":"xmldocs/llama.resettablellamamodel/#parameters","title":"Parameters","text":"

    Params ModelParams

    encoding String

    "},{"location":"xmldocs/llama.resettablellamamodel/#methods","title":"Methods","text":""},{"location":"xmldocs/llama.resettablellamamodel/#reset","title":"Reset()","text":"

    Reset the state to the initial state.

    public void Reset()\n
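
    Example (a minimal sketch; the model path is a placeholder, and ModelParams is assumed to accept the model path in its constructor, as documented on its own page):

    ```csharp
    using LLama;
    using LLama.Common;

    var model = new ResettableLLamaModel(new ModelParams("path/to/model.bin"), "UTF-8");
    // ... run some inference that mutates the internal state ...
    model.Reset(); // roll the context back to the state captured in OriginalState
    ```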
    "},{"location":"xmldocs/llama.statefulexecutorbase/","title":"StatefulExecutorBase","text":"

    Namespace: LLama

    The base class for stateful LLama executors.

    public abstract class StatefulExecutorBase : LLama.Abstractions.ILLamaExecutor\n

    Inheritance Object \u2192 StatefulExecutorBase Implements ILLamaExecutor

    "},{"location":"xmldocs/llama.statefulexecutorbase/#properties","title":"Properties","text":""},{"location":"xmldocs/llama.statefulexecutorbase/#model","title":"Model","text":"

    The model used by the executor.

    public LLamaModel Model { get; }\n
    "},{"location":"xmldocs/llama.statefulexecutorbase/#property-value","title":"Property Value","text":"

    LLamaModel

    "},{"location":"xmldocs/llama.statefulexecutorbase/#methods","title":"Methods","text":""},{"location":"xmldocs/llama.statefulexecutorbase/#withsessionfilestring","title":"WithSessionFile(String)","text":"

    This API is currently not verified.

    public StatefulExecutorBase WithSessionFile(string filename)\n
    "},{"location":"xmldocs/llama.statefulexecutorbase/#parameters","title":"Parameters","text":"

    filename String

    "},{"location":"xmldocs/llama.statefulexecutorbase/#returns","title":"Returns","text":"

    StatefulExecutorBase

    "},{"location":"xmldocs/llama.statefulexecutorbase/#exceptions","title":"Exceptions","text":"

    ArgumentNullException

    RuntimeError

    "},{"location":"xmldocs/llama.statefulexecutorbase/#savesessionfilestring","title":"SaveSessionFile(String)","text":"

    This API has not been verified yet.

    public void SaveSessionFile(string filename)\n
    "},{"location":"xmldocs/llama.statefulexecutorbase/#parameters_1","title":"Parameters","text":"

    filename String

    "},{"location":"xmldocs/llama.statefulexecutorbase/#handlerunoutofcontextint32","title":"HandleRunOutOfContext(Int32)","text":"

    After the context runs out, keep some tokens from the original prompt and recompute the logits in batches.

    protected void HandleRunOutOfContext(int tokensToKeep)\n
    "},{"location":"xmldocs/llama.statefulexecutorbase/#parameters_2","title":"Parameters","text":"

    tokensToKeep Int32

    "},{"location":"xmldocs/llama.statefulexecutorbase/#tryreusemathingprefix","title":"TryReuseMathingPrefix()","text":"

    Try to reuse the matching prefix from the session file.

    protected void TryReuseMathingPrefix()\n
    "},{"location":"xmldocs/llama.statefulexecutorbase/#getloopconditioninferstateargs","title":"GetLoopCondition(InferStateArgs)","text":"

    Decide whether to continue the loop.

    protected abstract bool GetLoopCondition(InferStateArgs args)\n
    "},{"location":"xmldocs/llama.statefulexecutorbase/#parameters_3","title":"Parameters","text":"

    args InferStateArgs

    "},{"location":"xmldocs/llama.statefulexecutorbase/#returns_1","title":"Returns","text":"

    Boolean

    "},{"location":"xmldocs/llama.statefulexecutorbase/#preprocessinputsstring-inferstateargs","title":"PreprocessInputs(String, InferStateArgs)","text":"

    Preprocess the inputs before the inference.

    protected abstract void PreprocessInputs(string text, InferStateArgs args)\n
    "},{"location":"xmldocs/llama.statefulexecutorbase/#parameters_4","title":"Parameters","text":"

    text String

    args InferStateArgs

    "},{"location":"xmldocs/llama.statefulexecutorbase/#postprocessinferenceparams-inferstateargs-ienumerable1","title":"PostProcess(InferenceParams, InferStateArgs, IEnumerable`1&)","text":"

    Do some post processing after the inference.

    protected abstract bool PostProcess(InferenceParams inferenceParams, InferStateArgs args, IEnumerable<string>& extraOutputs)\n
    "},{"location":"xmldocs/llama.statefulexecutorbase/#parameters_5","title":"Parameters","text":"

    inferenceParams InferenceParams

    args InferStateArgs

    extraOutputs IEnumerable<String>&

    "},{"location":"xmldocs/llama.statefulexecutorbase/#returns_2","title":"Returns","text":"

    Boolean

    "},{"location":"xmldocs/llama.statefulexecutorbase/#inferinternalinferenceparams-inferstateargs","title":"InferInternal(InferenceParams, InferStateArgs)","text":"

    The core inference logic.

    protected abstract void InferInternal(InferenceParams inferenceParams, InferStateArgs args)\n
    "},{"location":"xmldocs/llama.statefulexecutorbase/#parameters_6","title":"Parameters","text":"

    inferenceParams InferenceParams

    args InferStateArgs

    "},{"location":"xmldocs/llama.statefulexecutorbase/#savestatestring","title":"SaveState(String)","text":"

    Save the current state to a file.

    public abstract void SaveState(string filename)\n
    "},{"location":"xmldocs/llama.statefulexecutorbase/#parameters_7","title":"Parameters","text":"

    filename String

    "},{"location":"xmldocs/llama.statefulexecutorbase/#getstatedata","title":"GetStateData()","text":"

    Get the current state data.

    public abstract ExecutorBaseState GetStateData()\n
    "},{"location":"xmldocs/llama.statefulexecutorbase/#returns_3","title":"Returns","text":"

    ExecutorBaseState

    "},{"location":"xmldocs/llama.statefulexecutorbase/#loadstateexecutorbasestate","title":"LoadState(ExecutorBaseState)","text":"

    Load the state from data.

    public abstract void LoadState(ExecutorBaseState data)\n
    "},{"location":"xmldocs/llama.statefulexecutorbase/#parameters_8","title":"Parameters","text":"

    data ExecutorBaseState

    "},{"location":"xmldocs/llama.statefulexecutorbase/#loadstatestring","title":"LoadState(String)","text":"

    Load the state from a file.

    public abstract void LoadState(string filename)\n
    "},{"location":"xmldocs/llama.statefulexecutorbase/#parameters_9","title":"Parameters","text":"

    filename String

    "},{"location":"xmldocs/llama.statefulexecutorbase/#inferstring-inferenceparams-cancellationtoken","title":"Infer(String, InferenceParams, CancellationToken)","text":"

    Execute the inference.

    public IEnumerable<string> Infer(string text, InferenceParams inferenceParams, CancellationToken cancellationToken)\n
    "},{"location":"xmldocs/llama.statefulexecutorbase/#parameters_10","title":"Parameters","text":"

    text String

    inferenceParams InferenceParams

    cancellationToken CancellationToken

    "},{"location":"xmldocs/llama.statefulexecutorbase/#returns_4","title":"Returns","text":"

    IEnumerable<String>

    "},{"location":"xmldocs/llama.statefulexecutorbase/#inferasyncstring-inferenceparams-cancellationtoken","title":"InferAsync(String, InferenceParams, CancellationToken)","text":"

    Execute the inference asynchronously.

    public IAsyncEnumerable<string> InferAsync(string text, InferenceParams inferenceParams, CancellationToken cancellationToken)\n
    "},{"location":"xmldocs/llama.statefulexecutorbase/#parameters_11","title":"Parameters","text":"

    text String

    inferenceParams InferenceParams

    cancellationToken CancellationToken

    "},{"location":"xmldocs/llama.statefulexecutorbase/#returns_5","title":"Returns","text":"

    IAsyncEnumerable<String>
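
    Example of streaming inference through a stateful executor (a sketch: InteractiveExecutor is assumed here as one concrete subclass, LLamaModel is assumed to accept ModelParams in its constructor, and the path is a placeholder):

    ```csharp
    using System;
    using System.Threading;
    using LLama;
    using LLama.Common;

    var model = new LLamaModel(new ModelParams("path/to/model.bin")); // placeholder path
    var executor = new InteractiveExecutor(model);                    // assumed concrete subclass

    await foreach (var token in executor.InferAsync("Hello, ", new InferenceParams(), CancellationToken.None))
    {
        Console.Write(token); // tokens stream out as they are generated
    }
    ```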

    "},{"location":"xmldocs/llama.statelessexecutor/","title":"StatelessExecutor","text":"

    Namespace: LLama

    This executor runs each inference as a one-time job; previous inputs don't affect the response to the current input.

    public class StatelessExecutor : LLama.Abstractions.ILLamaExecutor\n

    Inheritance Object \u2192 StatelessExecutor Implements ILLamaExecutor

    "},{"location":"xmldocs/llama.statelessexecutor/#properties","title":"Properties","text":""},{"location":"xmldocs/llama.statelessexecutor/#model","title":"Model","text":"

    The model used by the executor when running the inference.

    public LLamaModel Model { get; }\n
    "},{"location":"xmldocs/llama.statelessexecutor/#property-value","title":"Property Value","text":"

    LLamaModel

    "},{"location":"xmldocs/llama.statelessexecutor/#constructors","title":"Constructors","text":""},{"location":"xmldocs/llama.statelessexecutor/#statelessexecutorllamamodel","title":"StatelessExecutor(LLamaModel)","text":"
    public StatelessExecutor(LLamaModel model)\n
    "},{"location":"xmldocs/llama.statelessexecutor/#parameters","title":"Parameters","text":"

    model LLamaModel The LLama model.

    "},{"location":"xmldocs/llama.statelessexecutor/#methods","title":"Methods","text":""},{"location":"xmldocs/llama.statelessexecutor/#inferstring-inferenceparams-cancellationtoken","title":"Infer(String, InferenceParams, CancellationToken)","text":"
    public IEnumerable<string> Infer(string text, InferenceParams inferenceParams, CancellationToken cancellationToken)\n
    "},{"location":"xmldocs/llama.statelessexecutor/#parameters_1","title":"Parameters","text":"

    text String

    inferenceParams InferenceParams

    cancellationToken CancellationToken

    "},{"location":"xmldocs/llama.statelessexecutor/#returns","title":"Returns","text":"

    IEnumerable<String>

    "},{"location":"xmldocs/llama.statelessexecutor/#inferasyncstring-inferenceparams-cancellationtoken","title":"InferAsync(String, InferenceParams, CancellationToken)","text":"
    public IAsyncEnumerable<string> InferAsync(string text, InferenceParams inferenceParams, CancellationToken token)\n
    "},{"location":"xmldocs/llama.statelessexecutor/#parameters_2","title":"Parameters","text":"

    text String

    inferenceParams InferenceParams

    token CancellationToken

    "},{"location":"xmldocs/llama.statelessexecutor/#returns_1","title":"Returns","text":"

    IAsyncEnumerable<String>
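
    Example (a sketch; model construction is assumed as above; since the executor is stateless, each Infer call is independent):

    ```csharp
    using System;
    using System.Threading;
    using LLama;
    using LLama.Common;

    var model = new LLamaModel(new ModelParams("path/to/model.bin")); // placeholder path
    var executor = new StatelessExecutor(model);

    // Each call below is an independent one-time job; no history is carried over.
    foreach (var token in executor.Infer("What is C#?", new InferenceParams(), CancellationToken.None))
    {
        Console.Write(token);
    }
    ```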

    "}]} \ No newline at end of file diff --git a/site/sitemap.xml.gz b/site/sitemap.xml.gz new file mode 100644 index 0000000000000000000000000000000000000000..393f18705d3ebed401d2a02e5b2afd3b027c98d8 GIT binary patch literal 127 zcmV-_0D%7=iwFpMpO9n%|8r?{Wo=<_E_iKh04<9_3V)_WXo8&M?ytk3HC}0~zlG)Vu + + + + + + + + + + + + + + + + + + + + index - LLamaSharp Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
\ No newline at end of file
diff --git a/site/xmldocs/llama.abstractions.ihistorytransform/index.html b/site/xmldocs/llama.abstractions.ihistorytransform/index.html
new file mode 100644
index 00000000..057c4a8c
--- /dev/null
+++ b/site/xmldocs/llama.abstractions.ihistorytransform/index.html
@@ -0,0 +1,1749 @@
[generated MkDocs theme markup omitted; page title: "llama.abstractions.ihistorytransform - LLamaSharp Documentation"]

    IHistoryTransform

    Namespace: LLama.Abstractions

    Transform history to plain text and vice versa.

    public interface IHistoryTransform

    Methods

    HistoryToText(ChatHistory)

    Convert a ChatHistory instance to plain text.

    string HistoryToText(ChatHistory history)

    Parameters

    history ChatHistory The ChatHistory instance

    Returns

    String

    TextToHistory(AuthorRole, String)

    Converts plain text to a ChatHistory instance.

    ChatHistory TextToHistory(AuthorRole role, string text)

    Parameters

    role AuthorRole The role for the author.

    text String The chat history as plain text.

    Returns

    ChatHistory The updated history.
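
    Example of a custom implementation (a sketch; the "Role: content" layout is our own choice, and each ChatHistory Message is assumed to expose AuthorRole and Content members):

    ```csharp
    using System.Text;
    using LLama.Abstractions;
    using LLama.Common;

    public class ColonHistoryTransform : IHistoryTransform
    {
        public string HistoryToText(ChatHistory history)
        {
            var sb = new StringBuilder();
            foreach (var message in history.Messages)
                sb.AppendLine($"{message.AuthorRole}: {message.Content}"); // assumed Message members
            return sb.ToString();
        }

        public ChatHistory TextToHistory(AuthorRole role, string text)
        {
            var history = new ChatHistory();
            history.AddMessage(role, text);
            return history;
        }
    }
    ```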

\ No newline at end of file
diff --git a/site/xmldocs/llama.abstractions.illamaexecutor/index.html b/site/xmldocs/llama.abstractions.illamaexecutor/index.html
new file mode 100644
index 00000000..198c108b
--- /dev/null
+++ b/site/xmldocs/llama.abstractions.illamaexecutor/index.html
@@ -0,0 +1,1823 @@
[generated MkDocs theme markup omitted; page title: "llama.abstractions.illamaexecutor - LLamaSharp Documentation"]

    ILLamaExecutor

    Namespace: LLama.Abstractions

    A high level interface for LLama models.

    public interface ILLamaExecutor

    Properties

    Model

    The loaded model for this executor.

    public abstract LLamaModel Model { get; }

    Property Value

    LLamaModel

    Methods

    Infer(String, InferenceParams, CancellationToken)

    Infers a response from the model.

    IEnumerable<string> Infer(string text, InferenceParams inferenceParams, CancellationToken token)

    Parameters

    text String Your prompt

    inferenceParams InferenceParams Any additional parameters

    token CancellationToken A cancellation token.

    Returns

    IEnumerable<String>

    InferAsync(String, InferenceParams, CancellationToken)

    IAsyncEnumerable<string> InferAsync(string text, InferenceParams inferenceParams, CancellationToken token)

    Parameters

    text String

    inferenceParams InferenceParams

    token CancellationToken

    Returns

    IAsyncEnumerable<String>
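
    Example (a sketch showing executor-agnostic code written against this interface):

    ```csharp
    using System;
    using System.Threading;
    using LLama.Abstractions;
    using LLama.Common;

    static void StreamResponse(ILLamaExecutor executor, string prompt)
    {
        // Works with any implementation, stateful or stateless.
        foreach (var token in executor.Infer(prompt, new InferenceParams(), CancellationToken.None))
            Console.Write(token);
    }
    ```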

\ No newline at end of file
diff --git a/site/xmldocs/llama.abstractions.itextstreamtransform/index.html b/site/xmldocs/llama.abstractions.itextstreamtransform/index.html
new file mode 100644
index 00000000..f3775675
--- /dev/null
+++ b/site/xmldocs/llama.abstractions.itextstreamtransform/index.html
@@ -0,0 +1,1744 @@
[generated MkDocs theme markup omitted; page title: "llama.abstractions.itextstreamtransform - LLamaSharp Documentation"]

    ITextStreamTransform

    Namespace: LLama.Abstractions

    Takes a stream of tokens and transforms them.

    public interface ITextStreamTransform

    Methods

    Transform(IEnumerable<String>)

    Takes a stream of tokens and transforms them, returning a new stream of tokens.

    IEnumerable<string> Transform(IEnumerable<string> tokens)

    Parameters

    tokens IEnumerable<String>

    Returns

    IEnumerable<String>

    TransformAsync(IAsyncEnumerable<String>)

    Takes a stream of tokens and transforms them, returning a new stream of tokens asynchronously.

    IAsyncEnumerable<string> TransformAsync(IAsyncEnumerable<string> tokens)

    Parameters

    tokens IAsyncEnumerable<String>

    Returns

    IAsyncEnumerable<String>
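
    Example of a custom implementation (an illustrative sketch that upper-cases every token; the class is hypothetical):

    ```csharp
    using System.Collections.Generic;
    using LLama.Abstractions;

    public class UpperCaseStreamTransform : ITextStreamTransform
    {
        public IEnumerable<string> Transform(IEnumerable<string> tokens)
        {
            foreach (var token in tokens)
                yield return token.ToUpperInvariant();
        }

        public async IAsyncEnumerable<string> TransformAsync(IAsyncEnumerable<string> tokens)
        {
            await foreach (var token in tokens)
                yield return token.ToUpperInvariant();
        }
    }
    ```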

\ No newline at end of file
diff --git a/site/xmldocs/llama.abstractions.itexttransform/index.html b/site/xmldocs/llama.abstractions.itexttransform/index.html
new file mode 100644
index 00000000..0b8426b4
--- /dev/null
+++ b/site/xmldocs/llama.abstractions.itexttransform/index.html
@@ -0,0 +1,1688 @@
[generated MkDocs theme markup omitted; page title: "llama.abstractions.itexttransform - LLamaSharp Documentation"]

    ITextTransform

    Namespace: LLama.Abstractions

    An interface for text transformations. These can be used to compose a pipeline of text transformations, such as:
    - Tokenization
    - Lowercasing
    - Punctuation removal
    - Trimming
    - etc.

    public interface ITextTransform

    Methods

    Transform(String)

    Takes a string and transforms it.

    string Transform(string text)

    Parameters

    text String

    Returns

    String
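
    Example of a custom implementation (an illustrative sketch that trims surrounding whitespace; the class is hypothetical):

    ```csharp
    using LLama.Abstractions;

    // The simplest possible transform: trim surrounding whitespace from the input.
    public class TrimTextTransform : ITextTransform
    {
        public string Transform(string text) => text.Trim();
    }
    ```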

\ No newline at end of file
diff --git a/site/xmldocs/llama.chatsession/index.html b/site/xmldocs/llama.chatsession/index.html
new file mode 100644
index 00000000..0641af9d
--- /dev/null
+++ b/site/xmldocs/llama.chatsession/index.html
@@ -0,0 +1,2485 @@
[generated MkDocs theme markup omitted; page title: "llama.chatsession - LLamaSharp Documentation"]

    ChatSession

    Namespace: LLama

    The main chat session class.

    public class ChatSession

    Inheritance Object → ChatSession

    Fields

    OutputTransform

    The output transform used in this session.

    public ITextStreamTransform OutputTransform;

    Properties

    Executor

    The executor for this session.

    public ILLamaExecutor Executor { get; }

    Property Value

    ILLamaExecutor

    History

    The chat history for this session.

    public ChatHistory History { get; }

    Property Value

    ChatHistory

    HistoryTransform

    The history transform used in this session.

    public IHistoryTransform HistoryTransform { get; set; }

    Property Value

    IHistoryTransform

    InputTransformPipeline

    The input transform pipeline used in this session.

    public List<ITextTransform> InputTransformPipeline { get; set; }

    Property Value

    List<ITextTransform>

    Constructors

    ChatSession(ILLamaExecutor)

    public ChatSession(ILLamaExecutor executor)

    Parameters

    executor ILLamaExecutor The executor for this session

    Methods

    WithHistoryTransform(IHistoryTransform)

    Use a custom history transform.

    public ChatSession WithHistoryTransform(IHistoryTransform transform)

    Parameters

    transform IHistoryTransform

    Returns

    ChatSession

    AddInputTransform(ITextTransform)

    Add a text transform to the input transform pipeline.

    public ChatSession AddInputTransform(ITextTransform transform)

    Parameters

    transform ITextTransform

    Returns

    ChatSession

    WithOutputTransform(ITextStreamTransform)

    Use a custom output transform.

    public ChatSession WithOutputTransform(ITextStreamTransform transform)

    Parameters

    transform ITextStreamTransform

    Returns

    ChatSession

    SaveSession(String)

    public void SaveSession(string path)

    Parameters

    path String The directory name to save the session. If the directory does not exist, a new directory will be created.

    LoadSession(String)

    public void LoadSession(string path)

    Parameters

    path String The directory name to load the session from.

    Chat(ChatHistory, InferenceParams, CancellationToken)

    Get the response from the LLama model with chat histories.

    public IEnumerable<string> Chat(ChatHistory history, InferenceParams inferenceParams, CancellationToken cancellationToken)

    Parameters

    history ChatHistory

    inferenceParams InferenceParams

    cancellationToken CancellationToken

    Returns

    IEnumerable<String>

    Chat(String, InferenceParams, CancellationToken)

    Get the response from the LLama model. Note that the prompt can be not only the preset words, but also the question you want to ask.

    public IEnumerable<string> Chat(string prompt, InferenceParams inferenceParams, CancellationToken cancellationToken)

    Parameters

    prompt String

    inferenceParams InferenceParams

    cancellationToken CancellationToken

    Returns

    IEnumerable<String>

    ChatAsync(ChatHistory, InferenceParams, CancellationToken)

    Get the response from the LLama model with chat histories asynchronously.

    public IAsyncEnumerable<string> ChatAsync(ChatHistory history, InferenceParams inferenceParams, CancellationToken cancellationToken)

    Parameters

    history ChatHistory

    inferenceParams InferenceParams

    cancellationToken CancellationToken

    Returns

    IAsyncEnumerable<String>

    ChatAsync(String, InferenceParams, CancellationToken)

    Get the response from the LLama model asynchronously.

    public IAsyncEnumerable<string> ChatAsync(string prompt, InferenceParams inferenceParams, CancellationToken cancellationToken)

    Parameters

    prompt String

    inferenceParams InferenceParams

    cancellationToken CancellationToken

    Returns

    IAsyncEnumerable<String>
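
    Example (a sketch; `executor` is any ILLamaExecutor, and the two transforms are the hypothetical ones sketched on the interface pages above):

    ```csharp
    using System;
    using System.Threading;
    using LLama;
    using LLama.Common;

    var session = new ChatSession(executor)                 // executor: any ILLamaExecutor
        .WithHistoryTransform(new ColonHistoryTransform())  // hypothetical transform sketched earlier
        .AddInputTransform(new TrimTextTransform());        // hypothetical transform sketched earlier

    foreach (var token in session.Chat("Hello!", new InferenceParams(), CancellationToken.None))
    {
        Console.Write(token);
    }

    session.SaveSession("./chat-session"); // the directory is created if it does not exist
    ```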

\ No newline at end of file
diff --git a/site/xmldocs/llama.common.authorrole/index.html b/site/xmldocs/llama.common.authorrole/index.html
new file mode 100644
index 00000000..2dfb46fd
--- /dev/null
+++ b/site/xmldocs/llama.common.authorrole/index.html
@@ -0,0 +1,1625 @@
[generated MkDocs theme markup omitted; page title: "llama.common.authorrole - LLamaSharp Documentation"]
\ No newline at end of file
diff --git a/site/xmldocs/llama.common.chathistory/index.html b/site/xmldocs/llama.common.chathistory/index.html
new file mode 100644
index 00000000..524e1687
--- /dev/null
+++ b/site/xmldocs/llama.common.chathistory/index.html
@@ -0,0 +1,1788 @@
[generated MkDocs theme markup omitted; page title: "llama.common.chathistory - LLamaSharp Documentation"]

    ChatHistory

    Namespace: LLama.Common

    The chat history class

    public class ChatHistory

    Inheritance Object → ChatHistory

    Properties

    Messages

    List of messages in the chat

    public List<Message> Messages { get; }

    Property Value

    List<Message>

    Constructors

    ChatHistory()

    Create a new instance of the chat history class

    public ChatHistory()

    Methods

    AddMessage(AuthorRole, String)

    Add a message to the chat history

    public void AddMessage(AuthorRole authorRole, string content)

    Parameters

    authorRole AuthorRole Role of the message author

    content String Message content
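
    Example (a sketch assuming the AuthorRole enum defines System and User members):

    ```csharp
    using LLama.Common;

    var history = new ChatHistory();
    history.AddMessage(AuthorRole.System, "You are a helpful assistant."); // assumed enum member
    history.AddMessage(AuthorRole.User, "Hello!");                         // assumed enum member
    System.Console.WriteLine(history.Messages.Count); // 2
    ```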

\ No newline at end of file
diff --git a/site/xmldocs/llama.common.fixedsizequeue-1/index.html b/site/xmldocs/llama.common.fixedsizequeue-1/index.html
new file mode 100644
index 00000000..b7174df3
--- /dev/null
+++ b/site/xmldocs/llama.common.fixedsizequeue-1/index.html
@@ -0,0 +1,2071 @@
[generated MkDocs theme markup omitted; page title: "llama.common.fixedsizequeue-1 - LLamaSharp Documentation"]

    FixedSizeQueue<T>

    Namespace: LLama.Common

    A queue with fixed storage size. Currently it is only a naive implementation and will be further optimized in the future.

    public class FixedSizeQueue<T> : IEnumerable<T>, System.Collections.IEnumerable

    Type Parameters

    T

    Inheritance Object → FixedSizeQueue<T>
    Implements IEnumerable<T>, IEnumerable

    Properties

    Count

    public int Count { get; }

    Property Value

    Int32

    Capacity

    public int Capacity { get; }

    Property Value

    Int32

    Constructors

    FixedSizeQueue(Int32)

    public FixedSizeQueue(int size)

    Parameters

    size Int32

    FixedSizeQueue(Int32, IEnumerable<T>)

    public FixedSizeQueue(int size, IEnumerable<T> data)

    Parameters

    size Int32

    data IEnumerable<T>

    Methods

    FillWith(T)

    public FixedSizeQueue<T> FillWith(T value)

    Parameters

    value T

    Returns

    FixedSizeQueue<T>

    Enqueue(T)

    Enqueue an element.

    public void Enqueue(T item)

    Parameters

    item T

    ToArray()

    public T[] ToArray()

    Returns

    T[]

    GetEnumerator()

    public IEnumerator<T> GetEnumerator()

    Returns

    IEnumerator<T>
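
    Example (a sketch; given the fixed capacity, we would expect the oldest elements to be discarded once the queue is full):

    ```csharp
    using LLama.Common;

    var queue = new FixedSizeQueue<int>(3).FillWith(0); // capacity 3, pre-filled with zeros
    queue.Enqueue(1);
    queue.Enqueue(2);
    queue.Enqueue(3); // each enqueue pushes out the oldest element once Count == Capacity
    System.Console.WriteLine(string.Join(", ", queue.ToArray())); // expected: 1, 2, 3
    ```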

\ No newline at end of file
diff --git a/site/xmldocs/llama.common.illamalogger/index.html b/site/xmldocs/llama.common.illamalogger/index.html
new file mode 100644
index 00000000..2bef61bd
--- /dev/null
+++ b/site/xmldocs/llama.common.illamalogger/index.html
@@ -0,0 +1,1670 @@
[generated MkDocs theme markup omitted; page title: "llama.common.illamalogger - LLamaSharp Documentation"]

    ILLamaLogger

    Namespace: LLama.Common

    public interface ILLamaLogger

    Methods

    Log(String, String, LogLevel)

    Write the log in a customized way.

    void Log(string source, string message, LogLevel level)

    Parameters

    source String The source of the log. It may be a method name or class name.

    message String The message.

    level LogLevel The log level.
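
    Example of a custom implementation (a minimal sketch; LogLevel is the level type named in the signature above):

    ```csharp
    using System;
    using LLama.Common;

    public class ConsoleLogger : ILLamaLogger
    {
        public void Log(string source, string message, LogLevel level)
        {
            // Route everything to stdout; a real logger might filter by level.
            Console.WriteLine($"[{level}] {source}: {message}");
        }
    }
    ```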

\ No newline at end of file
diff --git a/site/xmldocs/llama.common.inferenceparams/index.html b/site/xmldocs/llama.common.inferenceparams/index.html
new file mode 100644
index 00000000..ec2fc11a
--- /dev/null
+++ b/site/xmldocs/llama.common.inferenceparams/index.html
@@ -0,0 +1,2589 @@
[generated MkDocs theme markup omitted; page title: "llama.common.inferenceparams - LLamaSharp Documentation"]

    InferenceParams

    +

    Namespace: LLama.Common

    +
    public class InferenceParams
    +
    +

    Inheritance ObjectInferenceParams

    +

    Properties

    +

    TokensKeep

    +

    number of tokens to keep from initial prompt

    +
    public int TokensKeep { get; set; }
    +
    +

    Property Value

    +

    Int32

    +

    MaxTokens

    +

    how many new tokens to predict (n_predict), set to -1 to inifinitely generate response + until it complete.

    +
    public int MaxTokens { get; set; }
    +
    +

    Property Value

    +

    Int32

    +

    LogitBias

    +

    logit bias for specific tokens

    +
    public Dictionary<int, float> LogitBias { get; set; }
    +
    +

    Property Value

    +

    Dictionary<Int32, Single>

    +

    AntiPrompts

    +

    Sequences where the model will stop generating further tokens.

    +
    public IEnumerable<string> AntiPrompts { get; set; }
    +
    +

    Property Value

    +

    IEnumerable<String>

    +

    PathSession

    +

    path to file for saving/loading model eval state

    +
    public string PathSession { get; set; }
    +
    +

    Property Value

    +

    String

    +

    InputSuffix

    +

    string to suffix user inputs with

    +
    public string InputSuffix { get; set; }
    +
    +

    Property Value

    +

    String

    +

    InputPrefix

    +

    string to prefix user inputs with

    +
    public string InputPrefix { get; set; }
    +
    +

    Property Value

    +

    String

    +

    TopK

    +

    0 or lower to use vocab size

    +
    public int TopK { get; set; }
    +
    +

    Property Value

    +

    Int32

    +

    TopP

    +

    1.0 = disabled

    +
    public float TopP { get; set; }
    +
    +

    Property Value

    +

    Single

    +

    TfsZ

    +

    1.0 = disabled

    +
    public float TfsZ { get; set; }
    +
    +

    Property Value

    +

    Single

    +

    TypicalP

    +

    1.0 = disabled

    +
    public float TypicalP { get; set; }
    +
    +

    Property Value

    +

    Single

    +

    Temperature

    +

    1.0 = disabled

    +
    public float Temperature { get; set; }
    +
    +

    Property Value

    +

    Single

    +

    RepeatPenalty

    +

    1.0 = disabled

    +
    public float RepeatPenalty { get; set; }
    +
    +

    Property Value

    +

    Single

    +

    RepeatLastTokensCount

    +

    last n tokens to penalize (0 = disable penalty, -1 = context size) (repeat_last_n)

    +
    public int RepeatLastTokensCount { get; set; }
    +
    +

    Property Value

    +

    Int32

    +

    FrequencyPenalty

    +

    frequency penalty coefficient + 0.0 = disabled

    +
    public float FrequencyPenalty { get; set; }
    +
    +

    Property Value

    +

    Single

    +

    PresencePenalty

    +

    presence penalty coefficient + 0.0 = disabled

    +
    public float PresencePenalty { get; set; }
    +
    +

    Property Value

    +

    Single

    +

    Mirostat

    +

Mirostat sampling, which uses tokens instead of words; the algorithm is described in the paper https://arxiv.org/abs/2007.14966. 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0

    +
    public MiroStateType Mirostat { get; set; }
    +
    +

    Property Value

    +

    MiroStateType

    +

    MirostatTau

    +

    target entropy

    +
    public float MirostatTau { get; set; }
    +
    +

    Property Value

    +

    Single

    +

    MirostatEta

    +

    learning rate

    +
    public float MirostatEta { get; set; }
    +
    +

    Property Value

    +

    Single

    +

    PenalizeNL

    +

    consider newlines as a repeatable token (penalize_nl)

    +
    public bool PenalizeNL { get; set; }
    +
    +

    Property Value

    +

    Boolean

    +

    Constructors

    +

    InferenceParams()

    +
    public InferenceParams()
    +
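A sketch of typical configuration through the property setters documented above; the numeric values are illustrative, not recommendations:

```csharp
using LLama.Common;

var inferenceParams = new InferenceParams
{
    MaxTokens = 256,                 // -1 would keep generating until completion
    Temperature = 0.8f,
    TopK = 40,
    TopP = 0.95f,
    RepeatPenalty = 1.1f,
    AntiPrompts = new[] { "User:" }  // stop once the model starts a new user turn
};
```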
\ No newline at end of file
diff --git a/site/xmldocs/llama.common.llamadefaultlogger/index.html b/site/xmldocs/llama.common.llamadefaultlogger/index.html
new file mode 100644
index 00000000..1dfb47c5
--- /dev/null
+++ b/site/xmldocs/llama.common.llamadefaultlogger/index.html
@@ -0,0 +1,2090 @@

    LLamaDefaultLogger

    +

    Namespace: LLama.Common

    +

The default logger of LLamaSharp. By default it writes to the console. Use methods of LLamaDefaultLogger.Default to change the behavior. It's recommended to implement ILLamaLogger to customize the behavior.

    +
    public sealed class LLamaDefaultLogger : ILLamaLogger
    +
    +

    Inheritance ObjectLLamaDefaultLogger
    +Implements ILLamaLogger

    +

    Properties

    +

    Default

    +
    public static LLamaDefaultLogger Default { get; }
    +
    +

    Property Value

    +

    LLamaDefaultLogger

    +

    Methods

    +

    EnableConsole()

    +
    public LLamaDefaultLogger EnableConsole()
    +
    +

    Returns

    +

    LLamaDefaultLogger

    +

    DisableConsole()

    +
    public LLamaDefaultLogger DisableConsole()
    +
    +

    Returns

    +

    LLamaDefaultLogger

    +

    EnableFile(String, FileMode)

    +
    public LLamaDefaultLogger EnableFile(string filename, FileMode mode)
    +
    +

    Parameters

    +

    filename String

    +

    mode FileMode

    +

    Returns

    +

    LLamaDefaultLogger

    +

    DisableFile(String)

    +
    public LLamaDefaultLogger DisableFile(string filename)
    +
    +

    Parameters

    +

    filename String

    +

    Returns

    +

    LLamaDefaultLogger

    +

    Log(String, String, LogLevel)

    +
    public void Log(string source, string message, LogLevel level)
    +
    +

    Parameters

    +

    source String

    +

    message String

    +

    level LogLevel

    +

    Info(String)

    +
    public void Info(string message)
    +
    +

    Parameters

    +

    message String

    +

    Warn(String)

    +
    public void Warn(string message)
    +
    +

    Parameters

    +

    message String

    +

    Error(String)

    +
    public void Error(string message)
    +
    +

    Parameters

    +

    message String
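Since EnableConsole and EnableFile return the logger itself, configuration chains naturally; the file name below is a placeholder:

```csharp
using System.IO;
using LLama.Common;

// Log to both the console and an append-only file, then emit a message.
LLamaDefaultLogger.Default
    .EnableConsole()
    .EnableFile("llama.log", FileMode.Append)
    .Info("Logger is ready.");
```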

\ No newline at end of file
diff --git a/site/xmldocs/llama.common.mirostatetype/index.html b/site/xmldocs/llama.common.mirostatetype/index.html
new file mode 100644
index 00000000..e8cc7c36
--- /dev/null
+++ b/site/xmldocs/llama.common.mirostatetype/index.html
@@ -0,0 +1,1625 @@
+ llama.common.mirostatetype - LLamaSharp Documentation
\ No newline at end of file
diff --git a/site/xmldocs/llama.common.modelparams/index.html b/site/xmldocs/llama.common.modelparams/index.html
new file mode 100644
index 00000000..b67bca6e
--- /dev/null
+++ b/site/xmldocs/llama.common.modelparams/index.html
@@ -0,0 +1,2364 @@

    ModelParams

    +

    Namespace: LLama.Common

    +
    public class ModelParams
    +
    +

    Inheritance ObjectModelParams

    +

    Properties

    +

    ContextSize

    +

    Model context size (n_ctx)

    +
    public int ContextSize { get; set; }
    +
    +

    Property Value

    +

    Int32

    +

    GpuLayerCount

    +

    Number of layers to run in VRAM / GPU memory (n_gpu_layers)

    +
    public int GpuLayerCount { get; set; }
    +
    +

    Property Value

    +

    Int32

    +

    Seed

    +

    Seed for the random number generator (seed)

    +
    public int Seed { get; set; }
    +
    +

    Property Value

    +

    Int32

    +

    UseFp16Memory

    +

    Use f16 instead of f32 for memory kv (memory_f16)

    +
    public bool UseFp16Memory { get; set; }
    +
    +

    Property Value

    +

    Boolean

    +

    UseMemorymap

    +

    Use mmap for faster loads (use_mmap)

    +
    public bool UseMemorymap { get; set; }
    +
    +

    Property Value

    +

    Boolean

    +

    UseMemoryLock

    +

    Use mlock to keep model in memory (use_mlock)

    +
    public bool UseMemoryLock { get; set; }
    +
    +

    Property Value

    +

    Boolean

    +

    Perplexity

    +

    Compute perplexity over the prompt (perplexity)

    +
    public bool Perplexity { get; set; }
    +
    +

    Property Value

    +

    Boolean

    +

    ModelPath

    +

    Model path (model)

    +
    public string ModelPath { get; set; }
    +
    +

    Property Value

    +

    String

    +

    LoraAdapter

    +

    lora adapter path (lora_adapter)

    +
    public string LoraAdapter { get; set; }
    +
    +

    Property Value

    +

    String

    +

    LoraBase

    +

    base model path for the lora adapter (lora_base)

    +
    public string LoraBase { get; set; }
    +
    +

    Property Value

    +

    String

    +

    Threads

    +

    Number of threads (-1 = autodetect) (n_threads)

    +
    public int Threads { get; set; }
    +
    +

    Property Value

    +

    Int32

    +

    BatchSize

    +

    batch size for prompt processing (must be >=32 to use BLAS) (n_batch)

    +
    public int BatchSize { get; set; }
    +
    +

    Property Value

    +

    Int32

    +

    ConvertEosToNewLine

    +

Whether to convert eos to newline during inference.

    +
    public bool ConvertEosToNewLine { get; set; }
    +
    +

    Property Value

    +

    Boolean

    +

    EmbeddingMode

    +

Whether to use embedding mode (embedding). Note that if this is set to true, the LLamaModel won't produce text responses anymore.

    +
    public bool EmbeddingMode { get; set; }
    +
    +

    Property Value

    +

    Boolean

    +

    Constructors

    +

    ModelParams(String, Int32, Int32, Int32, Boolean, Boolean, Boolean, Boolean, String, String, Int32, Int32, Boolean, Boolean)

    +
    public ModelParams(string modelPath, int contextSize, int gpuLayerCount, int seed, bool useFp16Memory, bool useMemorymap, bool useMemoryLock, bool perplexity, string loraAdapter, string loraBase, int threads, int batchSize, bool convertEosToNewLine, bool embeddingMode)
    +
    +

    Parameters

    +

    modelPath String
    +The model path.

    +

    contextSize Int32
    +Model context size (n_ctx)

    +

    gpuLayerCount Int32
    +Number of layers to run in VRAM / GPU memory (n_gpu_layers)

    +

    seed Int32
    +Seed for the random number generator (seed)

    +

    useFp16Memory Boolean
    +Whether to use f16 instead of f32 for memory kv (memory_f16)

    +

    useMemorymap Boolean
    +Whether to use mmap for faster loads (use_mmap)

    +

    useMemoryLock Boolean
    +Whether to use mlock to keep model in memory (use_mlock)

    +

    perplexity Boolean
+Whether to compute perplexity over the prompt (perplexity)

    +

    loraAdapter String
    +Lora adapter path (lora_adapter)

    +

    loraBase String
    +Base model path for the lora adapter (lora_base)

    +

    threads Int32
    +Number of threads (-1 = autodetect) (n_threads)

    +

    batchSize Int32
    +Batch size for prompt processing (must be >=32 to use BLAS) (n_batch)

    +

    convertEosToNewLine Boolean
+Whether to convert eos to newline during inference.

    +

    embeddingMode Boolean
+Whether to use embedding mode (embedding). Note that if this is set to true, the LLamaModel won't produce text responses anymore.
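A sketch of typical usage. It assumes the constructor parameters after modelPath carry default values, so the remaining settings can be applied through the property setters documented above (the path is a placeholder):

```csharp
using LLama.Common;

var modelParams = new ModelParams("path/to/ggml-model.bin")
{
    ContextSize = 1024,
    GpuLayerCount = 20,  // offload 20 layers to VRAM
    Seed = 1337
};
```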

\ No newline at end of file
diff --git a/site/xmldocs/llama.exceptions.runtimeerror/index.html b/site/xmldocs/llama.exceptions.runtimeerror/index.html
new file mode 100644
index 00000000..266f0401
--- /dev/null
+++ b/site/xmldocs/llama.exceptions.runtimeerror/index.html
@@ -0,0 +1,2070 @@

    RuntimeError

    +

    Namespace: LLama.Exceptions

    +
    public class RuntimeError : System.Exception, System.Runtime.Serialization.ISerializable
    +
    +

    Inheritance ObjectExceptionRuntimeError
    +Implements ISerializable

    +

    Properties

    +

    TargetSite

    +
    public MethodBase TargetSite { get; }
    +
    +

    Property Value

    +

    MethodBase

    +

    Message

    +
    public string Message { get; }
    +
    +

    Property Value

    +

    String

    +

    Data

    +
    public IDictionary Data { get; }
    +
    +

    Property Value

    +

    IDictionary

    +

    InnerException

    +
    public Exception InnerException { get; }
    +
    +

    Property Value

    +

    Exception

+

HelpLink

+
    public string HelpLink { get; set; }
    +
    +

    Property Value

    +

    String

    +

    Source

    +
    public string Source { get; set; }
    +
    +

    Property Value

    +

    String

    +

    HResult

    +
    public int HResult { get; set; }
    +
    +

    Property Value

    +

    Int32

    +

    StackTrace

    +
    public string StackTrace { get; }
    +
    +

    Property Value

    +

    String

    +

    Constructors

    +

    RuntimeError()

    +
    public RuntimeError()
    +
    +

    RuntimeError(String)

    +
    public RuntimeError(string message)
    +
    +

    Parameters

    +

    message String
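RuntimeError is the exception surfaced by native-layer failures (for example, LLamaModel.LoadState lists it). A fragment, assuming an existing LLamaModel instance named model and a placeholder path:

```csharp
using System;
using LLama.Exceptions;

try
{
    model.LoadState("missing-state.bin"); // `model` is assumed to exist
}
catch (RuntimeError e)
{
    Console.WriteLine($"Failed to restore state: {e.Message}");
}
```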

\ No newline at end of file
diff --git a/site/xmldocs/llama.extensions.dictionaryextension/index.html b/site/xmldocs/llama.extensions.dictionaryextension/index.html
new file mode 100644
index 00000000..e91c2bca
--- /dev/null
+++ b/site/xmldocs/llama.extensions.dictionaryextension/index.html
@@ -0,0 +1,1827 @@

    DictionaryExtension

    +

    Namespace: LLama.Extensions

    +
    public static class DictionaryExtension
    +
    +

    Inheritance ObjectDictionaryExtension

    +

    Methods

    +

    Deconstruct<T1, T2>(KeyValuePair<T1, T2>, T1&, T2&)

    +
    public static void Deconstruct<T1, T2>(KeyValuePair<T1, T2> pair, T1& first, T2& second)
    +
    +

    Type Parameters

    +

    T1

    +

    T2

    +

    Parameters

    +

    pair KeyValuePair<T1, T2>

    +

    first T1&

    +

    second T2&

    +

    Update<T1, T2>(Dictionary<T1, T2>, IDictionary<T1, T2>)

    +
    public static void Update<T1, T2>(Dictionary<T1, T2> dic, IDictionary<T1, T2> other)
    +
    +

    Type Parameters

    +

    T1

    +

    T2

    +

    Parameters

    +

    dic Dictionary<T1, T2>

    +

    other IDictionary<T1, T2>

    +

    GetOrDefault<T1, T2>(Dictionary<T1, T2>, T1, T2)

    +
    public static T2 GetOrDefault<T1, T2>(Dictionary<T1, T2> dic, T1 key, T2 defaultValue)
    +
    +

    Type Parameters

    +

    T1

    +

    T2

    +

    Parameters

    +

    dic Dictionary<T1, T2>

    +

    key T1

    +

    defaultValue T2

    +

    Returns

    +

    T2
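These helpers back-fill dictionary APIs missing on older target frameworks. A sketch; Update is assumed to copy (and overwrite) entries from other into dic:

```csharp
using System;
using System.Collections.Generic;
using LLama.Extensions;

var settings = new Dictionary<string, int> { ["n_ctx"] = 512 };
var overrides = new Dictionary<string, int> { ["n_ctx"] = 1024, ["n_batch"] = 512 };

settings.Update(overrides);                               // assumed: upsert entries
int gpuLayers = settings.GetOrDefault("n_gpu_layers", 0); // 0, since the key is absent

foreach (var (key, value) in settings)                    // tuple form enabled by Deconstruct
    Console.WriteLine($"{key} = {value}");
```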

\ No newline at end of file
diff --git a/site/xmldocs/llama.instructexecutor/index.html b/site/xmldocs/llama.instructexecutor/index.html
new file mode 100644
index 00000000..970f52ef
--- /dev/null
+++ b/site/xmldocs/llama.instructexecutor/index.html
@@ -0,0 +1,2165 @@

    InstructExecutor

    +

    Namespace: LLama

    +

    The LLama executor for instruct mode.

    +
    public class InstructExecutor : StatefulExecutorBase, LLama.Abstractions.ILLamaExecutor
    +
    +

    Inheritance ObjectStatefulExecutorBaseInstructExecutor
    +Implements ILLamaExecutor

    +

    Properties

    +

    Model

    +

The model used by the executor.

    +
    public LLamaModel Model { get; }
    +
    +

    Property Value

    +

    LLamaModel

    +

    Constructors

    +

    InstructExecutor(LLamaModel, String, String)

    +
    public InstructExecutor(LLamaModel model, string instructionPrefix, string instructionSuffix)
    +
    +

    Parameters

    +

    model LLamaModel

    +

    instructionPrefix String

    +

    instructionSuffix String

    +

    Methods

    +

    GetStateData()

    +
    public ExecutorBaseState GetStateData()
    +
    +

    Returns

    +

    ExecutorBaseState

    +

    LoadState(ExecutorBaseState)

    +
    public void LoadState(ExecutorBaseState data)
    +
    +

    Parameters

    +

    data ExecutorBaseState

    +

    SaveState(String)

    +
    public void SaveState(string filename)
    +
    +

    Parameters

    +

    filename String

    +

    LoadState(String)

    +
    public void LoadState(string filename)
    +
    +

    Parameters

    +

    filename String

    +

    GetLoopCondition(InferStateArgs)

    +
    protected bool GetLoopCondition(InferStateArgs args)
    +
    +

    Parameters

    +

    args InferStateArgs

    +

    Returns

    +

    Boolean

    +

    PreprocessInputs(String, InferStateArgs)

    +
    protected void PreprocessInputs(string text, InferStateArgs args)
    +
    +

    Parameters

    +

    text String

    +

    args InferStateArgs

    +

    PostProcess(InferenceParams, InferStateArgs, IEnumerable`1&)

    +
    protected bool PostProcess(InferenceParams inferenceParams, InferStateArgs args, IEnumerable`1& extraOutputs)
    +
    +

    Parameters

    +

    inferenceParams InferenceParams

    +

    args InferStateArgs

    +

    extraOutputs IEnumerable`1&

    +

    Returns

    +

    Boolean

    +

    InferInternal(InferenceParams, InferStateArgs)

    +
    protected void InferInternal(InferenceParams inferenceParams, InferStateArgs args)
    +
    +

    Parameters

    +

    inferenceParams InferenceParams

    +

    args InferStateArgs
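A usage sketch. Infer is not listed on this page (it comes from the base executor), the LLamaModel constructor is assumed to default its encoding/logger arguments, and the paths and alpaca-style prompt template are placeholders:

```csharp
using System;
using LLama;
using LLama.Common;

var model = new LLamaModel(new ModelParams("path/to/ggml-model.bin"));
var executor = new InstructExecutor(model,
    instructionPrefix: "\n\n### Instruction:\n\n",
    instructionSuffix: "\n\n### Response:\n\n");

// Infer streams the response piece by piece.
foreach (var piece in executor.Infer("Write a haiku about llamas.",
             new InferenceParams { MaxTokens = 128 }))
{
    Console.Write(piece);
}
```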

\ No newline at end of file
diff --git a/site/xmldocs/llama.interactiveexecutor/index.html b/site/xmldocs/llama.interactiveexecutor/index.html
new file mode 100644
index 00000000..2dd1c760
--- /dev/null
+++ b/site/xmldocs/llama.interactiveexecutor/index.html
@@ -0,0 +1,2165 @@

    InteractiveExecutor

    +

    Namespace: LLama

    +

    The LLama executor for interactive mode.

    +
    public class InteractiveExecutor : StatefulExecutorBase, LLama.Abstractions.ILLamaExecutor
    +
    +

    Inheritance ObjectStatefulExecutorBaseInteractiveExecutor
    +Implements ILLamaExecutor

    +

    Properties

    +

    Model

    +

The model used by the executor.

    +
    public LLamaModel Model { get; }
    +
    +

    Property Value

    +

    LLamaModel

    +

    Constructors

    +

    InteractiveExecutor(LLamaModel)

    +
    public InteractiveExecutor(LLamaModel model)
    +
    +

    Parameters

    +

    model LLamaModel

    +

    Methods

    +

    GetStateData()

    +
    public ExecutorBaseState GetStateData()
    +
    +

    Returns

    +

    ExecutorBaseState

    +

    LoadState(ExecutorBaseState)

    +
    public void LoadState(ExecutorBaseState data)
    +
    +

    Parameters

    +

    data ExecutorBaseState

    +

    SaveState(String)

    +
    public void SaveState(string filename)
    +
    +

    Parameters

    +

    filename String

    +

    LoadState(String)

    +
    public void LoadState(string filename)
    +
    +

    Parameters

    +

    filename String

    +

    GetLoopCondition(InferStateArgs)

    +

    Define whether to continue the loop to generate responses.

    +
    protected bool GetLoopCondition(InferStateArgs args)
    +
    +

    Parameters

    +

    args InferStateArgs

    +

    Returns

    +

    Boolean

    +

    PreprocessInputs(String, InferStateArgs)

    +
    protected void PreprocessInputs(string text, InferStateArgs args)
    +
    +

    Parameters

    +

    text String

    +

    args InferStateArgs

    +

    PostProcess(InferenceParams, InferStateArgs, IEnumerable`1&)

    +

    Return whether to break the generation.

    +
    protected bool PostProcess(InferenceParams inferenceParams, InferStateArgs args, IEnumerable`1& extraOutputs)
    +
    +

    Parameters

    +

    inferenceParams InferenceParams

    +

    args InferStateArgs

    +

    extraOutputs IEnumerable`1&

    +

    Returns

    +

    Boolean

    +

    InferInternal(InferenceParams, InferStateArgs)

    +
    protected void InferInternal(InferenceParams inferenceParams, InferStateArgs args)
    +
    +

    Parameters

    +

    inferenceParams InferenceParams

    +

    args InferStateArgs
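Interactive mode keeps the conversation state inside the executor, so SaveState/LoadState checkpoint a chat. A sketch with placeholder paths; Infer comes from the base executor and the LLamaModel constructor is assumed to default its remaining arguments:

```csharp
using System;
using LLama;
using LLama.Common;

var executor = new InteractiveExecutor(
    new LLamaModel(new ModelParams("path/to/ggml-model.bin")));

foreach (var piece in executor.Infer("Hello, who are you?",
             new InferenceParams { AntiPrompts = new[] { "User:" } }))
{
    Console.Write(piece);
}

executor.SaveState("chat.st"); // checkpoint the conversation...
executor.LoadState("chat.st"); // ...and restore it later
```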

\ No newline at end of file
diff --git a/site/xmldocs/llama.llamaembedder/index.html b/site/xmldocs/llama.llamaembedder/index.html
new file mode 100644
index 00000000..68a1fb02
--- /dev/null
+++ b/site/xmldocs/llama.llamaembedder/index.html
@@ -0,0 +1,1794 @@

    LLamaEmbedder

    +

    Namespace: LLama

    +

    The embedder for LLama, which supports getting embeddings from text.

    +
    public class LLamaEmbedder : System.IDisposable
    +
    +

    Inheritance ObjectLLamaEmbedder
    +Implements IDisposable

    +

    Constructors

    +

    LLamaEmbedder(ModelParams)

    +
    public LLamaEmbedder(ModelParams params)
    +
    +

    Parameters

    +

    params ModelParams

    +

    Methods

    +

    GetEmbeddings(String, Int32, Boolean, String)

    +

    Get the embeddings of the text.

    +
    public Single[] GetEmbeddings(string text, int threads, bool addBos, string encoding)
    +
    +

    Parameters

    +

    text String

    +

    threads Int32
    +Threads used for inference.

    +

    addBos Boolean
    +Add bos to the text.

    +

    encoding String

    +

    Returns

    +

    Single[]

    +

    Exceptions

    +

    RuntimeError

    +

    Dispose()

    +
    public void Dispose()
    +
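A sketch of extracting an embedding; the model is assumed to be loaded with EmbeddingMode = true (see ModelParams), and the thread count and encoding name are illustrative:

```csharp
using System;
using LLama;
using LLama.Common;

using (var embedder = new LLamaEmbedder(
           new ModelParams("path/to/ggml-model.bin") { EmbeddingMode = true }))
{
    // text, threads, addBos, encoding — matching the signature above.
    float[] embedding = embedder.GetEmbeddings("Hello LLamaSharp!", 4, true, "UTF-8");
    Console.WriteLine($"Embedding dimension: {embedding.Length}");
}
```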
\ No newline at end of file
diff --git a/site/xmldocs/llama.llamamodel/index.html b/site/xmldocs/llama.llamamodel/index.html
new file mode 100644
index 00000000..f527a461
--- /dev/null
+++ b/site/xmldocs/llama.llamamodel/index.html
@@ -0,0 +1,2548 @@

    LLamaModel

    +

    Namespace: LLama

    +

    The abstraction of a LLama model, which holds the context in the native library.

    +
    public class LLamaModel : System.IDisposable
    +
    +

    Inheritance ObjectLLamaModel
    +Implements IDisposable

    +

    Properties

    +

    ContextSize

    +

    The context size.

    +
    public int ContextSize { get; }
    +
    +

    Property Value

    +

    Int32

    +

    Params

    +

    The model params set for this model.

    +
    public ModelParams Params { get; set; }
    +
    +

    Property Value

    +

    ModelParams

    +

    NativeHandle

    +

The native handle, which is passed to the native APIs. Please avoid using it unless you know how the native API is used.

    +
    public SafeLLamaContextHandle NativeHandle { get; }
    +
    +

    Property Value

    +

    SafeLLamaContextHandle

    +

    Encoding

    +

    The encoding set for this model to deal with text input.

    +
    public Encoding Encoding { get; }
    +
    +

    Property Value

    +

    Encoding

    +

    Constructors

    +

    LLamaModel(ModelParams, String, ILLamaLogger)

    +
    public LLamaModel(ModelParams Params, string encoding, ILLamaLogger logger)
    +
    +

    Parameters

    +

    Params ModelParams
    +Model params.

    +

    encoding String
    +Encoding to deal with text input.

    +

    logger ILLamaLogger
    +The logger.

    +

    Methods

    +

    Tokenize(String, Boolean)

    +

    Tokenize a string.

    +
    public IEnumerable<int> Tokenize(string text, bool addBos)
    +
    +

    Parameters

    +

    text String

    +

    addBos Boolean
    +Whether to add a bos to the text.

    +

    Returns

    +

    IEnumerable<Int32>

    +

    DeTokenize(IEnumerable<Int32>)

    +

    Detokenize the tokens to text.

    +
    public string DeTokenize(IEnumerable<int> tokens)
    +
    +

    Parameters

    +

    tokens IEnumerable<Int32>

    +

    Returns

    +

    String

    +

    SaveState(String)

    +

    Save the state to specified path.

    +
    public void SaveState(string filename)
    +
    +

    Parameters

    +

    filename String

    +

    GetStateData()

    +

    Get the state data as a byte array.

    +
    public Byte[] GetStateData()
    +
    +

    Returns

    +

    Byte[]

    +

    LoadState(String)

    +

    Load the state from specified path.

    +
    public void LoadState(string filename)
    +
    +

    Parameters

    +

    filename String

    +

    Exceptions

    +

    RuntimeError

    +

    LoadState(Byte[])

    +

    Load the state from memory.

    +
    public void LoadState(Byte[] stateData)
    +
    +

    Parameters

    +

    stateData Byte[]

    +

    Exceptions

    +

    RuntimeError

    +

    Sample(LLamaTokenDataArray, Single, MiroStateType, Single, Single, Int32, Single, Single, Single)

    +

    Perform the sampling. Please don't use it unless you fully know what it does.

    +
    public int Sample(LLamaTokenDataArray candidates, float temperature, MiroStateType mirostat, float mirostatTau, float mirostatEta, int topK, float topP, float tfsZ, float typicalP)
    +
    +

    Parameters

    +

    candidates LLamaTokenDataArray

    +

    temperature Single

    +

    mirostat MiroStateType

    +

    mirostatTau Single

    +

    mirostatEta Single

    +

    topK Int32

    +

    topP Single

    +

    tfsZ Single

    +

    typicalP Single

    +

    Returns

    +

    Int32

    +

    ApplyPenalty(IEnumerable<Int32>, Dictionary<Int32, Single>, Int32, Single, Single, Single, Boolean)

    +

    Apply the penalty for the tokens. Please don't use it unless you fully know what it does.

    +
    public LLamaTokenDataArray ApplyPenalty(IEnumerable<int> lastTokens, Dictionary<int, float> logitBias, int repeatLastTokensCount, float repeatPenalty, float alphaFrequency, float alphaPresence, bool penalizeNL)
    +
    +

    Parameters

    +

    lastTokens IEnumerable<Int32>

    +

    logitBias Dictionary<Int32, Single>

    +

    repeatLastTokensCount Int32

    +

    repeatPenalty Single

    +

    alphaFrequency Single

    +

    alphaPresence Single

    +

    penalizeNL Boolean

    +

    Returns

    +

    LLamaTokenDataArray

    +

    Eval(Int32[], Int32)

    +
    public int Eval(Int32[] tokens, int pastTokensCount)
    +
    +

    Parameters

    +

    tokens Int32[]

    +

    pastTokensCount Int32

    +

    Returns

    +

    Int32
    +The updated pastTokensCount.

    +

    Exceptions

    +

    RuntimeError

    +

    GenerateResult(IEnumerable<Int32>)

    +
    internal IEnumerable<string> GenerateResult(IEnumerable<int> ids)
    +
    +

    Parameters

    +

    ids IEnumerable<Int32>

    +

    Returns

    +

    IEnumerable<String>

    +

    Dispose()

    +
    public void Dispose()
    +
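A tokenize/detokenize roundtrip as a sketch; the path is a placeholder and the encoding/logger constructor arguments are assumed to have defaults:

```csharp
using System;
using System.Linq;
using LLama;
using LLama.Common;

using (var model = new LLamaModel(new ModelParams("path/to/ggml-model.bin")))
{
    var tokens = model.Tokenize("The quick brown fox", addBos: true).ToArray();
    Console.WriteLine($"Token count: {tokens.Length}");
    Console.WriteLine(model.DeTokenize(tokens)); // should round-trip to the input
}
```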
\ No newline at end of file
diff --git a/site/xmldocs/llama.llamaquantizer/index.html b/site/xmldocs/llama.llamaquantizer/index.html
new file mode 100644
index 00000000..f207d7d7
--- /dev/null
+++ b/site/xmldocs/llama.llamaquantizer/index.html
@@ -0,0 +1,1793 @@

    LLamaQuantizer

    +

    Namespace: LLama

    +

    The quantizer to quantize the model.

    +
    public static class LLamaQuantizer
    +
    +

    Inheritance ObjectLLamaQuantizer

    +

    Methods

    +

    Quantize(String, String, LLamaFtype, Int32)

    +

    Quantize the model.

    +
    public static bool Quantize(string srcFileName, string dstFilename, LLamaFtype ftype, int nthread)
    +
    +

    Parameters

    +

    srcFileName String
    +The model file to be quantized.

    +

    dstFilename String
    +The path to save the quantized model.

    +

    ftype LLamaFtype
    +The type of quantization.

    +

    nthread Int32
+Number of threads to be used during the quantization. By default it's the number of physical cores.

    +

    Returns

    +

    Boolean
    +Whether the quantization is successful.

    +

    Exceptions

    +

    ArgumentException

    +

    Quantize(String, String, String, Int32)

    +

    Quantize the model.

    +
    public static bool Quantize(string srcFileName, string dstFilename, string ftype, int nthread)
    +
    +

    Parameters

    +

    srcFileName String
    +The model file to be quantized.

    +

    dstFilename String
    +The path to save the quantized model.

    +

    ftype String
    +The type of quantization.

    +

    nthread Int32
+Number of threads to be used during the quantization. By default it's the number of physical cores.

    +

    Returns

    +

    Boolean
    +Whether the quantization is successful.

    +

    Exceptions

    +

    ArgumentException
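A sketch using the string overload; the file names are placeholders and "q4_0" is assumed to be an accepted spelling of the quantization type:

```csharp
using System;
using LLama;

bool ok = LLamaQuantizer.Quantize(
    "ggml-model-f16.bin",   // source model to be quantized
    "ggml-model-q4_0.bin",  // destination path
    "q4_0",                 // quantization type, expressed as text
    nthread: 4);
Console.WriteLine(ok ? "quantized" : "failed");
```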

\ No newline at end of file
diff --git a/site/xmldocs/llama.llamatransforms/index.html b/site/xmldocs/llama.llamatransforms/index.html
new file mode 100644
index 00000000..2533de08
--- /dev/null
+++ b/site/xmldocs/llama.llamatransforms/index.html
@@ -0,0 +1,1638 @@

    LLamaTransforms

    +

    Namespace: LLama

    +

    A class that contains all the transforms provided internally by LLama.

    +
    public class LLamaTransforms
    +
    +

    Inheritance ObjectLLamaTransforms

    +

    Constructors

    +

    LLamaTransforms()

    +
    public LLamaTransforms()
    +
\ No newline at end of file
diff --git a/site/xmldocs/llama.native.llamacontextparams/index.html b/site/xmldocs/llama.native.llamacontextparams/index.html
new file mode 100644
index 00000000..c7124dc7
--- /dev/null
+++ b/site/xmldocs/llama.native.llamacontextparams/index.html
@@ -0,0 +1,1818 @@

    LLamaContextParams

    +

    Namespace: LLama.Native

    +
    public struct LLamaContextParams
    +
    +

    Inheritance ObjectValueTypeLLamaContextParams

    +

    Fields

    +

    n_ctx

    +

    text context

    +
    public int n_ctx;
    +
    +

    n_gpu_layers

    +

    number of layers to store in VRAM

    +
    public int n_gpu_layers;
    +
    +

    seed

    +

    RNG seed, -1 for random

    +
    public int seed;
    +
    +

    f16_kv

    +

    use fp16 for KV cache

    +
    public bool f16_kv;
    +
    +

    logits_all

    +

    the llama_eval() call computes all logits, not just the last one

    +
    public bool logits_all;
    +
    +

    vocab_only

    +

    only load the vocabulary, no weights

    +
    public bool vocab_only;
    +
    +

    use_mmap

    +

    use mmap if possible

    +
    public bool use_mmap;
    +
    +

    use_mlock

    +

    force system to keep model in RAM

    +
    public bool use_mlock;
    +
    +

    embedding

    +

    embedding mode only

    +
    public bool embedding;
    +
    +

    progress_callback

    +

    called with a progress value between 0 and 1, pass NULL to disable

    +
    public IntPtr progress_callback;
    +
    +

    progress_callback_user_data

    +

    context pointer passed to the progress callback

    +
    public IntPtr progress_callback_user_data;
    +
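The fields mirror the native llama.cpp struct. A sketch that starts from the library defaults (llama_context_default_params is documented under NativeApi) and adjusts a couple of fields:

```csharp
using LLama.Native;

LLamaContextParams ctxParams = NativeApi.llama_context_default_params();
ctxParams.n_ctx = 1024;    // larger text context
ctxParams.use_mmap = true; // memory-map the weights when possible
```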
\ No newline at end of file
diff --git a/site/xmldocs/llama.native.llamaftype/index.html b/site/xmldocs/llama.native.llamaftype/index.html
new file mode 100644
index 00000000..e15f0107
--- /dev/null
+++ b/site/xmldocs/llama.native.llamaftype/index.html
@@ -0,0 +1,1625 @@
+ llama.native.llamaftype - LLamaSharp Documentation
\ No newline at end of file
diff --git a/site/xmldocs/llama.native.llamatokendata/index.html b/site/xmldocs/llama.native.llamatokendata/index.html
new file mode 100644
index 00000000..d30bc20d
--- /dev/null
+++ b/site/xmldocs/llama.native.llamatokendata/index.html
@@ -0,0 +1,1748 @@

    LLamaTokenData

    +

    Namespace: LLama.Native

    +
    public struct LLamaTokenData
    +
    +

    Inheritance ObjectValueTypeLLamaTokenData

    +

    Fields

    +

    id

    +

    token id

    +
    public int id;
    +
    +

    logit

    +

    log-odds of the token

    +
    public float logit;
    +
    +

    p

    +

    probability of the token

    +
    public float p;
    +
    +

    Constructors

    +

    LLamaTokenData(Int32, Single, Single)

    +
    LLamaTokenData(int id, float logit, float p)
    +
    +

    Parameters

    +

    id Int32

    +

    logit Single

    +

    p Single

\ No newline at end of file
diff --git a/site/xmldocs/llama.native.llamatokendataarray/index.html b/site/xmldocs/llama.native.llamatokendataarray/index.html
new file mode 100644
index 00000000..8b2ecb7e
--- /dev/null
+++ b/site/xmldocs/llama.native.llamatokendataarray/index.html
@@ -0,0 +1,1745 @@

    LLamaTokenDataArray

    +

    Namespace: LLama.Native

    +
    public struct LLamaTokenDataArray
    +
    +

    Inheritance ObjectValueTypeLLamaTokenDataArray

    +

    Fields

    +

    data

    +
    public Memory<LLamaTokenData> data;
    +
    +

    size

    +
    public ulong size;
    +
    +

    sorted

    +
    public bool sorted;
    +
    +

    Constructors

    +

    LLamaTokenDataArray(LLamaTokenData[], UInt64, Boolean)

    +
    LLamaTokenDataArray(LLamaTokenData[] data, ulong size, bool sorted)
    +
    +

    Parameters

    +

    data LLamaTokenData[]

    +

    size UInt64

    +

    sorted Boolean
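A sketch of building a candidate array from raw logits, where the array index doubles as the token id; the logit values are placeholders (real ones would come from NativeApi.llama_get_logits):

```csharp
using System.Linq;
using LLama.Native;

float[] logits = { -1.2f, 0.3f, 2.7f }; // placeholder values
var candidates = logits
    .Select((logit, id) => new LLamaTokenData(id, logit, 0f)) // p is filled in later, e.g. by softmax
    .ToArray();
var array = new LLamaTokenDataArray(candidates, (ulong)candidates.Length, sorted: false);
```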

\ No newline at end of file
diff --git a/site/xmldocs/llama.native.llamatokendataarraynative/index.html b/site/xmldocs/llama.native.llamatokendataarraynative/index.html
new file mode 100644
index 00000000..2e3140da
--- /dev/null
+++ b/site/xmldocs/llama.native.llamatokendataarraynative/index.html
@@ -0,0 +1,1671 @@

    LLamaTokenDataArrayNative

    +

    Namespace: LLama.Native

    +
    public struct LLamaTokenDataArrayNative
    +
    +

    Inheritance ObjectValueTypeLLamaTokenDataArrayNative

    +

    Fields

    +

    data

    +
    public IntPtr data;
    +
    +

    size

    +
    public ulong size;
    +
    +

    sorted

    +
    public bool sorted;
    +
\ No newline at end of file
diff --git a/site/xmldocs/llama.native.nativeapi/index.html b/site/xmldocs/llama.native.nativeapi/index.html
new file mode 100644
index 00000000..6f7bcc3c
--- /dev/null
+++ b/site/xmldocs/llama.native.nativeapi/index.html
@@ -0,0 +1,4125 @@

    NativeApi

    +

    Namespace: LLama.Native

    +
    public class NativeApi
    +
    +

    Inheritance ObjectNativeApi

    +

    Constructors

    +

    NativeApi()

    +
    public NativeApi()
    +
    +

    Methods

    +

    llama_print_timings(SafeLLamaContextHandle)

    +
    public static void llama_print_timings(SafeLLamaContextHandle ctx)
    +
    +

    Parameters

    +

    ctx SafeLLamaContextHandle

    +

    llama_reset_timings(SafeLLamaContextHandle)

    +
    public static void llama_reset_timings(SafeLLamaContextHandle ctx)
    +
    +

    Parameters

    +

    ctx SafeLLamaContextHandle

    +

    llama_print_system_info()

    +

    Print system information

    +
    public static IntPtr llama_print_system_info()
    +
    +

    Returns

    +

    IntPtr

    +

    llama_model_quantize(String, String, LLamaFtype, Int32)

    +
    public static int llama_model_quantize(string fname_inp, string fname_out, LLamaFtype ftype, int nthread)
    +
    +

    Parameters

    +

    fname_inp String

    +

    fname_out String

    +

    ftype LLamaFtype

    +

    nthread Int32

    +

    Returns

    +

    Int32

    +

    llama_sample_repetition_penalty(SafeLLamaContextHandle, IntPtr, Int32[], UInt64, Single)

    +

    Repetition penalty described in CTRL academic paper https://arxiv.org/abs/1909.05858, with negative logit fix.

    +
    public static void llama_sample_repetition_penalty(SafeLLamaContextHandle ctx, IntPtr candidates, Int32[] last_tokens, ulong last_tokens_size, float penalty)
    +
    +

    Parameters

    +

    ctx SafeLLamaContextHandle

    +

    candidates IntPtr
    +Pointer to LLamaTokenDataArray

    +

    last_tokens Int32[]

    +

    last_tokens_size UInt64

    +

    penalty Single

    +

    llama_sample_frequency_and_presence_penalties(SafeLLamaContextHandle, IntPtr, Int32[], UInt64, Single, Single)

    +

    Frequency and presence penalties described in OpenAI API https://platform.openai.com/docs/api-reference/parameter-details.

    +
    public static void llama_sample_frequency_and_presence_penalties(SafeLLamaContextHandle ctx, IntPtr candidates, Int32[] last_tokens, ulong last_tokens_size, float alpha_frequency, float alpha_presence)
    +
    +

    Parameters

    +

    ctx SafeLLamaContextHandle

    +

    candidates IntPtr
    +Pointer to LLamaTokenDataArray

    +

    last_tokens Int32[]

    +

    last_tokens_size UInt64

    +

    alpha_frequency Single

    +

    alpha_presence Single

    +

    llama_sample_softmax(SafeLLamaContextHandle, IntPtr)

    +

Sorts candidate tokens by their logits in descending order and calculates probabilities based on the logits.

    +
    public static void llama_sample_softmax(SafeLLamaContextHandle ctx, IntPtr candidates)
    +
    +

    Parameters

    +

    ctx SafeLLamaContextHandle

    +

    candidates IntPtr
    +Pointer to LLamaTokenDataArray

    +

    llama_sample_top_k(SafeLLamaContextHandle, IntPtr, Int32, UInt64)

    +

    Top-K sampling described in academic paper "The Curious Case of Neural Text Degeneration" https://arxiv.org/abs/1904.09751

    +
    public static void llama_sample_top_k(SafeLLamaContextHandle ctx, IntPtr candidates, int k, ulong min_keep)
    +
    +

    Parameters

    +

    ctx SafeLLamaContextHandle

    +

    candidates IntPtr
    +Pointer to LLamaTokenDataArray

    +

    k Int32

    +

    min_keep UInt64

    +

    llama_sample_top_p(SafeLLamaContextHandle, IntPtr, Single, UInt64)

    +

    Nucleus sampling described in academic paper "The Curious Case of Neural Text Degeneration" https://arxiv.org/abs/1904.09751

    +
    public static void llama_sample_top_p(SafeLLamaContextHandle ctx, IntPtr candidates, float p, ulong min_keep)
    +
    +

    Parameters

    +

    ctx SafeLLamaContextHandle

    +

    candidates IntPtr
    +Pointer to LLamaTokenDataArray

    +

    p Single

    +

    min_keep UInt64

    +

    llama_sample_tail_free(SafeLLamaContextHandle, IntPtr, Single, UInt64)

    +

    Tail Free Sampling described in https://www.trentonbricken.com/Tail-Free-Sampling/.

    +
    public static void llama_sample_tail_free(SafeLLamaContextHandle ctx, IntPtr candidates, float z, ulong min_keep)
    +
    +

    Parameters

    +

    ctx SafeLLamaContextHandle

    +

    candidates IntPtr
    +Pointer to LLamaTokenDataArray

    +

    z Single

    +

    min_keep UInt64

    +

    llama_sample_typical(SafeLLamaContextHandle, IntPtr, Single, UInt64)

    +

    Locally Typical Sampling implementation described in the paper https://arxiv.org/abs/2202.00666.

    +
    public static void llama_sample_typical(SafeLLamaContextHandle ctx, IntPtr candidates, float p, ulong min_keep)
    +
    +

    Parameters

    +

    ctx SafeLLamaContextHandle

    +

    candidates IntPtr
    +Pointer to LLamaTokenDataArray

    +

    p Single

    +

    min_keep UInt64

    +

    llama_sample_temperature(SafeLLamaContextHandle, IntPtr, Single)

    +
    public static void llama_sample_temperature(SafeLLamaContextHandle ctx, IntPtr candidates, float temp)
    +
    +

    Parameters

    +

    ctx SafeLLamaContextHandle

    +

    candidates IntPtr

    +

    temp Single

    +

    llama_sample_token_mirostat(SafeLLamaContextHandle, IntPtr, Single, Single, Int32, Single*)

    +

    Mirostat 1.0 algorithm described in the paper https://arxiv.org/abs/2007.14966. Uses tokens instead of words.

    +
    public static int llama_sample_token_mirostat(SafeLLamaContextHandle ctx, IntPtr candidates, float tau, float eta, int m, Single* mu)
    +
    +

    Parameters

    +

    ctx SafeLLamaContextHandle

    +

    candidates IntPtr
    +A vector of llama_token_data containing the candidate tokens, their probabilities (p), and log-odds (logit) for the current position in the generated text.

    +

    tau Single
    +The target cross-entropy (or surprise) value you want to achieve for the generated text. A higher value corresponds to more surprising or less predictable text, while a lower value corresponds to less surprising or more predictable text.

    +

    eta Single
    +The learning rate used to update mu based on the error between the target and observed surprisal of the sampled word. A larger learning rate will cause mu to be updated more quickly, while a smaller learning rate will result in slower updates.

    +

    m Int32
    +The number of tokens considered in the estimation of s_hat. This is an arbitrary value that is used to calculate s_hat, which in turn helps to calculate the value of k. In the paper, they use m = 100, but you can experiment with different values to see how it affects the performance of the algorithm.

    +

    mu Single*
    +Maximum cross-entropy. This value is initialized to be twice the target cross-entropy (2 * tau) and is updated in the algorithm based on the error between the target and observed surprisal.

    +

    Returns

    +

    Int32

    +

    llama_sample_token_mirostat_v2(SafeLLamaContextHandle, IntPtr, Single, Single, Single*)

    +

    Mirostat 2.0 algorithm described in the paper https://arxiv.org/abs/2007.14966. Uses tokens instead of words.

    +
    public static int llama_sample_token_mirostat_v2(SafeLLamaContextHandle ctx, IntPtr candidates, float tau, float eta, Single* mu)
    +
    +

    Parameters

    +

    ctx SafeLLamaContextHandle

    +

    candidates IntPtr
    +A vector of llama_token_data containing the candidate tokens, their probabilities (p), and log-odds (logit) for the current position in the generated text.

    +

    tau Single
    +The target cross-entropy (or surprise) value you want to achieve for the generated text. A higher value corresponds to more surprising or less predictable text, while a lower value corresponds to less surprising or more predictable text.

    +

    eta Single
    +The learning rate used to update mu based on the error between the target and observed surprisal of the sampled word. A larger learning rate will cause mu to be updated more quickly, while a smaller learning rate will result in slower updates.

    +

    mu Single*
    +Maximum cross-entropy. This value is initialized to be twice the target cross-entropy (2 * tau) and is updated in the algorithm based on the error between the target and observed surprisal.

    +

    Returns

    +

    Int32

    +

    llama_sample_token_greedy(SafeLLamaContextHandle, IntPtr)

    +

    Selects the token with the highest probability.

    +
    public static int llama_sample_token_greedy(SafeLLamaContextHandle ctx, IntPtr candidates)
    +
    +

    Parameters

    +

    ctx SafeLLamaContextHandle

    +

    candidates IntPtr
    +Pointer to LLamaTokenDataArray

    +

    Returns

    +

    Int32

    +

    llama_sample_token(SafeLLamaContextHandle, IntPtr)

    +

    Randomly selects a token from the candidates based on their probabilities.

    +
    public static int llama_sample_token(SafeLLamaContextHandle ctx, IntPtr candidates)
    +
    +

    Parameters

    +

    ctx SafeLLamaContextHandle

    +

    candidates IntPtr
    +Pointer to LLamaTokenDataArray

    +

    Returns

    +

    Int32

    +

    llama_empty_call()

    +
    public static bool llama_empty_call()
    +
    +

    Returns

    +

    Boolean

    +

    llama_context_default_params()

    +
    public static LLamaContextParams llama_context_default_params()
    +
    +

    Returns

    +

    LLamaContextParams

    +

    llama_mmap_supported()

    +
    public static bool llama_mmap_supported()
    +
    +

    Returns

    +

    Boolean

    +

    llama_mlock_supported()

    +
    public static bool llama_mlock_supported()
    +
    +

    Returns

    +

    Boolean

    +

    llama_init_from_file(String, LLamaContextParams)

    +

    Various functions for loading a ggml llama model. + Allocate (almost) all memory needed for the model. + Return NULL on failure

    +
    public static IntPtr llama_init_from_file(string path_model, LLamaContextParams params_)
    +
    +

    Parameters

    +

    path_model String

    +

    params_ LLamaContextParams

    +

    Returns

    +

    IntPtr

    +

    llama_init_backend()

    +

    not great API - very likely to change. + Initialize the llama + ggml backend + Call once at the start of the program

    +
    public static void llama_init_backend()
    +
    +

    llama_free(IntPtr)

    +

    Frees all allocated memory

    +
    public static void llama_free(IntPtr ctx)
    +
    +

    Parameters

    +

    ctx IntPtr

    +

    llama_apply_lora_from_file(SafeLLamaContextHandle, String, String, Int32)

    +

    Apply a LoRA adapter to a loaded model + path_base_model is the path to a higher quality model to use as a base for + the layers modified by the adapter. Can be NULL to use the current loaded model. + The model needs to be reloaded before applying a new adapter, otherwise the adapter + will be applied on top of the previous one

    +
    public static int llama_apply_lora_from_file(SafeLLamaContextHandle ctx, string path_lora, string path_base_model, int n_threads)
    +
    +

    Parameters

    +

    ctx SafeLLamaContextHandle

    +

    path_lora String

    +

    path_base_model String

    +

    n_threads Int32

    +

    Returns

    +

    Int32
    +Returns 0 on success

    +

    llama_get_kv_cache_token_count(SafeLLamaContextHandle)

    +

    Returns the number of tokens in the KV cache

    +
    public static int llama_get_kv_cache_token_count(SafeLLamaContextHandle ctx)
    +
    +

    Parameters

    +

    ctx SafeLLamaContextHandle

    +

    Returns

    +

    Int32

    +

    llama_set_rng_seed(SafeLLamaContextHandle, Int32)

    +

    Sets the current rng seed.

    +
    public static void llama_set_rng_seed(SafeLLamaContextHandle ctx, int seed)
    +
    +

    Parameters

    +

    ctx SafeLLamaContextHandle

    +

    seed Int32

    +

    llama_get_state_size(SafeLLamaContextHandle)

    +

    Returns the maximum size in bytes of the state (rng, logits, embedding + and kv_cache) - will often be smaller after compacting tokens

    +
    public static ulong llama_get_state_size(SafeLLamaContextHandle ctx)
    +
    +

    Parameters

    +

    ctx SafeLLamaContextHandle

    +

    Returns

    +

    UInt64

    +

    llama_copy_state_data(SafeLLamaContextHandle, Byte[])

    +

    Copies the state to the specified destination address. + Destination needs to have allocated enough memory. + Returns the number of bytes copied

    +
    public static ulong llama_copy_state_data(SafeLLamaContextHandle ctx, Byte[] dest)
    +
    +

    Parameters

    +

    ctx SafeLLamaContextHandle

    +

    dest Byte[]

    +

    Returns

    +

    UInt64

    +

    llama_set_state_data(SafeLLamaContextHandle, Byte[])

    +

    Set the state reading from the specified address + Returns the number of bytes read

    +
    public static ulong llama_set_state_data(SafeLLamaContextHandle ctx, Byte[] src)
    +
    +

    Parameters

    +

    ctx SafeLLamaContextHandle

    +

    src Byte[]

    +

    Returns

    +

    UInt64

    +

    llama_load_session_file(SafeLLamaContextHandle, String, Int32[], UInt64, UInt64*)

    +

    Load session file

    +
    public static bool llama_load_session_file(SafeLLamaContextHandle ctx, string path_session, Int32[] tokens_out, ulong n_token_capacity, UInt64* n_token_count_out)
    +
    +

    Parameters

    +

    ctx SafeLLamaContextHandle

    +

    path_session String

    +

    tokens_out Int32[]

    +

    n_token_capacity UInt64

    +

    n_token_count_out UInt64*

    +

    Returns

    +

    Boolean

    +

    llama_save_session_file(SafeLLamaContextHandle, String, Int32[], UInt64)

    +

    Save session file

    +
    public static bool llama_save_session_file(SafeLLamaContextHandle ctx, string path_session, Int32[] tokens, ulong n_token_count)
    +
    +

    Parameters

    +

    ctx SafeLLamaContextHandle

    +

    path_session String

    +

    tokens Int32[]

    +

    n_token_count UInt64

    +

    Returns

    +

    Boolean

    +

    llama_eval(SafeLLamaContextHandle, Int32[], Int32, Int32, Int32)

    +

    Run the llama inference to obtain the logits and probabilities for the next token. + tokens + n_tokens is the provided batch of new tokens to process + n_past is the number of tokens to use from previous eval calls

    +
    public static int llama_eval(SafeLLamaContextHandle ctx, Int32[] tokens, int n_tokens, int n_past, int n_threads)
    +
    +

    Parameters

    +

    ctx SafeLLamaContextHandle

    +

    tokens Int32[]

    +

    n_tokens Int32

    +

    n_past Int32

    +

    n_threads Int32

    +

    Returns

    +

    Int32
    +Returns 0 on success

    +

    llama_eval_with_pointer(SafeLLamaContextHandle, Int32*, Int32, Int32, Int32)

    +
    public static int llama_eval_with_pointer(SafeLLamaContextHandle ctx, Int32* tokens, int n_tokens, int n_past, int n_threads)
    +
    +

    Parameters

    +

    ctx SafeLLamaContextHandle

    +

    tokens Int32*

    +

    n_tokens Int32

    +

    n_past Int32

    +

    n_threads Int32

    +

    Returns

    +

    Int32

    +

    llama_tokenize(SafeLLamaContextHandle, String, Encoding, Int32[], Int32, Boolean)

    +

    Convert the provided text into tokens. + The tokens pointer must be large enough to hold the resulting tokens. + Returns the number of tokens on success, no more than n_max_tokens + Returns a negative number on failure - the number of tokens that would have been returned

    +
    public static int llama_tokenize(SafeLLamaContextHandle ctx, string text, Encoding encoding, Int32[] tokens, int n_max_tokens, bool add_bos)
    +
    +

    Parameters

    +

    ctx SafeLLamaContextHandle

    +

    text String

    +

    encoding Encoding

    +

    tokens Int32[]

    +

    n_max_tokens Int32

    +

    add_bos Boolean

    +

    Returns

    +

    Int32
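
    The negative-on-failure convention enables a two-pass pattern, sketched below with an arbitrary initial buffer size.

    ```csharp
    using System.Text;

    var buf = new int[64];
    int n = NativeApi.llama_tokenize(ctx, "Hello, world!", Encoding.UTF8, buf, buf.Length, true);
    if (n < 0)
    {
        // -n is the number of tokens the text actually requires; retry with a larger buffer.
        buf = new int[-n];
        n = NativeApi.llama_tokenize(ctx, "Hello, world!", Encoding.UTF8, buf, buf.Length, true);
    }
    ```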

    +

    llama_tokenize_native(SafeLLamaContextHandle, SByte[], Int32[], Int32, Boolean)

    +
    public static int llama_tokenize_native(SafeLLamaContextHandle ctx, SByte[] text, Int32[] tokens, int n_max_tokens, bool add_bos)
    +
    +

    Parameters

    +

    ctx SafeLLamaContextHandle

    +

    text SByte[]

    +

    tokens Int32[]

    +

    n_max_tokens Int32

    +

    add_bos Boolean

    +

    Returns

    +

    Int32

    +

    llama_n_vocab(SafeLLamaContextHandle)

    +
    public static int llama_n_vocab(SafeLLamaContextHandle ctx)
    +
    +

    Parameters

    +

    ctx SafeLLamaContextHandle

    +

    Returns

    +

    Int32

    +

    llama_n_ctx(SafeLLamaContextHandle)

    +
    public static int llama_n_ctx(SafeLLamaContextHandle ctx)
    +
    +

    Parameters

    +

    ctx SafeLLamaContextHandle

    +

    Returns

    +

    Int32

    +

    llama_n_embd(SafeLLamaContextHandle)

    +
    public static int llama_n_embd(SafeLLamaContextHandle ctx)
    +
    +

    Parameters

    +

    ctx SafeLLamaContextHandle

    +

    Returns

    +

    Int32

    +

    llama_get_logits(SafeLLamaContextHandle)

    +

    Token logits obtained from the last call to llama_eval(). The logits for the last token are stored in the last row. They can be mutated in order to change the probabilities of the next token. Rows: n_tokens; Cols: n_vocab.

    +
    public static Single* llama_get_logits(SafeLLamaContextHandle ctx)
    +
    +

    Parameters

    +

    ctx SafeLLamaContextHandle

    +

    Returns

    +

    Single*
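
    For illustration, a greedy-sampling sketch over the returned logits; it assumes a single row of logits from the last eval call.

    ```csharp
    unsafe
    {
        int n_vocab = NativeApi.llama_n_vocab(ctx);
        float* logits = NativeApi.llama_get_logits(ctx);

        // Pick the token id with the highest logit (greedy sampling).
        int best = 0;
        for (int i = 1; i < n_vocab; i++)
            if (logits[i] > logits[best])
                best = i;
    }
    ```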

    +

    llama_get_embeddings(SafeLLamaContextHandle)

    +

    Gets the embeddings for the input; shape: [n_embd] (1-dimensional).

    +
    public static Single* llama_get_embeddings(SafeLLamaContextHandle ctx)
    +
    +

    Parameters

    +

    ctx SafeLLamaContextHandle

    +

    Returns

    +

    Single*
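
    A sketch of copying the unmanaged vector into a managed array; it assumes the context was created with embeddings enabled.

    ```csharp
    unsafe
    {
        int n_embd = NativeApi.llama_n_embd(ctx);
        float* src = NativeApi.llama_get_embeddings(ctx);

        // Copy the [n_embd] unmanaged vector into a managed float array.
        var embedding = new float[n_embd];
        System.Runtime.InteropServices.Marshal.Copy((IntPtr)src, embedding, 0, n_embd);
    }
    ```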

    +

    llama_token_to_str(SafeLLamaContextHandle, Int32)

    +

    Converts a token id to its string representation, using the vocabulary in the provided context.

    +
    public static IntPtr llama_token_to_str(SafeLLamaContextHandle ctx, int token)
    +
    +

    Parameters

    +

    ctx SafeLLamaContextHandle

    +

    token Int32

    +

    Returns

    +

    IntPtr
    +Pointer to a string.
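
    A marshalling sketch; treating the pointer as an ANSI string is an assumption, and multi-byte UTF-8 token pieces may need byte-level handling instead.

    ```csharp
    IntPtr ptr = NativeApi.llama_token_to_str(ctx, token);
    string piece = System.Runtime.InteropServices.Marshal.PtrToStringAnsi(ptr);
    ```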

    +

    llama_token_bos()

    +
    public static int llama_token_bos()
    +
    +

    Returns

    +

    Int32

    +

    llama_token_eos()

    +
    public static int llama_token_eos()
    +
    +

    Returns

    +

    Int32

    +

    llama_token_nl()

    +
    public static int llama_token_nl()
    +
    +

    Returns

    +

    Int32
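
    These special-token ids are typically used to steer generation, for example to detect when the model has finished; a sketch, where `sampledToken` is assumed to come from an earlier sampling step.

    ```csharp
    int eos = NativeApi.llama_token_eos();
    bool finished = sampledToken == eos;
    ```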

\ No newline at end of file
diff --git a/site/xmldocs/llama.native.safellamacontexthandle/index.html b/site/xmldocs/llama.native.safellamacontexthandle/index.html
new file mode 100644
index 00000000..19a6f6f8
--- /dev/null
+++ b/site/xmldocs/llama.native.safellamacontexthandle/index.html
@@ -0,0 +1,1855 @@

    llama.native.safellamacontexthandle - LLamaSharp Documentation

    SafeLLamaContextHandle

    +

    Namespace: LLama.Native

    +
    public class SafeLLamaContextHandle : SafeLLamaHandleBase, System.IDisposable
    +
    +

    Inheritance Object → CriticalFinalizerObject → SafeHandle → SafeLLamaHandleBase → SafeLLamaContextHandle
    +Implements IDisposable

    +

    Properties

    +

    IsInvalid

    +
    public bool IsInvalid { get; }
    +
    +

    Property Value

    +

    Boolean

    +

    IsClosed

    +
    public bool IsClosed { get; }
    +
    +

    Property Value

    +

    Boolean

    +

    Constructors

    +

    SafeLLamaContextHandle(IntPtr)

    +
    public SafeLLamaContextHandle(IntPtr handle)
    +
    +

    Parameters

    +

    handle IntPtr

    +

    Methods

    +

    ReleaseHandle()

    +
    protected bool ReleaseHandle()
    +
    +

    Returns

    +

    Boolean

\ No newline at end of file
diff --git a/site/xmldocs/llama.native.safellamahandlebase/index.html b/site/xmldocs/llama.native.safellamahandlebase/index.html
new file mode 100644
index 00000000..ecedee4b
--- /dev/null
+++ b/site/xmldocs/llama.native.safellamahandlebase/index.html
@@ -0,0 +1,1783 @@

    llama.native.safellamahandlebase - LLamaSharp Documentation

    SafeLLamaHandleBase

    +

    Namespace: LLama.Native

    +
    public abstract class SafeLLamaHandleBase : System.Runtime.InteropServices.SafeHandle, System.IDisposable
    +
    +

    Inheritance Object → CriticalFinalizerObject → SafeHandle → SafeLLamaHandleBase
    +Implements IDisposable

    +

    Properties

    +

    IsInvalid

    +
    public bool IsInvalid { get; }
    +
    +

    Property Value

    +

    Boolean

    +

    IsClosed

    +
    public bool IsClosed { get; }
    +
    +

    Property Value

    +

    Boolean

    +

    Methods

    +

    ToString()

    +
    public string ToString()
    +
    +

    Returns

    +

    String

\ No newline at end of file
diff --git a/site/xmldocs/llama.oldversion.chatcompletion/index.html b/site/xmldocs/llama.oldversion.chatcompletion/index.html
new file mode 100644
index 00000000..26469fb0
--- /dev/null
+++ b/site/xmldocs/llama.oldversion.chatcompletion/index.html
@@ -0,0 +1,2363 @@

    llama.oldversion.chatcompletion - LLamaSharp Documentation

    ChatCompletion

    +

    Namespace: LLama.OldVersion

    +
    public class ChatCompletion : System.IEquatable`1[[LLama.OldVersion.ChatCompletion, LLamaSharp, Version=0.4.0.0, Culture=neutral, PublicKeyToken=null]]
    +
    +

    Inheritance Object → ChatCompletion
    +Implements IEquatable<ChatCompletion>

    +

    Properties

    +

    Id

    +
    public string Id { get; set; }
    +
    +

    Property Value

    +

    String

    +

    Object

    +
    public string Object { get; set; }
    +
    +

    Property Value

    +

    String

    +

    Created

    +
    public int Created { get; set; }
    +
    +

    Property Value

    +

    Int32

    +

    Model

    +
    public string Model { get; set; }
    +
    +

    Property Value

    +

    String

    +

    Choices

    +
    public ChatCompletionChoice[] Choices { get; set; }
    +
    +

    Property Value

    +

    ChatCompletionChoice[]

    +

    Usage

    +
    public CompletionUsage Usage { get; set; }
    +
    +

    Property Value

    +

    CompletionUsage

    +

    Constructors

    +

    ChatCompletion(String, String, Int32, String, ChatCompletionChoice[], CompletionUsage)

    +
    public ChatCompletion(string Id, string Object, int Created, string Model, ChatCompletionChoice[] Choices, CompletionUsage Usage)
    +
    +

    Parameters

    +

    Id String

    +

    Object String

    +

    Created Int32

    +

    Model String

    +

    Choices ChatCompletionChoice[]

    +

    Usage CompletionUsage

    +

    Methods

    +

    ToString()

    +
    public string ToString()
    +
    +

    Returns

    +

    String

    +

    PrintMembers(StringBuilder)

    +
    protected bool PrintMembers(StringBuilder builder)
    +
    +

    Parameters

    +

    builder StringBuilder

    +

    Returns

    +

    Boolean

    +

    GetHashCode()

    +
    public int GetHashCode()
    +
    +

    Returns

    +

    Int32

    +

    Equals(Object)

    +
    public bool Equals(object obj)
    +
    +

    Parameters

    +

    obj Object

    +

    Returns

    +

    Boolean

    +

    Equals(ChatCompletion)

    +
    public bool Equals(ChatCompletion other)
    +
    +

    Parameters

    +

    other ChatCompletion

    +

    Returns

    +

    Boolean

    +

    <Clone>$()

    +
    public ChatCompletion <Clone>$()
    +
    +

    Returns

    +

    ChatCompletion

    +

    Deconstruct(String&, String&, Int32&, String&, ChatCompletionChoice[]&, CompletionUsage&)

    +
    public void Deconstruct(String& Id, String& Object, Int32& Created, String& Model, ChatCompletionChoice[]& Choices, CompletionUsage& Usage)
    +
    +

    Parameters

    +

    Id String&

    +

    Object String&

    +

    Created Int32&

    +

    Model String&

    +

    Choices ChatCompletionChoice[]&

    +

    Usage CompletionUsage&
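
    The synthesized `<Clone>$`, `PrintMembers`, and `Deconstruct` members above indicate that this type is a C# record, so instances support value equality and positional deconstruction; a brief sketch, assuming existing `completion` and `other` instances.

    ```csharp
    // Positional deconstruction via the generated Deconstruct method.
    var (id, obj, created, model, choices, usage) = completion;

    // Records compare by value, not by reference.
    bool equal = completion.Equals(other);
    ```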

\ No newline at end of file
diff --git a/site/xmldocs/llama.oldversion.chatcompletionchoice/index.html b/site/xmldocs/llama.oldversion.chatcompletionchoice/index.html
new file mode 100644
index 00000000..b97e9222
--- /dev/null
+++ b/site/xmldocs/llama.oldversion.chatcompletionchoice/index.html
@@ -0,0 +1,2222 @@

    llama.oldversion.chatcompletionchoice - LLamaSharp Documentation

    ChatCompletionChoice

    +

    Namespace: LLama.OldVersion

    +
    public class ChatCompletionChoice : System.IEquatable`1[[LLama.OldVersion.ChatCompletionChoice, LLamaSharp, Version=0.4.0.0, Culture=neutral, PublicKeyToken=null]]
    +
    +

    Inheritance Object → ChatCompletionChoice
    +Implements IEquatable<ChatCompletionChoice>

    +

    Properties

    +

    Index

    +
    public int Index { get; set; }
    +
    +

    Property Value

    +

    Int32

    +

    Message

    +
    public ChatCompletionMessage Message { get; set; }
    +
    +

    Property Value

    +

    ChatCompletionMessage

    +

    FinishReason

    +
    public string FinishReason { get; set; }
    +
    +

    Property Value

    +

    String

    +

    Constructors

    +

    ChatCompletionChoice(Int32, ChatCompletionMessage, String)

    +
    public ChatCompletionChoice(int Index, ChatCompletionMessage Message, string FinishReason)
    +
    +

    Parameters

    +

    Index Int32

    +

    Message ChatCompletionMessage

    +

    FinishReason String

    +

    Methods

    +

    ToString()

    +
    public string ToString()
    +
    +

    Returns

    +

    String

    +

    PrintMembers(StringBuilder)

    +
    protected bool PrintMembers(StringBuilder builder)
    +
    +

    Parameters

    +

    builder StringBuilder

    +

    Returns

    +

    Boolean

    +

    GetHashCode()

    +
    public int GetHashCode()
    +
    +

    Returns

    +

    Int32

    +

    Equals(Object)

    +
    public bool Equals(object obj)
    +
    +

    Parameters

    +

    obj Object

    +

    Returns

    +

    Boolean

    +

    Equals(ChatCompletionChoice)

    +
    public bool Equals(ChatCompletionChoice other)
    +
    +

    Parameters

    +

    other ChatCompletionChoice

    +

    Returns

    +

    Boolean

    +

    <Clone>$()

    +
    public ChatCompletionChoice <Clone>$()
    +
    +

    Returns

    +

    ChatCompletionChoice

    +

    Deconstruct(Int32&, ChatCompletionMessage&, String&)

    +
    public void Deconstruct(Int32& Index, ChatCompletionMessage& Message, String& FinishReason)
    +
    +

    Parameters

    +

    Index Int32&

    +

    Message ChatCompletionMessage&

    +

    FinishReason String&

\ No newline at end of file
diff --git a/site/xmldocs/llama.oldversion.chatcompletionchunk/index.html b/site/xmldocs/llama.oldversion.chatcompletionchunk/index.html
new file mode 100644
index 00000000..82b4ecce
--- /dev/null
+++ b/site/xmldocs/llama.oldversion.chatcompletionchunk/index.html
@@ -0,0 +1,2316 @@

    llama.oldversion.chatcompletionchunk - LLamaSharp Documentation

    ChatCompletionChunk

    +

    Namespace: LLama.OldVersion

    +
    public class ChatCompletionChunk : System.IEquatable`1[[LLama.OldVersion.ChatCompletionChunk, LLamaSharp, Version=0.4.0.0, Culture=neutral, PublicKeyToken=null]]
    +
    +

    Inheritance Object → ChatCompletionChunk
    +Implements IEquatable<ChatCompletionChunk>

    +

    Properties

    +

    Id

    +
    public string Id { get; set; }
    +
    +

    Property Value

    +

    String

    +

    Model

    +
    public string Model { get; set; }
    +
    +

    Property Value

    +

    String

    +

    Object

    +
    public string Object { get; set; }
    +
    +

    Property Value

    +

    String

    +

    Created

    +
    public int Created { get; set; }
    +
    +

    Property Value

    +

    Int32

    +

    Choices

    +
    public ChatCompletionChunkChoice[] Choices { get; set; }
    +
    +

    Property Value

    +

    ChatCompletionChunkChoice[]

    +

    Constructors

    +

    ChatCompletionChunk(String, String, String, Int32, ChatCompletionChunkChoice[])

    +
    public ChatCompletionChunk(string Id, string Model, string Object, int Created, ChatCompletionChunkChoice[] Choices)
    +
    +

    Parameters

    +

    Id String

    +

    Model String

    +

    Object String

    +

    Created Int32

    +

    Choices ChatCompletionChunkChoice[]

    +

    Methods

    +

    ToString()

    +
    public string ToString()
    +
    +

    Returns

    +

    String

    +

    PrintMembers(StringBuilder)

    +
    protected bool PrintMembers(StringBuilder builder)
    +
    +

    Parameters

    +

    builder StringBuilder

    +

    Returns

    +

    Boolean

    +

    GetHashCode()

    +
    public int GetHashCode()
    +
    +

    Returns

    +

    Int32

    +

    Equals(Object)

    +
    public bool Equals(object obj)
    +
    +

    Parameters

    +

    obj Object

    +

    Returns

    +

    Boolean

    +

    Equals(ChatCompletionChunk)

    +
    public bool Equals(ChatCompletionChunk other)
    +
    +

    Parameters

    +

    other ChatCompletionChunk

    +

    Returns

    +

    Boolean

    +

    <Clone>$()

    +
    public ChatCompletionChunk <Clone>$()
    +
    +

    Returns

    +

    ChatCompletionChunk

    +

    Deconstruct(String&, String&, String&, Int32&, ChatCompletionChunkChoice[]&)

    +
    public void Deconstruct(String& Id, String& Model, String& Object, Int32& Created, ChatCompletionChunkChoice[]& Choices)
    +
    +

    Parameters

    +

    Id String&

    +

    Model String&

    +

    Object String&

    +

    Created Int32&

    +

    Choices ChatCompletionChunkChoice[]&

\ No newline at end of file
diff --git a/site/xmldocs/llama.oldversion.chatcompletionchunkchoice/index.html b/site/xmldocs/llama.oldversion.chatcompletionchunkchoice/index.html
new file mode 100644
index 00000000..8bd90302
--- /dev/null
+++ b/site/xmldocs/llama.oldversion.chatcompletionchunkchoice/index.html
@@ -0,0 +1,2222 @@

    llama.oldversion.chatcompletionchunkchoice - LLamaSharp Documentation

    ChatCompletionChunkChoice

    +

    Namespace: LLama.OldVersion

    +
    public class ChatCompletionChunkChoice : System.IEquatable`1[[LLama.OldVersion.ChatCompletionChunkChoice, LLamaSharp, Version=0.4.0.0, Culture=neutral, PublicKeyToken=null]]
    +
    +

    Inheritance Object → ChatCompletionChunkChoice
    +Implements IEquatable<ChatCompletionChunkChoice>

    +

    Properties

    +

    Index

    +
    public int Index { get; set; }
    +
    +

    Property Value

    +

    Int32

    +

    Delta

    +
    public ChatCompletionChunkDelta Delta { get; set; }
    +
    +

    Property Value

    +

    ChatCompletionChunkDelta

    +

    FinishReason

    +
    public string FinishReason { get; set; }
    +
    +

    Property Value

    +

    String

    +

    Constructors

    +

    ChatCompletionChunkChoice(Int32, ChatCompletionChunkDelta, String)

    +
    public ChatCompletionChunkChoice(int Index, ChatCompletionChunkDelta Delta, string FinishReason)
    +
    +

    Parameters

    +

    Index Int32

    +

    Delta ChatCompletionChunkDelta

    +

    FinishReason String

    +

    Methods

    +

    ToString()

    +
    public string ToString()
    +
    +

    Returns

    +

    String

    +

    PrintMembers(StringBuilder)

    +
    protected bool PrintMembers(StringBuilder builder)
    +
    +

    Parameters

    +

    builder StringBuilder

    +

    Returns

    +

    Boolean

    +

    GetHashCode()

    +
    public int GetHashCode()
    +
    +

    Returns

    +

    Int32

    +

    Equals(Object)

    +
    public bool Equals(object obj)
    +
    +

    Parameters

    +

    obj Object

    +

    Returns

    +

    Boolean

    +

    Equals(ChatCompletionChunkChoice)

    +
    public bool Equals(ChatCompletionChunkChoice other)
    +
    +

    Parameters

    +

    other ChatCompletionChunkChoice

    +

    Returns

    +

    Boolean

    +

    <Clone>$()

    +
    public ChatCompletionChunkChoice <Clone>$()
    +
    +

    Returns

    +

    ChatCompletionChunkChoice

    +

    Deconstruct(Int32&, ChatCompletionChunkDelta&, String&)

    +
    public void Deconstruct(Int32& Index, ChatCompletionChunkDelta& Delta, String& FinishReason)
    +
    +

    Parameters

    +

    Index Int32&

    +

    Delta ChatCompletionChunkDelta&

    +

    FinishReason String&

\ No newline at end of file
diff --git a/site/xmldocs/llama.oldversion.chatcompletionchunkdelta/index.html b/site/xmldocs/llama.oldversion.chatcompletionchunkdelta/index.html
new file mode 100644
index 00000000..54534ae1
--- /dev/null
+++ b/site/xmldocs/llama.oldversion.chatcompletionchunkdelta/index.html
@@ -0,0 +1,2175 @@

    llama.oldversion.chatcompletionchunkdelta - LLamaSharp Documentation

    ChatCompletionChunkDelta

    +

    Namespace: LLama.OldVersion

    +
    public class ChatCompletionChunkDelta : System.IEquatable`1[[LLama.OldVersion.ChatCompletionChunkDelta, LLamaSharp, Version=0.4.0.0, Culture=neutral, PublicKeyToken=null]]
    +
    +

    Inheritance Object → ChatCompletionChunkDelta
    +Implements IEquatable<ChatCompletionChunkDelta>

    +

    Properties

    +

    Role

    +
    public string Role { get; set; }
    +
    +

    Property Value

    +

    String

    +

    Content

    +
    public string Content { get; set; }
    +
    +

    Property Value

    +

    String

    +

    Constructors

    +

    ChatCompletionChunkDelta(String, String)

    +
    public ChatCompletionChunkDelta(string Role, string Content)
    +
    +

    Parameters

    +

    Role String

    +

    Content String

    +

    Methods

    +

    ToString()

    +
    public string ToString()
    +
    +

    Returns

    +

    String

    +

    PrintMembers(StringBuilder)

    +
    protected bool PrintMembers(StringBuilder builder)
    +
    +

    Parameters

    +

    builder StringBuilder

    +

    Returns

    +

    Boolean

    +

    GetHashCode()

    +
    public int GetHashCode()
    +
    +

    Returns

    +

    Int32

    +

    Equals(Object)

    +
    public bool Equals(object obj)
    +
    +

    Parameters

    +

    obj Object

    +

    Returns

    +

    Boolean

    +

    Equals(ChatCompletionChunkDelta)

    +
    public bool Equals(ChatCompletionChunkDelta other)
    +
    +

    Parameters

    +

    other ChatCompletionChunkDelta

    +

    Returns

    +

    Boolean

    +

    <Clone>$()

    +
    public ChatCompletionChunkDelta <Clone>$()
    +
    +

    Returns

    +

    ChatCompletionChunkDelta

    +

    Deconstruct(String&, String&)

    +
    public void Deconstruct(String& Role, String& Content)
    +
    +

    Parameters

    +

    Role String&

    +

    Content String&

\ No newline at end of file
diff --git a/site/xmldocs/llama.oldversion.chatcompletionmessage/index.html b/site/xmldocs/llama.oldversion.chatcompletionmessage/index.html
new file mode 100644
index 00000000..6c27041a
--- /dev/null
+++ b/site/xmldocs/llama.oldversion.chatcompletionmessage/index.html
@@ -0,0 +1,2222 @@

    llama.oldversion.chatcompletionmessage - LLamaSharp Documentation

    ChatCompletionMessage

    +

    Namespace: LLama.OldVersion

    +
    public class ChatCompletionMessage : System.IEquatable`1[[LLama.OldVersion.ChatCompletionMessage, LLamaSharp, Version=0.4.0.0, Culture=neutral, PublicKeyToken=null]]
    +
    +

    Inheritance Object → ChatCompletionMessage
    +Implements IEquatable<ChatCompletionMessage>

    +

    Properties

    +

    Role

    +
    public ChatRole Role { get; set; }
    +
    +

    Property Value

    +

    ChatRole

    +

    Content

    +
    public string Content { get; set; }
    +
    +

    Property Value

    +

    String

    +

    Name

    +
    public string Name { get; set; }
    +
    +

    Property Value

    +

    String

    +

    Constructors

    +

    ChatCompletionMessage(ChatRole, String, String)

    +
    public ChatCompletionMessage(ChatRole Role, string Content, string Name)
    +
    +

    Parameters

    +

    Role ChatRole

    +

    Content String

    +

    Name String

    +

    Methods

    +

    ToString()

    +
    public string ToString()
    +
    +

    Returns

    +

    String

    +

    PrintMembers(StringBuilder)

    +
    protected bool PrintMembers(StringBuilder builder)
    +
    +

    Parameters

    +

    builder StringBuilder

    +

    Returns

    +

    Boolean

    +

    GetHashCode()

    +
    public int GetHashCode()
    +
    +

    Returns

    +

    Int32

    +

    Equals(Object)

    +
    public bool Equals(object obj)
    +
    +

    Parameters

    +

    obj Object

    +

    Returns

    +

    Boolean

    +

    Equals(ChatCompletionMessage)

    +
    public bool Equals(ChatCompletionMessage other)
    +
    +

    Parameters

    +

    other ChatCompletionMessage

    +

    Returns

    +

    Boolean

    +

    <Clone>$()

    +
    public ChatCompletionMessage <Clone>$()
    +
    +

    Returns

    +

    ChatCompletionMessage

    +

    Deconstruct(ChatRole&, String&, String&)

    +
    public void Deconstruct(ChatRole& Role, String& Content, String& Name)
    +
    +

    Parameters

    +

    Role ChatRole&

    +

    Content String&

    +

    Name String&

\ No newline at end of file
diff --git a/site/xmldocs/llama.oldversion.chatmessagerecord/index.html b/site/xmldocs/llama.oldversion.chatmessagerecord/index.html
new file mode 100644
index 00000000..3ad94fc1
--- /dev/null
+++ b/site/xmldocs/llama.oldversion.chatmessagerecord/index.html
@@ -0,0 +1,2175 @@

    llama.oldversion.chatmessagerecord - LLamaSharp Documentation

    ChatMessageRecord

    +

    Namespace: LLama.OldVersion

    +
    public class ChatMessageRecord : System.IEquatable`1[[LLama.OldVersion.ChatMessageRecord, LLamaSharp, Version=0.4.0.0, Culture=neutral, PublicKeyToken=null]]
    +
    +

    Inheritance Object → ChatMessageRecord
    +Implements IEquatable<ChatMessageRecord>

    +

    Properties

    +

    Message

    +
    public ChatCompletionMessage Message { get; set; }
    +
    +

    Property Value

    +

    ChatCompletionMessage

    +

    Time

    +
    public DateTime Time { get; set; }
    +
    +

    Property Value

    +

    DateTime

    +

    Constructors

    +

    ChatMessageRecord(ChatCompletionMessage, DateTime)

    +
    public ChatMessageRecord(ChatCompletionMessage Message, DateTime Time)
    +
    +

    Parameters

    +

    Message ChatCompletionMessage

    +

    Time DateTime

    +

    Methods

    +

    ToString()

    +
    public string ToString()
    +
    +

    Returns

    +

    String

    +

    PrintMembers(StringBuilder)

    +
    protected bool PrintMembers(StringBuilder builder)
    +
    +

    Parameters

    +

    builder StringBuilder

    +

    Returns

    +

    Boolean

    +

    GetHashCode()

    +
    public int GetHashCode()
    +
    +

    Returns

    +

    Int32

    +

    Equals(Object)

    +
    public bool Equals(object obj)
    +
    +

    Parameters

    +

    obj Object

    +

    Returns

    +

    Boolean

    +

    Equals(ChatMessageRecord)

    +
    public bool Equals(ChatMessageRecord other)
    +
    +

    Parameters

    +

    other ChatMessageRecord

    +

    Returns

    +

    Boolean

    +

    <Clone>$()

    +
    public ChatMessageRecord <Clone>$()
    +
    +

    Returns

    +

    ChatMessageRecord

    +

    Deconstruct(ChatCompletionMessage&, DateTime&)

    +
    public void Deconstruct(ChatCompletionMessage& Message, DateTime& Time)
    +
    +

    Parameters

    +

    Message ChatCompletionMessage&

    +

    Time DateTime&

\ No newline at end of file
diff --git a/site/xmldocs/llama.oldversion.chatrole/index.html b/site/xmldocs/llama.oldversion.chatrole/index.html
new file mode 100644
index 00000000..3adc3d6e
--- /dev/null
+++ b/site/xmldocs/llama.oldversion.chatrole/index.html
@@ -0,0 +1,1625 @@

    llama.oldversion.chatrole - LLamaSharp Documentation

    ChatRole

    Namespace: LLama.OldVersion

\ No newline at end of file
diff --git a/site/xmldocs/llama.oldversion.chatsession-1/index.html b/site/xmldocs/llama.oldversion.chatsession-1/index.html
new file mode 100644
index 00000000..d59ef185
--- /dev/null
+++ b/site/xmldocs/llama.oldversion.chatsession-1/index.html
@@ -0,0 +1,1957 @@

    llama.oldversion.chatsession-1 - LLamaSharp Documentation

    ChatSession<T>

    +

    Namespace: LLama.OldVersion

    +
    public class ChatSession<T>
    +
    +

    Type Parameters

    +

    T

    +

    Inheritance Object → ChatSession<T>

    +

    Constructors

    +

    ChatSession(T)

    +
    public ChatSession(T model)
    +
    +

    Parameters

    +

    model T

    +

    Methods

    +

    Chat(String, String, String)

    +
    public IEnumerable<string> Chat(string text, string prompt, string encoding)
    +
    +

    Parameters

    +

    text String

    +

    prompt String

    +

    encoding String

    +

    Returns

    +

    IEnumerable<String>

    +

    WithPrompt(String, String)

    +
    public ChatSession<T> WithPrompt(string prompt, string encoding)
    +
    +

    Parameters

    +

    prompt String

    +

    encoding String

    +

    Returns

    +

    ChatSession<T>

    +

    WithPromptFile(String, String)

    +
    public ChatSession<T> WithPromptFile(string promptFilename, string encoding)
    +
    +

    Parameters

    +

    promptFilename String

    +

    encoding String

    +

    Returns

    +

    ChatSession<T>

    +

    WithAntiprompt(String[])

    +

    Sets the antiprompt keywords used to split the chat AI's output.

    +
    public ChatSession<T> WithAntiprompt(String[] antiprompt)
    +
    +

    Parameters

    +

    antiprompt String[]

    +

    Returns

    +

    ChatSession<T>
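
    A hedged usage sketch of this legacy fluent API; the model construction and prompt file are assumptions, and passing null for the `prompt` argument of Chat() (since the prompt was already set) is likewise an assumption.

    ```csharp
    var session = new ChatSession<LLamaModel>(model)
        .WithPromptFile("chat-with-bob.txt", "UTF-8")
        .WithAntiprompt(new[] { "User:" });

    // Chat() streams the reply piece by piece.
    foreach (var piece in session.Chat("Hello, Bob.", null, "UTF-8"))
        Console.Write(piece);
    ```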

\ No newline at end of file
diff --git a/site/xmldocs/llama.oldversion.completion/index.html b/site/xmldocs/llama.oldversion.completion/index.html
new file mode 100644
index 00000000..071197f1
--- /dev/null
+++ b/site/xmldocs/llama.oldversion.completion/index.html
@@ -0,0 +1,2363 @@

    llama.oldversion.completion - LLamaSharp Documentation

    Completion

    +

    Namespace: LLama.OldVersion

    +
    public class Completion : System.IEquatable`1[[LLama.OldVersion.Completion, LLamaSharp, Version=0.4.0.0, Culture=neutral, PublicKeyToken=null]]
    +
    +

    Inheritance Object → Completion
    +Implements IEquatable<Completion>

    +

    Properties

    +

    Id

    +
    public string Id { get; set; }
    +
    +

    Property Value

    +

    String

    +

    Object

    +
    public string Object { get; set; }
    +
    +

    Property Value

    +

    String

    +

    Created

    +
    public int Created { get; set; }
    +
    +

    Property Value

    +

    Int32

    +

    Model

    +
    public string Model { get; set; }
    +
    +

    Property Value

    +

    String

    +

    Choices

    +
    public CompletionChoice[] Choices { get; set; }
    +
    +

    Property Value

    +

    CompletionChoice[]

    +

    Usage

    +
    public CompletionUsage Usage { get; set; }
    +
    +

    Property Value

    +

    CompletionUsage

    +

    Constructors

    +

    Completion(String, String, Int32, String, CompletionChoice[], CompletionUsage)

    +
    public Completion(string Id, string Object, int Created, string Model, CompletionChoice[] Choices, CompletionUsage Usage)
    +
    +

    Parameters

    +

    Id String

    +

    Object String

    +

    Created Int32

    +

    Model String

    +

    Choices CompletionChoice[]

    +

    Usage CompletionUsage

    +

    Methods

    +

    ToString()

    +
    public string ToString()
    +
    +

    Returns

    +

    String

    +

    PrintMembers(StringBuilder)

    +
    protected bool PrintMembers(StringBuilder builder)
    +
    +

    Parameters

    +

    builder StringBuilder

    +

    Returns

    +

    Boolean

    +

    GetHashCode()

    +
    public int GetHashCode()
    +
    +

    Returns

    +

    Int32

    +

    Equals(Object)

    +
    public bool Equals(object obj)
    +
    +

    Parameters

    +

    obj Object

    +

    Returns

    +

    Boolean

    +

    Equals(Completion)

    +
    public bool Equals(Completion other)
    +
    +

    Parameters

    +

    other Completion

    +

    Returns

    +

    Boolean

    +

    <Clone>$()

    +
    public Completion <Clone>$()
    +
    +

    Returns

    +

    Completion

    +

    Deconstruct(String&, String&, Int32&, String&, CompletionChoice[]&, CompletionUsage&)

    +
    public void Deconstruct(String& Id, String& Object, Int32& Created, String& Model, CompletionChoice[]& Choices, CompletionUsage& Usage)
    +
    +

    Parameters

    +

    Id String&

    +

    Object String&

    +

    Created Int32&

    +

    Model String&

    +

    Choices CompletionChoice[]&

    +

    Usage CompletionUsage&

\ No newline at end of file
diff --git a/site/xmldocs/llama.oldversion.completionchoice/index.html b/site/xmldocs/llama.oldversion.completionchoice/index.html
new file mode 100644
index 00000000..11be118c
--- /dev/null
+++ b/site/xmldocs/llama.oldversion.completionchoice/index.html
@@ -0,0 +1,2269 @@

    llama.oldversion.completionchoice - LLamaSharp Documentation

    CompletionChoice

    +

    Namespace: LLama.OldVersion

    +
    public class CompletionChoice : System.IEquatable`1[[LLama.OldVersion.CompletionChoice, LLamaSharp, Version=0.4.0.0, Culture=neutral, PublicKeyToken=null]]
    +
    +

    Inheritance Object → CompletionChoice
    +Implements IEquatable<CompletionChoice>

    +

    Properties

    +

    Text

    +
    public string Text { get; set; }
    +
    +

    Property Value

    +

    String

    +

    Index

    +
    public int Index { get; set; }
    +
    +

    Property Value

    +

    Int32

    +

    Logprobs

    +
    public CompletionLogprobs Logprobs { get; set; }
    +
    +

    Property Value

    +

    CompletionLogprobs

    +

    FinishReason

    +
    public string FinishReason { get; set; }
    +
    +

    Property Value

    +

    String

    +

    Constructors

    +

    CompletionChoice(String, Int32, CompletionLogprobs, String)

    +
    public CompletionChoice(string Text, int Index, CompletionLogprobs Logprobs, string FinishReason)
    +
    +

    Parameters

    +

    Text String

    +

    Index Int32

    +

    Logprobs CompletionLogprobs

    +

    FinishReason String

    +

    Methods

    +

    ToString()

    +
    public string ToString()
    +
    +

    Returns

    +

    String

    +

    PrintMembers(StringBuilder)

    +
    protected bool PrintMembers(StringBuilder builder)
    +
    +

    Parameters

    +

    builder StringBuilder

    +

    Returns

    +

    Boolean

    +

    GetHashCode()

    +
    public int GetHashCode()
    +
    +

    Returns

    +

    Int32

    +

    Equals(Object)

    +
    public bool Equals(object obj)
    +
    +

    Parameters

    +

    obj Object

    +

    Returns

    +

    Boolean

    +

    Equals(CompletionChoice)

    +
    public bool Equals(CompletionChoice other)
    +
    +

    Parameters

    +

    other CompletionChoice

    +

    Returns

    +

    Boolean

    +

    <Clone>$()

    +
    public CompletionChoice <Clone>$()
    +
    +

    Returns

    +

    CompletionChoice

    +

    Deconstruct(String&, Int32&, CompletionLogprobs&, String&)

    +
    public void Deconstruct(String& Text, Int32& Index, CompletionLogprobs& Logprobs, String& FinishReason)
    +
    +

    Parameters

    +

    Text String&

    +

    Index Int32&

    +

    Logprobs CompletionLogprobs&

    +

    FinishReason String&

\ No newline at end of file
diff --git a/site/xmldocs/llama.oldversion.completionchunk/index.html b/site/xmldocs/llama.oldversion.completionchunk/index.html
new file mode 100644
index 00000000..c8516218
--- /dev/null
+++ b/site/xmldocs/llama.oldversion.completionchunk/index.html
@@ -0,0 +1,2316 @@

    llama.oldversion.completionchunk - LLamaSharp Documentation

    CompletionChunk

    +

    Namespace: LLama.OldVersion

    +
    public class CompletionChunk : System.IEquatable`1[[LLama.OldVersion.CompletionChunk, LLamaSharp, Version=0.4.0.0, Culture=neutral, PublicKeyToken=null]]
    +
    +

    Inheritance Object → CompletionChunk
    +Implements IEquatable<CompletionChunk>

    +

    Properties

    +

    Id

    +
    public string Id { get; set; }
    +
    +

    Property Value

    +

    String

    +

    Object

    +
    public string Object { get; set; }
    +
    +

    Property Value

    +

    String

    +

    Created

    +
    public int Created { get; set; }
    +
    +

    Property Value

    +

    Int32

    +

    Model

    +
    public string Model { get; set; }
    +
    +

    Property Value

    +

    String

    +

    Choices

    +
    public CompletionChoice[] Choices { get; set; }
    +
    +

    Property Value

    +

    CompletionChoice[]

    +

    Constructors

    +

    CompletionChunk(String, String, Int32, String, CompletionChoice[])

    +
    public CompletionChunk(string Id, string Object, int Created, string Model, CompletionChoice[] Choices)
    +
    +

    Parameters

    +

    Id String

    +

    Object String

    +

    Created Int32

    +

    Model String

    +

    Choices CompletionChoice[]

    +

    Methods

    +

    ToString()

    +
    public string ToString()
    +
    +

    Returns

    +

    String

    +

    PrintMembers(StringBuilder)

    +
    protected bool PrintMembers(StringBuilder builder)
    +
    +

    Parameters

    +

    builder StringBuilder

    +

    Returns

    +

    Boolean

    +

    GetHashCode()

    +
    public int GetHashCode()
    +
    +

    Returns

    +

    Int32

    +

    Equals(Object)

    +
    public bool Equals(object obj)
    +
    +

    Parameters

    +

    obj Object

    +

    Returns

    +

    Boolean

    +

    Equals(CompletionChunk)

    +
    public bool Equals(CompletionChunk other)
    +
    +

    Parameters

    +

    other CompletionChunk

    +

    Returns

    +

    Boolean

    +

    <Clone>$()

    +
    public CompletionChunk <Clone>$()
    +
    +

    Returns

    +

    CompletionChunk

    +

    Deconstruct(String&, String&, Int32&, String&, CompletionChoice[]&)

    +
    public void Deconstruct(String& Id, String& Object, Int32& Created, String& Model, CompletionChoice[]& Choices)
    +
    +

    Parameters

    +

    Id String&

    +

    Object String&

    +

    Created Int32&

    +

    Model String&

    +

    Choices CompletionChoice[]&

\ No newline at end of file
diff --git a/site/xmldocs/llama.oldversion.completionlogprobs/index.html b/site/xmldocs/llama.oldversion.completionlogprobs/index.html
new file mode 100644
index 00000000..d05ed418
--- /dev/null
+++ b/site/xmldocs/llama.oldversion.completionlogprobs/index.html
@@ -0,0 +1,2269 @@

    llama.oldversion.completionlogprobs - LLamaSharp Documentation

    CompletionLogprobs

    +

    Namespace: LLama.OldVersion

    +
    public class CompletionLogprobs : System.IEquatable`1[[LLama.OldVersion.CompletionLogprobs, LLamaSharp, Version=0.4.0.0, Culture=neutral, PublicKeyToken=null]]
    +
    +

    Inheritance Object → CompletionLogprobs
    +Implements IEquatable<CompletionLogprobs>

    +

    Properties

    +

    TextOffset

    +
    public Int32[] TextOffset { get; set; }
    +
    +

    Property Value

    +

    Int32[]

    +

    TokenLogProbs

    +
    public Single[] TokenLogProbs { get; set; }
    +
    +

    Property Value

    +

    Single[]

    +

    Tokens

    +
    public String[] Tokens { get; set; }
    +
    +

    Property Value

    +

    String[]

    +

    TopLogprobs

    +
    public Dictionary`2[] TopLogprobs { get; set; }
    +
    +

    Property Value

    +

    Dictionary`2[]

    +

    Constructors

    +

    CompletionLogprobs(Int32[], Single[], String[], Dictionary`2[])

    +
    public CompletionLogprobs(Int32[] TextOffset, Single[] TokenLogProbs, String[] Tokens, Dictionary`2[] TopLogprobs)
    +
    +

    Parameters

    +

    TextOffset Int32[]

    +

    TokenLogProbs Single[]

    +

    Tokens String[]

    +

    TopLogprobs Dictionary`2[]

    +

    Methods

    +

    ToString()

    +
    public string ToString()
    +
    +

    Returns

    +

    String

    +

    PrintMembers(StringBuilder)

    +
    protected bool PrintMembers(StringBuilder builder)
    +
    +

    Parameters

    +

    builder StringBuilder

    +

    Returns

    +

    Boolean

    +

    GetHashCode()

    +
    public int GetHashCode()
    +
    +

    Returns

    +

    Int32

    +

    Equals(Object)

    +
    public bool Equals(object obj)
    +
    +

    Parameters

    +

    obj Object

    +

    Returns

    +

    Boolean

    +

    Equals(CompletionLogprobs)

    +
    public bool Equals(CompletionLogprobs other)
    +
    +

    Parameters

    +

    other CompletionLogprobs

    +

    Returns

    +

    Boolean

    +

    <Clone>$()

    +
    public CompletionLogprobs <Clone>$()
    +
    +

    Returns

    +

    CompletionLogprobs

    +

    Deconstruct(Int32[]&, Single[]&, String[]&, Dictionary`2[]&)

    +
    public void Deconstruct(Int32[]& TextOffset, Single[]& TokenLogProbs, String[]& Tokens, Dictionary`2[]& TopLogprobs)
    +
    +

    Parameters

    +

    TextOffset Int32[]&

    +

    TokenLogProbs Single[]&

    +

    Tokens String[]&

    +

    TopLogprobs Dictionary`2[]&

\ No newline at end of file
diff --git a/site/xmldocs/llama.oldversion.completionusage/index.html b/site/xmldocs/llama.oldversion.completionusage/index.html
new file mode 100644
index 00000000..ab3b3c7d
--- /dev/null
+++ b/site/xmldocs/llama.oldversion.completionusage/index.html
@@ -0,0 +1,2222 @@

    llama.oldversion.completionusage - LLamaSharp Documentation

    CompletionUsage

    +

    Namespace: LLama.OldVersion

    +
    public class CompletionUsage : System.IEquatable`1[[LLama.OldVersion.CompletionUsage, LLamaSharp, Version=0.4.0.0, Culture=neutral, PublicKeyToken=null]]
    +
    +

    Inheritance Object → CompletionUsage
    +Implements IEquatable<CompletionUsage>

    +

    Properties

    +

    PromptTokens

    +
    public int PromptTokens { get; set; }
    +
    +

    Property Value

    +

    Int32

    +

    CompletionTokens

    +
    public int CompletionTokens { get; set; }
    +
    +

    Property Value

    +

    Int32

    +

    TotalTokens

    +
    public int TotalTokens { get; set; }
    +
    +

    Property Value

    +

    Int32

    +

    Constructors

    +

    CompletionUsage(Int32, Int32, Int32)

    +
    public CompletionUsage(int PromptTokens, int CompletionTokens, int TotalTokens)
    +
    +

    Parameters

    +

    PromptTokens Int32

    +

    CompletionTokens Int32

    +

    TotalTokens Int32

    +

    Methods

    +

    ToString()

    +
    public string ToString()
    +
    +

    Returns

    +

    String

    +

    PrintMembers(StringBuilder)

    +
    protected bool PrintMembers(StringBuilder builder)
    +
    +

    Parameters

    +

    builder StringBuilder

    +

    Returns

    +

    Boolean

    +

    GetHashCode()

    +
    public int GetHashCode()
    +
    +

    Returns

    +

    Int32

    +

    Equals(Object)

    +
    public bool Equals(object obj)
    +
    +

    Parameters

    +

    obj Object

    +

    Returns

    +

    Boolean

    +

    Equals(CompletionUsage)

    +
    public bool Equals(CompletionUsage other)
    +
    +

    Parameters

    +

    other CompletionUsage

    +

    Returns

    +

    Boolean

    +

    <Clone>$()

    +
    public CompletionUsage <Clone>$()
    +
    +

    Returns

    +

    CompletionUsage

    +

    Deconstruct(Int32&, Int32&, Int32&)

    +
    public void Deconstruct(Int32& PromptTokens, Int32& CompletionTokens, Int32& TotalTokens)
    +
    +

    Parameters

    +

    PromptTokens Int32&

    +

    CompletionTokens Int32&

    +

    TotalTokens Int32&

\ No newline at end of file
diff --git a/site/xmldocs/llama.oldversion.embedding/index.html b/site/xmldocs/llama.oldversion.embedding/index.html
new file mode 100644
index 00000000..f814d0ac
--- /dev/null
+++ b/site/xmldocs/llama.oldversion.embedding/index.html
@@ -0,0 +1,2269 @@

    llama.oldversion.embedding - LLamaSharp Documentation

    Embedding

    +

    Namespace: LLama.OldVersion

    +
    public class Embedding : System.IEquatable`1[[LLama.OldVersion.Embedding, LLamaSharp, Version=0.4.0.0, Culture=neutral, PublicKeyToken=null]]
    +
    +

    Inheritance Object → Embedding
    +Implements IEquatable<Embedding>

    +

    Properties

    +

    Object

    +
    public string Object { get; set; }
    +
    +

    Property Value

    +

    String

    +

    Model

    +
    public string Model { get; set; }
    +
    +

    Property Value

    +

    String

    +

    Data

    +
    public EmbeddingData[] Data { get; set; }
    +
    +

    Property Value

    +

    EmbeddingData[]

    +

    Usage

    +
    public EmbeddingUsage Usage { get; set; }
    +
    +

    Property Value

    +

    EmbeddingUsage

    +

    Constructors

    +

    Embedding(String, String, EmbeddingData[], EmbeddingUsage)

    +
    public Embedding(string Object, string Model, EmbeddingData[] Data, EmbeddingUsage Usage)
    +
    +

    Parameters

    +

    Object String

    +

    Model String

    +

    Data EmbeddingData[]

    +

    Usage EmbeddingUsage

    +

    Methods

    +

    ToString()

    +
    public string ToString()
    +
    +

    Returns

    +

    String

    +

    PrintMembers(StringBuilder)

    +
    protected bool PrintMembers(StringBuilder builder)
    +
    +

    Parameters

    +

    builder StringBuilder

    +

    Returns

    +

    Boolean

    +

    GetHashCode()

    +
    public int GetHashCode()
    +
    +

    Returns

    +

    Int32

    +

    Equals(Object)

    +
    public bool Equals(object obj)
    +
    +

    Parameters

    +

    obj Object

    +

    Returns

    +

    Boolean

    +

    Equals(Embedding)

    +
    public bool Equals(Embedding other)
    +
    +

    Parameters

    +

    other Embedding

    +

    Returns

    +

    Boolean

    +

    <Clone>$()

    +
    public Embedding <Clone>$()
    +
    +

    Returns

    +

    Embedding

    +

    Deconstruct(String&, String&, EmbeddingData[]&, EmbeddingUsage&)

    +
    public void Deconstruct(String& Object, String& Model, EmbeddingData[]& Data, EmbeddingUsage& Usage)
    +
    +

    Parameters

    +

    Object String&

    +

    Model String&

    +

    Data EmbeddingData[]&

    +

    Usage EmbeddingUsage&

\ No newline at end of file
diff --git a/site/xmldocs/llama.oldversion.embeddingdata/index.html b/site/xmldocs/llama.oldversion.embeddingdata/index.html
new file mode 100644
index 00000000..459be909
--- /dev/null
+++ b/site/xmldocs/llama.oldversion.embeddingdata/index.html
@@ -0,0 +1,2222 @@

    llama.oldversion.embeddingdata - LLamaSharp Documentation

    EmbeddingData

    +

    Namespace: LLama.OldVersion

    +
    public class EmbeddingData : System.IEquatable`1[[LLama.OldVersion.EmbeddingData, LLamaSharp, Version=0.4.0.0, Culture=neutral, PublicKeyToken=null]]
    +
    +

    Inheritance Object → EmbeddingData
    +Implements IEquatable<EmbeddingData>

    +

    Properties

    +

    Index

    +
    public int Index { get; set; }
    +
    +

    Property Value

    +

    Int32

    +

    Object

    +
    public string Object { get; set; }
    +
    +

    Property Value

    +

    String

    +

    Embedding

    +
    public Single[] Embedding { get; set; }
    +
    +

    Property Value

    +

    Single[]

    +

    Constructors

    +

    EmbeddingData(Int32, String, Single[])

    +
    public EmbeddingData(int Index, string Object, Single[] Embedding)
    +
    +

    Parameters

    +

    Index Int32

    +

    Object String

    +

    Embedding Single[]

    +

    Methods

    +

    ToString()

    +
    public string ToString()
    +
    +

    Returns

    +

    String

    +

    PrintMembers(StringBuilder)

    +
    protected bool PrintMembers(StringBuilder builder)
    +
    +

    Parameters

    +

    builder StringBuilder

    +

    Returns

    +

    Boolean

    +

    GetHashCode()

    +
    public int GetHashCode()
    +
    +

    Returns

    +

    Int32

    +

    Equals(Object)

    +
    public bool Equals(object obj)
    +
    +

    Parameters

    +

    obj Object

    +

    Returns

    +

    Boolean

    +

    Equals(EmbeddingData)

    +
    public bool Equals(EmbeddingData other)
    +
    +

    Parameters

    +

    other EmbeddingData

    +

    Returns

    +

    Boolean

    +

    <Clone>$()

    +
    public EmbeddingData <Clone>$()
    +
    +

    Returns

    +

    EmbeddingData

    +

    Deconstruct(Int32&, String&, Single[]&)

    +
    public void Deconstruct(Int32& Index, String& Object, Single[]& Embedding)
    +
    +

    Parameters

    +

    Index Int32&

    +

    Object String&

    +

    Embedding Single[]&

\ No newline at end of file
diff --git a/site/xmldocs/llama.oldversion.embeddingusage/index.html b/site/xmldocs/llama.oldversion.embeddingusage/index.html
new file mode 100644
index 00000000..bb663dd3
--- /dev/null
+++ b/site/xmldocs/llama.oldversion.embeddingusage/index.html
@@ -0,0 +1,2175 @@

    llama.oldversion.embeddingusage - LLamaSharp Documentation

    EmbeddingUsage

    +

    Namespace: LLama.OldVersion

    +
    public class EmbeddingUsage : System.IEquatable`1[[LLama.OldVersion.EmbeddingUsage, LLamaSharp, Version=0.4.0.0, Culture=neutral, PublicKeyToken=null]]
    +
    +

    Inheritance Object → EmbeddingUsage
    +Implements IEquatable<EmbeddingUsage>

    +

    Properties

    +

    PromptTokens

    +
    public int PromptTokens { get; set; }
    +
    +

    Property Value

    +

    Int32

    +

    TotalTokens

    +
    public int TotalTokens { get; set; }
    +
    +

    Property Value

    +

    Int32

    +

    Constructors

    +

    EmbeddingUsage(Int32, Int32)

    +
    public EmbeddingUsage(int PromptTokens, int TotalTokens)
    +
    +

    Parameters

    +

    PromptTokens Int32

    +

    TotalTokens Int32

    +

    Methods

    +

    ToString()

    +
    public string ToString()
    +
    +

    Returns

    +

    String

    +

    PrintMembers(StringBuilder)

    +
    protected bool PrintMembers(StringBuilder builder)
    +
    +

    Parameters

    +

    builder StringBuilder

    +

    Returns

    +

    Boolean

    +

    GetHashCode()

    +
    public int GetHashCode()
    +
    +

    Returns

    +

    Int32

    +

    Equals(Object)

    +
    public bool Equals(object obj)
    +
    +

    Parameters

    +

    obj Object

    +

    Returns

    +

    Boolean

    +

    Equals(EmbeddingUsage)

    +
    public bool Equals(EmbeddingUsage other)
    +
    +

    Parameters

    +

    other EmbeddingUsage

    +

    Returns

    +

    Boolean

    +

    <Clone>$()

    +
    public EmbeddingUsage <Clone>$()
    +
    +

    Returns

    +

    EmbeddingUsage

    +

    Deconstruct(Int32&, Int32&)

    +
    public void Deconstruct(Int32& PromptTokens, Int32& TotalTokens)
    +
    +

    Parameters

    +

    PromptTokens Int32&

    +

    TotalTokens Int32&

\ No newline at end of file
diff --git a/site/xmldocs/llama.oldversion.ichatmodel/index.html b/site/xmldocs/llama.oldversion.ichatmodel/index.html
new file mode 100644
index 00000000..8640ca4b
--- /dev/null
+++ b/site/xmldocs/llama.oldversion.ichatmodel/index.html
@@ -0,0 +1,1846 @@

    llama.oldversion.ichatmodel - LLamaSharp Documentation

# IChatModel

Namespace: LLama.OldVersion

```csharp
public interface IChatModel
```

## Properties

### Name

```csharp
public abstract string Name { get; }
```

#### Property Value

String

## Methods

### Chat(String, String, String)

```csharp
IEnumerable<string> Chat(string text, string prompt, string encoding)
```

#### Parameters

`text` String

`prompt` String

`encoding` String

#### Returns

IEnumerable&lt;String&gt;

### InitChatPrompt(String, String)

Initialize a prompt for the chat; subsequent prompts are then produced automatically as the chat proceeds.

```csharp
void InitChatPrompt(string prompt, string encoding)
```

#### Parameters

`prompt` String

`encoding` String

### InitChatAntiprompt(String[])

```csharp
void InitChatAntiprompt(String[] antiprompt)
```

#### Parameters

`antiprompt` String[]
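A minimal consumption sketch for this interface. How the `prompt` argument of `Chat` interacts with `InitChatPrompt` is an assumption here (an empty string is passed once the prompt has been initialized), and the prompt texts are placeholders:

```csharp
using System;
using LLama.OldVersion;

static void RunChat(IChatModel model)
{
    // Set the opening prompt and the antiprompt that marks the user's turn.
    model.InitChatPrompt("Below is a dialogue between a user and an assistant.", "UTF-8");
    model.InitChatAntiprompt(new[] { "User:" });

    // Chat streams the reply piece by piece.
    foreach (var piece in model.Chat("Hello, who are you?", prompt: "", encoding: "UTF-8"))
    {
        Console.Write(piece);
    }
}
```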

\ No newline at end of file
diff --git a/site/xmldocs/llama.oldversion.llamaembedder/index.html b/site/xmldocs/llama.oldversion.llamaembedder/index.html
new file mode 100644
index 00000000..c6ca4025
--- /dev/null
+++ b/site/xmldocs/llama.oldversion.llamaembedder/index.html
@@ -0,0 +1,1774 @@

# LLamaEmbedder

Namespace: LLama.OldVersion

```csharp
public class LLamaEmbedder : System.IDisposable
```

Inheritance Object → LLamaEmbedder
Implements IDisposable

## Constructors

### LLamaEmbedder(LLamaParams)

```csharp
public LLamaEmbedder(LLamaParams params)
```

#### Parameters

`params` LLamaParams

## Methods

### GetEmbeddings(String, Int32, Boolean, String)

```csharp
public Single[] GetEmbeddings(string text, int n_thread, bool add_bos, string encoding)
```

#### Parameters

`text` String

`n_thread` Int32

`add_bos` Boolean

`encoding` String

#### Returns

Single[]

### Dispose()

```csharp
public void Dispose()
```
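A usage sketch. The model path is a placeholder, and it is assumed that `LLamaParams` provides defaults so only the relevant arguments need to be named (embedding mode must be enabled):

```csharp
using System;
using LLama.OldVersion;

var @params = new LLamaParams(model: "path/to/ggml-model.bin", embedding: true);
using var embedder = new LLamaEmbedder(@params);

float[] embedding = embedder.GetEmbeddings(
    "LLamaSharp is a .NET binding of llama.cpp",
    n_thread: 4, add_bos: true, encoding: "UTF-8");

Console.WriteLine($"Embedding dimension: {embedding.Length}");
```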
\ No newline at end of file
diff --git a/site/xmldocs/llama.oldversion.llamamodel/index.html b/site/xmldocs/llama.oldversion.llamamodel/index.html
new file mode 100644
index 00000000..d10da669
--- /dev/null
+++ b/site/xmldocs/llama.oldversion.llamamodel/index.html
@@ -0,0 +1,2637 @@

# LLamaModel

Namespace: LLama.OldVersion

```csharp
public class LLamaModel : IChatModel, System.IDisposable
```

Inheritance Object → LLamaModel
Implements IChatModel, IDisposable

## Properties

### Name

```csharp
public string Name { get; set; }
```

#### Property Value

String

### Verbose

```csharp
public bool Verbose { get; set; }
```

#### Property Value

Boolean

### NativeHandle

```csharp
public SafeLLamaContextHandle NativeHandle { get; }
```

#### Property Value

SafeLLamaContextHandle

## Constructors

### LLamaModel(String, String, Boolean, Int32, Int32, Int32, Int32, Int32, Int32, Int32, Dictionary&lt;Int32, Single&gt;, Int32, Single, Single, Single, Single, Single, Int32, Single, Single, Int32, Single, Single, String, String, String, String, List&lt;String&gt;, String, String, Boolean, Boolean, Boolean, Boolean, Boolean, Boolean, Boolean, Boolean, Boolean, Boolean, Boolean, Boolean, Boolean, Boolean, String)

Please refer to LLamaParams for the meaning of each argument. Be sure to set n_gpu_layers; otherwise 20 layers will be loaded to the GPU by default.

```csharp
public LLamaModel(string model_path, string model_name, bool verbose, int seed, int n_threads, int n_predict, int n_ctx, int n_batch, int n_keep, int n_gpu_layers, Dictionary<int, float> logit_bias, int top_k, float top_p, float tfs_z, float typical_p, float temp, float repeat_penalty, int repeat_last_n, float frequency_penalty, float presence_penalty, int mirostat, float mirostat_tau, float mirostat_eta, string prompt, string path_session, string input_prefix, string input_suffix, List<string> antiprompt, string lora_adapter, string lora_base, bool memory_f16, bool random_prompt, bool use_color, bool interactive, bool embedding, bool interactive_first, bool prompt_cache_all, bool instruct, bool penalize_nl, bool perplexity, bool use_mmap, bool use_mlock, bool mem_test, bool verbose_prompt, string encoding)
```

#### Parameters

| Parameter | Type | Description |
| --- | --- | --- |
| model_path | String | The model file path. |
| model_name | String | The model name. |
| verbose | Boolean | Whether to print details when running the model. |
| seed | Int32 | |
| n_threads | Int32 | |
| n_predict | Int32 | |
| n_ctx | Int32 | |
| n_batch | Int32 | |
| n_keep | Int32 | |
| n_gpu_layers | Int32 | |
| logit_bias | Dictionary&lt;Int32, Single&gt; | |
| top_k | Int32 | |
| top_p | Single | |
| tfs_z | Single | |
| typical_p | Single | |
| temp | Single | |
| repeat_penalty | Single | |
| repeat_last_n | Int32 | |
| frequency_penalty | Single | |
| presence_penalty | Single | |
| mirostat | Int32 | |
| mirostat_tau | Single | |
| mirostat_eta | Single | |
| prompt | String | |
| path_session | String | |
| input_prefix | String | |
| input_suffix | String | |
| antiprompt | List&lt;String&gt; | |
| lora_adapter | String | |
| lora_base | String | |
| memory_f16 | Boolean | |
| random_prompt | Boolean | |
| use_color | Boolean | |
| interactive | Boolean | |
| embedding | Boolean | |
| interactive_first | Boolean | |
| prompt_cache_all | Boolean | |
| instruct | Boolean | |
| penalize_nl | Boolean | |
| perplexity | Boolean | |
| use_mmap | Boolean | |
| use_mlock | Boolean | |
| mem_test | Boolean | |
| verbose_prompt | Boolean | |
| encoding | String | |

### LLamaModel(LLamaParams, String, Boolean, String)

Please refer to LLamaParams for the meaning of each argument. Be sure to set n_gpu_layers; otherwise 20 layers will be loaded to the GPU by default.

```csharp
public LLamaModel(LLamaParams params, string name, bool verbose, string encoding)
```

#### Parameters

`params` LLamaParams
The LLamaModel params.

`name` String
The model name.

`verbose` Boolean
Whether to output detailed info.

`encoding` String

#### Exceptions

RuntimeError
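A construction sketch using this overload, under the assumption that `LLamaParams` supplies defaults for the arguments that are omitted (the model path is a placeholder):

```csharp
using LLama.OldVersion;

var @params = new LLamaParams(
    model: "path/to/ggml-model.bin",
    n_ctx: 512,
    n_gpu_layers: 20,
    interactive: true);

using var model = new LLamaModel(@params, name: "chat-model", verbose: false, encoding: "UTF-8");
```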

## Methods

### WithPrompt(String, String)

Apply a prompt to the model.

```csharp
public LLamaModel WithPrompt(string prompt, string encoding)
```

#### Parameters

`prompt` String

`encoding` String

#### Returns

LLamaModel

#### Exceptions

ArgumentException

### WithPromptFile(String)

Apply the prompt file to the model.

```csharp
public LLamaModel WithPromptFile(string promptFileName)
```

#### Parameters

`promptFileName` String

#### Returns

LLamaModel

### InitChatPrompt(String, String)

```csharp
public void InitChatPrompt(string prompt, string encoding)
```

#### Parameters

`prompt` String

`encoding` String

### InitChatAntiprompt(String[])

```csharp
public void InitChatAntiprompt(String[] antiprompt)
```

#### Parameters

`antiprompt` String[]

### Chat(String, String, String)

Chat with the LLaMa model under interactive mode.

```csharp
public IEnumerable<string> Chat(string text, string prompt, string encoding)
```

#### Parameters

`text` String

`prompt` String

`encoding` String

#### Returns

IEnumerable&lt;String&gt;

#### Exceptions

ArgumentException
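Continuing the construction sketch above, a chat loop might look like this; it is assumed that the `prompt` argument is only needed on the first call, and the texts are placeholders:

```csharp
using System;

var chatModel = model.WithPrompt("Below is a dialogue between a user named Bob and an assistant.", "UTF-8");

foreach (var piece in chatModel.Chat("Hello, Bob!", prompt: "", encoding: "UTF-8"))
{
    Console.Write(piece);  // the reply is streamed piece by piece
}
```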

### SaveState(String)

Save the state to the specified path.

```csharp
public void SaveState(string filename)
```

#### Parameters

`filename` String

### LoadState(String, Boolean)

Load the state from the specified path.

```csharp
public void LoadState(string filename, bool clearPreviousEmbed)
```

#### Parameters

`filename` String

`clearPreviousEmbed` Boolean
Whether to clear previous footprints of this model.

#### Exceptions

RuntimeError
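A save/restore sketch using only the two methods above (the file name is a placeholder):

```csharp
// Persist the current state...
model.SaveState("state.bin");

// ...run more inference, then roll back to the saved state.
model.LoadState("state.bin", clearPreviousEmbed: true);
```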

### Tokenize(String, String)

Tokenize a string.

```csharp
public List<int> Tokenize(string text, string encoding)
```

#### Parameters

`text` String
The utf-8 encoded string to tokenize.

`encoding` String

#### Returns

List&lt;Int32&gt;
A list of tokens.

#### Exceptions

RuntimeError
Thrown if the tokenization fails.

### DeTokenize(IEnumerable&lt;Int32&gt;)

Detokenize a list of tokens.

```csharp
public string DeTokenize(IEnumerable<int> tokens)
```

#### Parameters

`tokens` IEnumerable&lt;Int32&gt;
The list of tokens to detokenize.

#### Returns

String
The detokenized string.
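A tokenize/detokenize round trip, using only the documented signatures:

```csharp
using System;
using System.Collections.Generic;

List<int> tokens = model.Tokenize("The quick brown fox", "UTF-8");
Console.WriteLine($"{tokens.Count} tokens");

string text = model.DeTokenize(tokens);
Console.WriteLine(text);  // should reproduce the original string
```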

### Call(String, String)

Call the model to run inference.

```csharp
public IEnumerable<string> Call(string text, string encoding)
```

#### Parameters

`text` String

`encoding` String

#### Returns

IEnumerable&lt;String&gt;

#### Exceptions

RuntimeError

### Dispose()

```csharp
public void Dispose()
```
\ No newline at end of file
diff --git a/site/xmldocs/llama.oldversion.llamaparams/index.html b/site/xmldocs/llama.oldversion.llamaparams/index.html
new file mode 100644
index 00000000..ab9c4a39
--- /dev/null
+++ b/site/xmldocs/llama.oldversion.llamaparams/index.html
@@ -0,0 +1,2447 @@

# LLamaParams

Namespace: LLama.OldVersion

```csharp
public struct LLamaParams
```

Inheritance Object → ValueType → LLamaParams

## Fields

| Field | Type |
| --- | --- |
| seed | int |
| n_threads | int |
| n_predict | int |
| n_ctx | int |
| n_batch | int |
| n_keep | int |
| n_gpu_layers | int |
| logit_bias | Dictionary&lt;int, float&gt; |
| top_k | int |
| top_p | float |
| tfs_z | float |
| typical_p | float |
| temp | float |
| repeat_penalty | float |
| repeat_last_n | int |
| frequency_penalty | float |
| presence_penalty | float |
| mirostat | int |
| mirostat_tau | float |
| mirostat_eta | float |
| model | string |
| prompt | string |
| path_session | string |
| input_prefix | string |
| input_suffix | string |
| antiprompt | List&lt;string&gt; |
| lora_adapter | string |
| lora_base | string |
| memory_f16 | bool |
| random_prompt | bool |
| use_color | bool |
| interactive | bool |
| prompt_cache_all | bool |
| embedding | bool |
| interactive_first | bool |
| instruct | bool |
| penalize_nl | bool |
| perplexity | bool |
| use_mmap | bool |
| use_mlock | bool |
| mem_test | bool |
| verbose_prompt | bool |

## Constructors

### LLamaParams(Int32, Int32, Int32, Int32, Int32, Int32, Int32, Dictionary&lt;Int32, Single&gt;, Int32, Single, Single, Single, Single, Single, Int32, Single, Single, Int32, Single, Single, String, String, String, String, String, List&lt;String&gt;, String, String, Boolean, Boolean, Boolean, Boolean, Boolean, Boolean, Boolean, Boolean, Boolean, Boolean, Boolean, Boolean, Boolean, Boolean)

```csharp
LLamaParams(int seed, int n_threads, int n_predict, int n_ctx, int n_batch, int n_keep, int n_gpu_layers, Dictionary<int, float> logit_bias, int top_k, float top_p, float tfs_z, float typical_p, float temp, float repeat_penalty, int repeat_last_n, float frequency_penalty, float presence_penalty, int mirostat, float mirostat_tau, float mirostat_eta, string model, string prompt, string path_session, string input_prefix, string input_suffix, List<string> antiprompt, string lora_adapter, string lora_base, bool memory_f16, bool random_prompt, bool use_color, bool interactive, bool prompt_cache_all, bool embedding, bool interactive_first, bool instruct, bool penalize_nl, bool perplexity, bool use_mmap, bool use_mlock, bool mem_test, bool verbose_prompt)
```

#### Parameters

Each constructor argument corresponds to the field of the same name and type listed in the table above.
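A construction sketch; it is assumed here that the constructor supplies defaults for omitted arguments (if it does not, all 42 arguments must be passed positionally), and the model path is a placeholder:

```csharp
using LLama.OldVersion;

var @params = new LLamaParams(
    seed: 1337,
    n_threads: 4,
    n_ctx: 512,
    n_gpu_layers: 20,
    model: "path/to/ggml-model.bin",
    interactive: true);
```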

\ No newline at end of file
diff --git a/site/xmldocs/llama.resettablellamamodel/index.html b/site/xmldocs/llama.resettablellamamodel/index.html
new file mode 100644
index 00000000..8b6e1ebe
--- /dev/null
+++ b/site/xmldocs/llama.resettablellamamodel/index.html
@@ -0,0 +1,1971 @@

# ResettableLLamaModel

Namespace: LLama

A LLamaModel that can be reset to its initial state. Note that using this class consumes about 10% more memory.

```csharp
public class ResettableLLamaModel : LLamaModel, System.IDisposable
```

Inheritance Object → LLamaModel → ResettableLLamaModel
Implements IDisposable

## Properties

### OriginalState

The initial state of the model.

```csharp
public Byte[] OriginalState { get; set; }
```

#### Property Value

Byte[]

### ContextSize

The context size.

```csharp
public int ContextSize { get; }
```

#### Property Value

Int32

### Params

The model params set for this model.

```csharp
public ModelParams Params { get; set; }
```

#### Property Value

ModelParams

### NativeHandle

The native handle, which is passed to the native APIs. Please avoid using it unless you know how the native APIs are used.

```csharp
public SafeLLamaContextHandle NativeHandle { get; }
```

#### Property Value

SafeLLamaContextHandle

### Encoding

The encoding set for this model to deal with text input.

```csharp
public Encoding Encoding { get; }
```

#### Property Value

Encoding

## Constructors

### ResettableLLamaModel(ModelParams, String)

```csharp
public ResettableLLamaModel(ModelParams Params, string encoding)
```

#### Parameters

`Params` ModelParams

`encoding` String

## Methods

### Reset()

Reset the state to the initial state.

```csharp
public void Reset()
```
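A reset sketch. The model path is a placeholder, and the `ModelParams` constructor is assumed to take the path as its first argument, as on its own documentation page:

```csharp
using LLama;
using LLama.Common;

var model = new ResettableLLamaModel(new ModelParams("path/to/ggml-model.bin"), "UTF-8");

// ...run inference that mutates the context state...

model.Reset();  // restores OriginalState, as if the model were freshly loaded
```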
\ No newline at end of file
diff --git a/site/xmldocs/llama.statefulexecutorbase/index.html b/site/xmldocs/llama.statefulexecutorbase/index.html
new file mode 100644
index 00000000..c4f4e1a6
--- /dev/null
+++ b/site/xmldocs/llama.statefulexecutorbase/index.html
@@ -0,0 +1,2416 @@

# StatefulExecutorBase

Namespace: LLama

The base class for stateful LLama executors.

```csharp
public abstract class StatefulExecutorBase : LLama.Abstractions.ILLamaExecutor
```

Inheritance Object → StatefulExecutorBase
Implements ILLamaExecutor

## Properties

### Model

The model used by the executor.

```csharp
public LLamaModel Model { get; }
```

#### Property Value

LLamaModel

## Methods

### WithSessionFile(String)

This API is currently not verified.

```csharp
public StatefulExecutorBase WithSessionFile(string filename)
```

#### Parameters

`filename` String

#### Returns

StatefulExecutorBase

#### Exceptions

ArgumentNullException

RuntimeError

### SaveSessionFile(String)

This API has not been verified yet.

```csharp
public void SaveSessionFile(string filename)
```

#### Parameters

`filename` String

### HandleRunOutOfContext(Int32)

After running out of context, take some tokens from the original prompt and recompute the logits in batches.

```csharp
protected void HandleRunOutOfContext(int tokensToKeep)
```

#### Parameters

`tokensToKeep` Int32

### TryReuseMathingPrefix()

Try to reuse the matching prefix from the session file.

```csharp
protected void TryReuseMathingPrefix()
```

### GetLoopCondition(InferStateArgs)

Decide whether to continue the loop.

```csharp
protected abstract bool GetLoopCondition(InferStateArgs args)
```

#### Parameters

`args` InferStateArgs

#### Returns

Boolean

### PreprocessInputs(String, InferStateArgs)

Preprocess the inputs before the inference.

```csharp
protected abstract void PreprocessInputs(string text, InferStateArgs args)
```

#### Parameters

`text` String

`args` InferStateArgs

### PostProcess(InferenceParams, InferStateArgs, IEnumerable`1&)

Do some post-processing after the inference.

```csharp
protected abstract bool PostProcess(InferenceParams inferenceParams, InferStateArgs args, IEnumerable`1& extraOutputs)
```

#### Parameters

`inferenceParams` InferenceParams

`args` InferStateArgs

`extraOutputs` IEnumerable`1&

#### Returns

Boolean

### InferInternal(InferenceParams, InferStateArgs)

The core inference logic.

```csharp
protected abstract void InferInternal(InferenceParams inferenceParams, InferStateArgs args)
```

#### Parameters

`inferenceParams` InferenceParams

`args` InferStateArgs

### SaveState(String)

Save the current state to a file.

```csharp
public abstract void SaveState(string filename)
```

#### Parameters

`filename` String

### GetStateData()

Get the current state data.

```csharp
public abstract ExecutorBaseState GetStateData()
```

#### Returns

ExecutorBaseState

### LoadState(ExecutorBaseState)

Load the state from data.

```csharp
public abstract void LoadState(ExecutorBaseState data)
```

#### Parameters

`data` ExecutorBaseState

### LoadState(String)

Load the state from a file.

```csharp
public abstract void LoadState(string filename)
```

#### Parameters

`filename` String

### Infer(String, InferenceParams, CancellationToken)

Execute the inference.

```csharp
public IEnumerable<string> Infer(string text, InferenceParams inferenceParams, CancellationToken cancellationToken)
```

#### Parameters

`text` String

`inferenceParams` InferenceParams

`cancellationToken` CancellationToken

#### Returns

IEnumerable&lt;String&gt;

### InferAsync(String, InferenceParams, CancellationToken)

Execute the inference asynchronously.

```csharp
public IAsyncEnumerable<string> InferAsync(string text, InferenceParams inferenceParams, CancellationToken cancellationToken)
```

#### Parameters

`text` String

`inferenceParams` InferenceParams

`cancellationToken` CancellationToken

#### Returns

IAsyncEnumerable&lt;String&gt;
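`InteractiveExecutor` and `InstructExecutor` are the concrete stateful executors in this release. A streaming sketch; the model path and parameter values are placeholders, and the `ModelParams` and `InferenceParams` members used here are assumed to match their own documentation pages:

```csharp
using System;
using System.Collections.Generic;
using System.Threading;
using LLama;
using LLama.Common;

var model = new LLamaModel(new ModelParams("path/to/ggml-model.bin"));
StatefulExecutorBase executor = new InteractiveExecutor(model);

var inferenceParams = new InferenceParams
{
    Temperature = 0.8f,
    AntiPrompts = new List<string> { "User:" }
};

await foreach (var piece in executor.InferAsync("User: Hello!\nBot:", inferenceParams, CancellationToken.None))
{
    Console.Write(piece);  // the executor keeps its state between calls
}
```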

\ No newline at end of file
diff --git a/site/xmldocs/llama.statelessexecutor/index.html b/site/xmldocs/llama.statelessexecutor/index.html
new file mode 100644
index 00000000..b59adc79
--- /dev/null
+++ b/site/xmldocs/llama.statelessexecutor/index.html
@@ -0,0 +1,1893 @@

# StatelessExecutor

Namespace: LLama

This executor treats each input as a one-off job: previous inputs do not affect the response to the current input.

```csharp
public class StatelessExecutor : LLama.Abstractions.ILLamaExecutor
```

Inheritance Object → StatelessExecutor
Implements ILLamaExecutor

## Properties

### Model

The model used by the executor when running the inference.

```csharp
public LLamaModel Model { get; }
```

#### Property Value

LLamaModel

## Constructors

### StatelessExecutor(LLamaModel)

```csharp
public StatelessExecutor(LLamaModel model)
```

#### Parameters

`model` LLamaModel
The LLama model.

## Methods

### Infer(String, InferenceParams, CancellationToken)

```csharp
public IEnumerable<string> Infer(string text, InferenceParams inferenceParams, CancellationToken cancellationToken)
```

#### Parameters

`text` String

`inferenceParams` InferenceParams

`cancellationToken` CancellationToken

#### Returns

IEnumerable&lt;String&gt;

### InferAsync(String, InferenceParams, CancellationToken)

```csharp
public IAsyncEnumerable<string> InferAsync(string text, InferenceParams inferenceParams, CancellationToken token)
```

#### Parameters

`text` String

`inferenceParams` InferenceParams

`token` CancellationToken

#### Returns

IAsyncEnumerable&lt;String&gt;
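A one-shot inference sketch; the model path is a placeholder, and `MaxTokens` is assumed to be an `InferenceParams` member as documented on its own page:

```csharp
using System;
using System.Threading;
using LLama;
using LLama.Common;

var model = new LLamaModel(new ModelParams("path/to/ggml-model.bin"));
var executor = new StatelessExecutor(model);

// Each call is independent: nothing carries over between prompts.
var inferenceParams = new InferenceParams { MaxTokens = 32 };
foreach (var piece in executor.Infer("Q: What is the capital of France? A:", inferenceParams, CancellationToken.None))
{
    Console.Write(piece);
}
```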

\ No newline at end of file