diff --git a/gpt4all-bindings/csharp/.editorconfig b/gpt4all-bindings/csharp/.editorconfig new file mode 100644 index 00000000..e94ba6f9 --- /dev/null +++ b/gpt4all-bindings/csharp/.editorconfig @@ -0,0 +1,346 @@ +# EditorConfig is awesome: https://EditorConfig.org + +# top-most EditorConfig file +root = true + +# Don't use tabs for indentation. +[*] +indent_style = space +# (Please don't specify an indent_size here; that has too many unintended consequences.) + +# Code files +[*.{cs,csx,vb,vbx}] +indent_size = 4 +insert_final_newline = true +charset = utf-8-bom + +# XML project files +[*.{csproj,vbproj,vcxproj,vcxproj.filters,proj,projitems,shproj}] +indent_size = 4 + +# XML config files +[*.{props,targets,ruleset,config,nuspec,resx,vsixmanifest,vsct}] +indent_size = 2 + +# JSON files +[*.json] +indent_size = 2 + +# Powershell files +[*.ps1] +indent_size = 2 + +# Shell script files +[*.sh] +end_of_line = lf +indent_size = 2 +insert_final_newline = true + +# Dotnet code style settings: +[*.{cs,vb}] + +# IDE0055: Fix formatting +dotnet_diagnostic.IDE0055.severity = error + +# Sort using and Import directives with System.* appearing first +dotnet_sort_system_directives_first = true +dotnet_separate_import_directive_groups = false + +# Avoid "this." and "Me." 
if not necessary +dotnet_style_qualification_for_field = false:suggestion +dotnet_style_qualification_for_property = false:suggestion +dotnet_style_qualification_for_method = false:suggestion +dotnet_style_qualification_for_event = false:suggestion + +# Use language keywords instead of framework type names for type references +dotnet_style_predefined_type_for_locals_parameters_members = true:warning +dotnet_style_predefined_type_for_member_access = true:warning + +# Suggest more modern language features when available +dotnet_style_object_initializer = true:suggestion +dotnet_style_collection_initializer = true:suggestion +dotnet_style_coalesce_expression = true:suggestion +dotnet_style_null_propagation = true:suggestion +dotnet_style_explicit_tuple_names = true:suggestion + +# Whitespace options +dotnet_style_allow_multiple_blank_lines_experimental = false + +# Private fields are camelCase with '_' prefix +dotnet_naming_rule.private_members_with_underscore.symbols = private_fields +dotnet_naming_rule.private_members_with_underscore.style = prefix_underscore +dotnet_naming_rule.private_members_with_underscore.severity = error +dotnet_naming_symbols.private_fields.applicable_kinds = field +dotnet_naming_symbols.private_fields.applicable_accessibilities = private +dotnet_naming_style.prefix_underscore.capitalization = camel_case +dotnet_naming_style.prefix_underscore.required_prefix = _ + +# Non-private static fields are PascalCase +dotnet_naming_rule.non_private_static_fields_should_be_pascal_case.severity = suggestion +dotnet_naming_rule.non_private_static_fields_should_be_pascal_case.symbols = non_private_static_fields +dotnet_naming_rule.non_private_static_fields_should_be_pascal_case.style = non_private_static_field_style + +dotnet_naming_symbols.non_private_static_fields.applicable_kinds = field +dotnet_naming_symbols.non_private_static_fields.applicable_accessibilities = public, protected, internal, protected_internal, private_protected 
+dotnet_naming_symbols.non_private_static_fields.required_modifiers = static + +dotnet_naming_style.non_private_static_field_style.capitalization = pascal_case + +# Non-private readonly fields are PascalCase +dotnet_naming_rule.non_private_readonly_fields_should_be_pascal_case.severity = suggestion +dotnet_naming_rule.non_private_readonly_fields_should_be_pascal_case.symbols = non_private_readonly_fields +dotnet_naming_rule.non_private_readonly_fields_should_be_pascal_case.style = non_private_static_field_style + +dotnet_naming_symbols.non_private_readonly_fields.applicable_kinds = field +dotnet_naming_symbols.non_private_readonly_fields.applicable_accessibilities = public, protected, internal, protected_internal, private_protected +dotnet_naming_symbols.non_private_readonly_fields.required_modifiers = readonly + +dotnet_naming_style.non_private_readonly_field_style.capitalization = pascal_case + +# Constants are PascalCase +dotnet_naming_rule.constants_should_be_pascal_case.severity = suggestion +dotnet_naming_rule.constants_should_be_pascal_case.symbols = constants +dotnet_naming_rule.constants_should_be_pascal_case.style = non_private_static_field_style + +dotnet_naming_symbols.constants.applicable_kinds = field, local +dotnet_naming_symbols.constants.required_modifiers = const + +dotnet_naming_style.constant_style.capitalization = pascal_case + +# Static fields are camelCase and start with s_ +dotnet_naming_rule.static_fields_should_be_camel_case.severity = none +dotnet_naming_rule.static_fields_should_be_camel_case.symbols = static_fields +dotnet_naming_rule.static_fields_should_be_camel_case.style = static_field_style + +dotnet_naming_symbols.static_fields.applicable_kinds = field +dotnet_naming_symbols.static_fields.required_modifiers = static + +dotnet_naming_style.static_field_style.capitalization = camel_case +dotnet_naming_style.static_field_style.required_prefix = s_ + +# Instance fields are camelCase and start with _ 
+dotnet_naming_rule.instance_fields_should_be_camel_case.severity = none +dotnet_naming_rule.instance_fields_should_be_camel_case.symbols = instance_fields +dotnet_naming_rule.instance_fields_should_be_camel_case.style = instance_field_style + +dotnet_naming_symbols.instance_fields.applicable_kinds = field + +dotnet_naming_style.instance_field_style.capitalization = camel_case +dotnet_naming_style.instance_field_style.required_prefix = _ + +# Locals and parameters are camelCase +dotnet_naming_rule.locals_should_be_camel_case.severity = suggestion +dotnet_naming_rule.locals_should_be_camel_case.symbols = locals_and_parameters +dotnet_naming_rule.locals_should_be_camel_case.style = camel_case_style + +dotnet_naming_symbols.locals_and_parameters.applicable_kinds = parameter, local + +dotnet_naming_style.camel_case_style.capitalization = camel_case + +# Local functions are PascalCase +dotnet_naming_rule.local_functions_should_be_pascal_case.severity = suggestion +dotnet_naming_rule.local_functions_should_be_pascal_case.symbols = local_functions +dotnet_naming_rule.local_functions_should_be_pascal_case.style = non_private_static_field_style + +dotnet_naming_symbols.local_functions.applicable_kinds = local_function + +dotnet_naming_style.local_function_style.capitalization = pascal_case + +# By default, name items with PascalCase +dotnet_naming_rule.members_should_be_pascal_case.severity = suggestion +dotnet_naming_rule.members_should_be_pascal_case.symbols = all_members +dotnet_naming_rule.members_should_be_pascal_case.style = non_private_static_field_style + +dotnet_naming_symbols.all_members.applicable_kinds = * + +dotnet_naming_style.pascal_case_style.capitalization = pascal_case + +# error RS2008: Enable analyzer release tracking for the analyzer project containing rule '{0}' +dotnet_diagnostic.RS2008.severity = none + +# IDE0073: File header +dotnet_diagnostic.IDE0073.severity = none +#file_header_template = Licensed to the .NET Foundation under one or more 
agreements.\nThe .NET Foundation licenses this file to you under the MIT license.\nSee the LICENSE file in the project root for more information. + +# IDE0035: Remove unreachable code +dotnet_diagnostic.IDE0035.severity = warning + +# IDE0036: Order modifiers +dotnet_diagnostic.IDE0036.severity = warning + +# IDE0043: Format string contains invalid placeholder +dotnet_diagnostic.IDE0043.severity = warning + +# IDE0044: Make field readonly +dotnet_diagnostic.IDE0044.severity = warning + +# IDE1006: Naming rule violation +#dotnet_diagnostic.IDE1006.severity = none + +# RS0016: Only enable if API files are present +dotnet_public_api_analyzer.require_api_files = true +dotnet_style_operator_placement_when_wrapping = beginning_of_line +tab_width = 4 +end_of_line = crlf +dotnet_style_prefer_is_null_check_over_reference_equality_method = true:suggestion +dotnet_style_prefer_auto_properties = true:silent +dotnet_style_prefer_simplified_boolean_expressions = true:suggestion +dotnet_style_prefer_conditional_expression_over_assignment = true:silent +dotnet_style_prefer_conditional_expression_over_return = true:silent +dotnet_style_prefer_inferred_tuple_names = true:suggestion +dotnet_style_prefer_inferred_anonymous_type_member_names = true:suggestion +dotnet_style_prefer_compound_assignment = true:suggestion +dotnet_style_prefer_simplified_interpolation = true:suggestion +dotnet_style_namespace_match_folder = true:suggestion + +# CSharp code style settings: +[*.cs] +# Newline settings +csharp_new_line_before_open_brace = all +csharp_new_line_before_else = true +csharp_new_line_before_catch = true +csharp_new_line_before_finally = true +csharp_new_line_before_members_in_object_initializers = true +csharp_new_line_before_members_in_anonymous_types = true +csharp_new_line_between_query_expression_clauses = true + +# Indentation preferences +csharp_indent_block_contents = true +csharp_indent_braces = false +csharp_indent_case_contents = true +csharp_indent_case_contents_when_block 
= true +csharp_indent_switch_labels = true +csharp_indent_labels = flush_left + +# Whitespace options +csharp_style_allow_embedded_statements_on_same_line_experimental = false +csharp_style_allow_blank_lines_between_consecutive_braces_experimental = false +csharp_style_allow_blank_line_after_colon_in_constructor_initializer_experimental = false + +# Prefer "var" everywhere +csharp_style_var_for_built_in_types = true:suggestion +csharp_style_var_when_type_is_apparent = true:suggestion +csharp_style_var_elsewhere = true:suggestion + +# Prefer method-like constructs to have a block body +csharp_style_expression_bodied_methods = false:none +csharp_style_expression_bodied_constructors = false:none +csharp_style_expression_bodied_operators = false:none + +# Prefer property-like constructs to have an expression-body +csharp_style_expression_bodied_properties = true:none +csharp_style_expression_bodied_indexers = true:none +csharp_style_expression_bodied_accessors = true:none + +# Suggest more modern language features when available +csharp_style_pattern_matching_over_is_with_cast_check = true:suggestion +csharp_style_pattern_matching_over_as_with_null_check = true:suggestion +csharp_style_inlined_variable_declaration = true:suggestion +csharp_style_throw_expression = true:suggestion +csharp_style_conditional_delegate_call = true:suggestion + +# Space preferences +csharp_space_after_cast = false +csharp_space_after_colon_in_inheritance_clause = true +csharp_space_after_comma = true +csharp_space_after_dot = false +csharp_space_after_keywords_in_control_flow_statements = true +csharp_space_after_semicolon_in_for_statement = true +csharp_space_around_binary_operators = before_and_after +csharp_space_around_declaration_statements = do_not_ignore +csharp_space_before_colon_in_inheritance_clause = true +csharp_space_before_comma = false +csharp_space_before_dot = false +csharp_space_before_open_square_brackets = false +csharp_space_before_semicolon_in_for_statement = false 
+csharp_space_between_empty_square_brackets = false +csharp_space_between_method_call_empty_parameter_list_parentheses = false +csharp_space_between_method_call_name_and_opening_parenthesis = false +csharp_space_between_method_call_parameter_list_parentheses = false +csharp_space_between_method_declaration_empty_parameter_list_parentheses = false +csharp_space_between_method_declaration_name_and_open_parenthesis = false +csharp_space_between_method_declaration_parameter_list_parentheses = false +csharp_space_between_parentheses = false +csharp_space_between_square_brackets = false + +# Blocks are allowed +csharp_prefer_braces = true:silent +csharp_preserve_single_line_blocks = true +csharp_preserve_single_line_statements = true + +# Target-type new expressio +csharp_style_implicit_object_creation_when_type_is_apparent = true:suggestion + +# Currently only enabled for C# due to crash in VB analyzer. VB can be enabled once +# https://github.com/dotnet/roslyn/pull/54259 has been published. 
+dotnet_style_allow_statement_immediately_after_block_experimental = false +dotnet_diagnostic.RCS0003.severity=warning +dotnet_diagnostic.RCS1036.severity=error +dotnet_diagnostic.IDE0005.severity=warning +dotnet_diagnostic.IDE0007.severity=error +csharp_using_directive_placement = outside_namespace:silent +csharp_prefer_simple_using_statement = true:suggestion +csharp_style_namespace_declarations = block_scoped:silent +csharp_style_expression_bodied_lambdas = true:silent +csharp_style_expression_bodied_local_functions = false:silent +csharp_style_prefer_null_check_over_type_check = true:suggestion +dotnet_diagnostic.RCS1075.severity = suggestion + +[src/CodeStyle/**.{cs,vb}] +# warning RS0005: Do not use generic CodeAction.Create to create CodeAction +dotnet_diagnostic.RS0005.severity = none + +[src/{Analyzers,CodeStyle,Features,Workspaces,EditorFeatures,VisualStudio}/**/*.{cs,vb}] + +# IDE0011: Add braces +csharp_prefer_braces = when_multiline:warning +# NOTE: We need the below severity entry for Add Braces due to https://github.com/dotnet/roslyn/issues/44201 +dotnet_diagnostic.IDE0011.severity = warning + +# IDE0040: Add accessibility modifiers +dotnet_diagnostic.IDE0040.severity = warning + +# CONSIDER: Are IDE0051 and IDE0052 too noisy to be warnings for IDE editing scenarios? Should they be made build-only warnings? 
+# IDE0051: Remove unused private member +dotnet_diagnostic.IDE0051.severity = warning + +# IDE0052: Remove unread private member +dotnet_diagnostic.IDE0052.severity = warning + +# IDE0059: Unnecessary assignment to a value +dotnet_diagnostic.IDE0059.severity = warning + +# IDE0060: Remove unused parameter +dotnet_diagnostic.IDE0060.severity = warning + +# CA1012: Abstract types should not have public constructors +dotnet_diagnostic.CA1012.severity = warning + +# CA1822: Make member static +dotnet_diagnostic.CA1822.severity = warning + +# Prefer "var" everywhere +dotnet_diagnostic.IDE0007.severity = warning +csharp_style_var_for_built_in_types = true:warning +csharp_style_var_when_type_is_apparent = true:warning +csharp_style_var_elsewhere = true:warning + +# dotnet_style_allow_multiple_blank_lines_experimental +dotnet_diagnostic.IDE2000.severity = warning + +# csharp_style_allow_embedded_statements_on_same_line_experimental +dotnet_diagnostic.IDE2001.severity = warning + +# csharp_style_allow_blank_lines_between_consecutive_braces_experimental +dotnet_diagnostic.IDE2002.severity = warning + +# dotnet_style_allow_statement_immediately_after_block_experimental +dotnet_diagnostic.IDE2003.severity = warning + +# csharp_style_allow_blank_line_after_colon_in_constructor_initializer_experimental +dotnet_diagnostic.IDE2004.severity = warning + +[src/{VisualStudio}/**/*.{cs,vb}] +# CA1822: Make member static +# There is a risk of accidentally breaking an internal API that partners rely on though IVT. +dotnet_code_quality.CA1822.api_surface = private \ No newline at end of file diff --git a/gpt4all-bindings/csharp/.gitignore b/gpt4all-bindings/csharp/.gitignore new file mode 100644 index 00000000..545d87e4 --- /dev/null +++ b/gpt4all-bindings/csharp/.gitignore @@ -0,0 +1,373 @@ +## Ignore Visual Studio temporary files, build results, and +## files generated by popular Visual Studio add-ons. 
+## +## Get latest from https://github.com/github/gitignore/blob/master/VisualStudio.gitignore + +runtimes +**/*nuget + +*.zip +include/ +*.exp +*.lib +*.dll + +# User-specific files +*.rsuser +*.suo +*.user +*.userosscache +*.sln.docstates + +# User-specific files (MonoDevelop/Xamarin Studio) +*.userprefs + +# Mono auto generated files +mono_crash.* +Tests/**/launchSettings.json + +# Build results +[Dd]ebug/ +[Dd]ebugPublic/ +[Rr]elease/ +[Rr]eleases/ +x64/ +x86/ +[Ww][Ii][Nn]32/ +[Aa][Rr][Mm]/ +[Aa][Rr][Mm]64/ +bld/ +[Bb]in/ +[Oo]bj/ +[Oo]ut/ +[Ll]og/ +[Ll]ogs/ + +# Visual Studio 2015/2017 cache/options directory +.vs/ +# Uncomment if you have tasks that create the project's static files in wwwroot +#wwwroot/ + +# Visual Studio 2017 auto generated files +Generated\ Files/ + +# MSTest test Results +[Tt]est[Rr]esult*/ +[Bb]uild[Ll]og.* + +# NUnit +*.VisualState.xml +TestResult.xml +nunit-*.xml + +# Build Results of an ATL Project +[Dd]ebugPS/ +[Rr]eleasePS/ +dlldata.c + +# Benchmark Results +BenchmarkDotNet.Artifacts/ + +# .NET Core +project.lock.json +project.fragment.lock.json +artifacts/ + +# ASP.NET Scaffolding +ScaffoldingReadMe.txt + +# StyleCop +StyleCopReport.xml + +# Files built by Visual Studio +*_i.c +*_p.c +*_h.h +*.ilk +*.meta +*.obj +*.iobj +*.pch +*.pdb +*.ipdb +*.pgc +*.pgd +*.rsp +*.sbr +*.tlb +*.tli +*.tlh +*.tmp +*.tmp_proj +*_wpftmp.csproj +*.log +*.vspscc +*.vssscc +.builds +*.pidb +*.svclog +*.scc + +# Chutzpah Test files +_Chutzpah* + +# Visual C++ cache files +ipch/ +*.aps +*.ncb +*.opendb +*.opensdf +*.sdf +*.cachefile +*.VC.db +*.VC.VC.opendb + +# Visual Studio profiler +*.psess +*.vsp +*.vspx +*.sap + +# Visual Studio Trace Files +*.e2e + +# TFS 2012 Local Workspace +$tf/ + +# Guidance Automation Toolkit +*.gpState + +# ReSharper is a .NET coding add-in +_ReSharper*/ +*.[Rr]e[Ss]harper +*.DotSettings.user + +# TeamCity is a build add-in +_TeamCity* + +# DotCover is a Code Coverage Tool +*.dotCover + +# AxoCover is a Code Coverage Tool 
+.axoCover/* +!.axoCover/settings.json + +# Coverlet is a free, cross platform Code Coverage Tool +coverage*.json +coverage*.xml +coverage*.info + +# Visual Studio code coverage results +*.coverage +*.coveragexml + +# NCrunch +_NCrunch_* +.*crunch*.local.xml +nCrunchTemp_* + +# MightyMoose +*.mm.* +AutoTest.Net/ + +# Web workbench (sass) +.sass-cache/ + +# Installshield output folder +[Ee]xpress/ + +# DocProject is a documentation generator add-in +DocProject/buildhelp/ +DocProject/Help/*.HxT +DocProject/Help/*.HxC +DocProject/Help/*.hhc +DocProject/Help/*.hhk +DocProject/Help/*.hhp +DocProject/Help/Html2 +DocProject/Help/html + +# Click-Once directory +publish/ + +# Publish Web Output +*.[Pp]ublish.xml +*.azurePubxml +# Note: Comment the next line if you want to checkin your web deploy settings, +# but database connection strings (with potential passwords) will be unencrypted +*.pubxml +*.publishproj + +# Microsoft Azure Web App publish settings. Comment the next line if you want to +# checkin your Azure Web App publish settings, but sensitive information contained +# in these scripts will be unencrypted +PublishScripts/ + +# NuGet Packages +*.nupkg +# NuGet Symbol Packages +*.snupkg +# The packages folder can be ignored because of Package Restore +**/[Pp]ackages/* +# except build/, which is used as an MSBuild target. 
+!**/[Pp]ackages/build/ +# Uncomment if necessary however generally it will be regenerated when needed +#!**/[Pp]ackages/repositories.config +# NuGet v3's project.json files produces more ignorable files +*.nuget.props +*.nuget.targets + +# Microsoft Azure Build Output +csx/ +*.build.csdef + +# Microsoft Azure Emulator +ecf/ +rcf/ + +# Windows Store app package directories and files +AppPackages/ +BundleArtifacts/ +Package.StoreAssociation.xml +_pkginfo.txt +*.appx +*.appxbundle +*.appxupload + +# Visual Studio cache files +# files ending in .cache can be ignored +*.[Cc]ache +# but keep track of directories ending in .cache +!?*.[Cc]ache/ + +# Others +ClientBin/ +~$* +*~ +*.dbmdl +*.dbproj.schemaview +*.jfm +*.pfx +*.publishsettings +orleans.codegen.cs + +# Including strong name files can present a security risk +# (https://github.com/github/gitignore/pull/2483#issue-259490424) +#*.snk + +# Since there are multiple workflows, uncomment next line to ignore bower_components +# (https://github.com/github/gitignore/pull/1529#issuecomment-104372622) +#bower_components/ + +# RIA/Silverlight projects +Generated_Code/ + +# Backup & report files from converting an old project file +# to a newer Visual Studio version. Backup files are not needed, +# because we have git ;-) +_UpgradeReport_Files/ +Backup*/ +UpgradeLog*.XML +UpgradeLog*.htm +ServiceFabricBackup/ +*.rptproj.bak + +# SQL Server files +*.mdf +*.ldf +*.ndf + +# Business Intelligence projects +*.rdl.data +*.bim.layout +*.bim_*.settings +*.rptproj.rsuser +*- [Bb]ackup.rdl +*- [Bb]ackup ([0-9]).rdl +*- [Bb]ackup ([0-9][0-9]).rdl + +# Microsoft Fakes +FakesAssemblies/ + +# GhostDoc plugin setting file +*.GhostDoc.xml + +# Node.js Tools for Visual Studio +.ntvs_analysis.dat +node_modules/ + +# Visual Studio 6 build log +*.plg + +# Visual Studio 6 workspace options file +*.opt + +# Visual Studio 6 auto-generated workspace file (contains which files were open etc.) 
+*.vbw + +# Visual Studio LightSwitch build output +**/*.HTMLClient/GeneratedArtifacts +**/*.DesktopClient/GeneratedArtifacts +**/*.DesktopClient/ModelManifest.xml +**/*.Server/GeneratedArtifacts +**/*.Server/ModelManifest.xml +_Pvt_Extensions + +# Paket dependency manager +.paket/paket.exe +paket-files/ + +# FAKE - F# Make +.fake/ + +# CodeRush personal settings +.cr/personal + +# Python Tools for Visual Studio (PTVS) +__pycache__/ +*.pyc + +# Cake - Uncomment if you are using it +# tools/** +# !tools/packages.config + +# Tabs Studio +*.tss + +# Telerik's JustMock configuration file +*.jmconfig + +# BizTalk build output +*.btp.cs +*.btm.cs +*.odx.cs +*.xsd.cs + +# OpenCover UI analysis results +OpenCover/ + +# Azure Stream Analytics local run output +ASALocalRun/ + +# MSBuild Binary and Structured Log +*.binlog + +# NVidia Nsight GPU debugger configuration file +*.nvuser + +# MFractors (Xamarin productivity tool) working folder +.mfractor/ + +# Local History for Visual Studio +.localhistory/ + +# BeatPulse healthcheck temp database +healthchecksdb + +# Backup folder for Package Reference Convert tool in Visual Studio 2017 +MigrationBackup/ + +# Ionide (cross platform F# VS Code tools) working folder +.ionide/ + +# Fody - auto-generated XML schema +FodyWeavers.xsd \ No newline at end of file diff --git a/gpt4all-bindings/csharp/Directory.Build.props b/gpt4all-bindings/csharp/Directory.Build.props new file mode 100644 index 00000000..8a634077 --- /dev/null +++ b/gpt4all-bindings/csharp/Directory.Build.props @@ -0,0 +1,44 @@ + + + + + + + en-US + 0.5.0 + $(VersionSuffix) + $(Version)$(VersionSuffix) + true + + git + true + true + latest-minimum + true + + + + + + + + preview + strict + + + + + all + runtime; build; native; contentfiles; analyzers + + + all + runtime; build; native; contentfiles; analyzers + + + all + runtime; build; native; contentfiles; analyzers + + + + diff --git a/gpt4all-bindings/csharp/Gpt4All.Samples/Gpt4All.Samples.csproj 
b/gpt4all-bindings/csharp/Gpt4All.Samples/Gpt4All.Samples.csproj new file mode 100644 index 00000000..6fd881b0 --- /dev/null +++ b/gpt4all-bindings/csharp/Gpt4All.Samples/Gpt4All.Samples.csproj @@ -0,0 +1,18 @@ + + + + Exe + net7.0 + enable + enable + + + + + + + + + + + diff --git a/gpt4all-bindings/csharp/Gpt4All.Samples/Program.cs b/gpt4all-bindings/csharp/Gpt4All.Samples/Program.cs new file mode 100644 index 00000000..f888d4ab --- /dev/null +++ b/gpt4all-bindings/csharp/Gpt4All.Samples/Program.cs @@ -0,0 +1,21 @@ +using Gpt4All; + +var modelFactory = new Gpt4AllModelFactory(); + +var modelPath = args[0]; + +using var model = modelFactory.LoadModel(modelPath); + +var input = args.Length > 1 ? args[1] : "Name 3 colors."; + +var result = await model.GetStreamingPredictionAsync( + input, + PredictRequestOptions.Defaults); + +await foreach (var token in result.GetPredictionStreamingAsync()) +{ + Console.Write(token); +} + +Console.WriteLine(); +Console.WriteLine("DONE."); diff --git a/gpt4all-bindings/csharp/Gpt4All.sln b/gpt4all-bindings/csharp/Gpt4All.sln new file mode 100644 index 00000000..929a45e0 --- /dev/null +++ b/gpt4all-bindings/csharp/Gpt4All.sln @@ -0,0 +1,41 @@ + +Microsoft Visual Studio Solution File, Format Version 12.00 +# Visual Studio Version 17 +VisualStudioVersion = 17.5.33516.290 +MinimumVisualStudioVersion = 10.0.40219.1 +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Gpt4All.Samples", "Gpt4All.Samples\Gpt4All.Samples.csproj", "{59864AE8-E45D-42F7-A7C0-1308EF185F39}" +EndProject +Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Solution Items", "Solution Items", "{DA396C11-CEAD-4368-8234-FB12255A30D2}" + ProjectSection(SolutionItems) = preProject + .gitignore = .gitignore + build_linux.sh = build_linux.sh + build_win-mingw.ps1 = build_win-mingw.ps1 + build_win-msvc.ps1 = build_win-msvc.ps1 + docs\gpt4all_csharp.md = docs\gpt4all_csharp.md + README.md = README.md + EndProjectSection +EndProject 
+Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Gpt4All", "Gpt4All\Gpt4All.csproj", "{6015C62B-2008-426B-A334-740D6F1FE38B}" +EndProject +Global + GlobalSection(SolutionConfigurationPlatforms) = preSolution + Debug|Any CPU = Debug|Any CPU + Release|Any CPU = Release|Any CPU + EndGlobalSection + GlobalSection(ProjectConfigurationPlatforms) = postSolution + {59864AE8-E45D-42F7-A7C0-1308EF185F39}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {59864AE8-E45D-42F7-A7C0-1308EF185F39}.Debug|Any CPU.Build.0 = Debug|Any CPU + {59864AE8-E45D-42F7-A7C0-1308EF185F39}.Release|Any CPU.ActiveCfg = Release|Any CPU + {59864AE8-E45D-42F7-A7C0-1308EF185F39}.Release|Any CPU.Build.0 = Release|Any CPU + {6015C62B-2008-426B-A334-740D6F1FE38B}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {6015C62B-2008-426B-A334-740D6F1FE38B}.Debug|Any CPU.Build.0 = Debug|Any CPU + {6015C62B-2008-426B-A334-740D6F1FE38B}.Release|Any CPU.ActiveCfg = Release|Any CPU + {6015C62B-2008-426B-A334-740D6F1FE38B}.Release|Any CPU.Build.0 = Release|Any CPU + EndGlobalSection + GlobalSection(SolutionProperties) = preSolution + HideSolutionNode = FALSE + EndGlobalSection + GlobalSection(ExtensibilityGlobals) = postSolution + SolutionGuid = {17632027-F4C2-4903-B88F-310CE3DE386B} + EndGlobalSection +EndGlobal diff --git a/gpt4all-bindings/csharp/Gpt4All/Bindings/ILLModel.cs b/gpt4all-bindings/csharp/Gpt4All/Bindings/ILLModel.cs new file mode 100644 index 00000000..b21b30e4 --- /dev/null +++ b/gpt4all-bindings/csharp/Gpt4All/Bindings/ILLModel.cs @@ -0,0 +1,31 @@ +namespace Gpt4All.Bindings; + +/// +/// Represents the interface exposed by the universal wrapper for GPT4All language models built around llmodel C-API. +/// +public interface ILLModel : IDisposable +{ + ModelType ModelType { get; } + + ulong GetStateSizeBytes(); + + int GetThreadCount(); + + void SetThreadCount(int threadCount); + + bool IsLoaded(); + + bool Load(string modelPath); + + void Prompt( + string text, + LLModelPromptContext context, + Func? 
promptCallback = null, + Func? responseCallback = null, + Func? recalculateCallback = null, + CancellationToken cancellationToken = default); + + unsafe ulong RestoreStateData(byte* destination); + + unsafe ulong SaveStateData(byte* source); +} diff --git a/gpt4all-bindings/csharp/Gpt4All/Bindings/LLModel.cs b/gpt4all-bindings/csharp/Gpt4All/Bindings/LLModel.cs new file mode 100644 index 00000000..bcda1d85 --- /dev/null +++ b/gpt4all-bindings/csharp/Gpt4All/Bindings/LLModel.cs @@ -0,0 +1,235 @@ +namespace Gpt4All.Bindings; + +/// +/// Arguments for the response processing callback +/// +/// The token id of the response +/// The response string. NOTE: a token_id of -1 indicates the string is an error string +/// +/// A bool indicating whether the model should keep generating +/// +public record ModelResponseEventArgs(int TokenId, string Response) +{ + public bool IsError => TokenId == -1; +} + +/// +/// Arguments for the prompt processing callback +/// +/// The token id of the prompt +/// +/// A bool indicating whether the model should keep processing +/// +public record ModelPromptEventArgs(int TokenId) +{ +} + +/// +/// Arguments for the recalculating callback +/// +/// whether the model is recalculating the context. +/// +/// A bool indicating whether the model should keep generating +/// +public record ModelRecalculatingEventArgs(bool IsRecalculating); + +/// +/// Base class and universal wrapper for GPT4All language models built around llmodel C-API. 
+/// +public class LLModel : ILLModel +{ + protected readonly IntPtr _handle; + private readonly ModelType _modelType; + private bool _disposed; + + public ModelType ModelType => _modelType; + + internal LLModel(IntPtr handle, ModelType modelType) + { + _handle = handle; + _modelType = modelType; + } + + /// + /// Create a new model from a pointer + /// + /// Pointer to underlying model + /// The model type + public static LLModel Create(IntPtr handle, ModelType modelType) + { + return new LLModel(handle, modelType); + } + + /// + /// Generate a response using the model + /// + /// The input promp + /// The context + /// A callback function for handling the processing of prompt + /// A callback function for handling the generated response + /// A callback function for handling recalculation requests + /// + public void Prompt( + string text, + LLModelPromptContext context, + Func? promptCallback = null, + Func? responseCallback = null, + Func? recalculateCallback = null, + CancellationToken cancellationToken = default) + { + GC.KeepAlive(promptCallback); + GC.KeepAlive(responseCallback); + GC.KeepAlive(recalculateCallback); + GC.KeepAlive(cancellationToken); + + NativeMethods.llmodel_prompt( + _handle, + text, + (tokenId) => + { + if (cancellationToken.IsCancellationRequested) return false; + if (promptCallback == null) return true; + var args = new ModelPromptEventArgs(tokenId); + return promptCallback(args); + }, + (tokenId, response) => + { + if (cancellationToken.IsCancellationRequested) return false; + if (responseCallback == null) return true; + var args = new ModelResponseEventArgs(tokenId, response); + return responseCallback(args); + }, + (isRecalculating) => + { + if (cancellationToken.IsCancellationRequested) return false; + if (recalculateCallback == null) return true; + var args = new ModelRecalculatingEventArgs(isRecalculating); + return recalculateCallback(args); + }, + ref context.UnderlyingContext + ); + } + + /// + /// Set the number of threads to 
be used by the model. + /// + /// The new thread count + public void SetThreadCount(int threadCount) + { + NativeMethods.llmodel_setThreadCount(_handle, threadCount); + } + + /// + /// Get the number of threads used by the model. + /// + /// the number of threads used by the model + public int GetThreadCount() + { + return NativeMethods.llmodel_threadCount(_handle); + } + + /// + /// Get the size of the internal state of the model. + /// + /// + /// This state data is specific to the type of model you have created. + /// + /// the size in bytes of the internal state of the model + public ulong GetStateSizeBytes() + { + return NativeMethods.llmodel_get_state_size(_handle); + } + + /// + /// Saves the internal state of the model to the specified destination address. + /// + /// A pointer to the src + /// The number of bytes copied + public unsafe ulong SaveStateData(byte* source) + { + return NativeMethods.llmodel_save_state_data(_handle, source); + } + + /// + /// Restores the internal state of the model using data from the specified address. + /// + /// A pointer to destination + /// the number of bytes read + public unsafe ulong RestoreStateData(byte* destination) + { + return NativeMethods.llmodel_restore_state_data(_handle, destination); + } + + /// + /// Check if the model is loaded. + /// + /// true if the model was loaded successfully, false otherwise. + public bool IsLoaded() + { + return NativeMethods.llmodel_isModelLoaded(_handle); + } + + /// + /// Load the model from a file. + /// + /// The path to the model file. + /// true if the model was loaded successfully, false otherwise. 
+ public bool Load(string modelPath) + { + return NativeMethods.llmodel_loadModel(_handle, modelPath); + } + + protected void Destroy() + { + NativeMethods.llmodel_model_destroy(_handle); + } + + protected void DestroyLLama() + { + NativeMethods.llmodel_llama_destroy(_handle); + } + + protected void DestroyGptj() + { + NativeMethods.llmodel_gptj_destroy(_handle); + } + + protected void DestroyMtp() + { + NativeMethods.llmodel_mpt_destroy(_handle); + } + + protected virtual void Dispose(bool disposing) + { + if (_disposed) return; + + if (disposing) + { + // dispose managed state + } + + switch (_modelType) + { + case ModelType.LLAMA: + DestroyLLama(); + break; + case ModelType.GPTJ: + DestroyGptj(); + break; + case ModelType.MPT: + DestroyMtp(); + break; + default: + Destroy(); + break; + } + + _disposed = true; + } + + public void Dispose() + { + Dispose(disposing: true); + GC.SuppressFinalize(this); + } +} diff --git a/gpt4all-bindings/csharp/Gpt4All/Bindings/LLPromptContext.cs b/gpt4all-bindings/csharp/Gpt4All/Bindings/LLPromptContext.cs new file mode 100644 index 00000000..ce78938f --- /dev/null +++ b/gpt4all-bindings/csharp/Gpt4All/Bindings/LLPromptContext.cs @@ -0,0 +1,140 @@ +using System.Reflection; + +namespace Gpt4All.Bindings; + +/// +/// Wrapper around the llmodel_prompt_context structure for holding the prompt context. 
+/// +/// +/// The implementation takes care of all the memory handling of the raw logits pointer and the +/// raw tokens pointer.Attempting to resize them or modify them in any way can lead to undefined behavior +/// +public unsafe class LLModelPromptContext +{ + private llmodel_prompt_context _ctx; + + internal ref llmodel_prompt_context UnderlyingContext => ref _ctx; + + public LLModelPromptContext() + { + _ctx = new(); + } + + /// + /// logits of current context + /// + public Span Logits => new(_ctx.logits, (int)_ctx.logits_size); + + /// + /// the size of the raw logits vector + /// + public nuint LogitsSize + { + get => _ctx.logits_size; + set => _ctx.logits_size = value; + } + + /// + /// current tokens in the context window + /// + public Span Tokens => new(_ctx.tokens, (int)_ctx.tokens_size); + + /// + /// the size of the raw tokens vector + /// + public nuint TokensSize + { + get => _ctx.tokens_size; + set => _ctx.tokens_size = value; + } + + /// + /// top k logits to sample from + /// + public int TopK + { + get => _ctx.top_k; + set => _ctx.top_k = value; + } + + /// + /// nucleus sampling probability threshold + /// + public float TopP + { + get => _ctx.top_p; + set => _ctx.top_p = value; + } + + /// + /// temperature to adjust model's output distribution + /// + public float Temperature + { + get => _ctx.temp; + set => _ctx.temp = value; + } + + /// + /// number of tokens in past conversation + /// + public int PastNum + { + get => _ctx.n_past; + set => _ctx.n_past = value; + } + + /// + /// number of predictions to generate in parallel + /// + public int Batches + { + get => _ctx.n_batch; + set => _ctx.n_batch = value; + } + + /// + /// number of tokens to predict + /// + public int TokensToPredict + { + get => _ctx.n_predict; + set => _ctx.n_predict = value; + } + + /// + /// penalty factor for repeated tokens + /// + public float RepeatPenalty + { + get => _ctx.repeat_penalty; + set => _ctx.repeat_penalty = value; + } + + /// + /// last n tokens to 
penalize + /// + public int RepeatLastN + { + get => _ctx.repeat_last_n; + set => _ctx.repeat_last_n = value; + } + + /// + /// number of tokens possible in context window + /// + public int ContextSize + { + get => _ctx.n_ctx; + set => _ctx.n_ctx = value; + } + + /// + /// percent of context to erase if we exceed the context window + /// + public float ContextErase + { + get => _ctx.context_erase; + set => _ctx.context_erase = value; + } +} diff --git a/gpt4all-bindings/csharp/Gpt4All/Bindings/NativeMethods.cs b/gpt4all-bindings/csharp/Gpt4All/Bindings/NativeMethods.cs new file mode 100644 index 00000000..c77212ca --- /dev/null +++ b/gpt4all-bindings/csharp/Gpt4All/Bindings/NativeMethods.cs @@ -0,0 +1,126 @@ +using System.Runtime.InteropServices; + +namespace Gpt4All.Bindings; + +public unsafe partial struct llmodel_prompt_context +{ + public float* logits; + + [NativeTypeName("size_t")] + public nuint logits_size; + + [NativeTypeName("int32_t *")] + public int* tokens; + + [NativeTypeName("size_t")] + public nuint tokens_size; + + [NativeTypeName("int32_t")] + public int n_past; + + [NativeTypeName("int32_t")] + public int n_ctx; + + [NativeTypeName("int32_t")] + public int n_predict; + + [NativeTypeName("int32_t")] + public int top_k; + + public float top_p; + + public float temp; + + [NativeTypeName("int32_t")] + public int n_batch; + + public float repeat_penalty; + + [NativeTypeName("int32_t")] + public int repeat_last_n; + + public float context_erase; +} + +internal static unsafe partial class NativeMethods +{ + [UnmanagedFunctionPointer(CallingConvention.Cdecl)] + [return: MarshalAs(UnmanagedType.I1)] + public delegate bool LlmodelResponseCallback(int token_id, [MarshalAs(UnmanagedType.LPUTF8Str)] string response); + + [UnmanagedFunctionPointer(CallingConvention.Cdecl)] + [return: MarshalAs(UnmanagedType.I1)] + public delegate bool LlmodelPromptCallback(int token_id); + + [UnmanagedFunctionPointer(CallingConvention.Cdecl)] + [return: 
MarshalAs(UnmanagedType.I1)] + public delegate bool LlmodelRecalculateCallback(bool isRecalculating); + + [DllImport("libllmodel", CallingConvention = CallingConvention.Cdecl, ExactSpelling = true)] + [return: NativeTypeName("llmodel_model")] + public static extern IntPtr llmodel_gptj_create(); + + [DllImport("libllmodel", CallingConvention = CallingConvention.Cdecl, ExactSpelling = true)] + public static extern void llmodel_gptj_destroy([NativeTypeName("llmodel_model")] IntPtr gptj); + + [DllImport("libllmodel", CallingConvention = CallingConvention.Cdecl, ExactSpelling = true)] + [return: NativeTypeName("llmodel_model")] + public static extern IntPtr llmodel_mpt_create(); + + [DllImport("libllmodel", CallingConvention = CallingConvention.Cdecl, ExactSpelling = true)] + public static extern void llmodel_mpt_destroy([NativeTypeName("llmodel_model")] IntPtr mpt); + + [DllImport("libllmodel", CallingConvention = CallingConvention.Cdecl, ExactSpelling = true)] + [return: NativeTypeName("llmodel_model")] + public static extern IntPtr llmodel_llama_create(); + + [DllImport("libllmodel", CallingConvention = CallingConvention.Cdecl, ExactSpelling = true)] + public static extern void llmodel_llama_destroy([NativeTypeName("llmodel_model")] IntPtr llama); + + [DllImport("libllmodel", CallingConvention = CallingConvention.Cdecl, ExactSpelling = true, BestFitMapping = false, ThrowOnUnmappableChar = true)] + [return: NativeTypeName("llmodel_model")] + public static extern IntPtr llmodel_model_create( + [NativeTypeName("const char *")][MarshalAs(UnmanagedType.LPUTF8Str)] string model_path); + + [DllImport("libllmodel", CallingConvention = CallingConvention.Cdecl, ExactSpelling = true)] + public static extern void llmodel_model_destroy([NativeTypeName("llmodel_model")] IntPtr model); + + [DllImport("libllmodel", CallingConvention = CallingConvention.Cdecl, ExactSpelling = true, BestFitMapping = false, ThrowOnUnmappableChar = true)] + [return: MarshalAs(UnmanagedType.I1)] + public 
static extern bool llmodel_loadModel( + [NativeTypeName("llmodel_model")] IntPtr model, + [NativeTypeName("const char *")][MarshalAs(UnmanagedType.LPUTF8Str)] string model_path); + + [DllImport("libllmodel", CallingConvention = CallingConvention.Cdecl, ExactSpelling = true)] + + [return: MarshalAs(UnmanagedType.I1)] + public static extern bool llmodel_isModelLoaded([NativeTypeName("llmodel_model")] IntPtr model); + + [DllImport("libllmodel", CallingConvention = CallingConvention.Cdecl, ExactSpelling = true)] + [return: NativeTypeName("uint64_t")] + public static extern ulong llmodel_get_state_size([NativeTypeName("llmodel_model")] IntPtr model); + + [DllImport("libllmodel", CallingConvention = CallingConvention.Cdecl, ExactSpelling = true)] + [return: NativeTypeName("uint64_t")] + public static extern ulong llmodel_save_state_data([NativeTypeName("llmodel_model")] IntPtr model, [NativeTypeName("uint8_t *")] byte* dest); + + [DllImport("libllmodel", CallingConvention = CallingConvention.Cdecl, ExactSpelling = true)] + [return: NativeTypeName("uint64_t")] + public static extern ulong llmodel_restore_state_data([NativeTypeName("llmodel_model")] IntPtr model, [NativeTypeName("const uint8_t *")] byte* src); + + [DllImport("libllmodel", CallingConvention = CallingConvention.Cdecl, ExactSpelling = true, BestFitMapping = false, ThrowOnUnmappableChar = true)] + public static extern void llmodel_prompt( + [NativeTypeName("llmodel_model")] IntPtr model, + [NativeTypeName("const char *")][MarshalAs(UnmanagedType.LPUTF8Str)] string prompt, + LlmodelPromptCallback prompt_callback, + LlmodelResponseCallback response_callback, + LlmodelRecalculateCallback recalculate_callback, + ref llmodel_prompt_context ctx); + + [DllImport("libllmodel", CallingConvention = CallingConvention.Cdecl, ExactSpelling = true)] + public static extern void llmodel_setThreadCount([NativeTypeName("llmodel_model")] IntPtr model, [NativeTypeName("int32_t")] int n_threads); + + [DllImport("libllmodel", 
CallingConvention = CallingConvention.Cdecl, ExactSpelling = true)] + [return: NativeTypeName("int32_t")] + public static extern int llmodel_threadCount([NativeTypeName("llmodel_model")] IntPtr model); +} diff --git a/gpt4all-bindings/csharp/Gpt4All/Bindings/NativeTypeNameAttribute.cs b/gpt4all-bindings/csharp/Gpt4All/Bindings/NativeTypeNameAttribute.cs new file mode 100644 index 00000000..f9917bc0 --- /dev/null +++ b/gpt4all-bindings/csharp/Gpt4All/Bindings/NativeTypeNameAttribute.cs @@ -0,0 +1,21 @@ +using System.Diagnostics; + +namespace Gpt4All.Bindings; + +/// Defines the type of a member as it was used in the native signature. +[AttributeUsage(AttributeTargets.Struct | AttributeTargets.Enum | AttributeTargets.Property | AttributeTargets.Field | AttributeTargets.Parameter | AttributeTargets.ReturnValue, AllowMultiple = false, Inherited = true)] +[Conditional("DEBUG")] +internal sealed partial class NativeTypeNameAttribute : Attribute +{ + private readonly string _name; + + /// Initializes a new instance of the class. + /// The name of the type that was used in the native signature. + public NativeTypeNameAttribute(string name) + { + _name = name; + } + + /// Gets the name of the type that was used in the native signature. 
+ public string Name => _name; +} \ No newline at end of file diff --git a/gpt4all-bindings/csharp/Gpt4All/Extensions/PredictRequestOptionsExtensions.cs b/gpt4all-bindings/csharp/Gpt4All/Extensions/PredictRequestOptionsExtensions.cs new file mode 100644 index 00000000..f6e1f016 --- /dev/null +++ b/gpt4all-bindings/csharp/Gpt4All/Extensions/PredictRequestOptionsExtensions.cs @@ -0,0 +1,25 @@ +using Gpt4All.Bindings; + +namespace Gpt4All.Extensions; + +public static class PredictRequestOptionsExtensions +{ + public static LLModelPromptContext ToPromptContext(this PredictRequestOptions opts) + { + return new LLModelPromptContext + { + LogitsSize = opts.LogitsSize, + TokensSize = opts.TokensSize, + TopK = opts.TopK, + TopP = opts.TopP, + PastNum = opts.PastConversationTokensNum, + RepeatPenalty = opts.RepeatPenalty, + Temperature = opts.Temperature, + RepeatLastN = opts.RepeatLastN, + Batches = opts.Batches, + ContextErase = opts.ContextErase, + ContextSize = opts.ContextSize, + TokensToPredict = opts.TokensToPredict + }; + } +} diff --git a/gpt4all-bindings/csharp/Gpt4All/GenLLModelBindings.rsp b/gpt4all-bindings/csharp/Gpt4All/GenLLModelBindings.rsp new file mode 100644 index 00000000..4364a1f2 --- /dev/null +++ b/gpt4all-bindings/csharp/Gpt4All/GenLLModelBindings.rsp @@ -0,0 +1,21 @@ +--config +exclude-funcs-with-body +--with-access-specifier +*=Public +--include-directory +..\..\..\gpt4all-backend\ +--file +..\..\..\gpt4all-backend\llmodel_c.h +--libraryPath +libllmodel +--remap +sbyte*=IntPtr +void*=IntPtr +--namespace +Gpt4All.Bindings +--methodClassName +NativeMethods +--output +.\Bindings\NativeMethods.cs +--output-mode +CSharp \ No newline at end of file diff --git a/gpt4all-bindings/csharp/Gpt4All/Gpt4All.cs b/gpt4all-bindings/csharp/Gpt4All/Gpt4All.cs new file mode 100644 index 00000000..5db08f65 --- /dev/null +++ b/gpt4all-bindings/csharp/Gpt4All/Gpt4All.cs @@ -0,0 +1,82 @@ +using Gpt4All.Bindings; +using Gpt4All.Extensions; + +namespace Gpt4All; + +public 
class Gpt4All : IGpt4AllModel +{ + private readonly ILLModel _model; + + internal Gpt4All(ILLModel model) + { + _model = model; + } + + public Task GetPredictionAsync(string text, PredictRequestOptions opts, CancellationToken cancellationToken = default) + { + return Task.Run(() => + { + var result = new TextPredictionResult(); + var context = opts.ToPromptContext(); + + _model.Prompt(text, context, responseCallback: e => + { + if (e.IsError) + { + result.Success = false; + result.ErrorMessage = e.Response; + return false; + } + result.Append(e.Response); + return true; + }, cancellationToken: cancellationToken); + + return (ITextPredictionResult)result; + }, CancellationToken.None); + } + + public Task GetStreamingPredictionAsync(string text, PredictRequestOptions opts, CancellationToken cancellationToken = default) + { + var result = new TextPredictionStreamingResult(); + + _ = Task.Run(() => + { + try + { + var context = opts.ToPromptContext(); + + _model.Prompt(text, context, responseCallback: e => + { + if (e.IsError) + { + result.Success = false; + result.ErrorMessage = e.Response; + return false; + } + result.Append(e.Response); + return true; + }, cancellationToken: cancellationToken); + } + finally + { + result.Complete(); + } + }, CancellationToken.None); + + return Task.FromResult((ITextPredictionStreamingResult)result); + } + + protected virtual void Dispose(bool disposing) + { + if (disposing) + { + _model.Dispose(); + } + } + + public void Dispose() + { + Dispose(true); + GC.SuppressFinalize(this); + } +} diff --git a/gpt4all-bindings/csharp/Gpt4All/Gpt4All.csproj b/gpt4all-bindings/csharp/Gpt4All/Gpt4All.csproj new file mode 100644 index 00000000..dc2d96fa --- /dev/null +++ b/gpt4all-bindings/csharp/Gpt4All/Gpt4All.csproj @@ -0,0 +1,23 @@ + + + + net6.0 + enable + enable + true + + + + + + + + + + + + + + + + diff --git a/gpt4all-bindings/csharp/Gpt4All/Model/Gpt4AllModelFactory.cs b/gpt4all-bindings/csharp/Gpt4All/Model/Gpt4AllModelFactory.cs new 
file mode 100644 index 00000000..6d4c7875 --- /dev/null +++ b/gpt4all-bindings/csharp/Gpt4All/Model/Gpt4AllModelFactory.cs @@ -0,0 +1,41 @@ +using Gpt4All.Bindings; +using System.Diagnostics; + +namespace Gpt4All; + +public class Gpt4AllModelFactory : IGpt4AllModelFactory +{ + private static IGpt4AllModel CreateModel(string modelPath, ModelType? modelType = null) + { + var modelType_ = modelType ?? ModelFileUtils.GetModelTypeFromModelFileHeader(modelPath); + + var handle = modelType_ switch + { + ModelType.LLAMA => NativeMethods.llmodel_llama_create(), + ModelType.GPTJ => NativeMethods.llmodel_gptj_create(), + ModelType.MPT => NativeMethods.llmodel_mpt_create(), + _ => NativeMethods.llmodel_model_create(modelPath), + }; + + var loadedSuccesfully = NativeMethods.llmodel_loadModel(handle, modelPath); + + if (loadedSuccesfully == false) + { + throw new Exception($"Failed to load model: '{modelPath}'"); + } + + var underlyingModel = LLModel.Create(handle, modelType_); + + Debug.Assert(underlyingModel.IsLoaded()); + + return new Gpt4All(underlyingModel); + } + + public IGpt4AllModel LoadModel(string modelPath) => CreateModel(modelPath, modelType: null); + + public IGpt4AllModel LoadMptModel(string modelPath) => CreateModel(modelPath, ModelType.MPT); + + public IGpt4AllModel LoadGptjModel(string modelPath) => CreateModel(modelPath, ModelType.GPTJ); + + public IGpt4AllModel LoadLlamaModel(string modelPath) => CreateModel(modelPath, ModelType.LLAMA); +} diff --git a/gpt4all-bindings/csharp/Gpt4All/Model/IGpt4AllModel.cs b/gpt4all-bindings/csharp/Gpt4All/Model/IGpt4AllModel.cs new file mode 100644 index 00000000..168a477c --- /dev/null +++ b/gpt4all-bindings/csharp/Gpt4All/Model/IGpt4AllModel.cs @@ -0,0 +1,5 @@ +namespace Gpt4All; + +public interface IGpt4AllModel : ITextPrediction, IDisposable +{ +} diff --git a/gpt4all-bindings/csharp/Gpt4All/Model/IGpt4AllModelFactory.cs b/gpt4all-bindings/csharp/Gpt4All/Model/IGpt4AllModelFactory.cs new file mode 100644 index 
00000000..3a5208aa --- /dev/null +++ b/gpt4all-bindings/csharp/Gpt4All/Model/IGpt4AllModelFactory.cs @@ -0,0 +1,12 @@ +namespace Gpt4All; + +public interface IGpt4AllModelFactory +{ + IGpt4AllModel LoadGptjModel(string modelPath); + + IGpt4AllModel LoadLlamaModel(string modelPath); + + IGpt4AllModel LoadModel(string modelPath); + + IGpt4AllModel LoadMptModel(string modelPath); +} diff --git a/gpt4all-bindings/csharp/Gpt4All/Model/ModelFileUtils.cs b/gpt4all-bindings/csharp/Gpt4All/Model/ModelFileUtils.cs new file mode 100644 index 00000000..c508c78c --- /dev/null +++ b/gpt4all-bindings/csharp/Gpt4All/Model/ModelFileUtils.cs @@ -0,0 +1,24 @@ +namespace Gpt4All; + +public static class ModelFileUtils +{ + private const uint GPTJ_MAGIC = 0x67676d6c; + private const uint LLAMA_MAGIC = 0x67676a74; + private const uint MPT_MAGIC = 0x67676d6d; + + public static ModelType GetModelTypeFromModelFileHeader(string modelPath) + { + using var fileStream = new FileStream(modelPath, FileMode.Open); + using var binReader = new BinaryReader(fileStream); + + var magic = binReader.ReadUInt32(); + + return magic switch + { + GPTJ_MAGIC => ModelType.GPTJ, + LLAMA_MAGIC => ModelType.LLAMA, + MPT_MAGIC => ModelType.MPT, + _ => throw new ArgumentOutOfRangeException($"Invalid model file. 
magic=0x{magic:X8}"), + }; + } +} diff --git a/gpt4all-bindings/csharp/Gpt4All/Model/ModelOptions.cs b/gpt4all-bindings/csharp/Gpt4All/Model/ModelOptions.cs new file mode 100644 index 00000000..3cca142a --- /dev/null +++ b/gpt4all-bindings/csharp/Gpt4All/Model/ModelOptions.cs @@ -0,0 +1,8 @@ +namespace Gpt4All; + +public record ModelOptions +{ + public int Threads { get; init; } = 4; + + public ModelType ModelType { get; init; } = ModelType.GPTJ; +} diff --git a/gpt4all-bindings/csharp/Gpt4All/Model/ModelType.cs b/gpt4all-bindings/csharp/Gpt4All/Model/ModelType.cs new file mode 100644 index 00000000..4aced85a --- /dev/null +++ b/gpt4all-bindings/csharp/Gpt4All/Model/ModelType.cs @@ -0,0 +1,11 @@ +namespace Gpt4All; + +/// +/// The supported model types +/// +public enum ModelType +{ + LLAMA = 0, + GPTJ, + MPT +} diff --git a/gpt4all-bindings/csharp/Gpt4All/Prediction/ITextPrediction.cs b/gpt4all-bindings/csharp/Gpt4All/Prediction/ITextPrediction.cs new file mode 100644 index 00000000..c446feef --- /dev/null +++ b/gpt4all-bindings/csharp/Gpt4All/Prediction/ITextPrediction.cs @@ -0,0 +1,31 @@ +namespace Gpt4All; + +/// +/// Interface for text prediction services +/// +public interface ITextPrediction +{ + /// + /// Get prediction results for the prompt and provided options. + /// + /// The text to complete + /// The prediction settings + /// The for cancellation requests. The default is . + /// The prediction result generated by the model + Task GetPredictionAsync( + string text, + PredictRequestOptions opts, + CancellationToken cancellation = default); + + /// + /// Get streaming prediction results for the prompt and provided options. + /// + /// The text to complete + /// The prediction settings + /// The for cancellation requests. The default is . 
+ /// The prediction result generated by the model + Task GetStreamingPredictionAsync( + string text, + PredictRequestOptions opts, + CancellationToken cancellationToken = default); +} \ No newline at end of file diff --git a/gpt4all-bindings/csharp/Gpt4All/Prediction/ITextPredictionResult.cs b/gpt4all-bindings/csharp/Gpt4All/Prediction/ITextPredictionResult.cs new file mode 100644 index 00000000..96cd58dd --- /dev/null +++ b/gpt4all-bindings/csharp/Gpt4All/Prediction/ITextPredictionResult.cs @@ -0,0 +1,10 @@ +namespace Gpt4All; + +public interface ITextPredictionResult +{ + bool Success { get; } + + string? ErrorMessage { get; } + + Task GetPredictionAsync(CancellationToken cancellationToken = default); +} diff --git a/gpt4all-bindings/csharp/Gpt4All/Prediction/ITextPredictionStreamingResult.cs b/gpt4all-bindings/csharp/Gpt4All/Prediction/ITextPredictionStreamingResult.cs new file mode 100644 index 00000000..6ce49d0d --- /dev/null +++ b/gpt4all-bindings/csharp/Gpt4All/Prediction/ITextPredictionStreamingResult.cs @@ -0,0 +1,6 @@ +namespace Gpt4All; + +public interface ITextPredictionStreamingResult : ITextPredictionResult +{ + IAsyncEnumerable GetPredictionStreamingAsync(CancellationToken cancellationToken = default); +} diff --git a/gpt4all-bindings/csharp/Gpt4All/Prediction/PredictRequestOptions.cs b/gpt4all-bindings/csharp/Gpt4All/Prediction/PredictRequestOptions.cs new file mode 100644 index 00000000..2f3e57af --- /dev/null +++ b/gpt4all-bindings/csharp/Gpt4All/Prediction/PredictRequestOptions.cs @@ -0,0 +1,30 @@ +namespace Gpt4All; + +public record PredictRequestOptions +{ + public nuint LogitsSize { get; init; } = 0; + + public nuint TokensSize { get; init; } = 0; + + public int PastConversationTokensNum { get; init; } = 0; + + public int ContextSize { get; init; } = 1024; + + public int TokensToPredict { get; init; } = 128; + + public int TopK { get; init; } = 40; + + public float TopP { get; init; } = 0.9f; + + public float Temperature { get; init; } = 
0.1f; + + public int Batches { get; init; } = 8; + + public float RepeatPenalty { get; init; } = 1.2f; + + public int RepeatLastN { get; init; } = 10; + + public float ContextErase { get; init; } = 0.5f; + + public static readonly PredictRequestOptions Defaults = new(); +} diff --git a/gpt4all-bindings/csharp/Gpt4All/Prediction/TextPredictionResult.cs b/gpt4all-bindings/csharp/Gpt4All/Prediction/TextPredictionResult.cs new file mode 100644 index 00000000..707bdcd9 --- /dev/null +++ b/gpt4all-bindings/csharp/Gpt4All/Prediction/TextPredictionResult.cs @@ -0,0 +1,27 @@ +using System.Text; + +namespace Gpt4All; + +public record TextPredictionResult : ITextPredictionResult +{ + private readonly StringBuilder _result; + + public bool Success { get; internal set; } = true; + + public string? ErrorMessage { get; internal set; } + + internal TextPredictionResult() + { + _result = new StringBuilder(); + } + + internal void Append(string token) + { + _result.Append(token); + } + + public Task GetPredictionAsync(CancellationToken cancellationToken = default) + { + return Task.FromResult(_result.ToString()); + } +} \ No newline at end of file diff --git a/gpt4all-bindings/csharp/Gpt4All/Prediction/TextPredictionStreamingResult.cs b/gpt4all-bindings/csharp/Gpt4All/Prediction/TextPredictionStreamingResult.cs new file mode 100644 index 00000000..03723d57 --- /dev/null +++ b/gpt4all-bindings/csharp/Gpt4All/Prediction/TextPredictionStreamingResult.cs @@ -0,0 +1,49 @@ +using System.Text; +using System.Threading.Channels; + +namespace Gpt4All; + +public record TextPredictionStreamingResult : ITextPredictionStreamingResult +{ + private readonly Channel _channel; + + public bool Success { get; internal set; } = true; + + public string? 
ErrorMessage { get; internal set; } + + public Task Completion => _channel.Reader.Completion; + + internal TextPredictionStreamingResult() + { + _channel = Channel.CreateUnbounded(); + } + + internal bool Append(string token) + { + return _channel.Writer.TryWrite(token); + } + + internal void Complete() + { + _channel.Writer.Complete(); + } + + public async Task GetPredictionAsync(CancellationToken cancellationToken = default) + { + var sb = new StringBuilder(); + + var tokens = GetPredictionStreamingAsync(cancellationToken).ConfigureAwait(false); + + await foreach (var token in tokens) + { + sb.Append(token); + } + + return sb.ToString(); + } + + public IAsyncEnumerable GetPredictionStreamingAsync(CancellationToken cancellationToken = default) + { + return _channel.Reader.ReadAllAsync(cancellationToken); + } +} diff --git a/gpt4all-bindings/csharp/Gpt4All/gen_bindings.ps1 b/gpt4all-bindings/csharp/Gpt4All/gen_bindings.ps1 new file mode 100644 index 00000000..ec29f531 --- /dev/null +++ b/gpt4all-bindings/csharp/Gpt4All/gen_bindings.ps1 @@ -0,0 +1 @@ +ClangSharpPInvokeGenerator @(Get-Content .\GenLLModelBindings.rsp) \ No newline at end of file diff --git a/gpt4all-bindings/csharp/README.md b/gpt4all-bindings/csharp/README.md new file mode 100644 index 00000000..6d1171bf --- /dev/null +++ b/gpt4all-bindings/csharp/README.md @@ -0,0 +1,115 @@ +# C# GPT4All + +This package contains a set of C# bindings around the `llmodel` C-API. + +## Documentation +TBD + +## Installation +TBD NuGet + +## Project Structure +``` +gpt4all-bindings/ +└── csharp +   ├── Gpt4All // .NET Bindings +   ├── Gpt4All.Samples // Sample project + ├── build_win-msvc.ps1 // Native build scripts + ├── build_win-mingw.ps1 + ├── build_linux.sh + └── runtimes // [POST-BUILD] Platform-specific native libraries + ├── win-x64 + ├── ... + └── linux-x64 +``` + +## Local Build Instructions +> **Note** +> Tested On: +> - Windows 11 22H2 + VS2022 (CE) x64 +> - Linux Ubuntu x64 +> - Linux Ubuntu (WSL2) x64 + +1.
Set up the repository +2. Build the native libraries for the platform of choice (see below) +3. Build the C# Bindings (NET6+ SDK is required) +``` +git clone --recurse-submodules https://github.com/nomic-ai/gpt4all +cd gpt4all/gpt4all-bindings/csharp +``` +### Linux +1. Set up the build environment and install NET6+ SDK with the appropriate procedure for your distribution +``` +sudo apt-get update +sudo apt-get install -y cmake build-essential +chmod +x ./build_linux.sh +``` +2. `./build_linux.sh` +3. The native libraries should be present at `./runtimes/linux-x64/native` + +### Windows - MinGW64 +#### Additional requirements + - [MinGW64](https://www.mingw-w64.org/) + - CMake +1. Setup +``` +choco install mingw +$env:Path += ";C:\ProgramData\chocolatey\lib\mingw\tools\install\mingw64\bin" +choco install -y cmake --installargs 'ADD_CMAKE_TO_PATH=System' +``` +2. Run the `./build_win-mingw.ps1` build script +3. The native libraries should be present at `.\runtimes\win-x64\native` + +### Windows - MSVC +#### Additional requirements + - Visual Studio 2022 +1. Open a terminal using the `x64 Native Tools Command Prompt for VS 2022` (`vcvars64.bat`) +2. Run the `./build_win-msvc.ps1` build script +3. `libllmodel.dll` and `libllama.dll` should be present at `.\runtimes\win-x64` + +> **Warning** +> If the build fails with: '**error C7555: use of designated initializers requires at least '/std:c++20'**' +> +> Modify `gpt4all/gpt4all-backend/CMakeLists.txt`, adding `CXX_STANDARD 20` to the `llmodel` target properties. +> ```cmake +> set_target_properties(llmodel PROPERTIES +> VERSION ${PROJECT_VERSION} +> CXX_STANDARD 20 # <---- ADD THIS ----------------------- +> SOVERSION ${PROJECT_VERSION_MAJOR}) +> ``` +## C# Bindings Build Instructions +Build the `Gpt4All` (or `Gpt4All.Samples`) projects from within Visual Studio.
+### Try the bindings +```csharp +using Gpt4All; + +// load the model +var modelFactory = new Gpt4AllModelFactory(); + +using var model = modelFactory.LoadModel("./path/to/ggml-gpt4all-j-v1.3-groovy.bin"); + +var input = "Name 3 Colors"; + +// request a prediction +var result = await model.GetStreamingPredictionAsync( + input, + PredictRequestOptions.Defaults); + +// asynchronously print the tokens as soon as they are produced by the model +await foreach (var token in result.GetPredictionStreamingAsync()) +{ + Console.Write(token); +} +``` +Output: +``` +gptj_model_load: loading model from 'ggml-gpt4all-j-v1.3-groovy.bin' - please wait ... +gptj_model_load: n_vocab = 50400 +[...TRUNCATED...] +gptj_model_load: ggml ctx size = 5401.45 MB +gptj_model_load: kv self size = 896.00 MB +gptj_model_load: ................................... done +gptj_model_load: model size = 3609.38 MB / num tensors = 285 + +Black, Blue and White +``` diff --git a/gpt4all-bindings/csharp/build_linux.sh b/gpt4all-bindings/csharp/build_linux.sh new file mode 100644 index 00000000..a89969e2 --- /dev/null +++ b/gpt4all-bindings/csharp/build_linux.sh @@ -0,0 +1,8 @@ +mkdir -p runtimes +rm -rf runtimes/linux-x64 +mkdir -p runtimes/linux-x64/native +mkdir runtimes/linux-x64/build +cmake -S ../../gpt4all-backend -B runtimes/linux-x64/build +cmake --build runtimes/linux-x64/build --parallel --config Release +cp runtimes/linux-x64/build/libllmodel.so runtimes/linux-x64/native/libllmodel.so +cp runtimes/linux-x64/build/llama.cpp/libllama.so runtimes/linux-x64/native/libllama.so diff --git a/gpt4all-bindings/csharp/build_win-mingw.ps1 b/gpt4all-bindings/csharp/build_win-mingw.ps1 new file mode 100644 index 00000000..8c436552 --- /dev/null +++ b/gpt4all-bindings/csharp/build_win-mingw.ps1 @@ -0,0 +1,17 @@ +$ROOT_DIR = '.\runtimes\win-x64' +$BUILD_DIR = '.\runtimes\win-x64\build\mingw' +$LIBS_DIR = '.\runtimes\win-x64\native' + +# cleanup env +Remove-Item -Force -Recurse $ROOT_DIR -ErrorAction SilentlyContinue |
Out-Null +mkdir $BUILD_DIR | Out-Null +mkdir $LIBS_DIR | Out-Null + +# build +cmake -G "MinGW Makefiles" -S ..\..\gpt4all-backend -B $BUILD_DIR +cmake --build $BUILD_DIR --parallel --config Release + +# copy native dlls +cp "C:\ProgramData\chocolatey\lib\mingw\tools\install\mingw64\bin\*dll" $LIBS_DIR +cp "$BUILD_DIR\libllmodel.dll" $LIBS_DIR +cp "$BUILD_DIR\bin\libllama.dll" $LIBS_DIR \ No newline at end of file diff --git a/gpt4all-bindings/csharp/build_win-msvc.ps1 b/gpt4all-bindings/csharp/build_win-msvc.ps1 new file mode 100644 index 00000000..511a4bac --- /dev/null +++ b/gpt4all-bindings/csharp/build_win-msvc.ps1 @@ -0,0 +1,6 @@ +Remove-Item -Force -Recurse .\runtimes\win-x64\msvc -ErrorAction SilentlyContinue +mkdir .\runtimes\win-x64\msvc\build | Out-Null +cmake -G "Visual Studio 17 2022" -A Win64 -S ..\..\gpt4all-backend -B .\runtimes\win-x64\msvc\build +cmake --build .\runtimes\win-x64\msvc\build --parallel --config Release +cp .\runtimes\win-x64\msvc\build\Release\llmodel.dll .\runtimes\win-x64\libllmodel.dll +cp .\runtimes\win-x64\msvc\build\bin\Release\llama.dll .\runtimes\win-x64\libllama.dll \ No newline at end of file diff --git a/gpt4all-bindings/csharp/docs/gpt4all_csharp.md b/gpt4all-bindings/csharp/docs/gpt4all_csharp.md new file mode 100644 index 00000000..0a69ed5c --- /dev/null +++ b/gpt4all-bindings/csharp/docs/gpt4all_csharp.md @@ -0,0 +1 @@ +# GPT4All C# API \ No newline at end of file