Added BittensorLLM (#9250)

Description: Adding NIBittensorLLM via Validator Endpoint to langchain
llms
Tag maintainer: @Kunj-2206

Maintainer responsibilities:
    Models / Prompts: @hwchase17, @baskaryan

---------

Co-authored-by: Bagatur <baskaryan@gmail.com>
Kunj-2206 1 year ago committed by GitHub
parent 852722ea45
commit 1b3942ba74

@@ -0,0 +1,167 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# NIBittensorLLM\n",
"\n",
"NIBittensorLLM is developed by [Neural Internet](https://neuralinternet.ai/), powered by [Bittensor](https://bittensor.com/).\n",
"\n",
"This LLM showcases true potential of decentralized AI by giving you the best response(s) from the Bittensor protocol, which consist of various AI models such as OpenAI, LLaMA2 etc.\n",
"\n",
"Users can view their logs, requests, and API keys on the [Validator Endpoint Frontend](https://api.neuralinterent.ai/). However, changes to the configuration are currently prohibited; otherwise, the user's queries will be blocked.\n",
"\n",
"If you encounter any difficulties or have any questions, please feel free to reach out to our developer on [GitHub](https://github.com/Kunj-2206), [Discord](https://discordapp.com/users/683542109248159777) or join our discord server for latest update and queries [Neural Internet](https://discord.gg/neuralinternet).\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Different Parameter and response handling for NIBittensorLLM "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import langchain\n",
"from langchain.llms import NIBittensorLLM\n",
"import json\n",
"from pprint import pprint\n",
"\n",
"langchain.debug = True\n",
"\n",
"# System parameter in NIBittensorLLM is optional but you can set whatever you want to perform with model\n",
"llm_sys = NIBittensorLLM(\n",
" system_prompt=\"Your task is to determine response based on user prompt.Explain me like I am technical lead of a project\"\n",
")\n",
"sys_resp = llm_sys(\n",
" \"What is bittensor and What are the potential benifits of decentralized AI?\"\n",
")\n",
"print(f\"Response provided by LLM with system prompt set is : {sys_resp}\")\n",
"\n",
"# The top_responses parameter can give multiple responses based on its parameter value\n",
"# This below code retrive top 10 miner's response all the response are in format of json\n",
"\n",
"# Json response structure is\n",
"\"\"\" {\n",
" \"choices\": [\n",
" {\"index\": Bittensor's Metagraph index number,\n",
" \"uid\": Unique Identifier of a miner,\n",
" \"responder_hotkey\": Hotkey of a miner,\n",
" \"message\":{\"role\":\"assistant\",\"content\": Contains actual response},\n",
" \"response_ms\": Time in millisecond required to fetch response from a miner} \n",
" ]\n",
" } \"\"\"\n",
"\n",
"multi_response_llm = NIBittensorLLM(top_responses=10)\n",
"multi_resp = multi_response_llm(\"What is Neural Network Feeding Mechanism?\")\n",
"json_multi_resp = json.loads(multi_resp)\n",
"pprint(json_multi_resp)"
]
},
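{
"cell_type": "markdown",
"metadata": {},
"source": [
"Each entry in `json_multi_resp[\"choices\"]` follows the structure documented above, so you can post-process the miner responses yourself. Below is a minimal sketch (assuming the request succeeded and returned at least one choice) that picks the fastest miner's answer by `response_ms`.\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Minimal sketch: assumes json_multi_resp matches the structure shown above\n",
"# and contains at least one choice\n",
"fastest = min(json_multi_resp[\"choices\"], key=lambda c: c[\"response_ms\"])\n",
"print(f\"Fastest miner uid: {fastest['uid']}\")\n",
"print(fastest[\"message\"][\"content\"])"
]
},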
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Using NIBittensorLLM with LLMChain and PromptTemplate"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import langchain\n",
"from langchain import PromptTemplate, LLMChain\n",
"from langchain.llms import NIBittensorLLM\n",
"\n",
"langchain.debug = True\n",
"\n",
"template = \"\"\"Question: {question}\n",
"\n",
"Answer: Let's think step by step.\"\"\"\n",
"\n",
"\n",
"prompt = PromptTemplate(template=template, input_variables=[\"question\"])\n",
"\n",
"# System parameter in NIBittensorLLM is optional but you can set whatever you want to perform with model\n",
"llm = NIBittensorLLM(system_prompt=\"Your task is to determine response based on user prompt.\")\n",
"\n",
"llm_chain = LLMChain(prompt=prompt, llm=llm)\n",
"question = \"What is bittensor?\"\n",
"\n",
"llm_chain.run(question)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Using NIBittensorLLM with Conversational Agent and Google Search Tool"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from langchain.agents import (\n",
" AgentType,\n",
" initialize_agent,\n",
" load_tools,\n",
" ZeroShotAgent,\n",
" Tool,\n",
" AgentExecutor,\n",
")\n",
"from langchain.memory import ConversationBufferMemory\n",
"from langchain import LLMChain, PromptTemplate\n",
"from langchain.utilities import GoogleSearchAPIWrapper, SerpAPIWrapper\n",
"from langchain.llms import NIBittensorLLM\n",
"\n",
"memory = ConversationBufferMemory(memory_key=\"chat_history\")\n",
"\n",
"\n",
"prefix = \"\"\"Answer prompt based on LLM if there is need to search something then use internet and observe internet result and give accurate reply of user questions also try to use authenticated sources\"\"\"\n",
"suffix = \"\"\"Begin!\n",
" {chat_history}\n",
" Question: {input}\n",
" {agent_scratchpad}\"\"\"\n",
"\n",
"prompt = ZeroShotAgent.create_prompt(\n",
" tools,\n",
" prefix=prefix,\n",
" suffix=suffix,\n",
" input_variables=[\"input\", \"chat_history\", \"agent_scratchpad\"],\n",
")\n",
"\n",
"llm = NIBittensorLLM(system_prompt=\"Your task is to determine response based on user prompt\")\n",
"\n",
"llm_chain = LLMChain(llm=llm, prompt=prompt)\n",
"\n",
"memory = ConversationBufferMemory(memory_key=\"chat_history\")\n",
"\n",
"agent = ZeroShotAgent(llm_chain=llm_chain, tools=tools, verbose=True)\n",
"agent_chain = AgentExecutor.from_agent_and_tools(\n",
" agent=agent, tools=tools, verbose=True, memory=memory\n",
")\n",
"\n",
"response = agent_chain.run(input=prompt)"
]
}
],
"metadata": {
"language_info": {
"name": "python"
},
"orig_nbformat": 4
},
"nbformat": 4,
"nbformat_minor": 2
}

@@ -0,0 +1,37 @@
# NIBittensor
This page covers how to use the BittensorLLM inference runtime within LangChain.
It is broken into two parts: installation and setup, and then examples of NIBittensorLLM usage.
## Installation and Setup
- Install the Python package with `pip install langchain`
## Wrappers
### LLM
There exists a NIBittensor LLM wrapper, which you can access with:
```python
from langchain.llms import NIBittensorLLM
```
It provides a unified interface for all models:
```python
llm = NIBittensorLLM(system_prompt="Your task is to provide concise and accurate response based on user prompt")
print(llm('Write a fibonacci function in python with golden ratio'))
```
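
You can also plug it into an `LLMChain`; this mirrors the notebook example above (a minimal sketch, assuming network access to the validator endpoint):
```python
from langchain import PromptTemplate, LLMChain
from langchain.llms import NIBittensorLLM

template = """Question: {question}

Answer: Let's think step by step."""

prompt = PromptTemplate(template=template, input_variables=["question"])
llm = NIBittensorLLM(system_prompt="Your task is to determine a response based on the user prompt.")

llm_chain = LLMChain(prompt=prompt, llm=llm)
llm_chain.run("What is bittensor?")
```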
Multiple responses from top miners can be accessed using the `top_responses` parameter:
```python
import json

multi_response_llm = NIBittensorLLM(top_responses=10)
multi_resp = multi_response_llm("What is Neural Network Feeding Mechanism?")
json_multi_resp = json.loads(multi_resp)
print(json_multi_resp)
```

@@ -31,6 +31,7 @@ from langchain.llms.base import BaseLLM
from langchain.llms.baseten import Baseten
from langchain.llms.beam import Beam
from langchain.llms.bedrock import Bedrock
from langchain.llms.bittensor import NIBittensorLLM
from langchain.llms.cerebriumai import CerebriumAI
from langchain.llms.chatglm import ChatGLM
from langchain.llms.clarifai import Clarifai
@@ -127,6 +128,7 @@ __all__ = [
"Modal",
"MosaicML",
"Nebula",
"NIBittensorLLM",
"NLPCloud",
"Ollama",
"OpenAI",
@@ -195,6 +197,7 @@ type_to_cls_dict: Dict[str, Type[BaseLLM]] = {
"modal": Modal,
"mosaic": MosaicML,
"nebula": Nebula,
"nibittensor": NIBittensorLLM,
"nlpcloud": NLPCloud,
"ollama": Ollama,
"openai": OpenAI,

@@ -0,0 +1,173 @@
import http.client
import json
import ssl
from typing import Any, List, Mapping, Optional

from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM


class NIBittensorLLM(LLM):
    """NIBittensorLLM is created by Neural Internet (https://neuralinternet.ai/),
    powered by Bittensor, a decentralized network full of different AI models.

    To analyze your API keys and usage logs, visit
        https://api.neuralinternet.ai/api-keys
        https://api.neuralinternet.ai/logs

    Example:
        .. code-block:: python

            from langchain.llms import NIBittensorLLM
            llm = NIBittensorLLM()
    """

    system_prompt: Optional[str]
    """A system prompt supplied to the model before every user prompt."""

    top_responses: Optional[int] = 0
    """Set top_responses to get the top N miner responses in one request.
    Responses may be delayed; don't use in production."""

    @property
    def _llm_type(self) -> str:
        return "NIBittensorLLM"

    def _call(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> str:
        """Wrapper around the Bittensor top miner models, built by Neural Internet.

        Calls Neural Internet's BTVEP server and returns the output.

        Parameters (optional):
            system_prompt (str): A system prompt defining how the model should
                respond.
            top_responses (int): Number of top miner responses to retrieve from the
                Bittensor protocol.

        Return:
            The generated response(s).

        Example:
            .. code-block:: python

                from langchain.llms import NIBittensorLLM
                llm = NIBittensorLLM(system_prompt="Act like you are a programmer \
                with 5+ years of experience.")
        """

        # Create an HTTPS connection with SSL verification enabled.
        context = ssl.create_default_context()
        context.check_hostname = True
        conn = http.client.HTTPSConnection("test.neuralinternet.ai", context=context)

        # Sanitize user input before passing it to the API.
        if isinstance(self.top_responses, int):
            top_n = min(100, self.top_responses)
        else:
            top_n = 0

        default_prompt = (
            "You are an assistant which is created by Neural Internet(NI) "
            "in decentralized network named as a Bittensor."
        )
        if self.system_prompt is None:
            system_prompt = (
                default_prompt
                + " Your task is to provide accurate response based on user prompt"
            )
        else:
            system_prompt = default_prompt + str(self.system_prompt)

        # Retrieve an API key to pass in the header of each request.
        conn.request("GET", "/admin/api-keys/")
        api_key_response = conn.getresponse()
        api_keys_data = (
            api_key_response.read().decode("utf-8").replace("\n", "").replace("\t", "")
        )
        api_keys_json = json.loads(api_keys_data)
        api_key = api_keys_json[0]["api_key"]

        # Build the headers and fetch the top benchmark miner UIDs.
        headers = {
            "Content-Type": "application/json",
            "Authorization": f"Bearer {api_key}",
            "Endpoint-Version": "2023-05-19",
        }
        conn.request("GET", "/top_miner_uids", headers=headers)
        miner_response = conn.getresponse()
        miner_data = (
            miner_response.read().decode("utf-8").replace("\n", "").replace("\t", "")
        )
        uids = json.loads(miner_data)

        # Benchmark-miner path: query the benchmark miners one at a time and
        # return the first valid reply.
        if isinstance(uids, list) and uids and not top_n:
            for uid in uids:
                try:
                    payload = json.dumps(
                        {
                            "uids": [uid],
                            "messages": [
                                {"role": "system", "content": system_prompt},
                                {"role": "user", "content": prompt},
                            ],
                        }
                    )
                    conn.request("POST", "/chat", payload, headers)
                    init_response = conn.getresponse()
                    init_data = (
                        init_response.read()
                        .decode("utf-8")
                        .replace("\n", "")
                        .replace("\t", "")
                    )
                    init_json = json.loads(init_data)
                    if "choices" not in init_json:
                        continue
                    reply = init_json["choices"][0]["message"]["content"]
                    conn.close()
                    return reply
                except Exception:
                    continue

        # Top-miner path: ask the protocol for the top_n miner responses at once.
        try:
            payload = json.dumps(
                {
                    "top_n": top_n,
                    "messages": [
                        {"role": "system", "content": system_prompt},
                        {"role": "user", "content": prompt},
                    ],
                }
            )
            conn.request("POST", "/chat", payload, headers)
            response = conn.getresponse()
            utf_string = (
                response.read().decode("utf-8").replace("\n", "").replace("\t", "")
            )
            if top_n:
                conn.close()
                return utf_string
            json_resp = json.loads(utf_string)
            reply = json_resp["choices"][0]["message"]["content"]
            conn.close()
            return reply
        except Exception as e:
            conn.request("GET", f"/error_msg?e={e}&p={prompt}", headers=headers)
            return "Sorry I am unable to provide response now, Please try again later."

    @property
    def _identifying_params(self) -> Mapping[str, Any]:
        """Get the identifying parameters."""
        return {
            "system_prompt": self.system_prompt,
            "top_responses": self.top_responses,
        }
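
Taken together, `_call` has two paths: with `top_responses` unset (0) it iterates the benchmark miner UIDs and returns the first valid reply; with `top_responses` set it returns the raw JSON string containing up to N miner replies. A minimal usage sketch of both paths (assumes network access to the BTVEP endpoint at test.neuralinternet.ai):

```python
import json

from langchain.llms import NIBittensorLLM

# Default path: first valid reply from the benchmark miners.
llm = NIBittensorLLM()
print(llm("Say hello"))

# top_responses path: raw JSON string with up to 5 miner replies.
multi_llm = NIBittensorLLM(top_responses=5)
print(json.loads(multi_llm("Say hello")))
```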

@@ -0,0 +1,10 @@
"""Test Bittensor Validator Endpoint wrapper."""
from langchain.llms import NIBittensorLLM
def test_bittensor_call() -> None:
"""Test valid call to validator endpoint."""
llm = NIBittensorLLM(system_prompt="Your task is to answer user prompt.")
output = llm("Say foo:")
assert isinstance(output, str)