# Harrison/banana fix (#1311)

Co-authored-by: Erik Dunteman <44653944+erik-dunteman@users.noreply.github.com>
This page covers how to use the Banana ecosystem within LangChain.
It is broken into two parts: installation and setup, and then references to specific Banana wrappers.
## Installation and Setup
- Install with `pip3 install banana-dev`
- Get a Banana API key and set it as an environment variable (`BANANA_API_KEY`), as sketched below
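
As a minimal sketch (assuming you want to set the key from Python rather than from your shell; the key value is a placeholder):

```python
import os

# Hypothetical placeholder; use the API key from your Banana dashboard
os.environ["BANANA_API_KEY"] = "YOUR_API_KEY"
```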
## Define your Banana Template
If you want to use an available language model template, you can find one [here](https://app.banana.dev/templates/conceptofmind/serverless-template-palmyra-base).
This template uses the Palmyra-Base model by [Writer](https://writer.com/product/api/).
You can check out an example Banana repository [here](https://github.com/conceptofmind/serverless-template-palmyra-base).
## Build the Banana app
Banana apps must include the "output" key in the returned JSON. There is a rigid response structure.
```python
# Return the results as a dictionary
result = {'output': result}
```
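For context: judging by the parsing logic in the LangChain wrapper shown at the end of this page (`response["modelOutputs"][0]["output"]`), Banana's API wraps this dictionary in a `modelOutputs` envelope. A sketch of the overall response shape, inferred from that parsing logic rather than taken from Banana's own docs:

```python
# Inferred response envelope; only the inner {'output': ...} dict
# comes from your app's return value.
response = {
    "modelOutputs": [
        {"output": "the generated text"}
    ]
}
```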
An example inference function would be:
```python
def inference(model_inputs:dict) -> dict:
    global model

    # Parse the prompt out of the request inputs
    prompt = model_inputs.get('prompt', None)
    if prompt is None:
        return {'message': "No prompt provided"}

    # Run the model
    input_ids = tokenizer.encode(prompt, return_tensors='pt').cuda()
    output = model.generate(
        input_ids,
        max_length=100,
        do_sample=True,
        top_k=50,
        top_p=0.95,
        num_return_sequences=1,
        temperature=0.9,
        early_stopping=True,
        no_repeat_ngram_size=3,
        num_beams=5,
        length_penalty=1.5,
        repetition_penalty=1.5,
        bad_words_ids=[[tokenizer.encode(' ', add_prefix_space=True)[0]]]
    )

    # Decode the generated tokens and return them under the required "output" key
    result = tokenizer.decode(output[0], skip_special_tokens=True)
    result = {'output': result}
    return result
```
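Once deployed, the app can be called with the `banana_dev` client, using the same `banana.run(api_key, model_key, model_inputs)` call the LangChain wrapper makes internally. A minimal sketch (the model key and prompt are placeholders):

```python
import os

import banana_dev as banana

api_key = os.environ["BANANA_API_KEY"]
model_key = "YOUR_MODEL_KEY"  # hypothetical key from the Banana dashboard

response = banana.run(api_key, model_key, {"prompt": "What is the capital of France?"})

# The generated text sits under the "output" key of the first model output
print(response["modelOutputs"][0]["output"])
```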
You can find a full example of a Banana app [here](https://github.com/conceptofmind/serverless-template-palmyra-base/blob/main/app.py).
## Wrappers
### LLM
There exists a Banana LLM wrapper, which you can access with:
```python
from langchain.llms import Banana
```
You need to provide a model key located in the dashboard:
```python
llm = Banana(model_key="YOUR_MODEL_KEY")
```
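As with other LangChain LLMs, the wrapper can then be called directly on a prompt string; a small illustrative usage (the prompt is arbitrary):

```python
print(llm("What is the capital of France?"))
```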
Alongside the docs change, this commit updates the error handling inside the wrapper (`class Banana(LLM, BaseModel)`), so that a response that does not match the schema above produces an actionable error:

```python
response = banana.run(api_key, model_key, model_inputs)
try:
    text = response["modelOutputs"][0]["output"]
except (KeyError, TypeError):
    returned = response["modelOutputs"][0]
    raise ValueError(
        "Response should be of schema: {'output': 'text'}."
        f"\nResponse was: {returned}"
        "\nTo fix this:"
        "\n- fork the source repo of the Banana model"
        "\n- modify app.py to return the above schema"
        "\n- deploy that as a custom repo"
    )
if stop is not None:
    # I believe this is required since the stop tokens
    # are not enforced by the model parameters
    text = enforce_stop_tokens(text, stop)
```