mirror of https://github.com/hwchase17/langchain
Sphinxbio nls/add plate chain template (#12502)
Co-authored-by: Nicholas Larus-Stone <7347808+nlarusstone@users.noreply.github.com> Co-authored-by: Eugene Yurtsev <eyurtsev@gmail.com>pull/12505/head
parent
221134d239
commit
61f5ea4b5e
@ -0,0 +1,13 @@
|
||||
Copyright 2023 SphinxBio, Inc.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
@ -0,0 +1 @@
|
||||
# plate-chain
|
@ -0,0 +1,34 @@
|
||||
,1,2,3,4,5,6,7,8,9,10,11,12
|
||||
A,0.065,0.063,0.061,0.070,0.061,0.060,0.065,0.064,0.063,0.063,0.059,0.073
|
||||
B,0.188,0.164,0.213,0.219,0.161,0.176,0.184,0.172,0.176,0.164,0.181,0.173
|
||||
C,0.207,0.190,0.218,0.190,0.211,0.182,0.215,0.211,0.187,0.184,0.208,0.171
|
||||
D,0.076,0.081,0.093,0.071,0.081,0.075,0.106,0.071,0.073,0.084,0.076,0.081
|
||||
E,0.076,0.069,0.082,0.099,0.094,0.072,0.086,0.064,0.070,0.067,0.068,0.074
|
||||
F,0.080,0.067,0.077,0.067,0.068,0.066,0.069,0.074,0.068,0.078,0.065,0.066
|
||||
G,0.061,0.076,0.063,0.069,0.083,0.074,0.071,0.067,0.066,0.067,0.067,0.068
|
||||
H,0.080,0.090,0.074,0.066,0.074,0.075,0.076,0.079,0.071,0.066,0.063,0.069
|
||||
|
||||
|
||||
|
||||
,1,2,3,4,5,6,7,8,9,10,11,12
|
||||
A,Sample 1,Sample 2,Sample 3,Sample 4,Sample 5,Sample 6,Sample 7,Sample 8,Sample 9,Sample 10,Sample 11,Sample 12
|
||||
B,Sample 13,Sample 14,Sample 15,Sample 16,Sample 17,Sample 18,Sample 19,Sample 20,Sample 21,Sample 22,Sample 23,Sample 24
|
||||
C,Sample 25,Sample 26,Sample 27,Sample 28,Sample 29,Sample 30,Sample 31,Sample 32,Sample 33,Sample 34,Sample 35,Sample 36
|
||||
D,Sample 37,Sample 38,Sample 39,Sample 40,Sample 41,Sample 42,Sample 43,Sample 44,Sample 45,Sample 46,Sample 47,Sample 48
|
||||
E,Sample 49,Sample 50,Sample 51,Sample 52,Sample 53,Sample 54,Sample 55,Sample 56,Sample 57,Sample 58,Sample 59,Sample 60
|
||||
F,Sample 61,Sample 62,Sample 63,Sample 64,Sample 65,Sample 66,Sample 67,Sample 68,Sample 69,Sample 70,Sample 71,Sample 72
|
||||
G,Sample 73,Sample 74,Sample 75,Sample 76,Sample 77,Sample 78,Sample 79,Sample 80,Sample 81,Sample 82,Sample 83,Sample 84
|
||||
H,Sample 85,Sample 86,Sample 87,Sample 88,Sample 89,Sample 90,Sample 91,Sample 92,Sample 93,Sample 94,Sample 95,Sample 96
|
||||
|
||||
|
||||
|
||||
Sample Wavelength (nm),540
|
||||
Reference Wavelength (nm),-1
|
||||
|
||||
|
||||
|
||||
Smart Validation,Pass,
|
||||
Plate Well Assignment,Pass,Measured values of assigned wells indicate correct plate layout.
|
||||
Plate Orientation,,
|
||||
Replicates,,
|
||||
Standards,,
|
Can't render this file because it has a wrong number of fields in line 25.
|
@ -0,0 +1,2 @@
|
||||
|
||||
# Declare the package's public API for wildcard imports.
# NOTE: this was previously misspelled `__ALL__`, which Python ignores —
# only the lowercase `__all__` controls `from package import *`.
__all__ = ["chain"]
|
@ -0,0 +1,91 @@
|
||||
import base64
import json
from typing import Optional

from langchain.chat_models import ChatOpenAI
from langchain.prompts import ChatPromptTemplate, SystemMessagePromptTemplate
from langchain.pydantic_v1 import Field
from langchain.schema.output_parser import StrOutputParser
from langserve import CustomUserType

from .prompts import (
    AI_REPONSE_DICT,
    FULL_PROMPT,
    USER_EXAMPLE_DICT,
    create_prompt,
)
from .utils import parse_llm_output
|
||||
|
||||
# Deterministic (temperature 0) GPT-4 chat model driving the chain.
llm = ChatOpenAI(model="gpt-4", temperature=0)
|
||||
|
||||
# Few-shot chat prompt: system instructions, one worked example
# (human input + AI answer), then the actual user input.
_messages = [
    SystemMessagePromptTemplate.from_template(FULL_PROMPT),
    ("human", "{user_example}"),
    ("ai", "{ai_response}"),
    ("human", "{input}"),
]
prompt = ChatPromptTemplate.from_messages(_messages)
|
||||
|
||||
|
||||
# ATTENTION: Inherit from CustomUserType instead of BaseModel otherwise
# the server will decode it into a dict instead of a pydantic model.
class FileProcessingRequest(CustomUserType):
    """Request including a base64 encoded file."""

    # The extra field is used to specify a widget for the playground UI.
    file: str = Field(..., extra={"widget": {"type": "base64file"}})
    # Optional plate count, forwarded to prompt construction.
    # FIX: was annotated `int` with a `None` default — inconsistent typing;
    # `Optional[int]` matches the actual default.
    num_plates: Optional[int] = None
    # Plate dimensions; 8 x 12 by default (matches the 96-well example data).
    num_rows: int = 8
    num_cols: int = 12
|
||||
|
||||
|
||||
def _load_file(request: FileProcessingRequest):
    """Decode the request's base64-encoded file contents into text."""
    raw = base64.b64decode(request.file.encode("utf-8"))
    return raw.decode("utf-8")
|
||||
|
||||
|
||||
def _load_prompt(request: FileProcessingRequest):
    """Build the plate-layout hint prompt from the request's dimensions."""
    dims = {
        "num_plates": request.num_plates,
        "num_rows": request.num_rows,
        "num_cols": request.num_cols,
    }
    return create_prompt(**dims)
|
||||
|
||||
def _get_col_range_str(request: FileProcessingRequest):
    """Human-readable column range, or "" when num_cols is falsy."""
    cols = request.num_cols
    return f"from 1 to {cols}" if cols else ""
|
||||
|
||||
|
||||
def _get_json_format(request: FileProcessingRequest):
    """Render the example JSON output shape the LLM should produce.

    Row/column bounds are derived from the requested plate dimensions;
    row_start of 12 mirrors the worked example in the prompt.
    """
    example_plate = {
        "row_start": 12,
        "row_end": 12 + request.num_rows - 1,
        "col_start": 1,
        "col_end": 1 + request.num_cols - 1,
        "contents": "Entity ID",
    }
    return json.dumps([example_plate])
|
||||
|
||||
def _lookup_user_example(request: FileProcessingRequest):
    """Few-shot human example keyed by total well count (rows * cols)."""
    return USER_EXAMPLE_DICT[request.num_rows * request.num_cols]


def _lookup_ai_response(request: FileProcessingRequest):
    """Few-shot AI answer keyed by total well count (rows * cols)."""
    return AI_REPONSE_DICT[request.num_rows * request.num_cols]


# Map the incoming request onto prompt variables, run the LLM, then parse
# its raw string output into structured plate data.
chain = (
    {
        # Should add validation to ensure numeric indices
        "input": _load_file,
        "hint": _load_prompt,
        "col_range_str": _get_col_range_str,
        "json_format": _get_json_format,
        "user_example": _lookup_user_example,
        "ai_response": _lookup_ai_response,
    }
    | prompt
    | llm
    | StrOutputParser()
    | parse_llm_output
).with_types(input_type=FileProcessingRequest)
|
File diff suppressed because one or more lines are too long
@ -0,0 +1,31 @@
|
||||
import json
|
||||
|
||||
from pydantic import BaseModel, Field, conint
|
||||
|
||||
|
||||
class LLMPlateResponse(BaseModel):
    """Structured description of one plate region parsed from LLM output.

    All indices are 0-indexed and validated as non-negative via conint(ge=0).
    """

    row_start: conint(ge=0) = Field(
        ..., description="The starting row of the plate (0-indexed)"
    )
    row_end: conint(ge=0) = Field(
        ..., description="The ending row of the plate (0-indexed)"
    )
    col_start: conint(ge=0) = Field(
        ..., description="The starting column of the plate (0-indexed)"
    )
    col_end: conint(ge=0) = Field(
        ..., description="The ending column of the plate (0-indexed)"
    )
    # Free-text label for the region (e.g. "Entity ID" in the prompt example).
    contents: str
|
||||
|
||||
|
||||
def parse_llm_output(result: str):
    """Deserialize the LLM's JSON answer into LLMPlateResponse models.

    Based on the prompt we expect the result to be a string that looks like:

    '[{"row_start": 12, "row_end": 19, "col_start": 1, \
    "col_end": 12, "contents": "Entity ID"}]'
    """
    plate_dicts = json.loads(result)
    return [LLMPlateResponse(**plate_dict) for plate_dict in plate_dicts]
|
File diff suppressed because it is too large
Load Diff
@ -0,0 +1,25 @@
|
||||
[tool.poetry]
|
||||
name = "plate_chain"
|
||||
version = "0.0.1"
|
||||
description = ""
|
||||
authors = []
|
||||
readme = "README.md"
|
||||
|
||||
[tool.poetry.dependencies]
|
||||
python = ">=3.9,<4.0"
|
||||
langchain = ">=0.0.313, <0.1"
|
||||
openai = "^0.28.1"
|
||||
langserve = "^0.0.19"
|
||||
|
||||
[tool.poetry.group.dev.dependencies]
|
||||
langchain-cli = ">=0.0.4"
|
||||
fastapi = "^0.104.0"
|
||||
sse-starlette = "^1.6.5"
|
||||
|
||||
[tool.langserve]
|
||||
export_module = "plate_chain.__init__"
|
||||
export_attr = "chain"
|
||||
|
||||
[build-system]
|
||||
requires = ["poetry-core"]
|
||||
build-backend = "poetry.core.masonry.api"
|
Loading…
Reference in New Issue