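"""Prompty loading and execution entry points.

load() parses a .prompty file's frontmatter and body into a Prompty
object, prepare() renders and parses its template, run() executes the
prepared content against the configured model, and execute() chains all
three steps.
"""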
import traceback
from pathlib import Path
from typing import Any, Dict, List, Union

from .core import (
    Frontmatter,
    InvokerFactory,
    ModelSettings,
    Prompty,
    PropertySettings,
    SimpleModel,
    TemplateSettings,
    param_hoisting,
)


def load(prompt_path: str, configuration: str = "default") -> Prompty:
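    """Load a .prompty file into a Prompty object.

    Frontmatter supplies the model, template, input, and output settings;
    the body becomes the prompt content. Relative paths are resolved
    against the caller's location, and a "base" prompty, if declared,
    is loaded recursively.
    """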
    file_path = Path(prompt_path)
    if not file_path.is_absolute():
        # get caller's path (take into account trace frame)
        caller = Path(traceback.extract_stack()[-3].filename)
        file_path = Path(caller.parent / file_path).resolve().absolute()

    # load dictionary from prompty file
    matter = Frontmatter.read_file(file_path.__fspath__())
    attributes = matter["attributes"]
    content = matter["body"]

    # normalize attribute dictionary: resolve keys and files
    attributes = Prompty.normalize(attributes, file_path.parent)

    # ensure a model section exists before pulling it out
    if "model" not in attributes:
        attributes["model"] = {}

    # pull model settings out of attributes
    try:
        model = ModelSettings(**attributes.pop("model"))
    except Exception as e:
        raise ValueError(f"Error in model settings: {e}")

    # pull template settings
    try:
        if "template" in attributes:
            t = attributes.pop("template")
            if isinstance(t, dict):
                template = TemplateSettings(**t)
            # has to be a string denoting the type
            else:
                template = TemplateSettings(type=t, parser="prompty")
        else:
            template = TemplateSettings(type="mustache", parser="prompty")
    except Exception as e:
        raise ValueError(f"Error in template loader: {e}")

    # formalize inputs and outputs
    if "inputs" in attributes:
        try:
            inputs = {
                k: PropertySettings(**v) for (k, v) in attributes.pop("inputs").items()
            }
        except Exception as e:
            raise ValueError(f"Error in inputs: {e}")
    else:
        inputs = {}
    if "outputs" in attributes:
        try:
            outputs = {
                k: PropertySettings(**v) for (k, v) in attributes.pop("outputs").items()
            }
        except Exception as e:
            raise ValueError(f"Error in outputs: {e}")
    else:
        outputs = {}

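    # a prompty may declare a "base" it inherits from; the child's settings
    # take precedence, and anything it leaves unset is hoisted from the base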
    # recursive loading of base prompty
    if "base" in attributes:
        # load the base prompty from the same directory as the current prompty
        base = load(file_path.parent / attributes["base"])
        # hoist the base prompty's attributes to the current prompty
        model.api = base.model.api if model.api == "" else model.api
        model.configuration = param_hoisting(
            model.configuration, base.model.configuration
        )
        model.parameters = param_hoisting(model.parameters, base.model.parameters)
        model.response = param_hoisting(model.response, base.model.response)
        attributes["sample"] = param_hoisting(attributes, base.sample, "sample")

        p = Prompty(
            **attributes,
            model=model,
            inputs=inputs,
            outputs=outputs,
            template=template,
            content=content,
            file=file_path,
            basePrompty=base,
        )
    else:
        p = Prompty(
            **attributes,
            model=model,
            inputs=inputs,
            outputs=outputs,
            template=template,
            content=content,
            file=file_path,
        )

    return p


def prepare(
    prompt: Prompty,
    inputs: Dict[str, Any] = {},
) -> Any:
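    """Render the prompt template with the given inputs, then parse the
    rendered text into the structure the model API expects.
    """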
    invoker = InvokerFactory()

    inputs = param_hoisting(inputs, prompt.sample)

    if prompt.template.type == "NOOP":
        render = prompt.content
    else:
        # render
        result = invoker(
            "renderer",
            prompt.template.type,
            prompt,
            SimpleModel(item=inputs),
        )
        render = result.item

    if prompt.template.parser == "NOOP":
        result = render
    else:
        # parse the rendered content with the [parser].[api] parser
        result = invoker(
            "parser",
            f"{prompt.template.parser}.{prompt.model.api}",
            prompt,
            SimpleModel(item=render),
        )

    if isinstance(result, SimpleModel):
        return result.item
    else:
        return result


def run(
    prompt: Prompty,
    content: Union[Dict, List, str],
    configuration: Dict[str, Any] = {},
    parameters: Dict[str, Any] = {},
    raw: bool = False,
) -> Any:
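    """Execute the prepared content against the configured model.

    Overrides in configuration and parameters take precedence over the
    prompty's own model settings. When raw is True, the executor's
    response is returned without post-processing.
    """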
    invoker = InvokerFactory()

    if configuration != {}:
        prompt.model.configuration = param_hoisting(
            configuration, prompt.model.configuration
        )

    if parameters != {}:
        prompt.model.parameters = param_hoisting(parameters, prompt.model.parameters)

    # execute
    result = invoker(
        "executor",
        prompt.model.configuration["type"],
        prompt,
        SimpleModel(item=content),
    )

    # post-process the response unless raw output was requested
    if not raw:
        result = invoker(
            "processor",
            prompt.model.configuration["type"],
            prompt,
            result,
        )

    if isinstance(result, SimpleModel):
        return result.item
    else:
        return result


def execute(
    prompt: Union[str, Prompty],
    configuration: Dict[str, Any] = {},
    parameters: Dict[str, Any] = {},
    inputs: Dict[str, Any] = {},
    raw: bool = False,
    connection: str = "default",
) -> Any:
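    """Load (if given a path), prepare, and run a prompty in one call."""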
    if isinstance(prompt, str):
        prompt = load(prompt, connection)

    # prepare content
    content = prepare(prompt, inputs)

    # run LLM model
    result = run(prompt, content, configuration, parameters, raw)

    return result
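

# A minimal usage sketch, assuming this module is the top of the `prompty`
# package and that a "basic.prompty" file (hypothetical name) sits next to
# the calling script with a valid model configuration:
#
#     import prompty
#
#     p = prompty.load("basic.prompty")
#     messages = prompty.prepare(p, {"question": "What is 2 + 2?"})
#     response = prompty.run(p, messages)
#
#     # or the one-shot form:
#     response = prompty.execute("basic.prompty", inputs={"question": "What is 2 + 2?"})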