From a2681f950d31a417d658062b69d4517674f70782 Mon Sep 17 00:00:00 2001 From: arjunbansal Date: Tue, 8 Aug 2023 19:15:31 -0700 Subject: [PATCH] add instructions on integrating Log10 (#8938) - Description: Instruction for integration with Log10: an [open source](https://github.com/log10-io/log10) proxiless LLM data management and application development platform that lets you log, debug and tag your LangChain calls - Tag maintainer: @baskaryan - Twitter handle: @log10io @coffeephoenix Several examples showing the integration included [here](https://github.com/log10-io/log10/tree/main/examples/logging) and in the PR --- docs/docs_skeleton/vercel.json | 8 ++ docs/extras/integrations/providers/log10.mdx | 104 +++++++++++++++++++ 2 files changed, 112 insertions(+) create mode 100644 docs/extras/integrations/providers/log10.mdx diff --git a/docs/docs_skeleton/vercel.json b/docs/docs_skeleton/vercel.json index 3e78e1407d..224afa7ba4 100644 --- a/docs/docs_skeleton/vercel.json +++ b/docs/docs_skeleton/vercel.json @@ -556,6 +556,14 @@ "source": "/docs/integrations/llamacpp", "destination": "/docs/integrations/providers/llamacpp" }, + { + "source": "/en/latest/integrations/log10.html", + "destination": "/docs/integrations/providers/log10" + }, + { + "source": "/docs/integrations/log10", + "destination": "/docs/integrations/providers/log10" + }, { "source": "/en/latest/integrations/mediawikidump.html", "destination": "/docs/integrations/providers/mediawikidump" diff --git a/docs/extras/integrations/providers/log10.mdx b/docs/extras/integrations/providers/log10.mdx new file mode 100644 index 0000000000..6511cb8d43 --- /dev/null +++ b/docs/extras/integrations/providers/log10.mdx @@ -0,0 +1,104 @@ +# Log10 + +This page covers how to use [Log10](https://log10.io) within LangChain. + +## What is Log10?
+ +Log10 is an [open source](https://github.com/log10-io/log10) proxiless LLM data management and application development platform that lets you log, debug and tag your LangChain calls. + +## Quick start + +1. Create your free account at [log10.io](https://log10.io) +2. Add your `LOG10_TOKEN` and `LOG10_ORG_ID` from the Settings and Organization tabs respectively as environment variables. +3. Also add `LOG10_URL=https://log10.io` and your usual LLM API key (e.g. `OPENAI_API_KEY` or `ANTHROPIC_API_KEY`) to your environment + +## How to enable Log10 data management for LangChain + +Integration with Log10 is a simple one-line `log10_callback` integration as shown below: + +```python +from langchain.chat_models import ChatOpenAI +from langchain.schema import HumanMessage + +from log10.langchain import Log10Callback +from log10.llm import Log10Config + +log10_callback = Log10Callback(log10_config=Log10Config()) + +messages = [ + HumanMessage(content="You are a ping pong machine"), + HumanMessage(content="Ping?"), +] + +llm = ChatOpenAI(model_name="gpt-3.5-turbo", callbacks=[log10_callback]) +``` + +[Log10 + LangChain + Logs docs](https://github.com/log10-io/log10/blob/main/logging.md#langchain-logger) + +[More details + screenshots](https://log10.io/docs/logs) including instructions for self-hosting logs + +## How to use tags with Log10 + +```python +from langchain import OpenAI +from langchain.chat_models import ChatAnthropic +from langchain.chat_models import ChatOpenAI +from langchain.schema import HumanMessage + +from log10.langchain import Log10Callback +from log10.llm import Log10Config + +log10_callback = Log10Callback(log10_config=Log10Config()) + +messages = [ + HumanMessage(content="You are a ping pong machine"), + HumanMessage(content="Ping?"), +] + +llm = ChatOpenAI(model_name="gpt-3.5-turbo", callbacks=[log10_callback], temperature=0.5, tags=["test"]) +completion = llm.predict_messages(messages, tags=["foobar"]) +print(completion) + +llm =
ChatAnthropic(model="claude-2", callbacks=[log10_callback], temperature=0.7, tags=["baz"]) +completion = llm.predict_messages(messages) +print(completion) + +llm = OpenAI(model_name="text-davinci-003", callbacks=[log10_callback], temperature=0.5) +completion = llm.predict("You are a ping pong machine.\nPing?\n") +print(completion) +``` + +You can also intermix direct OpenAI calls and LangChain LLM calls: + +```python +import os +from log10.load import log10, log10_session +import openai +from langchain import OpenAI + +log10(openai) + +with log10_session(tags=["foo", "bar"]): + # Log a direct OpenAI call + response = openai.Completion.create( + model="text-ada-001", + prompt="Where is the Eiffel Tower?", + temperature=0, + max_tokens=1024, + top_p=1, + frequency_penalty=0, + presence_penalty=0, + ) + print(response) + + # Log a call via LangChain + llm = OpenAI(model_name="text-ada-001", temperature=0.5) + response = llm.predict("You are a ping pong machine.\nPing?\n") + print(response) +``` + +## How to debug LangChain calls + +[Example of debugging](https://log10.io/docs/prompt_chain_debugging) + +[More LangChain examples](https://github.com/log10-io/log10/tree/main/examples#langchain)