diff --git a/lecl.py b/lecl.py
new file mode 100644
index 0000000..5ce798a
--- /dev/null
+++ b/lecl.py
@@ -0,0 +1,80 @@
+#!/usr/bin/env python3
+# vim: set expandtab sw=4 ts=4 sts=4 foldmethod=indent filetype=python:
+
+import asyncio
+import os
+from langchain_core.output_parsers import StrOutputParser
+from langchain_core.runnables import RunnablePassthrough
+from langchain_openai import ChatOpenAI
+
+# point the OpenAI-compatible client at the local LocalAI server
+os.environ['OPENAI_API_KEY'] = '2708b7c21129e408899d5a38e6d1af8d'
+os.environ['OPENAI_API_BASE'] = 'http://localai.srvlan:8080'
+
+
+#|%%--%%| <9871Wi18GN|qMj6mA5jLr>
+r"""°°°
+# Basic invocation
+°°°"""
+
+#|%%--%%|
+
+llm = ChatOpenAI(model="dolphin-mixtral", temperature=0.2, max_tokens=100)
+
+
+#|%%--%%|
+
+llm.invoke("how can you help me ?")
+
+#|%%--%%|
+r"""°°°
+# Using prompt templates
+°°°"""
+#|%%--%%|
+
+from langchain_core.prompts import ChatPromptTemplate
+
+prompt = ChatPromptTemplate.from_messages([
+    ("system", "You are a genius DIY maker assistant."),
+    ("user", "{input}")
+    ])
+
+
+# combine the prompt and the llm into a chain with the LCEL pipe operator
+
+chain = prompt | llm
+
+chain.invoke(dict(input="what are the best adhesives to bind metal to wood ?"))
+
+
+#|%%--%%|
+r"""°°°
+# Creating a chain
+
+- Every element in the chain implements the Runnable interface (invoke, stream, ainvoke, batch, ...)
+°°°"""
+#|%%--%%|
+
+prompt = ChatPromptTemplate.from_template("Tell me a short joke about {topic}")
+output_parser = StrOutputParser()
+llm = ChatOpenAI(model="dolphin-mixtral", temperature=0.6, max_tokens=100)
+chain = (
+    {"topic": RunnablePassthrough()}
+    | prompt
+    | llm
+    | output_parser
+    )
+
+chain.invoke("ice cream")
+
+#|%%--%%|
+r"""°°°
+# Streaming the response
+https://python.langchain.com/docs/expression_language/streaming
+°°°"""
+#|%%--%%|
+
+# the chain ends with StrOutputParser, so the streamed chunks are plain strings
+async for chunk in chain.astream("binding metal to plastic"):
+    print(chunk, end="", flush=True)
+
diff --git a/test_localai.ipynb b/test_localai.ipynb
new file mode 100644
index 0000000..27b6ac5
--- /dev/null
+++ b/test_localai.ipynb
@@ -0,0 +1,26 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "metadata": {
+    "jukit_cell_id": "NONE"
+   },
+   "source": [
+    "#!/usr/bin/env python3\n",
+    "# vim: set expandtab sw=4 ts=4 sts=4 foldmethod=indent filetype=python:"
+   ],
+   "outputs": [],
+   "execution_count": null
+  }
+ ],
+ "metadata": {
+  "anaconda-cloud": {},
+  "kernelspec": {
+   "display_name": "python",
+   "language": "python",
+   "name": "python3"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 4
+}
\ No newline at end of file
diff --git a/test_localai.py b/test_localai.py
new file mode 100644
index 0000000..72d062f
--- /dev/null
+++ b/test_localai.py
@@ -0,0 +1,45 @@
+#|%%--%%|
+
+from langchain_core.messages import HumanMessage, SystemMessage
+from langchain_openai import ChatOpenAI
+
+#|%%--%%|
+
+messages = [
+    SystemMessage(content="You are a helpful assistant."),
+    HumanMessage(content="what is a good company name for colorful socks ?")
+    ]
+
+#|%%--%%| <5hpjZkupU5|qXWlUuUqLc>
+
+chat = ChatOpenAI(temperature=0.2, model="dolphin-mixtral", max_tokens=50)
+
+
+#|%%--%%|
+
+result = chat.invoke(messages)
+
+
+#|%%--%%| <00yiGVKIRw|ZrwfqMozR5>
+
+print(result.content)
+
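+
+#|%%--%%|
+r"""°°°
+# Batch calls (sketch)
+
+Because ChatOpenAI is a Runnable, batch() fans several inputs out in one
+call; abatch() and ainvoke() are the async variants. Untested sketch
+against the same local endpoint.
+°°°"""
+#|%%--%%|
+
+# each input is its own message list; results come back in input order
+results = chat.batch([
+    [HumanMessage(content="what is a good name for a sock mascot ?")],
+    [HumanMessage(content="what is a good slogan for colorful socks ?")],
+    ])
+
+for res in results:
+    print(res.content)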