forked from Archives/langchain
Compare commits
88 Commits
Author | SHA1 | Date |
---|---|---|
blob42 | 84d7ad397d | 1 year ago |
blob42 | de551d62a8 | 1 year ago |
blob42 | d8fd0e790c | 1 year ago |
blob42 | 97c2b31cc5 | 1 year ago |
blob42 | f1dc03d0cc | 1 year ago |
Harrison Chase | f76e9eaab1 | 1 year ago |
Harrison Chase | db2e9c2b0d | 1 year ago |
Tim Asp | d22651d82a | 1 year ago |
Matt Robinson | c46478d70e | 1 year ago |
Eugene Yurtsev | e3fcc72879 | 1 year ago |
blob42 | 2fdb1d842b | 1 year ago |
blob42 | c30ef7dbc4 | 1 year ago |
blob42 | 8a7871ece3 | 1 year ago |
blob42 | 201ecdc9ee | 1 year ago |
blob42 | 149fe0055e | 1 year ago |
blob42 | 096b82f2a1 | 1 year ago |
blob42 | 87b5a84cfb | 1 year ago |
blob42 | ed97aa65af | 1 year ago |
blob42 | c9e6baf60d | 1 year ago |
blob42 | 7cde1cbfc3 | 1 year ago |
blob42 | 17213209e0 | 1 year ago |
blob42 | 895f862662 | 1 year ago |
Harrison Chase | f61858163d | 1 year ago |
Harrison Chase | 0824d65a5c | 1 year ago |
Akshay | a0bf856c70 | 1 year ago |
Harrison Chase | 166cda2cc6 | 1 year ago |
Harrison Chase | aaad6cc954 | 1 year ago |
Marc Puig | 3989c793fd | 1 year ago |
Alexander Hoyle | 42b892c21b | 1 year ago |
Harrison Chase | 81abcae91a | 1 year ago |
Casey A. Fitzpatrick | 648b3b3909 | 1 year ago |
Ingo Kleiber | fd9975dad7 | 1 year ago |
Harrison Chase | d29f74114e | 1 year ago |
Harrison Chase | ce441edd9c | 1 year ago |
Harrison Chase | 6f30d68581 | 1 year ago |
Harrison Chase | 002da6edc0 | 1 year ago |
Harrison Chase | 0963096491 | 1 year ago |
Harrison Chase | c5dd491a21 | 1 year ago |
Matt Robinson | 2f15c11b87 | 1 year ago |
Harrison Chase | 96db6ed073 | 1 year ago |
Harrison Chase | 7e8f832cd6 | 1 year ago |
Harrison Chase | a8e88e1874 | 1 year ago |
Harrison Chase | 42167a1e24 | 1 year ago |
Harrison Chase | bb53d9722d | 1 year ago |
Klein Tahiraj | 8a0751dadd | 1 year ago |
Harrison Chase | 4b5d427421 | 1 year ago |
Enrico Shippole | 9becdeaadf | 1 year ago |
blob42 | 5457d48416 | 1 year ago |
Harrison Chase | 9381005098 | 1 year ago |
Matt Robinson | 10e73a3723 | 1 year ago |
Justin Torre | 5bc6dc076e | 1 year ago |
Harrison Chase | 6d37d089e9 | 1 year ago |
Iskren Ivov Chernev | 8e3cd3e0dd | 1 year ago |
Dmitri Melikyan | b7765a95a0 | 1 year ago |
Satoru Sakamoto | d480330fae | 1 year ago |
Harrison Chase | 6085fe18d4 | 1 year ago |
Jon Luo | 8a35811556 | 1 year ago |
Harrison Chase | 71709ad5d5 | 1 year ago |
Dennis Antela Martinez | 53c67e04d4 | 1 year ago |
Klein Tahiraj | c6ab1bb3cb | 1 year ago |
Ikko Eltociear Ashimine | 334b553260 | 1 year ago |
Jon Luo | ac1320aae8 | 1 year ago |
djacobs7 | 4e28982d2b | 1 year ago |
Sason | cc7d2e5621 | 1 year ago |
blob42 | 424e71705d | 1 year ago |
Harrison Chase | 4e43b0efe9 | 1 year ago |
Matt Robinson | 3d5f56a8a1 | 1 year ago |
Harrison Chase | 047231840d | 1 year ago |
Harrison Chase | 5bdb8dd6fe | 1 year ago |
Harrison Chase | d90a287d8f | 1 year ago |
Harrison Chase | b7708bbec6 | 1 year ago |
Harrison Chase | fb83cd4ff4 | 1 year ago |
Harrison Chase | 44c8d8a9ac | 1 year ago |
Konstantin Hebenstreit | af94f1dd97 | 1 year ago |
Harrison Chase | 0c84ce1082 | 1 year ago |
Francisco Ingham | 0b6a650cb4 | 1 year ago |
Anton Troynikov | d2ef5d6167 | 1 year ago |
Dennis Antela Martinez | 23243ae69c | 1 year ago |
William FH | 13ba0177d0 | 1 year ago |
Naveen Tatikonda | 0118706fd6 | 1 year ago |
Andrew White | c5015d77e2 | 1 year ago |
Zach Schillaci | 159c560c95 | 1 year ago |
Harrison Chase | 926c121b98 | 1 year ago |
Harrison Chase | 91446a5e9b | 1 year ago |
Harrison Chase | a5a14405ad | 1 year ago |
Harrison Chase | 5a954efdd7 | 1 year ago |
Harrison Chase | 4766b20223 | 1 year ago |
blob42 | 9962bda70b | 1 year ago |
@ -0,0 +1,144 @@
.vscode/
.idea/
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
pip-wheel-metadata/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
target/

# Jupyter Notebook
.ipynb_checkpoints
notebooks/

# IPython
profile_default/
ipython_config.py

# pyenv
.python-version

# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock

# PEP 582; used by e.g. github.com/David-OConnor/pyflow
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
.venvs
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/

# macOS display setting files
.DS_Store

# docker
docker/
!docker/assets/
.dockerignore
docker.build
@ -0,0 +1,13 @@
# python env
PYTHON_VERSION=3.10

# -E flag is required
# comment the following line to only install dev dependencies
POETRY_EXTRA_PACKAGES="-E all"

# at least one group needed
POETRY_DEPENDENCIES="dev,test,lint,typing"

# langchain env. warning: these variables will be baked into the docker image!
OPENAI_API_KEY=${OPENAI_API_KEY:-}
SERPAPI_API_KEY=${SERPAPI_API_KEY:-}
@ -0,0 +1,53 @@
# Using Docker

To quickly get started, run the command `make docker`.

If docker is installed, the Makefile will export extra targets in the format `docker.*` to build and run the docker image. Type `make` for a list of available tasks.

There is a basic `docker-compose.yml` in the docker directory.

## Building the development image

Using `make docker` will build the dev image if it does not exist, then drop you inside the container with the langchain environment available in the shell.

### Customizing the image and installed dependencies

The image is built with a default python version and all extras and dev dependencies. It can be customized by changing the variables in the [.env](/docker/.env) file.

If you don't need all the `extra` dependencies, a slimmer image can be obtained by commenting out `POETRY_EXTRA_PACKAGES` in the [.env](docker/.env) file.

### Image caching

The Dockerfile is optimized to cache the poetry install step. A rebuild is triggered when there is a change to the source code.

## Example Usage

All commands from langchain's python environment are available by default in the container.

A few examples:
```bash
# run jupyter notebook
docker run --rm -it IMG jupyter notebook

# run ipython
docker run --rm -it IMG ipython

# start web server
docker run --rm -p 8888:8888 IMG python -m http.server 8888
```

## Testing / Linting

Tests and lints are run using your local source directory, which is mounted on the volume /src.

Run unit tests in the container with `make docker.test`.

Run the linting and formatting checks with `make docker.lint`.

Note: this task can run in parallel using `make -j4 docker.lint`.
@ -0,0 +1,104 @@
# vim: ft=dockerfile
#
# see also: https://github.com/python-poetry/poetry/discussions/1879
#   - with https://github.com/bneijt/poetry-lock-docker
# see https://github.com/thehale/docker-python-poetry
# see https://github.com/max-pfeiffer/uvicorn-poetry

# use by default the slim version of python
ARG PYTHON_IMAGE_TAG=slim
ARG PYTHON_VERSION=${PYTHON_VERSION:-3.11.2}

####################
# Base Environment
####################
FROM python:$PYTHON_VERSION-$PYTHON_IMAGE_TAG AS lchain-base

ARG UID=1000
ARG USERNAME=lchain

ENV USERNAME=$USERNAME

RUN groupadd -g ${UID} $USERNAME
RUN useradd -l -m -u ${UID} -g ${UID} $USERNAME

# used for mounting source code
RUN mkdir /src
VOLUME /src

#######################
## Poetry Builder Image
#######################
FROM lchain-base AS lchain-base-builder

ARG POETRY_EXTRA_PACKAGES=$POETRY_EXTRA_PACKAGES
ARG POETRY_DEPENDENCIES=$POETRY_DEPENDENCIES

ENV HOME=/root
ENV POETRY_HOME=/root/.poetry
ENV POETRY_VIRTUALENVS_IN_PROJECT=false
ENV POETRY_NO_INTERACTION=1
ENV CACHE_DIR=$HOME/.cache
ENV POETRY_CACHE_DIR=$CACHE_DIR/pypoetry
ENV PATH="$POETRY_HOME/bin:$PATH"

WORKDIR /root

RUN apt-get update && \
    apt-get install -y \
        build-essential \
        git \
        curl

SHELL ["/bin/bash", "-o", "pipefail", "-c"]

RUN mkdir -p $CACHE_DIR

## setup poetry
RUN curl -sSL -o $CACHE_DIR/pypoetry-installer.py https://install.python-poetry.org/
RUN python3 $CACHE_DIR/pypoetry-installer.py

# Copy poetry files
COPY poetry.* pyproject.toml ./

RUN mkdir /pip-prefix

RUN poetry export $POETRY_EXTRA_PACKAGES --with $POETRY_DEPENDENCIES -f requirements.txt --output requirements.txt --without-hashes && \
    pip install --no-cache-dir --disable-pip-version-check --prefix /pip-prefix -r requirements.txt

# add custom motd message
COPY docker/assets/etc/motd /tmp/motd
RUN cat /tmp/motd > /etc/motd

RUN printf "\n%s\n%s\n" "$(poetry version)" "$(python --version)" >> /etc/motd

###################
## Runtime Image
###################
FROM lchain-base AS lchain

# jupyter port
EXPOSE 8888

COPY docker/assets/entry.sh /entry
RUN chmod +x /entry

COPY --from=lchain-base-builder /etc/motd /etc/motd
COPY --from=lchain-base-builder /usr/bin/git /usr/bin/git

USER ${USERNAME:-lchain}
ENV HOME=/home/$USERNAME
WORKDIR /home/$USERNAME

COPY --chown=lchain:lchain --from=lchain-base-builder /pip-prefix $HOME/.local/

COPY . .

SHELL ["/bin/bash", "-o", "pipefail", "-c"]
RUN pip install --no-deps --disable-pip-version-check --no-cache-dir -e .

ENTRYPOINT ["/entry"]
@ -0,0 +1,84 @@
# do not call this makefile directly; it is included in the main Makefile
.PHONY: docker docker.jupyter docker.run docker.force_build docker.clean \
	docker.test docker.lint docker.lint.mypy docker.lint.black \
	docker.lint.isort docker.lint.flake

# read python version from .env file ignoring comments
PYTHON_VERSION := $(shell grep PYTHON_VERSION docker/.env | cut -d '=' -f2)
POETRY_EXTRA_PACKAGES := $(shell grep '^[^#]*POETRY_EXTRA_PACKAGES' docker/.env | cut -d '=' -f2)
POETRY_DEPENDENCIES := $(shell grep 'POETRY_DEPENDENCIES' docker/.env | cut -d '=' -f2)

DOCKER_SRC := $(shell find docker -type f)
DOCKER_IMAGE_NAME = langchain/dev

# SRC is all files matched by the git ls-files command
SRC := $(shell git ls-files -- '*' ':!:docker/*')

# set DOCKER_BUILD_PROGRESS=plain to see detailed build progress
DOCKER_BUILD_PROGRESS ?= auto

# extra message to show when entering the docker container
DOCKER_MOTD := docker/assets/etc/motd

ROOTDIR := $(shell git rev-parse --show-toplevel)

DOCKER_LINT_CMD = docker run --rm -i -u lchain -v $(ROOTDIR):/src $(DOCKER_IMAGE_NAME):$(GIT_HASH)

docker: docker.run

docker.run: docker.build
	@echo "Docker image: $(DOCKER_IMAGE_NAME):$(GIT_HASH)"
	docker run --rm -it -u lchain -v $(ROOTDIR):/src $(DOCKER_IMAGE_NAME):$(GIT_HASH)

docker.jupyter: docker.build
	docker run --rm -it -v $(ROOTDIR):/src $(DOCKER_IMAGE_NAME):$(GIT_HASH) jupyter notebook

docker.build: $(SRC) $(DOCKER_SRC) $(DOCKER_MOTD)
ifdef DOCKER_BUILDKIT
	docker buildx build --build-arg PYTHON_VERSION=$(PYTHON_VERSION) \
		--build-arg POETRY_EXTRA_PACKAGES=$(POETRY_EXTRA_PACKAGES) \
		--build-arg POETRY_DEPENDENCIES=$(POETRY_DEPENDENCIES) \
		--progress=$(DOCKER_BUILD_PROGRESS) \
		$(BUILD_FLAGS) -f docker/Dockerfile -t $(DOCKER_IMAGE_NAME):$(GIT_HASH) .
else
	docker build --build-arg PYTHON_VERSION=$(PYTHON_VERSION) \
		--build-arg POETRY_EXTRA_PACKAGES=$(POETRY_EXTRA_PACKAGES) \
		--build-arg POETRY_DEPENDENCIES=$(POETRY_DEPENDENCIES) \
		$(BUILD_FLAGS) -f docker/Dockerfile -t $(DOCKER_IMAGE_NAME):$(GIT_HASH) .
endif
	docker tag $(DOCKER_IMAGE_NAME):$(GIT_HASH) $(DOCKER_IMAGE_NAME):latest
	@touch $@ # this prevents docker from rebuilding dependencies that have not
	@ # changed. Remove the file `docker/docker.build` to force a rebuild.

docker.force_build: $(DOCKER_SRC)
	@rm -f docker.build
	@$(MAKE) docker.build BUILD_FLAGS=--no-cache

docker.clean:
	docker rmi $(DOCKER_IMAGE_NAME):$(GIT_HASH) $(DOCKER_IMAGE_NAME):latest

docker.test: docker.build
	docker run --rm -it -u lchain -v $(ROOTDIR):/src $(DOCKER_IMAGE_NAME):$(GIT_HASH) \
		pytest /src/tests/unit_tests

# this assumes that the docker image has been built
docker.lint: docker.lint.mypy docker.lint.black docker.lint.isort \
	docker.lint.flake

# these can run in parallel with -j[njobs]
docker.lint.mypy:
	@$(DOCKER_LINT_CMD) mypy /src
	@printf "\t%s\n" "mypy ... "

docker.lint.black:
	@$(DOCKER_LINT_CMD) black /src --check
	@printf "\t%s\n" "black ... "

docker.lint.isort:
	@$(DOCKER_LINT_CMD) isort /src --check
	@printf "\t%s\n" "isort ... "

docker.lint.flake:
	@$(DOCKER_LINT_CMD) flake8 /src
	@printf "\t%s\n" "flake8 ... "
@ -0,0 +1,10 @@
#!/usr/bin/env bash

export PATH=$HOME/.local/bin:$PATH

if [ -z "$1" ]; then
  cat /etc/motd
  exec /bin/bash
fi

exec "$@"
@ -0,0 +1,8 @@
All dependencies have been installed in the current shell. There is no
virtualenv and no need for `poetry` inside the container.

Running the command `make docker.run` at the root directory of the project will
build the container the first time. On subsequent runs it will use the cached
image. A rebuild will happen when changes are made to the source code.

Your local source directory has been mounted to the /src directory.
@ -0,0 +1,17 @@
version: "3.7"

services:
  langchain:
    hostname: langchain
    image: langchain/dev:latest
    build:
      context: ../
      dockerfile: docker/Dockerfile
      args:
        PYTHON_VERSION: ${PYTHON_VERSION}
        POETRY_EXTRA_PACKAGES: ${POETRY_EXTRA_PACKAGES}
        POETRY_DEPENDENCIES: ${POETRY_DEPENDENCIES}

    restart: unless-stopped
    ports:
      - 127.0.0.1:8888:8888
@ -0,0 +1,25 @@
# AtlasDB

This page covers how to use Nomic's Atlas ecosystem within LangChain.
It is broken into two parts: installation and setup, and then references to specific Atlas wrappers.

## Installation and Setup
- Install the Python package with `pip install nomic`
- Nomic is also included in langchain's poetry extras: `poetry install -E all`

## Wrappers

### VectorStore

There exists a wrapper around the Atlas neural database, allowing you to use it as a vectorstore.
This vectorstore also gives you full access to the underlying AtlasProject object, which will allow you to use the full range of Atlas map interactions, such as bulk tagging and automatic topic modeling.
Please see [the Nomic docs](https://docs.nomic.ai/atlas_api.html) for more detailed information.

To import this vectorstore:
```python
from langchain.vectorstores import AtlasDB
```
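
Once imported, the wrapper follows the usual LangChain vectorstore interface. The snippet below is a minimal sketch; the `name` and `api_key` keyword arguments and the project name are assumptions for illustration, so check the `AtlasDB` class signature and the Nomic docs for the exact parameters.

```python
from langchain.vectorstores import AtlasDB

texts = [
    "LangChain integrates with Atlas.",
    "Atlas can map, tag, and topic-model large document sets.",
]

# Build the vectorstore from raw texts; Atlas computes embeddings on its side.
# `name` and `api_key` are illustrative parameter names, not verified here.
db = AtlasDB.from_texts(
    texts,
    name="my_first_atlas_project",  # hypothetical project name
    api_key="NOMIC_API_KEY",        # your Nomic API key
)

# Standard vectorstore query
docs = db.similarity_search("What does Atlas do?", k=1)
print(docs[0].page_content)
```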

For a more detailed walkthrough of the AtlasDB wrapper, see [this notebook](../modules/indexes/examples/vectorstores.ipynb)
@ -0,0 +1,79 @@
# Banana

This page covers how to use the Banana ecosystem within LangChain.
It is broken into two parts: installation and setup, and then references to specific Banana wrappers.

## Installation and Setup

- Install with `pip3 install banana-dev`
- Get a Banana API key and set it as an environment variable (`BANANA_API_KEY`)

## Define your Banana Template

If you want to use an available language model template you can find one [here](https://app.banana.dev/templates/conceptofmind/serverless-template-palmyra-base).
This template uses the Palmyra-Base model by [Writer](https://writer.com/product/api/).
You can check out an example Banana repository [here](https://github.com/conceptofmind/serverless-template-palmyra-base).

## Build the Banana app

Banana apps must include the "output" key in the returned JSON; the response structure is rigid.

```python
# Return the results as a dictionary
result = {'output': result}
```

An example inference function would be:

```python
def inference(model_inputs: dict) -> dict:
    global model
    global tokenizer

    # Parse out your arguments
    prompt = model_inputs.get('prompt', None)
    if prompt is None:
        return {'message': "No prompt provided"}

    # Run the model
    input_ids = tokenizer.encode(prompt, return_tensors='pt').cuda()
    output = model.generate(
        input_ids,
        max_length=100,
        do_sample=True,
        top_k=50,
        top_p=0.95,
        num_return_sequences=1,
        temperature=0.9,
        early_stopping=True,
        no_repeat_ngram_size=3,
        num_beams=5,
        length_penalty=1.5,
        repetition_penalty=1.5,
        bad_words_ids=[[tokenizer.encode(' ', add_prefix_space=True)[0]]]
    )

    result = tokenizer.decode(output[0], skip_special_tokens=True)
    # Return the results as a dictionary
    result = {'output': result}
    return result
```

You can find a full example of a Banana app [here](https://github.com/conceptofmind/serverless-template-palmyra-base/blob/main/app.py).

## Wrappers

### LLM

There exists a Banana LLM wrapper, which you can access with

```python
from langchain.llms import Banana
```

You need to provide a model key located in the dashboard:

```python
llm = Banana(model_key="YOUR_MODEL_KEY")
```
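
Once the model key is set, the wrapper behaves like any other LangChain LLM. A minimal sketch (the prompt text is just an example):

```python
from langchain.llms import Banana

llm = Banana(model_key="YOUR_MODEL_KEY")

# Call the LLM directly with a prompt string, as with other LangChain LLMs.
print(llm("What is the capital of France?"))
```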
@ -0,0 +1,17 @@
# DeepInfra

This page covers how to use the DeepInfra ecosystem within LangChain.
It is broken into two parts: installation and setup, and then references to specific DeepInfra wrappers.

## Installation and Setup
- Get your DeepInfra API key from [deepinfra.com](https://deepinfra.com/) and set it as an environment variable (`DEEPINFRA_API_TOKEN`)

## Wrappers

### LLM

There exists a DeepInfra LLM wrapper, which you can access with
```python
from langchain.llms import DeepInfra
```
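
With the token set, the wrapper can be used like any other LangChain LLM. A minimal sketch; the `model_id` parameter name and the model shown are assumptions for illustration, so check the wrapper's signature and the models hosted on DeepInfra:

```python
from langchain.llms import DeepInfra

# model_id and the model name are illustrative; pick any model hosted on DeepInfra.
llm = DeepInfra(model_id="google/flan-t5-xl")

print(llm("Translate to German: How are you?"))
```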
@ -0,0 +1,38 @@
# Graphsignal

This page covers how to use Graphsignal to trace and monitor LangChain.

## Installation and Setup

- Install the Python library with `pip install graphsignal`
- Create a free Graphsignal account [here](https://graphsignal.com)
- Get an API key and set it as an environment variable (`GRAPHSIGNAL_API_KEY`)

## Tracing and Monitoring

Graphsignal automatically instruments and starts tracing and monitoring chains. Traces, metrics and errors are then available in your [Graphsignal dashboard](https://app.graphsignal.com/). No prompts or other sensitive data are sent to Graphsignal cloud, only statistics and metadata.

Initialize the tracer by providing a deployment name:

```python
import graphsignal

graphsignal.configure(deployment='my-langchain-app-prod')
```

In order to trace full runs and see a breakdown by chains and tools, you can wrap the calling routine or use a decorator, as shown below:

```python
with graphsignal.start_trace('my-chain'):
    chain.run("some initial text")
```
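
The decorator form mentioned above can be sketched as follows; `graphsignal.trace_function` is the decorator name this sketch assumes, so check the Graphsignal docs for the exact API:

```python
import graphsignal

# Decorator variant: every call to run_chain is recorded as a trace.
@graphsignal.trace_function
def run_chain(text: str):
    return chain.run(text)

run_chain("some initial text")
```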

Optionally, enable profiling to record function-level statistics for each trace.

```python
with graphsignal.start_trace(
        'my-chain', options=graphsignal.TraceOptions(enable_profiling=True)):
    chain.run("some initial text")
```

See the [Quick Start](https://graphsignal.com/docs/guides/quick-start/) guide for complete setup instructions.
@ -0,0 +1,66 @@
# Modal

This page covers how to use the Modal ecosystem within LangChain.
It is broken into two parts: installation and setup, and then references to specific Modal wrappers.

## Installation and Setup
- Install with `pip install modal-client`
- Run `modal token new`

## Define your Modal Functions and Webhooks

You must include a prompt in the request, and the response has a rigid structure.

```python
class Item(BaseModel):
    prompt: str

@stub.webhook(method="POST")
def my_webhook(item: Item):
    return {"prompt": my_function.call(item.prompt)}
```

An example with GPT2:

```python
from pydantic import BaseModel

import modal

stub = modal.Stub("example-get-started")

volume = modal.SharedVolume().persist("gpt2_model_vol")
CACHE_PATH = "/root/model_cache"

@stub.function(
    gpu="any",
    image=modal.Image.debian_slim().pip_install(
        "tokenizers", "transformers", "torch", "accelerate"
    ),
    shared_volumes={CACHE_PATH: volume},
    retries=3,
)
def run_gpt2(text: str):
    from transformers import GPT2Tokenizer, GPT2LMHeadModel
    tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
    model = GPT2LMHeadModel.from_pretrained('gpt2')
    encoded_input = tokenizer(text, return_tensors='pt').input_ids
    output = model.generate(encoded_input, max_length=50, do_sample=True)
    return tokenizer.decode(output[0], skip_special_tokens=True)

class Item(BaseModel):
    prompt: str

@stub.webhook(method="POST")
def get_text(item: Item):
    return {"prompt": run_gpt2.call(item.prompt)}
```

## Wrappers

### LLM

There exists a Modal LLM wrapper, which you can access with
```python
from langchain.llms import Modal
```
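
The wrapper is pointed at the webhook you deployed above. A minimal sketch; the `endpoint_url` parameter name is an assumption here, and the URL is a placeholder for your own Modal webhook:

```python
from langchain.llms import Modal

# endpoint_url should be the URL of the webhook you deployed on Modal (placeholder below).
llm = Modal(endpoint_url="https://YOUR-WORKSPACE--example-get-started-get-text.modal.run")

print(llm("Tell me a short story about a robot."))
```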
@ -0,0 +1,21 @@
# OpenSearch

This page covers how to use the OpenSearch ecosystem within LangChain.
It is broken into two parts: installation and setup, and then references to specific OpenSearch wrappers.

## Installation and Setup
- Install the Python package with `pip install opensearch-py`

## Wrappers

### VectorStore

There exists a wrapper around OpenSearch vector databases, allowing you to use it as a vectorstore
for semantic search, either using approximate vector search powered by the Lucene, NMSLIB, and Faiss engines,
or using painless scripting and script scoring functions for brute-force vector search.

To import this vectorstore:
```python
from langchain.vectorstores import OpenSearchVectorSearch
```
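
A minimal usage sketch, assuming an OpenSearch instance is reachable on the default local port; the exact keyword arguments should be checked against the notebook linked below:

```python
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import OpenSearchVectorSearch

texts = [
    "OpenSearch supports approximate k-NN search.",
    "It also supports script scoring for brute-force search.",
]

# Index the texts into a local OpenSearch instance (assumed at localhost:9200).
docsearch = OpenSearchVectorSearch.from_texts(
    texts,
    OpenAIEmbeddings(),
    opensearch_url="http://localhost:9200",
)

docs = docsearch.similarity_search("How does OpenSearch do vector search?", k=1)
print(docs[0].page_content)
```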

For a more detailed walkthrough of the OpenSearch wrapper, see [this notebook](../modules/indexes/vectorstore_examples/opensearch.ipynb)
@ -0,0 +1,17 @@
# StochasticAI

This page covers how to use the StochasticAI ecosystem within LangChain.
It is broken into two parts: installation and setup, and then references to specific StochasticAI wrappers.

## Installation and Setup
- Install with `pip install stochasticx`
- Get a StochasticAI API key and set it as an environment variable (`STOCHASTICAI_API_KEY`)

## Wrappers

### LLM

There exists a StochasticAI LLM wrapper, which you can access with
```python
from langchain.llms import StochasticAI
```
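
With the API key set, the wrapper can be called like any other LangChain LLM. A minimal sketch; the `api_url` parameter name and the URL are placeholders standing in for the model deployment URL from your StochasticAI dashboard:

```python
from langchain.llms import StochasticAI

# api_url is the model deployment URL from the StochasticAI dashboard (placeholder below).
llm = StochasticAI(api_url="https://api.stochastic.ai/v1/modelApi/submit/YOUR-MODEL")

print(llm("Write a haiku about the sea."))
```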
@ -0,0 +1,16 @@
# Writer

This page covers how to use the Writer ecosystem within LangChain.
It is broken into two parts: installation and setup, and then references to specific Writer wrappers.

## Installation and Setup
- Get a Writer API key and set it as an environment variable (`WRITER_API_KEY`)

## Wrappers

### LLM

There exists a Writer LLM wrapper, which you can access with
```python
from langchain.llms import Writer
```
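
With `WRITER_API_KEY` set, the wrapper can be used like any other LangChain LLM. A minimal sketch, leaving constructor arguments at their defaults (check the Writer wrapper for tunable parameters):

```python
from langchain.llms import Writer

# Reads WRITER_API_KEY from the environment.
llm = Writer()

print(llm("Suggest a tagline for a note-taking app."))
```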
@ -0,0 +1,494 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "68b24990",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Agents and Vectorstores\n",
|
||||
"\n",
|
||||
"This notebook covers how to combine agents and vectorstores. The use case for this is that you've ingested your data into a vectorstore and want to interact with it in an agentic manner.\n",
|
||||
"\n",
|
||||
"The reccomended method for doing so is to create a VectorDBQAChain and then use that as a tool in the overall agent. Let's take a look at doing this below. You can do this with multiple different vectordbs, and use the agent as a way to route between them. There are two different ways of doing this - you can either let the agent use the vectorstores as normal tools, or you can set `return_direct=True` to really just use the agent as a router."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "9b22020a",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Create the Vectorstore"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 20,
|
||||
"id": "2e87c10a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.embeddings.openai import OpenAIEmbeddings\n",
|
||||
"from langchain.vectorstores import Chroma\n",
|
||||
"from langchain.text_splitter import CharacterTextSplitter\n",
|
||||
"from langchain import OpenAI, VectorDBQA\n",
|
||||
"llm = OpenAI(temperature=0)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 37,
|
||||
"id": "f2675861",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Running Chroma using direct local API.\n",
|
||||
"Using DuckDB in-memory for database. Data will be transient.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain.document_loaders import TextLoader\n",
|
||||
"loader = TextLoader('../../state_of_the_union.txt')\n",
|
||||
"documents = loader.load()\n",
|
||||
"text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)\n",
|
||||
"texts = text_splitter.split_documents(documents)\n",
|
||||
"\n",
|
||||
"embeddings = OpenAIEmbeddings()\n",
|
||||
"docsearch = Chroma.from_documents(texts, embeddings, collection_name=\"state-of-union\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 38,
|
||||
"id": "bc5403d4",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"state_of_union = VectorDBQA.from_chain_type(llm=llm, chain_type=\"stuff\", vectorstore=docsearch)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 39,
|
||||
"id": "1431cded",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.document_loaders import WebBaseLoader"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 40,
|
||||
"id": "915d3ff3",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"loader = WebBaseLoader(\"https://beta.ruff.rs/docs/faq/\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 41,
|
||||
"id": "96a2edf8",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Running Chroma using direct local API.\n",
|
||||
"Using DuckDB in-memory for database. Data will be transient.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"docs = loader.load()\n",
|
||||
"ruff_texts = text_splitter.split_documents(docs)\n",
|
||||
"ruff_db = Chroma.from_documents(ruff_texts, embeddings, collection_name=\"ruff\")\n",
|
||||
"ruff = VectorDBQA.from_chain_type(llm=llm, chain_type=\"stuff\", vectorstore=ruff_db)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "71ecef90",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "c0a6c031",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Create the Agent"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 43,
|
||||
"id": "eb142786",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Import things that are needed generically\n",
|
||||
"from langchain.agents import initialize_agent, Tool\n",
|
||||
"from langchain.tools import BaseTool\n",
|
||||
"from langchain.llms import OpenAI\n",
|
||||
"from langchain import LLMMathChain, SerpAPIWrapper"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 44,
|
||||
"id": "850bc4e9",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"tools = [\n",
|
||||
" Tool(\n",
|
||||
" name = \"State of Union QA System\",\n",
|
||||
" func=state_of_union.run,\n",
|
||||
" description=\"useful for when you need to answer questions about the most recent state of the union address. Input should be a fully formed question.\"\n",
|
||||
" ),\n",
|
||||
" Tool(\n",
|
||||
" name = \"Ruff QA System\",\n",
|
||||
" func=ruff.run,\n",
|
||||
" description=\"useful for when you need to answer questions about ruff (a python linter). Input should be a fully formed question.\"\n",
|
||||
" ),\n",
|
||||
"]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 45,
|
||||
"id": "fc47f230",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Construct the agent. We will use the default agent type here.\n",
|
||||
"# See documentation for a full list of options.\n",
|
||||
"agent = initialize_agent(tools, llm, agent=\"zero-shot-react-description\", verbose=True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 46,
|
||||
"id": "10ca2db8",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
|
||||
"\u001b[32;1m\u001b[1;3m I need to find out what Biden said about Ketanji Brown Jackson in the State of the Union address.\n",
|
||||
"Action: State of Union QA System\n",
|
||||
"Action Input: What did Biden say about Ketanji Brown Jackson in the State of the Union address?\u001b[0m\n",
|
||||
"Observation: \u001b[36;1m\u001b[1;3m Biden said that Jackson is one of the nation's top legal minds and that she will continue Justice Breyer's legacy of excellence.\u001b[0m\n",
|
||||
"Thought:\u001b[32;1m\u001b[1;3m I now know the final answer\n",
|
||||
"Final Answer: Biden said that Jackson is one of the nation's top legal minds and that she will continue Justice Breyer's legacy of excellence.\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"\"Biden said that Jackson is one of the nation's top legal minds and that she will continue Justice Breyer's legacy of excellence.\""
|
||||
]
|
||||
},
|
||||
"execution_count": 46,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"agent.run(\"What did biden say about ketanji brown jackson is the state of the union address?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 47,
|
||||
"id": "4e91b811",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
|
||||
"\u001b[32;1m\u001b[1;3m I need to find out the advantages of using ruff over flake8\n",
|
||||
"Action: Ruff QA System\n",
|
||||
"Action Input: What are the advantages of using ruff over flake8?\u001b[0m\n",
|
||||
"Observation: \u001b[33;1m\u001b[1;3m Ruff can be used as a drop-in replacement for Flake8 when used (1) without or with a small number of plugins, (2) alongside Black, and (3) on Python 3 code. It also re-implements some of the most popular Flake8 plugins and related code quality tools natively, including isort, yesqa, eradicate, and most of the rules implemented in pyupgrade. Ruff also supports automatically fixing its own lint violations, which Flake8 does not.\u001b[0m\n",
|
||||
"Thought:\u001b[32;1m\u001b[1;3m I now know the final answer\n",
|
||||
"Final Answer: Ruff can be used as a drop-in replacement for Flake8 when used (1) without or with a small number of plugins, (2) alongside Black, and (3) on Python 3 code. It also re-implements some of the most popular Flake8 plugins and related code quality tools natively, including isort, yesqa, eradicate, and most of the rules implemented in pyupgrade. Ruff also supports automatically fixing its own lint violations, which Flake8 does not.\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Ruff can be used as a drop-in replacement for Flake8 when used (1) without or with a small number of plugins, (2) alongside Black, and (3) on Python 3 code. It also re-implements some of the most popular Flake8 plugins and related code quality tools natively, including isort, yesqa, eradicate, and most of the rules implemented in pyupgrade. Ruff also supports automatically fixing its own lint violations, which Flake8 does not.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 47,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"agent.run(\"Why use ruff over flake8?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "787a9b5e",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Use the Agent solely as a router"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "9161ba91",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"You can also set `return_direct=True` if you intend to use the agent as a router and just want to directly return the result of the VectorDBQaChain.\n",
|
||||
"\n",
|
||||
"Notice that in the above examples the agent did some extra work after querying the VectorDBQAChain. You can avoid that and just return the result directly."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 48,
|
||||
"id": "f59b377e",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"tools = [\n",
|
||||
" Tool(\n",
|
||||
" name = \"State of Union QA System\",\n",
|
||||
" func=state_of_union.run,\n",
|
||||
" description=\"useful for when you need to answer questions about the most recent state of the union address. Input should be a fully formed question.\",\n",
|
||||
" return_direct=True\n",
|
||||
" ),\n",
|
||||
" Tool(\n",
|
||||
" name = \"Ruff QA System\",\n",
|
||||
" func=ruff.run,\n",
|
||||
" description=\"useful for when you need to answer questions about ruff (a python linter). Input should be a fully formed question.\",\n",
|
||||
" return_direct=True\n",
|
||||
" ),\n",
|
||||
"]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 49,
|
||||
"id": "8615707a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"agent = initialize_agent(tools, llm, agent=\"zero-shot-react-description\", verbose=True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 50,
|
||||
"id": "36e718a9",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
|
||||
"\u001b[32;1m\u001b[1;3m I need to find out what Biden said about Ketanji Brown Jackson in the State of the Union address.\n",
|
||||
"Action: State of Union QA System\n",
|
||||
"Action Input: What did Biden say about Ketanji Brown Jackson in the State of the Union address?\u001b[0m\n",
|
||||
"Observation: \u001b[36;1m\u001b[1;3m Biden said that Jackson is one of the nation's top legal minds and that she will continue Justice Breyer's legacy of excellence.\u001b[0m\n",
|
||||
"\u001b[32;1m\u001b[1;3m\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"\" Biden said that Jackson is one of the nation's top legal minds and that she will continue Justice Breyer's legacy of excellence.\""
|
||||
]
|
||||
},
|
||||
"execution_count": 50,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"agent.run(\"What did biden say about ketanji brown jackson in the state of the union address?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 51,
|
||||
"id": "edfd0a1a",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
|
||||
"\u001b[32;1m\u001b[1;3m I need to find out the advantages of using ruff over flake8\n",
|
||||
"Action: Ruff QA System\n",
|
||||
"Action Input: What are the advantages of using ruff over flake8?\u001b[0m\n",
|
||||
"Observation: \u001b[33;1m\u001b[1;3m Ruff can be used as a drop-in replacement for Flake8 when used (1) without or with a small number of plugins, (2) alongside Black, and (3) on Python 3 code. It also re-implements some of the most popular Flake8 plugins and related code quality tools natively, including isort, yesqa, eradicate, and most of the rules implemented in pyupgrade. Ruff also supports automatically fixing its own lint violations, which Flake8 does not.\u001b[0m\n",
|
||||
"\u001b[32;1m\u001b[1;3m\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"' Ruff can be used as a drop-in replacement for Flake8 when used (1) without or with a small number of plugins, (2) alongside Black, and (3) on Python 3 code. It also re-implements some of the most popular Flake8 plugins and related code quality tools natively, including isort, yesqa, eradicate, and most of the rules implemented in pyupgrade. Ruff also supports automatically fixing its own lint violations, which Flake8 does not.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 51,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"agent.run(\"Why use ruff over flake8?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "49a0cbbe",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Multi-Hop vectorstore reasoning\n",
|
||||
"\n",
|
||||
"Because vectorstores are easily usable as tools in agents, it is easy to use answer multi-hop questions that depend on vectorstores using the existing agent framework"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 57,
|
||||
"id": "d397a233",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"tools = [\n",
|
||||
" Tool(\n",
|
||||
" name = \"State of Union QA System\",\n",
|
||||
" func=state_of_union.run,\n",
|
||||
" description=\"useful for when you need to answer questions about the most recent state of the union address. Input should be a fully formed question, not referencing any obscure pronouns from the conversation before.\"\n",
|
||||
" ),\n",
|
||||
" Tool(\n",
|
||||
" name = \"Ruff QA System\",\n",
|
||||
" func=ruff.run,\n",
|
||||
" description=\"useful for when you need to answer questions about ruff (a python linter). Input should be a fully formed question, not referencing any obscure pronouns from the conversation before.\"\n",
|
||||
" ),\n",
|
||||
"]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 58,
|
||||
"id": "06157240",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Construct the agent. We will use the default agent type here.\n",
|
||||
"# See documentation for a full list of options.\n",
|
||||
"agent = initialize_agent(tools, llm, agent=\"zero-shot-react-description\", verbose=True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 59,
|
||||
"id": "b492b520",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
|
||||
"\u001b[32;1m\u001b[1;3m I need to find out what tool ruff uses to run over Jupyter Notebooks, and if the president mentioned it in the state of the union.\n",
|
||||
"Action: Ruff QA System\n",
|
||||
"Action Input: What tool does ruff use to run over Jupyter Notebooks?\u001b[0m\n",
|
||||
"Observation: \u001b[33;1m\u001b[1;3m Ruff is integrated into nbQA, a tool for running linters and code formatters over Jupyter Notebooks. After installing ruff and nbqa, you can run Ruff over a notebook like so: > nbqa ruff Untitled.ipynb\u001b[0m\n",
|
||||
"Thought:\u001b[32;1m\u001b[1;3m I now need to find out if the president mentioned this tool in the state of the union.\n",
|
||||
"Action: State of Union QA System\n",
|
||||
"Action Input: Did the president mention nbQA in the state of the union?\u001b[0m\n",
|
||||
"Observation: \u001b[36;1m\u001b[1;3m No, the president did not mention nbQA in the state of the union.\u001b[0m\n",
|
||||
"Thought:\u001b[32;1m\u001b[1;3m I now know the final answer.\n",
|
||||
"Final Answer: No, the president did not mention nbQA in the state of the union.\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'No, the president did not mention nbQA in the state of the union.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 59,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"agent.run(\"What tool does ruff use to run over Jupyter Notebooks? Did the president mention that tool in the state of the union?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "b3b857d6",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
@ -0,0 +1,116 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "9f98a15e",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# CoNLL-U\n",
|
||||
"This is an example of how to load a file in [CoNLL-U](https://universaldependencies.org/format.html) format. The whole file is treated as one document. The example data (`conllu.conllu`) is based on one of the standard UD/CoNLL-U examples."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "d9b2e33e",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.document_loaders import CoNLLULoader"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "5b5eec48",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"loader = CoNLLULoader(\"example_data/conllu.conllu\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "10f3f725",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"document = loader.load()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "acbb3579",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"document"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.8.8"
|
||||
},
|
||||
"toc": {
|
||||
"base_numbering": 1,
|
||||
"nav_menu": {},
|
||||
"number_sections": true,
|
||||
"sideBar": true,
|
||||
"skip_h1_title": false,
|
||||
"title_cell": "Table of Contents",
|
||||
"title_sidebar": "Contents",
|
||||
"toc_cell": false,
|
||||
"toc_position": {},
|
||||
"toc_section_display": true,
|
||||
"toc_window_display": false
|
||||
},
|
||||
"varInspector": {
|
||||
"cols": {
|
||||
"lenName": 16,
|
||||
"lenType": 16,
|
||||
"lenVar": 40
|
||||
},
|
||||
"kernels_config": {
|
||||
"python": {
|
||||
"delete_cmd_postfix": "",
|
||||
"delete_cmd_prefix": "del ",
|
||||
"library": "var_list.py",
|
||||
"varRefreshCmd": "print(var_dic_list())"
|
||||
},
|
||||
"r": {
|
||||
"delete_cmd_postfix": ") ",
|
||||
"delete_cmd_prefix": "rm(",
|
||||
"library": "var_list.r",
|
||||
"varRefreshCmd": "cat(var_dic_list()) "
|
||||
}
|
||||
},
|
||||
"types_to_exclude": [
|
||||
"module",
|
||||
"function",
|
||||
"builtin_function_or_method",
|
||||
"instance",
|
||||
"_Feature"
|
||||
],
|
||||
"window_display": false
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
@ -0,0 +1,102 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "d9826810",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Copy Paste\n",
|
||||
"\n",
|
||||
"This notebook covers how to load a document object from something you just want to copy and paste. In this case, you don't even need to use a DocumentLoader, but rather can just construct the Document directly."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "fd9e71a2",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.docstore.document import Document"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "f40d3f30",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"text = \"..... put the text you copy pasted here......\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "d409bdba",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"doc = Document(page_content=text)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "cc0eff72",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Metadata\n",
|
||||
"If you want to add metadata about the where you got this piece of text, you easily can with the metadata key."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "fe3aa5aa",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"metadata = {\"source\": \"internet\", \"date\": \"Friday\"}"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "827d4e91",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"doc = Document(page_content=text, metadata=metadata)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "c986a43d",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
@ -0,0 +1,8 @@
|
||||
# sent_id = 1
|
||||
# text = They buy and sell books.
|
||||
1 They they PRON PRP Case=Nom|Number=Plur 2 nsubj 2:nsubj|4:nsubj _
|
||||
2 buy buy VERB VBP Number=Plur|Person=3|Tense=Pres 0 root 0:root _
|
||||
3 and and CONJ CC _ 4 cc 4:cc _
|
||||
4 sell sell VERB VBP Number=Plur|Person=3|Tense=Pres 2 conj 0:root|2:conj _
|
||||
5 books book NOUN NNS Number=Plur 2 obj 2:obj|4:obj SpaceAfter=No
|
||||
6 . . PUNCT . _ 2 punct 2:punct _
|
@ -0,0 +1,64 @@
|
||||
{
|
||||
"participants": [{"name": "User 1"}, {"name": "User 2"}],
|
||||
"messages": [
|
||||
{"sender_name": "User 2", "timestamp_ms": 1675597571851, "content": "Bye!"},
|
||||
{
|
||||
"sender_name": "User 1",
|
||||
"timestamp_ms": 1675597435669,
|
||||
"content": "Oh no worries! Bye",
|
||||
},
|
||||
{
|
||||
"sender_name": "User 2",
|
||||
"timestamp_ms": 1675596277579,
|
||||
"content": "No Im sorry it was my mistake, the blue one is not for sale",
|
||||
},
|
||||
{
|
||||
"sender_name": "User 1",
|
||||
"timestamp_ms": 1675595140251,
|
||||
"content": "I thought you were selling the blue one!",
|
||||
},
|
||||
{
|
||||
"sender_name": "User 1",
|
||||
"timestamp_ms": 1675595109305,
|
||||
"content": "Im not interested in this bag. Im interested in the blue one!",
|
||||
},
|
||||
{
|
||||
"sender_name": "User 2",
|
||||
"timestamp_ms": 1675595068468,
|
||||
"content": "Here is $129",
|
||||
},
|
||||
{
|
||||
"sender_name": "User 2",
|
||||
"timestamp_ms": 1675595060730,
|
||||
"photos": [
|
||||
{"uri": "url_of_some_picture.jpg", "creation_timestamp": 1675595059}
|
||||
],
|
||||
},
|
||||
{
|
||||
"sender_name": "User 2",
|
||||
"timestamp_ms": 1675595045152,
|
||||
"content": "Online is at least $100",
|
||||
},
|
||||
{
|
||||
"sender_name": "User 1",
|
||||
"timestamp_ms": 1675594799696,
|
||||
"content": "How much do you want?",
|
||||
},
|
||||
{
|
||||
"sender_name": "User 2",
|
||||
"timestamp_ms": 1675577876645,
|
||||
"content": "Goodmorning! $50 is too low.",
|
||||
},
|
||||
{
|
||||
"sender_name": "User 1",
|
||||
"timestamp_ms": 1675549022673,
|
||||
"content": "Hi! Im interested in your bag. Im offering $50. Let me know if you are interested. Thanks!",
|
||||
},
|
||||
],
|
||||
"title": "User 1 and User 2 chat",
|
||||
"is_still_participant": true,
|
||||
"thread_path": "inbox/User 1 and User 2 chat",
|
||||
"magic_words": [],
|
||||
"image": {"uri": "image_of_the_chat.jpg", "creation_timestamp": 1675549016},
|
||||
"joinable_mode": {"mode": 1, "link": ""},
|
||||
}
|
@ -0,0 +1,83 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Notebook\n",
|
||||
"\n",
|
||||
"This notebook covers how to load data from an .ipynb notebook into a format suitable by LangChain."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.document_loaders import NotebookLoader"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"loader = NotebookLoader(\"example_data/notebook.ipynb\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"`NotebookLoader.load()` loads the `.ipynb` notebook file into a `Document` object.\n",
|
||||
"\n",
|
||||
"**Parameters**:\n",
|
||||
"\n",
|
||||
"* `include_outputs` (bool): whether to include cell outputs in the resulting document (default is False).\n",
|
||||
"* `max_output_length` (int): the maximum number of characters to include from each cell output (default is 10).\n",
|
||||
"* `remove_newline` (bool): whether to remove newline characters from the cell sources and outputs (default is False).\n",
|
||||
"* `traceback` (bool): whether to include full traceback (default is False)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"loader.load(include_outputs=True, max_output_length=20, remove_newline=True)"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.1"
|
||||
},
|
||||
"orig_nbformat": 4,
|
||||
"vscode": {
|
||||
"interpreter": {
|
||||
"hash": "981b6680a42bdb5eb22187741e1607b3aae2cf73db800d1af1f268d1de6a1f70"
|
||||
}
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
@ -0,0 +1,77 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Facebook Chat\n",
|
||||
"\n",
|
||||
"This notebook covers how to load data from Facebook Chat into a format that can be ingested into LangChain."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.document_loaders import FacebookChatLoader"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"loader = FacebookChatLoader(\"example_data/facebook_chat.json\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[Document(page_content='User 2 on 2023-02-05 12:46:11: Bye!\\n\\nUser 1 on 2023-02-05 12:43:55: Oh no worries! Bye\\n\\nUser 2 on 2023-02-05 12:24:37: No Im sorry it was my mistake, the blue one is not for sale\\n\\nUser 1 on 2023-02-05 12:05:40: I thought you were selling the blue one!\\n\\nUser 1 on 2023-02-05 12:05:09: Im not interested in this bag. Im interested in the blue one!\\n\\nUser 2 on 2023-02-05 12:04:28: Here is $129\\n\\nUser 2 on 2023-02-05 12:04:05: Online is at least $100\\n\\nUser 1 on 2023-02-05 11:59:59: How much do you want?\\n\\nUser 2 on 2023-02-05 07:17:56: Goodmorning! $50 is too low.\\n\\nUser 1 on 2023-02-04 23:17:02: Hi! Im interested in your bag. Im offering $50. Let me know if you are interested. Thanks!\\n\\n', lookup_str='', metadata={'source': 'docs/modules/document_loaders/examples/example_data/facebook_chat.json'}, lookup_index=0)]"
|
||||
]
|
||||
},
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"loader.load()"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.1"
|
||||
},
|
||||
"vscode": {
|
||||
"interpreter": {
|
||||
"hash": "384707f4965e853a82006e90614c2e1a578ea1f6eb0ee07a1dd78a657d37dd67"
|
||||
}
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
@ -0,0 +1,191 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "4babfba5",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# GitBook\n",
|
||||
"How to pull page data from any GitBook."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "ff49b177",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.document_loaders import GitbookLoader"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "849a8d52",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"loader = GitbookLoader(\"https://docs.gitbook.com\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "65d5ddce",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Load from a single GitBook page"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "c2826836",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"page_data = loader.load()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "fefa2adc",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[Document(page_content='Introduction to GitBook\\nGitBook is a modern documentation platform where teams can document everything from products to internal knowledge bases and APIs.\\nWe want to help \\nteams to work more efficiently\\n by creating a simple yet powerful platform for them to \\nshare their knowledge\\n.\\nOur mission is to make a \\nuser-friendly\\n and \\ncollaborative\\n product for everyone to create, edit and share knowledge through documentation.\\nPublish your documentation in 5 easy steps\\nImport\\n\\nMove your existing content to GitBook with ease.\\nGit Sync\\n\\nBenefit from our bi-directional synchronisation with GitHub and GitLab.\\nOrganise your content\\n\\nCreate pages and spaces and organize them into collections\\nCollaborate\\n\\nInvite other users and collaborate asynchronously with ease.\\nPublish your docs\\n\\nShare your documentation with selected users or with everyone.\\nNext\\n - Getting started\\nOverview\\nLast modified \\n3mo ago', lookup_str='', metadata={'source': 'https://docs.gitbook.com', 'title': 'Introduction to GitBook'}, lookup_index=0)]"
|
||||
]
|
||||
},
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"page_data"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "c325048c",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Load from all paths in a given GitBook\n",
|
||||
"For this to work, the GitbookLoader needs to be initialized with the root path (`https://docs.gitbook.com` in this example) and have `load_all_paths` set to `True`."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "938ff4ee",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Fetching text from https://docs.gitbook.com/\n",
|
||||
"Fetching text from https://docs.gitbook.com/getting-started/overview\n",
|
||||
"Fetching text from https://docs.gitbook.com/getting-started/import\n",
|
||||
"Fetching text from https://docs.gitbook.com/getting-started/git-sync\n",
|
||||
"Fetching text from https://docs.gitbook.com/getting-started/content-structure\n",
|
||||
"Fetching text from https://docs.gitbook.com/getting-started/collaboration\n",
|
||||
"Fetching text from https://docs.gitbook.com/getting-started/publishing\n",
|
||||
"Fetching text from https://docs.gitbook.com/tour/quick-find\n",
|
||||
"Fetching text from https://docs.gitbook.com/tour/editor\n",
|
||||
"Fetching text from https://docs.gitbook.com/tour/customization\n",
|
||||
"Fetching text from https://docs.gitbook.com/tour/member-management\n",
|
||||
"Fetching text from https://docs.gitbook.com/tour/pdf-export\n",
|
||||
"Fetching text from https://docs.gitbook.com/tour/activity-history\n",
|
||||
"Fetching text from https://docs.gitbook.com/tour/insights\n",
|
||||
"Fetching text from https://docs.gitbook.com/tour/notifications\n",
|
||||
"Fetching text from https://docs.gitbook.com/tour/internationalization\n",
|
||||
"Fetching text from https://docs.gitbook.com/tour/keyboard-shortcuts\n",
|
||||
"Fetching text from https://docs.gitbook.com/tour/seo\n",
|
||||
"Fetching text from https://docs.gitbook.com/advanced-guides/custom-domain\n",
|
||||
"Fetching text from https://docs.gitbook.com/advanced-guides/advanced-sharing-and-security\n",
|
||||
"Fetching text from https://docs.gitbook.com/advanced-guides/integrations\n",
|
||||
"Fetching text from https://docs.gitbook.com/billing-and-admin/account-settings\n",
|
||||
"Fetching text from https://docs.gitbook.com/billing-and-admin/plans\n",
|
||||
"Fetching text from https://docs.gitbook.com/troubleshooting/faqs\n",
|
||||
"Fetching text from https://docs.gitbook.com/troubleshooting/hard-refresh\n",
|
||||
"Fetching text from https://docs.gitbook.com/troubleshooting/report-bugs\n",
|
||||
"Fetching text from https://docs.gitbook.com/troubleshooting/connectivity-issues\n",
|
||||
"Fetching text from https://docs.gitbook.com/troubleshooting/support\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"loader = GitbookLoader(\"https://docs.gitbook.com\", load_all_paths=True)\n",
|
||||
"all_pages_data = loader.load()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "db92fc39",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"fetched 28 documents.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"Document(page_content=\"Import\\nFind out how to easily migrate your existing documentation and which formats are supported.\\nThe import function allows you to migrate and unify existing documentation in GitBook. You can choose to import single or multiple pages although limits apply. \\nPermissions\\nAll members with editor permission or above can use the import feature.\\nSupported formats\\nGitBook supports imports from websites or files that are:\\nMarkdown (.md or .markdown)\\nHTML (.html)\\nMicrosoft Word (.docx).\\nWe also support import from:\\nConfluence\\nNotion\\nGitHub Wiki\\nQuip\\nDropbox Paper\\nGoogle Docs\\nYou can also upload a ZIP\\n \\ncontaining HTML or Markdown files when \\nimporting multiple pages.\\nNote: this feature is in beta.\\nFeel free to suggest import sources we don't support yet and \\nlet us know\\n if you have any issues.\\nImport panel\\nWhen you create a new space, you'll have the option to import content straight away:\\nThe new page menu\\nImport a page or subpage by selecting \\nImport Page\\n from the New Page menu, or \\nImport Subpage\\n in the page action menu, found in the table of contents:\\nImport from the page action menu\\nWhen you choose your input source, instructions will explain how to proceed.\\nAlthough GitBook supports importing content from different kinds of sources, the end result might be different from your source due to differences in product features and document format.\\nLimits\\nGitBook currently has the following limits for imported content:\\nThe maximum number of pages that can be uploaded in a single import is \\n20.\\nThe maximum number of files (images etc.) that can be uploaded in a single import is \\n20.\\nGetting started - \\nPrevious\\nOverview\\nNext\\n - Getting started\\nGit Sync\\nLast modified \\n4mo ago\", lookup_str='', metadata={'source': 'https://docs.gitbook.com/getting-started/import', 'title': 'Import'}, lookup_index=0)"
|
||||
]
|
||||
},
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(f\"fetched {len(all_pages_data)} documents.\")\n",
|
||||
"# show the third document\n",
|
||||
"all_pages_data[2]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "92cb3eda",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
},
|
||||
"vscode": {
|
||||
"interpreter": {
|
||||
"hash": "2d002ec47225e662695b764370d7966aa11eeb4302edc2f497bbf96d49c8f899"
|
||||
}
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
File diff suppressed because one or more lines are too long
@ -0,0 +1,98 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Notebook\n",
|
||||
"\n",
|
||||
"This notebook covers how to load data from an .ipynb notebook into a format suitable for use with LangChain."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.document_loaders import NotebookLoader"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"loader = NotebookLoader(\"example_data/notebook.ipynb\", include_outputs=True, max_output_length=20, remove_newline=True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"`NotebookLoader.load()` loads the `.ipynb` notebook file into a `Document` object.\n",
|
||||
"\n",
|
||||
"**Parameters**:\n",
|
||||
"\n",
|
||||
"* `include_outputs` (bool): whether to include cell outputs in the resulting document (default is False).\n",
|
||||
"* `max_output_length` (int): the maximum number of characters to include from each cell output (default is 10).\n",
|
||||
"* `remove_newline` (bool): whether to remove newline characters from the cell sources and outputs (default is False).\n",
|
||||
"* `traceback` (bool): whether to include full traceback (default is False)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[Document(page_content='\\'markdown\\' cell: \\'[\\'# Notebook\\', \\'\\', \\'This notebook covers how to load data from an .ipynb notebook into a format suitable by LangChain.\\']\\'\\n\\n \\'code\\' cell: \\'[\\'from langchain.document_loaders import NotebookLoader\\']\\'\\n\\n \\'code\\' cell: \\'[\\'loader = NotebookLoader(\"example_data/notebook.ipynb\")\\']\\'\\n\\n \\'markdown\\' cell: \\'[\\'`NotebookLoader.load()` loads the `.ipynb` notebook file into a `Document` object.\\', \\'\\', \\'**Parameters**:\\', \\'\\', \\'* `include_outputs` (bool): whether to include cell outputs in the resulting document (default is False).\\', \\'* `max_output_length` (int): the maximum number of characters to include from each cell output (default is 10).\\', \\'* `remove_newline` (bool): whether to remove newline characters from the cell sources and outputs (default is False).\\', \\'* `traceback` (bool): whether to include full traceback (default is False).\\']\\'\\n\\n \\'code\\' cell: \\'[\\'loader.load(include_outputs=True, max_output_length=20, remove_newline=True)\\']\\'\\n\\n', lookup_str='', metadata={'source': 'example_data/notebook.ipynb'}, lookup_index=0)]"
|
||||
]
|
||||
},
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"loader.load()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
},
|
||||
"vscode": {
|
||||
"interpreter": {
|
||||
"hash": "981b6680a42bdb5eb22187741e1607b3aae2cf73db800d1af1f268d1de6a1f70"
|
||||
}
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
@ -0,0 +1,137 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "39af9ecd",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Word Documents\n",
|
||||
"\n",
|
||||
"This covers how to load Word documents into a document format that we can use downstream."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "721c48aa",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.document_loaders import UnstructuredWordDocumentLoader"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "9d3d0e35",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"loader = UnstructuredWordDocumentLoader(\"fake.docx\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "06073f91",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"data = loader.load()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "c9adc5cb",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[Document(page_content='Lorem ipsum dolor sit amet.', lookup_str='', metadata={'source': 'fake.docx'}, lookup_index=0)]"
|
||||
]
|
||||
},
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"data"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "525d6b67",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Retain Elements\n",
|
||||
"\n",
|
||||
"Under the hood, Unstructured creates different \"elements\" for different chunks of text. By default we combine those together, but you can easily keep that separation by specifying `mode=\"elements\"`."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "064f9162",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"loader = UnstructuredWordDocumentLoader(\"fake.docx\", mode=\"elements\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "abefbbdb",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"data = loader.load()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "a547c534",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"Document(page_content='Lorem ipsum dolor sit amet.', lookup_str='', metadata={'source': 'fake.docx', 'filename': 'fake.docx', 'category': 'Title'}, lookup_index=0)"
|
||||
]
|
||||
},
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"data[0]"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.8.13"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
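Because each element carries its Unstructured category in its metadata (the output above shows `'category': 'Title'`), the `mode="elements"` output is easy to filter. A small sketch reusing the `data` variable from the cells above:

```python
# keep only the elements that Unstructured classified as titles
titles = [d for d in data if d.metadata.get("category") == "Title"]
for t in titles:
    print(t.page_content)
```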
@ -0,0 +1,266 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"pycharm": {
|
||||
"name": "#%% md\n"
|
||||
}
|
||||
},
|
||||
"source": [
|
||||
"# AtlasDB\n",
|
||||
"\n",
|
||||
"This notebook shows how to use functionality related to the AtlasDB vectorstore."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {
|
||||
"pycharm": {
|
||||
"name": "#%%\n"
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import time\n",
|
||||
"from langchain.embeddings.openai import OpenAIEmbeddings\n",
|
||||
"from langchain.text_splitter import SpacyTextSplitter\n",
|
||||
"from langchain.vectorstores import AtlasDB\n",
|
||||
"from langchain.document_loaders import TextLoader"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {
|
||||
"scrolled": true
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Collecting en-core-web-sm==3.5.0\n",
|
||||
" Downloading https://github.com/explosion/spacy-models/releases/download/en_core_web_sm-3.5.0/en_core_web_sm-3.5.0-py3-none-any.whl (12.8 MB)\n",
|
||||
"\u001B[2K \u001B[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001B[0m \u001B[32m12.8/12.8 MB\u001B[0m \u001B[31m90.8 MB/s\u001B[0m eta \u001B[36m0:00:00\u001B[0m00:01\u001B[0m00:01\u001B[0m\n",
|
||||
"\u001B[?25hRequirement already satisfied: spacy<3.6.0,>=3.5.0 in /home/ubuntu/langchain/.venv/lib/python3.9/site-packages (from en-core-web-sm==3.5.0) (3.5.0)\n",
|
||||
"Requirement already satisfied: packaging>=20.0 in /home/ubuntu/langchain/.venv/lib/python3.9/site-packages (from spacy<3.6.0,>=3.5.0->en-core-web-sm==3.5.0) (23.0)\n",
|
||||
"Requirement already satisfied: wasabi<1.2.0,>=0.9.1 in /home/ubuntu/langchain/.venv/lib/python3.9/site-packages (from spacy<3.6.0,>=3.5.0->en-core-web-sm==3.5.0) (1.1.1)\n",
|
||||
"Requirement already satisfied: langcodes<4.0.0,>=3.2.0 in /home/ubuntu/langchain/.venv/lib/python3.9/site-packages (from spacy<3.6.0,>=3.5.0->en-core-web-sm==3.5.0) (3.3.0)\n",
|
||||
"Requirement already satisfied: srsly<3.0.0,>=2.4.3 in /home/ubuntu/langchain/.venv/lib/python3.9/site-packages (from spacy<3.6.0,>=3.5.0->en-core-web-sm==3.5.0) (2.4.5)\n",
|
||||
"Requirement already satisfied: pathy>=0.10.0 in /home/ubuntu/langchain/.venv/lib/python3.9/site-packages (from spacy<3.6.0,>=3.5.0->en-core-web-sm==3.5.0) (0.10.1)\n",
|
||||
"Requirement already satisfied: setuptools in /home/ubuntu/langchain/.venv/lib/python3.9/site-packages (from spacy<3.6.0,>=3.5.0->en-core-web-sm==3.5.0) (67.4.0)\n",
|
||||
"Requirement already satisfied: tqdm<5.0.0,>=4.38.0 in /home/ubuntu/langchain/.venv/lib/python3.9/site-packages (from spacy<3.6.0,>=3.5.0->en-core-web-sm==3.5.0) (4.64.1)\n",
|
||||
"Requirement already satisfied: spacy-loggers<2.0.0,>=1.0.0 in /home/ubuntu/langchain/.venv/lib/python3.9/site-packages (from spacy<3.6.0,>=3.5.0->en-core-web-sm==3.5.0) (1.0.4)\n",
|
||||
"Requirement already satisfied: smart-open<7.0.0,>=5.2.1 in /home/ubuntu/langchain/.venv/lib/python3.9/site-packages (from spacy<3.6.0,>=3.5.0->en-core-web-sm==3.5.0) (6.3.0)\n",
|
||||
"Requirement already satisfied: thinc<8.2.0,>=8.1.0 in /home/ubuntu/langchain/.venv/lib/python3.9/site-packages (from spacy<3.6.0,>=3.5.0->en-core-web-sm==3.5.0) (8.1.7)\n",
|
||||
"Requirement already satisfied: cymem<2.1.0,>=2.0.2 in /home/ubuntu/langchain/.venv/lib/python3.9/site-packages (from spacy<3.6.0,>=3.5.0->en-core-web-sm==3.5.0) (2.0.7)\n",
|
||||
"Requirement already satisfied: typer<0.8.0,>=0.3.0 in /home/ubuntu/langchain/.venv/lib/python3.9/site-packages (from spacy<3.6.0,>=3.5.0->en-core-web-sm==3.5.0) (0.7.0)\n",
|
||||
"Requirement already satisfied: requests<3.0.0,>=2.13.0 in /home/ubuntu/langchain/.venv/lib/python3.9/site-packages (from spacy<3.6.0,>=3.5.0->en-core-web-sm==3.5.0) (2.28.2)\n",
|
||||
"Requirement already satisfied: jinja2 in /home/ubuntu/langchain/.venv/lib/python3.9/site-packages (from spacy<3.6.0,>=3.5.0->en-core-web-sm==3.5.0) (3.1.2)\n",
|
||||
"Requirement already satisfied: pydantic!=1.8,!=1.8.1,<1.11.0,>=1.7.4 in /home/ubuntu/langchain/.venv/lib/python3.9/site-packages (from spacy<3.6.0,>=3.5.0->en-core-web-sm==3.5.0) (1.10.5)\n",
|
||||
"Requirement already satisfied: catalogue<2.1.0,>=2.0.6 in /home/ubuntu/langchain/.venv/lib/python3.9/site-packages (from spacy<3.6.0,>=3.5.0->en-core-web-sm==3.5.0) (2.0.8)\n",
|
||||
"Requirement already satisfied: spacy-legacy<3.1.0,>=3.0.11 in /home/ubuntu/langchain/.venv/lib/python3.9/site-packages (from spacy<3.6.0,>=3.5.0->en-core-web-sm==3.5.0) (3.0.12)\n",
|
||||
"Requirement already satisfied: numpy>=1.15.0 in /home/ubuntu/langchain/.venv/lib/python3.9/site-packages (from spacy<3.6.0,>=3.5.0->en-core-web-sm==3.5.0) (1.24.2)\n",
|
||||
"Requirement already satisfied: murmurhash<1.1.0,>=0.28.0 in /home/ubuntu/langchain/.venv/lib/python3.9/site-packages (from spacy<3.6.0,>=3.5.0->en-core-web-sm==3.5.0) (1.0.9)\n",
|
||||
"Requirement already satisfied: preshed<3.1.0,>=3.0.2 in /home/ubuntu/langchain/.venv/lib/python3.9/site-packages (from spacy<3.6.0,>=3.5.0->en-core-web-sm==3.5.0) (3.0.8)\n",
|
||||
"Requirement already satisfied: typing-extensions>=4.2.0 in /home/ubuntu/langchain/.venv/lib/python3.9/site-packages (from pydantic!=1.8,!=1.8.1,<1.11.0,>=1.7.4->spacy<3.6.0,>=3.5.0->en-core-web-sm==3.5.0) (4.5.0)\n",
|
||||
"Requirement already satisfied: charset-normalizer<4,>=2 in /home/ubuntu/langchain/.venv/lib/python3.9/site-packages (from requests<3.0.0,>=2.13.0->spacy<3.6.0,>=3.5.0->en-core-web-sm==3.5.0) (3.0.1)\n",
|
||||
"Requirement already satisfied: idna<4,>=2.5 in /home/ubuntu/langchain/.venv/lib/python3.9/site-packages (from requests<3.0.0,>=2.13.0->spacy<3.6.0,>=3.5.0->en-core-web-sm==3.5.0) (3.4)\n",
|
||||
"Requirement already satisfied: certifi>=2017.4.17 in /home/ubuntu/langchain/.venv/lib/python3.9/site-packages (from requests<3.0.0,>=2.13.0->spacy<3.6.0,>=3.5.0->en-core-web-sm==3.5.0) (2022.12.7)\n",
|
||||
"Requirement already satisfied: urllib3<1.27,>=1.21.1 in /home/ubuntu/langchain/.venv/lib/python3.9/site-packages (from requests<3.0.0,>=2.13.0->spacy<3.6.0,>=3.5.0->en-core-web-sm==3.5.0) (1.26.14)\n",
|
||||
"Requirement already satisfied: blis<0.8.0,>=0.7.8 in /home/ubuntu/langchain/.venv/lib/python3.9/site-packages (from thinc<8.2.0,>=8.1.0->spacy<3.6.0,>=3.5.0->en-core-web-sm==3.5.0) (0.7.9)\n",
|
||||
"Requirement already satisfied: confection<1.0.0,>=0.0.1 in /home/ubuntu/langchain/.venv/lib/python3.9/site-packages (from thinc<8.2.0,>=8.1.0->spacy<3.6.0,>=3.5.0->en-core-web-sm==3.5.0) (0.0.4)\n",
|
||||
"Requirement already satisfied: click<9.0.0,>=7.1.1 in /home/ubuntu/langchain/.venv/lib/python3.9/site-packages (from typer<0.8.0,>=0.3.0->spacy<3.6.0,>=3.5.0->en-core-web-sm==3.5.0) (8.1.3)\n",
|
||||
"Requirement already satisfied: MarkupSafe>=2.0 in /home/ubuntu/langchain/.venv/lib/python3.9/site-packages (from jinja2->spacy<3.6.0,>=3.5.0->en-core-web-sm==3.5.0) (2.1.2)\n",
|
||||
"\n",
|
||||
"\u001B[1m[\u001B[0m\u001B[34;49mnotice\u001B[0m\u001B[1;39;49m]\u001B[0m\u001B[39;49m A new release of pip is available: \u001B[0m\u001B[31;49m23.0\u001B[0m\u001B[39;49m -> \u001B[0m\u001B[32;49m23.0.1\u001B[0m\n",
|
||||
"\u001B[1m[\u001B[0m\u001B[34;49mnotice\u001B[0m\u001B[1;39;49m]\u001B[0m\u001B[39;49m To update, run: \u001B[0m\u001B[32;49mpip install --upgrade pip\u001B[0m\n",
|
||||
"\u001B[38;5;2m✔ Download and installation successful\u001B[0m\n",
|
||||
"You can now load the package via spacy.load('en_core_web_sm')\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"!python -m spacy download en_core_web_sm"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"ATLAS_TEST_API_KEY = '7xDPkYXSYDc1_ErdTPIcoAR9RNd8YDlkS3nVNXcVoIMZ6'"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"loader = TextLoader('../../state_of_the_union.txt')\n",
|
||||
"documents = loader.load()\n",
|
||||
"text_splitter = SpacyTextSplitter(separator='|')\n",
|
||||
"texts = []\n",
|
||||
"for doc in text_splitter.split_documents(documents):\n",
|
||||
" texts.extend(doc.page_content.split('|'))\n",
|
||||
" \n",
|
||||
"texts = [e.strip() for e in texts]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"2023-02-24 16:13:49.696 | INFO | nomic.project:_create_project:884 - Creating project `test_index_1677255228.136989` in organization `Atlas Demo`\n",
|
||||
"2023-02-24 16:13:51.087 | INFO | nomic.project:wait_for_project_lock:993 - test_index_1677255228.136989: Project lock is released.\n",
|
||||
"2023-02-24 16:13:51.225 | INFO | nomic.project:wait_for_project_lock:993 - test_index_1677255228.136989: Project lock is released.\n",
|
||||
"2023-02-24 16:13:51.481 | INFO | nomic.project:add_text:1351 - Uploading text to Atlas.\n",
|
||||
"1it [00:00, 1.20it/s]\n",
|
||||
"2023-02-24 16:13:52.318 | INFO | nomic.project:add_text:1422 - Text upload succeeded.\n",
|
||||
"2023-02-24 16:13:52.628 | INFO | nomic.project:wait_for_project_lock:993 - test_index_1677255228.136989: Project lock is released.\n",
|
||||
"2023-02-24 16:13:53.380 | INFO | nomic.project:create_index:1192 - Created map `test_index_1677255228.136989_index` in project `test_index_1677255228.136989`: https://atlas.nomic.ai/map/ee2354a3-7f9a-4c6b-af43-b0cda09d7198/db996d77-8981-48a0-897a-ff2c22bbf541\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"db = AtlasDB.from_texts(texts=texts,\n",
|
||||
" name='test_index_'+str(time.time()),\n",
|
||||
" description='test_index',\n",
|
||||
" api_key=ATLAS_TEST_API_KEY,\n",
|
||||
" index_kwargs={'build_topic_model': True})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"metadata": {
|
||||
"scrolled": false
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"2023-02-24 16:14:09.106 | INFO | nomic.project:wait_for_project_lock:993 - test_index_1677255228.136989: Project lock is released.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"with db.project.wait_for_project_lock():\n",
|
||||
" time.sleep(1)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/html": [
|
||||
"\n",
|
||||
" <strong><a href=\"https://atlas.nomic.ai/dashboard/project/ee2354a3-7f9a-4c6b-af43-b0cda09d7198\">test_index_1677255228.136989</strong></a>\n",
|
||||
" <br>\n",
|
||||
" A description for your project 508 datums inserted.\n",
|
||||
" <br>\n",
|
||||
" 1 index built.\n",
|
||||
" <br><strong>Projections</strong>\n",
|
||||
"<ul>\n",
|
||||
"<li>test_index_1677255228.136989_index. Status Completed. <a target=\"_blank\" href=\"https://atlas.nomic.ai/map/ee2354a3-7f9a-4c6b-af43-b0cda09d7198/db996d77-8981-48a0-897a-ff2c22bbf541\">view online</a></li></ul><hr><script>\n",
|
||||
" destroy = function() {\n",
|
||||
" document.getElementById(\"iframedb996d77-8981-48a0-897a-ff2c22bbf541\").remove()\n",
|
||||
" }\n",
|
||||
" </script>\n",
|
||||
"\n",
|
||||
" <h4>Projection ID: db996d77-8981-48a0-897a-ff2c22bbf541</h4>\n",
|
||||
" <div class=\"actions\">\n",
|
||||
" <div id=\"hide\" class=\"action\" onclick=\"destroy()\">Hide embedded project</div>\n",
|
||||
" <div class=\"action\" id=\"out\">\n",
|
||||
" <a href=\"https://atlas.nomic.ai/map/ee2354a3-7f9a-4c6b-af43-b0cda09d7198/db996d77-8981-48a0-897a-ff2c22bbf541\" target=\"_blank\">Explore on atlas.nomic.ai</a>\n",
|
||||
" </div>\n",
|
||||
" </div>\n",
|
||||
" \n",
|
||||
" <iframe class=\"iframe\" id=\"iframedb996d77-8981-48a0-897a-ff2c22bbf541\" allow=\"clipboard-read; clipboard-write\" src=\"https://atlas.nomic.ai/map/ee2354a3-7f9a-4c6b-af43-b0cda09d7198/db996d77-8981-48a0-897a-ff2c22bbf541\">\n",
|
||||
" </iframe>\n",
|
||||
"\n",
|
||||
" <style>\n",
|
||||
" .iframe {\n",
|
||||
" /* vh can be **very** large in vscode ipynb. */\n",
|
||||
" height: min(75vh, 66vw);\n",
|
||||
" width: 100%;\n",
|
||||
" }\n",
|
||||
" </style>\n",
|
||||
" \n",
|
||||
" <style>\n",
|
||||
" .actions {\n",
|
||||
" display: block;\n",
|
||||
" }\n",
|
||||
" .action {\n",
|
||||
" min-height: 18px;\n",
|
||||
" margin: 5px;\n",
|
||||
" transition: all 500ms ease-in-out;\n",
|
||||
" }\n",
|
||||
" .action:hover {\n",
|
||||
" cursor: pointer;\n",
|
||||
" }\n",
|
||||
" #hide:hover::after {\n",
|
||||
" content: \" X\";\n",
|
||||
" }\n",
|
||||
" #out:hover::after {\n",
|
||||
" content: \"\";\n",
|
||||
" }\n",
|
||||
" </style>\n",
|
||||
" "
|
||||
],
|
||||
"text/plain": [
|
||||
"AtlasProject: <{'id': 'ee2354a3-7f9a-4c6b-af43-b0cda09d7198', 'owner': '9c29afbb-a002-4d49-958e-ecf5ae1351ac', 'project_name': 'test_index_1677255228.136989', 'creator': 'auth0|63efc4b5462246f4d9a6ecf2', 'description': 'A description for your project', 'opensearch_index_id': 'f61fb8dd-0abf-4f31-9130-41870e443902', 'is_public': True, 'project_fields': ['atlas_id', 'text'], 'unique_id_field': 'atlas_id', 'modality': 'text', 'total_datums_in_project': 508, 'created_timestamp': '2023-02-24T16:13:50.313363+00:00', 'atlas_indices': [{'id': 'b1b01833-0964-4597-a4bc-a2d60700949d', 'project_id': 'ee2354a3-7f9a-4c6b-af43-b0cda09d7198', 'index_name': 'test_index_1677255228.136989_index', 'indexed_field': 'text', 'created_timestamp': '2023-02-24T16:13:52.957101+00:00', 'updated_timestamp': '2023-02-24T16:14:03.469621+00:00', 'atoms': ['charchunk', 'document'], 'colorable_fields': [], 'embedders': [{'id': '7ec0868a-4eed-4414-a482-25cce9803e1b', 'atlas_index_id': 'b1b01833-0964-4597-a4bc-a2d60700949d', 'ready': True, 'model_name': 'NomicEmbed', 'hyperparameters': {'norm': 'both', 'batch_size': 20, 'polymerize_by': 'charchunk', 'dataset_buffer_size': 1000}}], 'nearest_neighbor_indices': [{'id': '86f8e3ff-e07c-4678-a4d7-144db4b0301d', 'index_name': 'NomicOrganize', 'ready': True, 'hyperparameters': {'dim': 384, 'space': 'l2'}, 'atom_strategies': ['document']}], 'projections': [{'id': 'db996d77-8981-48a0-897a-ff2c22bbf541', 'projection_name': 'NomicProject', 'ready': True, 'hyperparameters': {'spread': 1.0, 'n_epochs': 50, 'n_neighbors': 15}, 'atom_strategies': ['document'], 'created_timestamp': '2023-02-24T16:13:52.979561+00:00', 'updated_timestamp': '2023-02-24T16:14:03.466309+00:00'}]}], 'insert_update_delete_lock': False}>"
|
||||
]
|
||||
},
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"db.project"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.4"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 1
|
||||
}
|
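The cells above build the index and inspect the project, but do not query it. A hypothetical sketch of doing so: it assumes the store is constructed with an embedding function (for example, the `OpenAIEmbeddings` imported above passed as `embedding=`), since `similarity_search` needs a way to embed the query.

```python
# hypothetical: query the index, assuming `db` was created with
# AtlasDB.from_texts(..., embedding=OpenAIEmbeddings(), ...)
results = db.similarity_search("What did the president say about the economy?", k=4)
print(results[0].page_content[:120])
```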
@ -0,0 +1,220 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "683953b3",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# OpenSearch\n",
|
||||
"\n",
|
||||
"This notebook shows how to use functionality related to the OpenSearch database.\n",
|
||||
"\n",
|
||||
"To run this notebook, you should have an OpenSearch instance up and running; see the [installation guide](https://opensearch.org/docs/latest/install-and-configure/install-opensearch/index/).\n",
|
||||
"`similarity_search` by default performs an Approximate k-NN search, which uses one of several algorithms (lucene, nmslib, faiss) recommended for\n",
|
||||
"large datasets. For brute-force search, the Script Scoring and Painless Scripting search methods are also available.\n",
|
||||
"Check the [k-NN plugin documentation](https://opensearch.org/docs/latest/search-plugins/knn/index/) for more details."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "aac9563e",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.embeddings.openai import OpenAIEmbeddings\n",
|
||||
"from langchain.text_splitter import CharacterTextSplitter\n",
|
||||
"from langchain.vectorstores import OpenSearchVectorSearch\n",
|
||||
"from langchain.document_loaders import TextLoader"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "a3c3999a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.document_loaders import TextLoader\n",
|
||||
"loader = TextLoader('../../state_of_the_union.txt')\n",
|
||||
"documents = loader.load()\n",
|
||||
"text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)\n",
|
||||
"docs = text_splitter.split_documents(documents)\n",
|
||||
"# the similarity_search cells below index the raw text strings, so pull them out\n",
|
||||
"texts = [d.page_content for d in docs]\n",
|
||||
"\n",
|
||||
"embeddings = OpenAIEmbeddings()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"docsearch = OpenSearchVectorSearch.from_texts(texts, embeddings, opensearch_url=\"http://localhost:9200\")\n",
|
||||
"\n",
|
||||
"query = \"What did the president say about Ketanji Brown Jackson\"\n",
|
||||
"docs = docsearch.similarity_search(query)"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"pycharm": {
|
||||
"name": "#%%\n"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"print(docs[0].page_content)"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"pycharm": {
|
||||
"name": "#%%\n"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"#### similarity_search using Approximate k-NN Search with Custom Parameters"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"docsearch = OpenSearchVectorSearch.from_texts(texts, embeddings, opensearch_url=\"http://localhost:9200\", engine=\"faiss\", space_type=\"innerproduct\", ef_construction=256, m=48)\n",
|
||||
"\n",
|
||||
"query = \"What did the president say about Ketanji Brown Jackson\"\n",
|
||||
"docs = docsearch.similarity_search(query)"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"pycharm": {
|
||||
"name": "#%%\n"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"print(docs[0].page_content)"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"pycharm": {
|
||||
"name": "#%%\n"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"#### similarity_search using Script Scoring with Custom Parameters"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"docsearch = OpenSearchVectorSearch.from_texts(texts, embeddings, opensearch_url=\"http://localhost:9200\", is_appx_search=False)\n",
|
||||
"\n",
|
||||
"query = \"What did the president say about Ketanji Brown Jackson\"\n",
|
||||
"docs = docsearch.similarity_search(query, k=1, search_type=\"script_scoring\")"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"pycharm": {
|
||||
"name": "#%%\n"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"print(docs[0].page_content)"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"pycharm": {
|
||||
"name": "#%%\n"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"#### similarity_search using Painless Scripting with Custom Parameters"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"docsearch = OpenSearchVectorSearch.from_texts(texts, embeddings, opensearch_url=\"http://localhost:9200\", is_appx_search=False)\n",
|
||||
"filter = {\"bool\": {\"filter\": {\"term\": {\"text\": \"smuggling\"}}}}\n",
|
||||
"query = \"What did the president say about Ketanji Brown Jackson\"\n",
|
||||
"docs = docsearch.similarity_search(query, search_type=\"painless_scripting\", space_type=\"cosineSimilarity\", pre_filter=filter)"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"pycharm": {
|
||||
"name": "#%%\n"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"print(docs[0].page_content)"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"pycharm": {
|
||||
"name": "#%%\n"
|
||||
}
|
||||
}
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
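A hedged alternative to the `from_texts` cells above: if `OpenSearchVectorSearch` supports the generic `from_documents` path with metadata (as most LangChain vectorstores do), the split `docs` can be indexed directly so the source path travels with each chunk.

```python
# assumption: from_documents / metadatas are supported by this vectorstore;
# `docs` and `embeddings` come from the splitting cell above
docsearch = OpenSearchVectorSearch.from_documents(
    docs, embeddings, opensearch_url="http://localhost:9200"
)

query = "What did the president say about Ketanji Brown Jackson"
results = docsearch.similarity_search(query, k=4)
print(results[0].metadata)  # should include the original source path if supported
```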
@ -0,0 +1,108 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "9597802c",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Aleph Alpha\n",
|
||||
"This example goes over how to use LangChain to interact with Aleph Alpha models"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "6fb585dd",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.llms import AlephAlpha\n",
|
||||
"from langchain import PromptTemplate, LLMChain"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "f81a230d",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"template = \"\"\"Q: {question}\n",
|
||||
"\n",
|
||||
"A:\"\"\"\n",
|
||||
"\n",
|
||||
"prompt = PromptTemplate(template=template, input_variables=[\"question\"])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "f0d26e48",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"llm = AlephAlpha(model=\"luminous-extended\", maximum_tokens=20, stop_sequences=[\"Q:\"])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "6811d621",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"llm_chain = LLMChain(prompt=prompt, llm=llm)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "3058e63f",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"' Artificial Intelligence (AI) is the simulation of human intelligence processes by machines, especially computer systems.\\n'"
|
||||
]
|
||||
},
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"question = \"What is AI?\"\n",
|
||||
"\n",
|
||||
"llm_chain.run(question)"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": ".venv",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.9"
|
||||
},
|
||||
"vscode": {
|
||||
"interpreter": {
|
||||
"hash": "2d002ec47225e662695b764370d7966aa11eeb4302edc2f497bbf96d49c8f899"
|
||||
}
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
@ -0,0 +1,85 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Banana\n",
|
||||
"This example goes over how to use LangChain to interact with Banana models"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"from langchain.llms import Banana\n",
|
||||
"from langchain import PromptTemplate, LLMChain\n",
|
||||
"os.environ[\"BANANA_API_KEY\"] = \"YOUR_API_KEY\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"template = \"\"\"Question: {question}\n",
|
||||
"\n",
|
||||
"Answer: Let's think step by step.\"\"\"\n",
|
||||
"\n",
|
||||
"prompt = PromptTemplate(template=template, input_variables=[\"question\"])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"llm = Banana(model_key=\"YOUR_MODEL_KEY\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"llm_chain = LLMChain(prompt=prompt, llm=llm)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"question = \"What NFL team won the Super Bowl in the year Justin Bieber was born?\"\n",
|
||||
"\n",
|
||||
"llm_chain.run(question)"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3.9.12 ('palm')",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"name": "python",
|
||||
"version": "3.9.12"
|
||||
},
|
||||
"orig_nbformat": 4,
|
||||
"vscode": {
|
||||
"interpreter": {
|
||||
"hash": "a0a0263b650d907a3bfe41c0f8d6a63a071b884df3cfdc1579f00cdc1aed6b03"
|
||||
}
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
@ -0,0 +1,141 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# DeepInfra LLM Example\n",
|
||||
"This notebook goes over how to use LangChain with [DeepInfra](https://deepinfra.com)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Imports"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"from langchain.llms import DeepInfra\n",
|
||||
"from langchain import PromptTemplate, LLMChain"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Set the Environment API Key\n",
|
||||
"Make sure to get your API key from DeepInfra. You are given 1 hour of free serverless GPU compute to test different models.\n",
|
||||
"You can print your token with `deepctl auth token`"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"os.environ[\"DEEPINFRA_API_TOKEN\"] = \"YOUR_KEY_HERE\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Create the DeepInfra instance\n",
|
||||
"Make sure to deploy your model first via `deepctl deploy create -m google/flan-t5-xl` (for example)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"llm = DeepInfra(model_id=\"DEPLOYED MODEL ID\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Create a Prompt Template\n",
|
||||
"We will create a prompt template for Question and Answer."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"template = \"\"\"Question: {question}\n",
|
||||
"\n",
|
||||
"Answer: Let's think step by step.\"\"\"\n",
|
||||
"\n",
|
||||
"prompt = PromptTemplate(template=template, input_variables=[\"question\"])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Initiate the LLMChain"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"llm_chain = LLMChain(prompt=prompt, llm=llm)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Run the LLMChain\n",
|
||||
"Provide a question and run the LLMChain."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"question = \"What NFL team won the Super Bowl in 2015?\"\n",
|
||||
"\n",
|
||||
"llm_chain.run(question)"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3.9.12 ('palm')",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"name": "python",
|
||||
"version": "3.9.12"
|
||||
},
|
||||
"orig_nbformat": 4,
|
||||
"vscode": {
|
||||
"interpreter": {
|
||||
"hash": "a0a0263b650d907a3bfe41c0f8d6a63a071b884df3cfdc1579f00cdc1aed6b03"
|
||||
}
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
@ -0,0 +1,83 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Modal\n",
|
||||
"This example goes over how to use LangChain to interact with Modal models"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.llms import Modal\n",
|
||||
"from langchain import PromptTemplate, LLMChain"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"template = \"\"\"Question: {question}\n",
|
||||
"\n",
|
||||
"Answer: Let's think step by step.\"\"\"\n",
|
||||
"\n",
|
||||
"prompt = PromptTemplate(template=template, input_variables=[\"question\"])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"llm = Modal(endpoint_url=\"YOUR_ENDPOINT_URL\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"llm_chain = LLMChain(prompt=prompt, llm=llm)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"question = \"What NFL team won the Super Bowl in the year Justin Bieber was born?\"\n",
|
||||
"\n",
|
||||
"llm_chain.run(question)"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3.9.12 ('palm')",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"name": "python",
|
||||
"version": "3.9.12"
|
||||
},
|
||||
"orig_nbformat": 4,
|
||||
"vscode": {
|
||||
"interpreter": {
|
||||
"hash": "a0a0263b650d907a3bfe41c0f8d6a63a071b884df3cfdc1579f00cdc1aed6b03"
|
||||
}
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
@ -0,0 +1,83 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# StochasticAI\n",
|
||||
"This example goes over how to use LangChain to interact with StochasticAI models"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.llms import StochasticAI\n",
|
||||
"from langchain import PromptTemplate, LLMChain"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"template = \"\"\"Question: {question}\n",
|
||||
"\n",
|
||||
"Answer: Let's think step by step.\"\"\"\n",
|
||||
"\n",
|
||||
"prompt = PromptTemplate(template=template, input_variables=[\"question\"])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"llm = StochasticAI(api_url=\"YOUR_API_URL\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"llm_chain = LLMChain(prompt=prompt, llm=llm)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"question = \"What NFL team won the Super Bowl in the year Justin Bieber was born?\"\n",
|
||||
"\n",
|
||||
"llm_chain.run(question)"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3.9.12 ('palm')",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"name": "python",
|
||||
"version": "3.9.12"
|
||||
},
|
||||
"orig_nbformat": 4,
|
||||
"vscode": {
|
||||
"interpreter": {
|
||||
"hash": "a0a0263b650d907a3bfe41c0f8d6a63a071b884df3cfdc1579f00cdc1aed6b03"
|
||||
}
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
@ -0,0 +1,83 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Writer\n",
|
||||
"This example goes over how to use LangChain to interact with Writer models"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.llms import Writer\n",
|
||||
"from langchain import PromptTemplate, LLMChain"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"template = \"\"\"Question: {question}\n",
|
||||
"\n",
|
||||
"Answer: Let's think step by step.\"\"\"\n",
|
||||
"\n",
|
||||
"prompt = PromptTemplate(template=template, input_variables=[\"question\"])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"llm = Writer()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"llm_chain = LLMChain(prompt=prompt, llm=llm)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"question = \"What NFL team won the Super Bowl in the year Justin Bieber was born?\"\n",
|
||||
"\n",
|
||||
"llm_chain.run(question)"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3.9.12 ('palm')",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"name": "python",
|
||||
"version": "3.9.12"
|
||||
},
|
||||
"orig_nbformat": 4,
|
||||
"vscode": {
|
||||
"interpreter": {
|
||||
"hash": "a0a0263b650d907a3bfe41c0f8d6a63a071b884df3cfdc1579f00cdc1aed6b03"
|
||||
}
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
@ -0,0 +1,184 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "9355a547",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Partial Prompt Templates\n",
|
||||
"\n",
|
||||
"A prompt template is a class with a `.format` method which takes in a key-value map and returns a string (a prompt) to pass to the language model. As with functions, it can make sense to \"partial\" a prompt template - e.g. pass in a subset of the required values, so as to create a new prompt template that expects only the remaining subset of values.\n",
|
||||
"\n",
|
||||
"LangChain supports this in two ways: we allow for partially formatted prompts (1) with string values, (2) with functions that return string values. These two different ways support different use cases. In the documentation below we go over the motivations for both use cases as well as how to do it in LangChain.\n",
|
||||
"\n",
|
||||
"## Partial With Strings\n",
|
||||
"\n",
|
||||
"One common use case for wanting to partial a prompt template is if you get some of the variables before others. For example, suppose you have a prompt template that requires two variables, `foo` and `bar`. If you get the `foo` value early on in the chain, but the `bar` value later, it can be annoying to wait until you have both variables in the same place to pass them to the prompt template. Instead, you can partial the prompt template with the `foo` value, and then pass the partialed prompt template along and just use that. Below is an example of doing this:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "643af5da",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.prompts import PromptTemplate"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "4080d8d7",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"foobaz\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"prompt = PromptTemplate(template=\"{foo}{bar}\", input_variables=[\"foo\", \"bar\"])\n",
|
||||
"partial_prompt = prompt.partial(foo=\"foo\");\n",
|
||||
"print(partial_prompt.format(bar=\"baz\"))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "9986766e",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"You can also just initialize the prompt with the partialed variables."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "e2ce95b3",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"foobaz\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"prompt = PromptTemplate(template=\"{foo}{bar}\", input_variables=[\"bar\"], partial_variables={\"foo\": \"foo\"})\n",
|
||||
"print(prompt.format(bar=\"baz\"))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "a9c66f83",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Partial With Functions\n",
|
||||
"\n",
|
||||
"The other common use is to partial with a function. The use case for this is when you have a variable you know that you always want to fetch in a common way. A prime example of this is with date or time. Imagine you have a prompt which you always want to have the current date. You can't hard code it in the prompt, and passing it along with the other input variables is a bit annoying. In this case, it's very handy to be able to partial the prompt with a function that always returns the current date."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "d0712d8a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from datetime import datetime\n",
|
||||
"\n",
|
||||
"def _get_datetime():\n",
|
||||
" now = datetime.now()\n",
|
||||
" return now.strftime(\"%m/%d/%Y, %H:%M:%S\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "4cbcb666",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Tell me a funny joke about the day 02/27/2023, 22:15:16\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"prompt = PromptTemplate(\n",
|
||||
" template=\"Tell me a {adjective} joke about the day {date}\", \n",
|
||||
" input_variables=[\"adjective\", \"date\"]\n",
|
||||
");\n",
|
||||
"partial_prompt = prompt.partial(date=_get_datetime)\n",
|
||||
"print(partial_prompt.format(adjective=\"funny\"))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "ffed6811",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"You can also just initialize the prompt with the partialed variables, which often makes more sense in this workflow."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "96285b25",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Tell me a funny joke about the day 02/27/2023, 22:15:16\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"prompt = PromptTemplate(\n",
|
||||
" template=\"Tell me a {adjective} joke about the day {date}\", \n",
|
||||
" input_variables=[\"adjective\"],\n",
|
||||
" partial_variables={\"date\": _get_datetime}\n",
|
||||
");\n",
|
||||
"print(prompt.format(adjective=\"funny\"))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "4bff16f7",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
@ -1,85 +1,85 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "8f210ec3",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Bash\n",
|
||||
"It can often be useful to have an LLM generate bash commands, and then run them. A common use case this is for letting it interact with your local file system. We provide an easy util to execute bash commands."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "f7b3767b",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.utilities import BashProcess"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "cf1c92f0",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"bash = BashProcess()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "2fa952fc",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "8f210ec3",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Bash\n",
|
||||
"It can often be useful to have an LLM generate bash commands, and then run them. A common use case for this is letting the LLM interact with your local file system. We provide an easy util to execute bash commands."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "f7b3767b",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.utilities import BashProcess"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"bash.ipynb\n",
|
||||
"google_search.ipynb\n",
|
||||
"python.ipynb\n",
|
||||
"requests.ipynb\n",
|
||||
"serpapi.ipynb\n",
|
||||
"\n"
|
||||
]
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "cf1c92f0",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"bash = BashProcess()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "2fa952fc",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"bash.ipynb\n",
|
||||
"google_search.ipynb\n",
|
||||
"python.ipynb\n",
|
||||
"requests.ipynb\n",
|
||||
"serpapi.ipynb\n",
|
||||
"\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(bash.run(\"ls\"))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "851fee9f",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.9"
|
||||
}
|
||||
}
},
"nbformat": 4,
"nbformat_minor": 5
|
||||
}
|
||||
|
@ -0,0 +1,180 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "code",
|
||||
"metadata": {
|
||||
"jukit_cell_id": "O4HPx3boF0"
|
||||
},
|
||||
"source": [],
|
||||
"outputs": [],
|
||||
"execution_count": null
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"jukit_cell_id": "hqQkbPEwTJ"
|
||||
},
|
||||
"source": [
|
||||
"# Using the DockerWrapper utility"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"metadata": {
|
||||
"jukit_cell_id": "vCepuypaFH"
|
||||
},
|
||||
"source": [
|
||||
"from langchain.utilities.docker import DockerWrapper"
|
||||
],
|
||||
"outputs": [],
|
||||
"execution_count": null
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"metadata": {
|
||||
"jukit_cell_id": "BtYVqy2YtO"
|
||||
},
|
||||
"source": [
|
||||
"d = DockerWrapper(image='shell')"
|
||||
],
|
||||
"outputs": [],
|
||||
"execution_count": null
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"metadata": {
|
||||
"jukit_cell_id": "ELWWm03ptQ"
|
||||
},
|
||||
"source": [
|
||||
"query = \"\"\"\n",
|
||||
"for i in $(seq 1 10)\n",
|
||||
"do\n",
|
||||
" echo $i\n",
|
||||
"done\n",
|
||||
"\"\"\"\n",
|
||||
"print(d.exec_run(query))"
|
||||
],
|
||||
"outputs": [
|
||||
{
|
||||
"output_type": "stream",
|
||||
"name": "stdout",
|
||||
"text": "1\n2\n3\n4\n5\n6\n7\n8\n9\n10\n"
|
||||
}
|
||||
],
|
||||
"execution_count": 1
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"metadata": {
|
||||
"jukit_cell_id": "lGMqLz5sDo"
|
||||
},
|
||||
"source": [
|
||||
"p = DockerWrapper(image='python')\n",
|
||||
"\n",
|
||||
"py_payload = \"\"\"\n",
|
||||
"def hello_world():\n",
|
||||
" return 'hello world'\n",
|
||||
"\n",
|
||||
"hello_world()\n",
|
||||
"\"\"\""
|
||||
],
|
||||
"outputs": [],
|
||||
"execution_count": null
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"metadata": {
|
||||
"jukit_cell_id": "X04Wd6zbrk"
|
||||
},
|
||||
"source": [
|
||||
"print(p.exec_run(py_payload))"
|
||||
],
|
||||
"outputs": [
|
||||
{
|
||||
"output_type": "stream",
|
||||
"name": "stdout",
|
||||
"text": "'hello world'\n"
|
||||
}
|
||||
],
|
||||
"execution_count": 2
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"metadata": {
|
||||
"jukit_cell_id": "lKOfuDoJGk"
|
||||
},
|
||||
"source": [],
|
||||
"outputs": [],
|
||||
"execution_count": null
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"jukit_cell_id": "eSzXtDrpqU"
|
||||
},
|
||||
"source": [
|
||||
"## Passing custom parameters\n",
|
||||
"\n",
|
||||
"By default containers are run with a safe set of parameters. You can pass any parameters\n",
|
||||
"that are accepted by the docker python sdk to the run and exec commands.\n",
|
||||
"\n",
|
||||
"### Using networking"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"metadata": {
|
||||
"jukit_cell_id": "eWFGCxD9pv"
|
||||
},
|
||||
"source": [
|
||||
"# by default containers don't have access to the network\n",
|
||||
"print(d.run('ping -c 1 google.com'))"
|
||||
],
|
||||
"outputs": [
|
||||
{
|
||||
"output_type": "stream",
|
||||
"name": "stdout",
|
||||
"text": "STDERR: Command '/bin/sh -c 'ping -c 1 google.com'' in image 'alpine:latest' returned non-zero exit status 1: b\"ping: bad address 'google.com'\\n\"\n"
|
||||
}
|
||||
],
|
||||
"execution_count": 3
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"metadata": {
|
||||
"jukit_cell_id": "Z0YkpuXVyL"
|
||||
},
|
||||
"source": [
|
||||
"# using the network parameter\n",
|
||||
"print(d.run('ping -c 1 google.com', network='bridge'))"
|
||||
],
|
||||
"outputs": [
|
||||
{
|
||||
"output_type": "stream",
|
||||
"name": "stdout",
|
||||
"text": "PING google.com (142.250.200.110): 56 data bytes\n64 bytes from 142.250.200.110: seq=0 ttl=42 time=13.695 ms\n\n--- google.com ping statistics ---\n1 packets transmitted, 1 packets received, 0% packet loss\nround-trip min/avg/max = 13.695/13.695/13.695 ms\n"
|
||||
}
|
||||
],
|
||||
"execution_count": 4
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"metadata": {
|
||||
"jukit_cell_id": "3rMWzzuLHq"
|
||||
},
|
||||
"source": [],
|
||||
"outputs": [],
|
||||
"execution_count": null
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"anaconda-cloud": {},
|
||||
"kernelspec": {
|
||||
"display_name": "python",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 4
|
||||
}
|
@ -0,0 +1,121 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "16763ed3",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# IFTTT WebHooks\n",
|
||||
"\n",
|
||||
"This notebook shows how to use IFTTT Webhooks.\n",
|
||||
"\n",
|
||||
"From https://github.com/SidU/teams-langchain-js/wiki/Connecting-IFTTT-Services.\n",
|
||||
"\n",
|
||||
"# Creating a webhook\n",
|
||||
"- Go to https://ifttt.com/create\n",
|
||||
"\n",
|
||||
"# Configuring the \"If This\"\n",
|
||||
"- Click on the \"If This\" button in the IFTTT interface.\n",
|
||||
"- Search for \"Webhooks\" in the search bar.\n",
|
||||
"- Choose the first option for \"Receive a web request with a JSON payload.\"\n",
|
||||
"- Choose an Event Name that is specific to the service you plan to connect to.\n",
|
||||
"This will make it easier for you to manage the webhook URL.\n",
|
||||
"For example, if you're connecting to Spotify, you could use \"Spotify\" as your\n",
|
||||
"Event Name.\n",
|
||||
"- Click the \"Create Trigger\" button to save your settings and create your webhook.\n",
|
||||
"\n",
|
||||
"# Configuring the \"Then That\"\n",
|
||||
"- Tap on the \"Then That\" button in the IFTTT interface.\n",
|
||||
"- Search for the service you want to connect, such as Spotify.\n",
|
||||
"- Choose an action from the service, such as \"Add track to a playlist\".\n",
|
||||
"- Configure the action by specifying the necessary details, such as the playlist name,\n",
|
||||
"e.g., \"Songs from AI\".\n",
|
||||
"- Reference the JSON Payload received by the Webhook in your action. For the Spotify\n",
|
||||
"scenario, choose \"{{JsonPayload}}\" as your search query.\n",
|
||||
"- Tap the \"Create Action\" button to save your action settings.\n",
|
||||
"- Once you have finished configuring your action, click the \"Finish\" button to\n",
|
||||
"complete the setup.\n",
|
||||
"- Congratulations! You have successfully connected the Webhook to the desired\n",
|
||||
"service, and you're ready to start receiving data and triggering actions 🎉\n",
|
||||
"\n",
|
||||
"# Finishing up\n",
|
||||
"- To get your webhook URL go to https://ifttt.com/maker_webhooks/settings\n",
|
||||
"- Copy the IFTTT key value from there. The URL is of the form\n",
|
||||
"https://maker.ifttt.com/use/YOUR_IFTTT_KEY. Grab the YOUR_IFTTT_KEY value.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "10a46e7e",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.tools.ifttt import IFTTTWebhook"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "12003d72",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"key = os.environ[\"IFTTTKey\"]\n",
|
||||
"url = f\"https://maker.ifttt.com/trigger/spotify/json/with/key/{key}\"\n",
|
||||
"tool = IFTTTWebhook(name=\"Spotify\", description=\"Add a song to spotify playlist\", url=url)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "6e68f846",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"\"Congratulations! You've fired the spotify JSON event\""
|
||||
]
|
||||
},
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"tool.run(\"taylor swift\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "a7e599c9",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
@ -1,29 +1,35 @@
|
||||
# Key Concepts
|
||||
|
||||
## Python REPL
|
||||
Sometimes, for complex calculations, rather than have an LLM generate the answer directly,
|
||||
it can be better to have the LLM generate code to calculate the answer, and then run that code to get the answer.
|
||||
In order to easily do that, we provide a simple Python REPL to execute commands in.
|
||||
This interface will only return things that are printed -
|
||||
therefore, if you want to use it to calculate an answer, make sure to have it print out the answer.
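
A minimal sketch of what this looks like (assuming the `PythonREPL` utility is importable from `langchain.utilities`; in some versions it lives in `langchain.python`):

```python
from langchain.utilities import PythonREPL

python_repl = PythonREPL()

# Only printed output is returned, so make the command print its result.
print(python_repl.run("print(17 * 23)"))
```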
|
||||
|
||||
## Bash
|
||||
It can often be useful to have an LLM generate bash commands, and then run them.
|
||||
A common use case for this is letting the LLM interact with your local file system.
|
||||
We provide an easy component to execute bash commands.
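
For example, mirroring the Bash notebook above:

```python
from langchain.utilities import BashProcess

bash = BashProcess()

# Returns the captured output of the command.
print(bash.run("ls"))
```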
|
||||
|
||||
## Requests Wrapper
|
||||
The web contains a lot of information that LLMs do not have access to.
|
||||
In order to easily let LLMs interact with that information,
|
||||
we provide a wrapper around the Python Requests module that takes in a URL and fetches data from that URL.
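
A minimal sketch, assuming the `RequestsWrapper` utility and its `get` method (the exact class and method names have shifted between versions):

```python
from langchain.utilities import RequestsWrapper

requests_wrapper = RequestsWrapper()

# Fetches the raw body of the page as text.
page = requests_wrapper.get("https://www.example.com")
print(page[:250])
```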
|
||||
|
||||
## Google Search
|
||||
|
||||
This uses the official Google Search API to look up information on the web.
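
A minimal sketch using the `GoogleSearchAPIWrapper` utility (assumes the `GOOGLE_API_KEY` and `GOOGLE_CSE_ID` environment variables are set):

```python
from langchain.utilities import GoogleSearchAPIWrapper

search = GoogleSearchAPIWrapper()

# Returns a snippet-style summary of the top results.
print(search.run("What is LangChain?"))
```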
|
||||
|
||||
## SerpAPI
|
||||
|
||||
This uses SerpAPI, a third-party search API, to interact with Google Search.
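
A minimal sketch using the `SerpAPIWrapper` utility (assumes the `SERPAPI_API_KEY` environment variable is set):

```python
from langchain.utilities import SerpAPIWrapper

search = SerpAPIWrapper()

# Runs the query through SerpAPI and returns the answer text.
print(search.run("Who won the first Super Bowl?"))
```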
|
||||
|
||||
## Searx Search
|
||||
|
||||
This uses the Searx (SearxNG fork) meta search engine API to look up information
on the web. It supports 139 search engines and is easy to self-host
|
||||
which makes it a good choice for privacy-conscious users.
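
A minimal sketch using the `SearxSearchWrapper` utility (assumes a SearxNG instance is reachable at the given `searx_host`):

```python
from langchain.utilities import SearxSearchWrapper

# Point the wrapper at your self-hosted SearxNG instance.
search = SearxSearchWrapper(searx_host="http://localhost:8888")

print(search.run("What is a large language model?"))
```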
|
||||
|
@ -0,0 +1,33 @@
|
||||
"""Load CoNLL-U files."""
|
||||
import csv
|
||||
from typing import List
|
||||
|
||||
from langchain.docstore.document import Document
|
||||
from langchain.document_loaders.base import BaseLoader
|
||||
|
||||
|
||||
class CoNLLULoader(BaseLoader):
|
||||
"""Load CoNLL-U files."""
|
||||
|
||||
def __init__(self, file_path: str):
|
||||
"""Initialize with file path."""
|
||||
self.file_path = file_path
|
||||
|
||||
def load(self) -> List[Document]:
|
||||
"""Load from file path."""
|
||||
with open(self.file_path, encoding="utf8") as f:
|
||||
tsv = list(csv.reader(f, delimiter="\t"))
|
||||
|
||||
# If len(line) > 1, the line is not a comment
|
||||
lines = [line for line in tsv if len(line) > 1]
|
||||
|
||||
text = ""
|
||||
for i, line in enumerate(lines):
|
||||
# Do not add a space after a punctuation mark or at the end of the sentence
|
||||
if line[9] == "SpaceAfter=No" or i == len(lines) - 1:
|
||||
text += line[1]
|
||||
else:
|
||||
text += line[1] + " "
|
||||
|
||||
metadata = {"source": self.file_path}
|
||||
return [Document(page_content=text, metadata=metadata)]
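
A minimal usage sketch for the loader above (the import path and the example file name are assumptions):

```python
from langchain.document_loaders import CoNLLULoader  # assumed export path

loader = CoNLLULoader("example_data/example.conllu")
docs = loader.load()
print(docs[0].page_content)
```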
|
@ -0,0 +1,57 @@
|
||||
"""Loader that loads Facebook chat json dump."""
|
||||
import datetime
|
||||
import json
|
||||
from pathlib import Path
|
||||
from typing import List
|
||||
|
||||
from langchain.docstore.document import Document
|
||||
from langchain.document_loaders.base import BaseLoader
|
||||
|
||||
|
||||
def concatenate_rows(row: dict) -> str:
|
||||
"""Combine message information in a readable format ready to be used."""
|
||||
sender = row["sender_name"]
|
||||
text = row["content"]
|
||||
date = datetime.datetime.fromtimestamp(row["timestamp_ms"] / 1000).strftime(
|
||||
"%Y-%m-%d %H:%M:%S"
|
||||
)
|
||||
return f"{sender} on {date}: {text}\n\n"
|
||||
|
||||
|
||||
class FacebookChatLoader(BaseLoader):
|
||||
"""Loader that loads Facebook messages json directory dump."""
|
||||
|
||||
def __init__(self, path: str):
|
||||
"""Initialize with path."""
|
||||
self.file_path = path
|
||||
|
||||
def load(self) -> List[Document]:
|
||||
"""Load documents."""
|
||||
try:
|
||||
import pandas as pd
|
||||
except ImportError:
|
||||
raise ValueError(
|
||||
"pandas is needed for Facebook chat loader, "
|
||||
"please install with `pip install pandas`"
|
||||
)
|
||||
p = Path(self.file_path)
|
||||
|
||||
with open(p, encoding="utf8") as f:
|
||||
d = json.load(f)
|
||||
|
||||
normalized_messages = pd.json_normalize(d["messages"])
|
||||
df_normalized_messages = pd.DataFrame(normalized_messages)
|
||||
|
||||
# Only keep plain text messages
|
||||
# (no services, nor links, hashtags, code, bold ...)
|
||||
df_filtered = df_normalized_messages[
|
||||
(df_normalized_messages.content.apply(lambda x: type(x) == str))
|
||||
]
|
||||
|
||||
df_filtered = df_filtered[["timestamp_ms", "content", "sender_name"]]
|
||||
|
||||
text = df_filtered.apply(concatenate_rows, axis=1).str.cat(sep="")
|
||||
|
||||
metadata = {"source": str(p)}
|
||||
|
||||
return [Document(page_content=text, metadata=metadata)]
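
A minimal usage sketch for the loader above (the import path and the dump file name are assumptions; `pandas` must be installed):

```python
from langchain.document_loaders import FacebookChatLoader  # assumed export path

loader = FacebookChatLoader(path="facebook_chat.json")
docs = loader.load()
print(docs[0].page_content[:200])
```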
|
@ -0,0 +1,53 @@
|
||||
"""Loader that loads GitBook."""
|
||||
from typing import Any, List, Optional
|
||||
|
||||
from langchain.docstore.document import Document
|
||||
from langchain.document_loaders.web_base import WebBaseLoader
|
||||
|
||||
|
||||
class GitbookLoader(WebBaseLoader):
|
||||
"""Load GitBook data.
|
||||
|
||||
1. load from either a single page, or
|
||||
2. load all (relative) paths in the navbar.
|
||||
"""
|
||||
|
||||
def __init__(self, web_page: str, load_all_paths: bool = False):
|
||||
"""Initialize with web page and whether to load all paths."""
|
||||
super().__init__(web_page)
|
||||
self.load_all_paths = load_all_paths
|
||||
|
||||
def load(self) -> List[Document]:
|
||||
"""Fetch text from one single GitBook page."""
|
||||
if self.load_all_paths:
|
||||
soup_info = self.scrape()
|
||||
relative_paths = self._get_paths(soup_info)
|
||||
documents = []
|
||||
for path in relative_paths:
|
||||
url = self.web_path + path
|
||||
print(f"Fetching text from {url}")
|
||||
soup_info = self._scrape(url)
|
||||
documents.append(self._get_document(soup_info, url))
|
||||
return documents
|
||||
else:
|
||||
soup_info = self.scrape()
|
||||
return [self._get_document(soup_info, self.web_path)]
|
||||
|
||||
def _get_document(self, soup: Any, custom_url: Optional[str] = None) -> Document:
|
||||
"""Fetch content from page and return Document."""
|
||||
page_content_raw = soup.find("main")
|
||||
content = page_content_raw.get_text(separator="\n").strip()
|
||||
title_if_exists = page_content_raw.find("h1")
|
||||
title = title_if_exists.text if title_if_exists else ""
|
||||
metadata = {
|
||||
"source": custom_url if custom_url else self.web_path,
|
||||
"title": title,
|
||||
}
|
||||
return Document(page_content=content, metadata=metadata)
|
||||
|
||||
def _get_paths(self, soup: Any) -> List[str]:
|
||||
"""Fetch all relative paths in the navbar."""
|
||||
nav = soup.find("nav")
|
||||
links = nav.findAll("a")
|
||||
# only return relative links
|
||||
return [link.get("href") for link in links if link.get("href")[0] == "/"]
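
A minimal usage sketch for the loader above (the import path and the example URL are assumptions):

```python
from langchain.document_loaders import GitbookLoader  # assumed export path

# Load a single page.
loader = GitbookLoader("https://docs.gitbook.com")
single_page = loader.load()

# Load every relative path linked in the navbar.
all_pages_loader = GitbookLoader("https://docs.gitbook.com", load_all_paths=True)
all_pages = all_pages_loader.load()
print(len(all_pages))
```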
|