From fa5d49f2c133c12e7c7544034a9e79610a549311 Mon Sep 17 00:00:00 2001
From: Bagatur <22008038+baskaryan@users.noreply.github.com>
Date: Tue, 2 Jan 2024 16:47:11 -0500
Subject: [PATCH] docs, experimental[patch], langchain[patch], community[patch]: update storage imports (#15429)

ran

```bash
g grep -l "langchain.vectorstores" | xargs -L 1 sed -i '' "s/langchain\.vectorstores/langchain_community.vectorstores/g"
g grep -l "langchain.document_loaders" | xargs -L 1 sed -i '' "s/langchain\.document_loaders/langchain_community.document_loaders/g"
g grep -l "langchain.chat_loaders" | xargs -L 1 sed -i '' "s/langchain\.chat_loaders/langchain_community.chat_loaders/g"
g grep -l "langchain.document_transformers" | xargs -L 1 sed -i '' "s/langchain\.document_transformers/langchain_community.document_transformers/g"
g grep -l "langchain\.graphs" | xargs -L 1 sed -i '' "s/langchain\.graphs/langchain_community.graphs/g"
g grep -l "langchain\.memory\.chat_message_histories" | xargs -L 1 sed -i '' "s/langchain\.memory\.chat_message_histories/langchain_community.chat_message_histories/g"
gco master libs/langchain/tests/unit_tests/*/test_imports.py
gco master libs/langchain/tests/unit_tests/**/test_public_api.py
```
---
 cookbook/Multi_modal_RAG.ipynb | 4 +-
 cookbook/Multi_modal_RAG_google.ipynb | 4 +-
 cookbook/Semi_Structured_RAG.ipynb | 2 +-
 .../Semi_structured_and_multi_modal_RAG.ipynb | 2 +-
 ...mi_structured_multi_modal_RAG_LLaMA2.ipynb | 2 +-
 cookbook/advanced_rag_eval.ipynb | 4 +-
 cookbook/agent_vectorstore.ipynb | 6 +-
 cookbook/autogpt/autogpt.ipynb | 6 +-
 cookbook/autogpt/marathon_times.ipynb | 2 +-
 cookbook/baby_agi.ipynb | 2 +-
 cookbook/baby_agi_with_agent.ipynb | 2 +-
 cookbook/code-analysis-deeplake.ipynb | 12 +-
 .../custom_agent_with_plugin_retrieval.ipynb | 4 +-
 ...ith_plugin_retrieval_using_plugnplai.ipynb | 4 +-
 .../custom_agent_with_tool_retrieval.ipynb | 4 +-
 .../deeplake_semantic_search_over_chat.ipynb | 2 +-
 cookbook/docugami_xml_kg_rag.ipynb | 2 +-
 ...eractive_simulacra_of_human_behavior.ipynb | 2 +-
 .../hypothetical_document_embeddings.ipynb | 2 +-
 cookbook/multi_modal_RAG_chroma.ipynb | 2 +-
 cookbook/openai_functions_retrieval_qa.ipynb | 6 +-
 .../qianfan_baidu_elasticesearch_RAG.ipynb | 8 +-
 cookbook/rag_fusion.ipynb | 2 +-
 cookbook/sales_agent_with_context.ipynb | 2 +-
 cookbook/self_query_hotel_search.ipynb | 2 +-
 cookbook/sql_db_qa.mdx | 2 +-
 ...tter-the-algorithm-analysis-deeplake.ipynb | 4 +-
 docs/docs/_templates/integration.mdx | 2 +-
 .../cookbook/retrieval.ipynb | 2 +-
 .../expression_language/get_started.ipynb | 2 +-
 .../docs/expression_language/how_to/map.ipynb | 4 +-
 .../how_to/message_history.ipynb | 2 +-
 .../how_to/passthrough.ipynb | 2 +-
 docs/docs/expression_language/interface.ipynb | 2 +-
 .../qa_privacy_protection.ipynb | 2 +-
 .../integrations/callbacks/confident.ipynb | 4 +-
 docs/docs/integrations/callbacks/infino.ipynb | 2 +-
 .../integrations/chat_loaders/discord.ipynb | 6 +-
 .../integrations/chat_loaders/facebook.ipynb | 4 +-
 .../integrations/chat_loaders/gmail.ipynb | 4 +-
 .../integrations/chat_loaders/imessage.ipynb | 6 +-
 .../chat_loaders/langsmith_dataset.ipynb | 2 +-
 .../chat_loaders/langsmith_llm_runs.ipynb | 2 +-
 .../integrations/chat_loaders/slack.ipynb | 6 +-
 .../integrations/chat_loaders/telegram.ipynb | 6 +-
 .../integrations/chat_loaders/wechat.ipynb | 6 +-
 .../integrations/chat_loaders/whatsapp.ipynb | 6 +-
 .../document_loaders/acreom.ipynb | 2 +-
 .../document_loaders/airbyte_cdk.ipynb | 2 +-
 .../document_loaders/airbyte_gong.ipynb | 2 +-
 .../document_loaders/airbyte_hubspot.ipynb | 2 +-
 .../document_loaders/airbyte_json.ipynb | 2 +-
 .../document_loaders/airbyte_salesforce.ipynb | 2 +-
 .../document_loaders/airbyte_shopify.ipynb | 2 +-
 .../document_loaders/airbyte_stripe.ipynb | 2 +-
 .../document_loaders/airbyte_typeform.ipynb | 2 +-
 .../airbyte_zendesk_support.ipynb | 2 +-
 .../document_loaders/airtable.ipynb | 2 +-
 .../alibaba_cloud_maxcompute.ipynb | 2 +-
 .../document_loaders/amazon_textract.ipynb | 4 +-
 .../document_loaders/apify_dataset.ipynb | 8 +-
 .../document_loaders/arcgis.ipynb | 4 +-
 .../integrations/document_loaders/arxiv.ipynb | 2 +-
 .../document_loaders/assemblyai.ipynb | 4 +-
 .../document_loaders/async_chromium.ipynb | 4 +-
 .../document_loaders/async_html.ipynb | 2 +-
 .../document_loaders/aws_s3_directory.ipynb | 2 +-
 .../document_loaders/aws_s3_file.ipynb | 2 +-
 .../document_loaders/azlyrics.ipynb | 2 +-
 .../document_loaders/azure_ai_data.ipynb | 2 +-
 .../azure_blob_storage_container.ipynb | 2 +-
 .../azure_blob_storage_file.ipynb | 2 +-
 .../document_loaders/bibtex.ipynb | 2 +-
 .../document_loaders/bilibili.ipynb | 2 +-
 .../document_loaders/blackboard.ipynb | 2 +-
 .../document_loaders/blockchain.ipynb | 2 +-
 .../document_loaders/brave_search.ipynb | 2 +-
 .../document_loaders/browserless.ipynb | 2 +-
 .../document_loaders/chatgpt_loader.ipynb | 2 +-
 .../college_confidential.ipynb | 2 +-
 .../document_loaders/concurrent.ipynb | 2 +-
 .../document_loaders/confluence.ipynb | 4 +-
 .../document_loaders/conll-u.ipynb | 2 +-
 .../document_loaders/couchbase.ipynb | 2 +-
 .../integrations/document_loaders/csv.ipynb | 4 +-
 .../document_loaders/cube_semantic.ipynb | 2 +-
 .../document_loaders/datadog_logs.ipynb | 2 +-
 .../document_loaders/diffbot.ipynb | 2 +-
 .../document_loaders/discord.ipynb | 2 +-
 .../document_loaders/docugami.ipynb | 10 +-
 .../document_loaders/docusaurus.ipynb | 4 +-
 .../document_loaders/dropbox.ipynb | 2 +-
 .../document_loaders/duckdb.ipynb | 2 +-
 .../integrations/document_loaders/email.ipynb | 4 +-
 .../integrations/document_loaders/epub.ipynb | 2 +-
 .../document_loaders/etherscan.ipynb | 2 +-
 .../document_loaders/evernote.ipynb | 2 +-
 .../document_loaders/facebook_chat.ipynb | 2 +-
 .../integrations/document_loaders/fauna.ipynb | 2 +-
 .../integrations/document_loaders/figma.ipynb | 4 +-
 .../document_loaders/geopandas.ipynb | 4 +-
 .../integrations/document_loaders/git.ipynb | 6 +-
 .../document_loaders/gitbook.ipynb | 2 +-
 .../document_loaders/github.ipynb | 2 +-
 .../document_loaders/google_bigquery.ipynb | 2 +-
 .../google_cloud_storage_directory.ipynb | 2 +-
 .../google_cloud_storage_file.ipynb | 4 +-
 .../document_loaders/google_drive.ipynb | 9 +-
 .../google_speech_to_text.ipynb | 4 +-
 .../document_loaders/grobid.ipynb | 4 +-
 .../document_loaders/gutenberg.ipynb | 2 +-
 .../document_loaders/hacker_news.ipynb | 2 +-
 .../huawei_obs_directory.ipynb | 2 +-
 .../document_loaders/huawei_obs_file.ipynb | 2 +-
 .../hugging_face_dataset.ipynb | 8 +-
 .../document_loaders/ifixit.ipynb | 2 +-
 .../integrations/document_loaders/image.ipynb | 2 +-
 .../document_loaders/image_captions.ipynb | 2 +-
 .../integrations/document_loaders/imsdb.ipynb | 2 +-
 .../integrations/document_loaders/iugu.ipynb | 4 +-
 .../document_loaders/joplin.ipynb | 2 +-
 .../document_loaders/jupyter_notebook.ipynb | 4 +-
 .../document_loaders/lakefs.ipynb | 2 +-
 .../document_loaders/larksuite.ipynb | 2 +-
 .../document_loaders/mastodon.ipynb | 2 +-
 .../document_loaders/mediawikidump.ipynb | 2 +-
 .../document_loaders/merge_doc.ipynb | 6 +-
 .../integrations/document_loaders/mhtml.ipynb | 2 +-
 .../document_loaders/microsoft_excel.ipynb | 2 +-
 .../document_loaders/microsoft_onedrive.ipynb | 8 +-
 .../document_loaders/microsoft_onenote.ipynb | 10 +-
 .../microsoft_powerpoint.ipynb | 2 +-
 .../microsoft_sharepoint.ipynb | 8 +-
 .../document_loaders/microsoft_word.ipynb | 4 +-
 .../document_loaders/modern_treasury.ipynb | 4 +-
 .../document_loaders/mongodb.ipynb | 2 +-
 .../integrations/document_loaders/news.ipynb | 2 +-
 .../document_loaders/notion.ipynb | 2 +-
 .../document_loaders/notiondb.ipynb | 2 +-
 .../document_loaders/nuclia.ipynb | 2 +-
 .../document_loaders/obsidian.ipynb | 2 +-
 .../integrations/document_loaders/odt.ipynb | 2 +-
 .../document_loaders/open_city_data.ipynb | 2 +-
 .../document_loaders/org_mode.ipynb | 2 +-
 .../document_loaders/pandas_dataframe.ipynb | 2 +-
 .../document_loaders/polars_dataframe.ipynb | 2 +-
 .../document_loaders/psychic.ipynb | 6 +-
 .../document_loaders/pubmed.ipynb | 2 +-
 .../document_loaders/pyspark_dataframe.ipynb | 2 +-
 .../integrations/document_loaders/quip.ipynb | 2 +-
 .../readthedocs_documentation.ipynb | 2 +-
 .../document_loaders/recursive_url.ipynb | 2 +-
 .../document_loaders/reddit.ipynb | 2 +-
 .../integrations/document_loaders/roam.ipynb | 2 +-
 .../document_loaders/rockset.ipynb | 4 +-
 .../document_loaders/rspace.ipynb | 2 +-
 .../integrations/document_loaders/rss.ipynb | 2 +-
 .../integrations/document_loaders/rst.ipynb | 2 +-
 .../document_loaders/sitemap.ipynb | 2 +-
 .../integrations/document_loaders/slack.ipynb | 2 +-
 .../document_loaders/snowflake.ipynb | 2 +-
 .../document_loaders/source_code.ipynb | 6 +-
 .../document_loaders/spreedly.ipynb | 4 +-
 .../document_loaders/stripe.ipynb | 4 +-
 .../document_loaders/subtitle.ipynb | 2 +-
 .../document_loaders/telegram.ipynb | 5 +-
 .../tencent_cos_directory.ipynb | 2 +-
 .../document_loaders/tencent_cos_file.ipynb | 2 +-
 .../tensorflow_datasets.ipynb | 2 +-
 .../document_loaders/tomarkdown.ipynb | 2 +-
 .../integrations/document_loaders/toml.ipynb | 2 +-
 .../document_loaders/trello.ipynb | 2 +-
 .../integrations/document_loaders/tsv.ipynb | 2 +-
 .../document_loaders/twitter.ipynb | 2 +-
 .../document_loaders/unstructured_file.ipynb | 8 +-
 .../integrations/document_loaders/url.ipynb | 6 +-
 .../document_loaders/weather.ipynb | 2 +-
 .../document_loaders/web_base.ipynb | 4 +-
 .../document_loaders/whatsapp_chat.ipynb | 2 +-
 .../document_loaders/wikipedia.ipynb | 2 +-
 .../integrations/document_loaders/xml.ipynb | 2 +-
 .../document_loaders/xorbits.ipynb | 2 +-
 .../document_loaders/youtube_audio.ipynb | 12 +-
 .../document_loaders/youtube_transcript.ipynb | 4 +-
 .../beautiful_soup.ipynb | 4 +-
 .../document_transformers/docai.ipynb | 4 +-
 .../doctran_extract_properties.ipynb | 4 +-
 .../doctran_interrogate_document.ipynb | 4 +-
 .../doctran_translate_document.ipynb | 4 +-
 .../google_translate.ipynb | 4 +-
 .../document_transformers/html2text.ipynb | 4 +-
 .../nuclia_transformer.ipynb | 6 +-
 .../openai_metadata_tagger.ipynb | 6 +-
 .../integrations/memory/aws_dynamodb.ipynb | 6 +-
 .../memory/rockset_chat_message_history.ipynb | 2 +-
 .../memory/sql_chat_message_history.ipynb | 4 +-
 .../streamlit_chat_message_history.ipynb | 2 +-
 .../upstash_redis_chat_message_history.ipynb | 2 +-
 .../memory/xata_chat_message_history.ipynb | 2 +-
 docs/docs/integrations/platforms/aws.mdx | 6 +-
 docs/docs/integrations/platforms/google.mdx | 28 +-
 .../integrations/platforms/huggingface.mdx | 2 +-
 .../docs/integrations/platforms/microsoft.mdx | 18 +-
 docs/docs/integrations/platforms/openai.mdx | 2 +-
 .../providers/activeloop_deeplake.mdx | 2 +-
 docs/docs/integrations/providers/airbyte.mdx | 2 +-
 docs/docs/integrations/providers/airtable.md | 2 +-
 .../integrations/providers/alibaba_cloud.mdx | 4 +-
 .../integrations/providers/analyticdb.mdx | 2 +-
 docs/docs/integrations/providers/annoy.mdx | 2 +-
 docs/docs/integrations/providers/apify.mdx | 2 +-
 docs/docs/integrations/providers/arangodb.mdx | 2 +-
 docs/docs/integrations/providers/arxiv.mdx | 2 +-
 docs/docs/integrations/providers/astradb.mdx | 4 +-
 docs/docs/integrations/providers/atlas.mdx | 2 +-
 docs/docs/integrations/providers/awadb.md | 2 +-
 docs/docs/integrations/providers/azlyrics.mdx | 2 +-
 docs/docs/integrations/providers/bageldb.mdx | 2 +-
 .../integrations/providers/beautiful_soup.mdx | 2 +-
 docs/docs/integrations/providers/bilibili.mdx | 2 +-
 .../integrations/providers/blackboard.mdx | 2 +-
 .../integrations/providers/brave_search.mdx | 2 +-
 docs/docs/integrations/providers/chroma.mdx | 2 +-
 docs/docs/integrations/providers/clarifai.mdx | 2 +-
 .../integrations/providers/clickhouse.mdx | 2 +-
 .../providers/college_confidential.mdx | 2 +-
 .../integrations/providers/confluence.mdx | 2 +-
 .../integrations/providers/dashvector.mdx | 2 +-
 .../integrations/providers/datadog_logs.mdx | 2 +-
 docs/docs/integrations/providers/diffbot.mdx | 2 +-
 docs/docs/integrations/providers/dingo.mdx | 2 +-
 docs/docs/integrations/providers/discord.mdx | 2 +-
 docs/docs/integrations/providers/docarray.mdx | 4 +-
 docs/docs/integrations/providers/doctran.mdx | 6 +-
 docs/docs/integrations/providers/docugami.mdx | 2 +-
 docs/docs/integrations/providers/duckdb.mdx | 2 +-
 .../integrations/providers/elasticsearch.mdx | 4 +-
 docs/docs/integrations/providers/epsilla.mdx | 2 +-
 docs/docs/integrations/providers/evernote.mdx | 2 +-
 .../integrations/providers/facebook_chat.mdx | 2 +-
 .../integrations/providers/facebook_faiss.mdx | 2 +-
 docs/docs/integrations/providers/figma.mdx | 2 +-
 docs/docs/integrations/providers/git.mdx | 2 +-
 docs/docs/integrations/providers/gitbook.mdx | 2 +-
 docs/docs/integrations/providers/grobid.mdx | 4 +-
 .../docs/integrations/providers/gutenberg.mdx | 2 +-
 .../integrations/providers/hacker_news.mdx | 2 +-
 docs/docs/integrations/providers/hologres.mdx | 2 +-
 .../docs/integrations/providers/html2text.mdx | 2 +-
 docs/docs/integrations/providers/ifixit.mdx | 2 +-
 docs/docs/integrations/providers/imsdb.mdx | 2 +-
 docs/docs/integrations/providers/lancedb.mdx | 2 +-
 docs/docs/integrations/providers/marqo.md | 2 +-
 .../integrations/providers/mediawikidump.mdx | 2 +-
 .../integrations/providers/meilisearch.mdx | 2 +-
 docs/docs/integrations/providers/milvus.mdx | 2 +-
 .../providers/modern_treasury.mdx | 2 +-
 .../integrations/providers/mongodb_atlas.mdx | 2 +-
 docs/docs/integrations/providers/myscale.mdx | 4 +-
 docs/docs/integrations/providers/neo4j.mdx | 6 +-
 docs/docs/integrations/providers/notion.mdx | 4 +-
 docs/docs/integrations/providers/nuclia.mdx | 2 +-
 docs/docs/integrations/providers/obsidian.mdx | 2 +-
 .../integrations/providers/opensearch.mdx | 2 +-
 .../integrations/providers/pg_embedding.mdx | 2 +-
 docs/docs/integrations/providers/pgvector.mdx | 2 +-
 docs/docs/integrations/providers/pinecone.mdx | 2 +-
 docs/docs/integrations/providers/pubmed.md | 2 +-
 docs/docs/integrations/providers/qdrant.mdx | 2 +-
 docs/docs/integrations/providers/reddit.mdx | 2 +-
 docs/docs/integrations/providers/redis.mdx | 2 +-
 docs/docs/integrations/providers/roam.mdx | 2 +-
 docs/docs/integrations/providers/rockset.mdx | 6 +-
 docs/docs/integrations/providers/semadb.mdx | 2 +-
 .../integrations/providers/singlestoredb.mdx | 2 +-
 docs/docs/integrations/providers/sklearn.mdx | 2 +-
 docs/docs/integrations/providers/slack.mdx | 2 +-
 docs/docs/integrations/providers/spreedly.mdx | 2 +-
 .../docs/integrations/providers/starrocks.mdx | 2 +-
 docs/docs/integrations/providers/stripe.mdx | 2 +-
 docs/docs/integrations/providers/supabase.mdx | 2 +-
 docs/docs/integrations/providers/tair.mdx | 2 +-
 docs/docs/integrations/providers/telegram.mdx | 4 +-
 docs/docs/integrations/providers/tencent.mdx | 6 +-
 .../providers/tensorflow_datasets.mdx | 2 +-
 docs/docs/integrations/providers/tigris.mdx | 2 +-
 .../integrations/providers/tomarkdown.mdx | 2 +-
 docs/docs/integrations/providers/trello.mdx | 2 +-
 docs/docs/integrations/providers/twitter.mdx | 2 +-
 .../docs/integrations/providers/typesense.mdx | 2 +-
 .../integrations/providers/unstructured.mdx | 4 +-
 docs/docs/integrations/providers/usearch.mdx | 2 +-
 docs/docs/integrations/providers/vearch.md | 2 +-
 .../integrations/providers/vectara/index.mdx | 2 +-
 .../providers/vectara/vectara_chat.ipynb | 6 +-
 .../providers/vectara/vectara_summary.ipynb | 2 +-
 docs/docs/integrations/providers/weather.mdx | 2 +-
 docs/docs/integrations/providers/weaviate.mdx | 2 +-
 docs/docs/integrations/providers/whatsapp.mdx | 2 +-
 .../docs/integrations/providers/wikipedia.mdx | 2 +-
 docs/docs/integrations/providers/xata.mdx | 2 +-
 docs/docs/integrations/providers/youtube.mdx | 4 +-
 docs/docs/integrations/providers/zep.mdx | 2 +-
 docs/docs/integrations/providers/zilliz.mdx | 2 +-
 .../retrievers/chatgpt-plugin.ipynb | 2 +-
 .../retrievers/cohere-reranker.ipynb | 4 +-
 .../retrievers/fleet_context.ipynb | 10 +-
 .../docs/integrations/retrievers/jaguar.ipynb | 2 +-
 .../retrievers/merger_retriever.ipynb | 12 +-
 .../integrations/retrievers/re_phrase.ipynb | 6 +-
 .../activeloop_deeplake_self_query.ipynb | 2 +-
 .../self_query/chroma_self_query.ipynb | 2 +-
 .../retrievers/self_query/dashvector.ipynb | 2 +-
 .../self_query/elasticsearch_self_query.ipynb | 2 +-
 .../self_query/milvus_self_query.ipynb | 2 +-
 .../retrievers/self_query/mongodb_atlas.ipynb | 2 +-
 .../self_query/myscale_self_query.ipynb | 2 +-
 .../self_query/opensearch_self_query.ipynb | 2 +-
 .../retrievers/self_query/pinecone.ipynb | 2 +-
 .../self_query/qdrant_self_query.ipynb | 2 +-
 .../self_query/redis_self_query.ipynb | 2 +-
 .../self_query/supabase_self_query.ipynb | 2 +-
 .../timescalevector_self_query.ipynb | 2 +-
 .../self_query/vectara_self_query.ipynb | 6 +-
 .../self_query/weaviate_self_query.ipynb | 2 +-
 .../retrievers/singlestoredb.ipynb | 4 +-
 .../text_embedding/nvidia_ai_endpoints.ipynb | 2 +-
 .../toolkits/airbyte_structured_qa.ipynb | 2 +-
 .../document_comparison_toolkit.ipynb | 4 +-
 docs/docs/integrations/tools/apify.ipynb | 4 +-
 .../vectorstores/activeloop_deeplake.ipynb | 6 +-
 .../alibabacloud_opensearch.ipynb | 8 +-
 .../vectorstores/analyticdb.ipynb | 6 +-
 .../integrations/vectorstores/annoy.ipynb | 4 +-
 .../integrations/vectorstores/astradb.ipynb | 6 +-
 .../integrations/vectorstores/atlas.ipynb | 4 +-
 .../integrations/vectorstores/awadb.ipynb | 4 +-
 .../vectorstores/azure_cosmos_db.ipynb | 6 +-
 .../vectorstores/azuresearch.ipynb | 6 +-
 .../integrations/vectorstores/bageldb.ipynb | 4 +-
 .../baiducloud_vector_search.ipynb | 4 +-
 .../integrations/vectorstores/chroma.ipynb | 4 +-
 .../integrations/vectorstores/clarifai.ipynb | 4 +-
 .../vectorstores/clickhouse.ipynb | 10 +-
 .../vectorstores/dashvector.ipynb | 6 +-
 .../databricks_vector_search.ipynb | 4 +-
 .../integrations/vectorstores/dingo.ipynb | 14 +-
 .../vectorstores/docarray_hnsw.ipynb | 6 +-
 .../vectorstores/docarray_in_memory.ipynb | 6 +-
 .../vectorstores/elasticsearch.ipynb | 20 +-
 .../integrations/vectorstores/epsilla.ipynb | 6 +-
 .../integrations/vectorstores/faiss.ipynb | 4 +-
 .../vectorstores/faiss_async.ipynb | 4 +-
 .../google_vertex_ai_vector_search.ipynb | 2 +-
 .../integrations/vectorstores/hippo.ipynb | 6 +-
 .../integrations/vectorstores/hologres.ipynb | 6 +-
 .../integrations/vectorstores/jaguar.ipynb | 2 +-
 .../integrations/vectorstores/lancedb.ipynb | 6 +-
 .../integrations/vectorstores/llm_rails.ipynb | 4 +-
 .../integrations/vectorstores/marqo.ipynb | 6 +-
 .../vectorstores/meilisearch.ipynb | 6 +-
 .../integrations/vectorstores/milvus.ipynb | 8 +-
 .../vectorstores/momento_vector_index.ipynb | 6 +-
 .../vectorstores/mongodb_atlas.ipynb | 6 +-
 .../integrations/vectorstores/myscale.ipynb | 14 +-
 .../vectorstores/neo4jvector.ipynb | 6 +-
 .../integrations/vectorstores/nucliadb.ipynb | 4 +-
 .../vectorstores/opensearch.ipynb | 8 +-
 .../vectorstores/pgembedding.ipynb | 8 +-
 .../vectorstores/pgvecto_rs.ipynb | 6 +-
 .../integrations/vectorstores/pgvector.ipynb | 8 +-
 .../integrations/vectorstores/pinecone.ipynb | 8 +-
 .../integrations/vectorstores/qdrant.ipynb | 12 +-
 .../integrations/vectorstores/redis.ipynb | 10 +-
 .../integrations/vectorstores/rockset.ipynb | 4 +-
 .../integrations/vectorstores/scann.ipynb | 4 +-
 .../integrations/vectorstores/semadb.ipynb | 6 +-
 .../vectorstores/singlestoredb.ipynb | 6 +-
 .../integrations/vectorstores/sklearn.ipynb | 4 +-
 .../integrations/vectorstores/sqlitevss.ipynb | 8 +-
 .../integrations/vectorstores/starrocks.ipynb | 9 +-
 .../integrations/vectorstores/supabase.ipynb | 4 +-
 .../docs/integrations/vectorstores/tair.ipynb | 6 +-
 .../vectorstores/tencentvectordb.ipynb | 8 +-
 .../integrations/vectorstores/tigris.ipynb | 6 +-
 .../integrations/vectorstores/tiledb.ipynb | 4 +-
 .../vectorstores/timescalevector.ipynb | 10 +-
 .../integrations/vectorstores/typesense.ipynb | 6 +-
 .../integrations/vectorstores/usearch.ipynb | 8 +-
 .../docs/integrations/vectorstores/vald.ipynb | 8 +-
 .../integrations/vectorstores/vearch.ipynb | 4 +-
 .../integrations/vectorstores/vectara.ipynb | 8 +-
 .../integrations/vectorstores/vespa.ipynb | 4 +-
 .../integrations/vectorstores/weaviate.ipynb | 10 +-
 .../docs/integrations/vectorstores/xata.ipynb | 6 +-
 .../vectorstores/yellowbrick.ipynb | 2 +-
 docs/docs/integrations/vectorstores/zep.ipynb | 6 +-
 .../integrations/vectorstores/zilliz.ipynb | 8 +-
 .../agents/how_to/agent_structured.ipynb | 6 +-
 docs/docs/modules/agents/quick_start.ipynb | 2 +-
 .../modules/chains/foundational/router.ipynb | 4 +-
 .../data_connection/document_loaders/csv.mdx | 2 +-
 .../document_loaders/file_directory.mdx | 6 +-
 .../data_connection/document_loaders/html.mdx | 4 +-
 .../document_loaders/index.mdx | 2 +-
 .../data_connection/document_loaders/json.mdx | 2 +-
 .../document_loaders/markdown.mdx | 2 +-
 .../data_connection/document_loaders/pdf.mdx | 24 +-
 .../modules/data_connection/indexing.ipynb | 8 +-
 .../retrievers/MultiQueryRetriever.ipynb | 4 +-
 .../retrievers/contextual_compression.ipynb | 6 +-
 .../data_connection/retrievers/ensemble.ipynb | 4 +-
 .../retrievers/long_context_reorder.ipynb | 6 +-
 .../retrievers/multi_vector.ipynb | 6 +-
 .../parent_document_retriever.ipynb | 6 +-
 .../retrievers/self_query.ipynb | 2 +-
 .../time_weighted_vectorstore.ipynb | 4 +-
 .../retrievers/vectorstore.ipynb | 4 +-
 .../text_embedding/caching_embeddings.ipynb | 4 +-
 .../data_connection/vectorstores/index.mdx | 14 +-
 .../adding_memory_chain_multiple_inputs.ipynb | 4 +-
 .../memory/agent_with_memory_in_db.ipynb | 2 +-
 .../types/vectorstore_retriever_memory.mdx | 2 +-
 .../prompts/example_selector_types/mmr.ipynb | 2 +-
 .../example_selector_types/similarity.ipynb | 2 +-
 .../model_io/prompts/few_shot_examples.ipynb | 2 +-
 .../prompts/few_shot_examples_chat.ipynb | 4 +-
 docs/docs/use_cases/chatbots.ipynb | 4 +-
 docs/docs/use_cases/code_understanding.ipynb | 10 +-
 .../graph/diffbot_graphtransformer.ipynb | 4 +-
 .../use_cases/graph/graph_arangodb_qa.ipynb | 2 +-
 .../use_cases/graph/graph_cypher_qa.ipynb | 4 +-
 .../use_cases/graph/graph_falkordb_qa.ipynb | 4 +-
 .../use_cases/graph/graph_hugegraph_qa.ipynb | 4 +-
 docs/docs/use_cases/graph/graph_kuzu_qa.ipynb | 4 +-
 .../use_cases/graph/graph_memgraph_qa.ipynb | 4 +-
 .../use_cases/graph/graph_nebula_qa.ipynb | 4 +-
 .../use_cases/graph/graph_sparql_qa.ipynb | 4 +-
 .../use_cases/graph/neptune_cypher_qa.ipynb | 2 +-
 docs/docs/use_cases/qa_structured/sql.ipynb | 4 +-
 .../question_answering/per_user.ipynb | 4 +-
 docs/docs/use_cases/summarization.ipynb | 2 +-
 docs/docs/use_cases/web_scraping.ipynb | 14 +-
 .../document_loaders/parsers/pdf.py | 6 +-
 .../vectorstores/chroma.py | 4 +-
 .../vectorstores/redis/base.py | 2 +-
 .../vectorstores/surrealdb.py | 2 +-
 .../vectorstores/vectara.py | 6 +-
 libs/core/langchain_core/runnables/history.py | 2 +-
 .../langchain_experimental/cpal/models.py | 2 +-
 .../graph_transformers/diffbot.py | 2 +-
 libs/langchain/langchain/__init__.py | 10 +-
 .../langchain/chains/graph_qa/arangodb.py | 2 +-
 .../langchain/chains/graph_qa/base.py | 2 +-
 .../langchain/chains/graph_qa/cypher.py | 2 +-
 .../langchain/chains/graph_qa/falkordb.py | 2 +-
 .../langchain/chains/graph_qa/hugegraph.py | 2 +-
 .../langchain/chains/graph_qa/kuzu.py | 2 +-
 .../langchain/chains/graph_qa/nebulagraph.py | 2 +-
 .../chains/graph_qa/neptune_cypher.py | 2 +-
 .../langchain/chains/graph_qa/sparql.py | 2 +-
 .../langchain/chains/retrieval_qa/base.py | 2 +-
 .../langchain/document_loaders/__init__.py | 287 ++++++++++--------
 .../document_loaders/blob_loaders/__init__.py | 10 +-
 .../document_loaders/parsers/__init__.py | 12 +-
 .../document_loaders/parsers/html/__init__.py | 2 +-
 .../parsers/language/__init__.py | 4 +-
 .../document_transformers/__init__.py | 32 +-
 libs/langchain/langchain/graphs/__init__.py | 20 +-
 libs/langchain/langchain/indexes/_api.py | 2 +-
 libs/langchain/langchain/indexes/graph.py | 2 +-
 .../prompts/knowledge_triplet_extraction.py | 2 +-
 .../langchain/indexes/vectorstore.py | 4 +-
 libs/langchain/langchain/memory/__init__.py | 13 +-
 .../langchain/langchain/memory/chat_memory.py | 2 +-
 .../memory/chat_message_histories/__init__.py | 48 +--
 libs/langchain/langchain/memory/kg.py | 8 +-
 libs/langchain/langchain/memory/zep_memory.py | 3 +-
 .../document_compressors/embeddings_filter.py | 8 +-
 .../retrievers/parent_document_retriever.py | 2 +-
 .../langchain/retrievers/self_query/base.py | 34 +--
 .../langchain/retrievers/self_query/redis.py | 21 +-
 .../langchain/retrievers/web_research.py | 4 +-
 .../langchain/vectorstores/__init__.py | 146 ++++-----
 .../vectorstores/docarray/__init__.py | 4 +-
 .../chains/test_graph_database.py | 2 +-
 .../chains/test_graph_database_arangodb.py | 4 +-
 .../chains/test_graph_database_sparql.py | 2 +-
 .../chains/test_retrieval_qa.py | 4 +-
 .../chains/test_retrieval_qa_with_sources.py | 4 +-
 .../integration_tests/memory/test_astradb.py | 6 +-
 .../memory/test_cassandra.py | 6 +-
 .../memory/test_cosmos_db.py | 2 +-
 .../memory/test_elasticsearch.py | 2 +-
 .../memory/test_firestore.py | 2 +-
 .../integration_tests/memory/test_momento.py | 2 +-
 .../integration_tests/memory/test_mongodb.py | 2 +-
 .../integration_tests/memory/test_neo4j.py | 2 +-
 .../integration_tests/memory/test_redis.py | 2 +-
 .../integration_tests/memory/test_rockset.py | 2 +-
 .../memory/test_upstash_redis.py | 6 +-
 .../integration_tests/memory/test_xata.py | 2 +-
 .../document_compressors/test_base.py | 2 +-
 .../test_embeddings_filter.py | 6 +-
 .../retrievers/test_contextual_compression.py | 2 +-
 .../retrievers/test_merger_retriever.py | 2 +-
 .../test_document_transformers.py | 7 +-
 .../test_long_context_reorder.py | 7 +-
 .../test_nuclia_transformer.py | 4 +-
 .../test_pdf_pagesplitter.py | 5 +-
 .../tests/unit_tests/chains/test_graph_qa.py | 4 +-
 .../unit_tests/document_loaders/test_base.py | 5 +-
 .../tests/unit_tests/indexes/test_indexing.py | 9 +-
 .../unit_tests/load/test_serializable.py | 2 +-
 .../retrievers/self_query/test_redis.py | 22 +-
 .../tests/unit_tests/test_dependencies.py | 4 +-
 .../unit_tests/test_document_transformers.py | 3 +-
 .../cassandra_entomology_rag/__init__.py | 2 +-
 .../cohere_librarian/blurb_matcher.py | 2 +-
 templates/csv-agent/csv_agent/agent.py | 2 +-
 templates/csv-agent/ingest.py | 4 +-
 .../extraction_anthropic_functions.ipynb | 2 +-
 .../extraction_openai_functions.ipynb | 2 +-
 templates/hyde/hyde/chain.py | 4 +-
 .../mongo-parent-document-retrieval/ingest.py | 4 +-
 .../mongo_parent_document_retrieval/chain.py | 2 +-
 templates/neo4j-advanced-rag/ingest.py | 4 +-
 .../neo4j_advanced_rag/retrievers.py | 2 +-
 templates/neo4j-cypher-ft/ingest.py | 2 +-
 .../neo4j-cypher-ft/neo4j_cypher_ft/chain.py | 2 +-
 templates/neo4j-cypher-memory/ingest.py | 2 +-
 .../neo4j_cypher_memory/chain.py | 2 +-
 templates/neo4j-cypher/ingest.py | 2 +-
 templates/neo4j-cypher/neo4j_cypher/chain.py | 2 +-
 .../neo4j_generation/chain.py | 4 +-
 .../neo4j_generation/utils.py | 4 +-
 templates/neo4j-parent/ingest.py | 6 +-
 templates/neo4j-parent/neo4j_parent/chain.py | 2 +-
 templates/neo4j-vector-memory/ingest.py | 4 +-
 .../neo4j_vector_memory/chain.py | 2 +-
 .../neo4j_vector_memory/history.py | 2 +-
 .../agent.py | 2 +-
 .../astradb_entomology_rag/__init__.py | 2 +-
 .../rag-aws-bedrock/rag_aws_bedrock/chain.py | 2 +-
 .../ingest.py | 2 +-
 .../chain.py | 2 +-
 templates/rag-chroma-multi-modal/ingest.py | 2 +-
 .../rag_chroma_multi_modal/chain.py | 2 +-
 .../rag_chroma_private/chain.py | 4 +-
 templates/rag-chroma/rag_chroma/chain.py | 4 +-
 .../rag_codellama_fireworks/chain.py | 6 +-
 templates/rag-conversation-zep/ingest.py | 4 +-
 .../rag_conversation_zep/chain.py | 2 +-
 .../rag_conversation/chain.py | 4 +-
 templates/rag-elasticsearch/ingest.py | 4 +-
 .../rag_elasticsearch/chain.py | 2 +-
 templates/rag-fusion/ingest.py | 2 +-
 templates/rag-fusion/rag_fusion/chain.py | 2 +-
 templates/rag-gemini-multi-modal/ingest.py | 2 +-
 .../rag_gemini_multi_modal/chain.py | 2 +-
 .../rag-gpt-crawler/rag_gpt_crawler/chain.py | 2 +-
 .../rag_matching_engine/chain.py | 2 +-
 .../rag_momento_vector_index/chain.py | 2 +-
 .../rag_momento_vector_index/ingest.py | 4 +-
 templates/rag-mongo/ingest.py | 4 +-
 templates/rag-mongo/rag_mongo/chain.py | 4 +-
 templates/rag-multi-modal-local/ingest.py | 2 +-
 .../rag_multi_modal_local/chain.py | 2 +-
 templates/rag-multi-modal-mv-local/ingest.py | 2 +-
 .../rag_multi_modal_mv_local/chain.py | 2 +-
 .../rag_ollama_multi_query/chain.py | 4 +-
 .../rag-opensearch/rag_opensearch/chain.py | 4 +-
 .../rag_pinecone_multi_query/chain.py | 4 +-
 .../rag_pinecone_rerank/chain.py | 4 +-
 templates/rag-pinecone/rag_pinecone/chain.py | 4 +-
 templates/rag-redis/ingest.py | 4 +-
 templates/rag-redis/rag_redis/chain.py | 2 +-
 templates/rag-self-query/ingest.py | 4 +-
 .../rag-self-query/rag_self_query/chain.py | 2 +-
 .../rag_semi_structured/chain.py | 2 +-
 .../rag_singlestoredb/chain.py | 4 +-
 templates/rag-supabase/rag_supabase/chain.py | 2 +-
 .../rag_timescale_conversation/chain.py | 2 +-
 .../load_sample_dataset.py | 4 +-
 .../rag_timescale_hybrid_search_time/chain.py | 2 +-
 .../load_sample_dataset.py | 4 +-
 .../rag_vectara_multiquery/chain.py | 2 +-
 templates/rag-vectara/rag_vectara/chain.py | 2 +-
 templates/rag-weaviate/rag_weaviate/chain.py | 4 +-
 .../self_query_qdrant/chain.py | 2 +-
 .../self_query_supabase/chain.py | 2 +-
 .../summarize_anthropic.ipynb | 4 +-
 592 files changed, 1411 insertions(+), 1326 deletions(-)

diff --git a/cookbook/Multi_modal_RAG.ipynb b/cookbook/Multi_modal_RAG.ipynb
index d185215be8..c2c12ef87e 100644
--- a/cookbook/Multi_modal_RAG.ipynb
+++ b/cookbook/Multi_modal_RAG.ipynb
@@ -101,7 +101,7 @@
  "If you want to use the provided folder, then simply opt for a [pdf loader](https://python.langchain.com/docs/modules/data_connection/document_loaders/pdf) for the document:\n",
  "\n",
  "```\n",
- "from langchain.document_loaders import PyPDFLoader\n",
+ "from langchain_community.document_loaders import PyPDFLoader\n",
  "loader = PyPDFLoader(path + fname)\n",
  "docs = loader.load()\n",
  "tables = [] # Ignore w/ basic pdf loader\n",
@@ -355,8 +355,8 @@
  "\n",
  "from langchain.retrievers.multi_vector import MultiVectorRetriever\n",
  "from langchain.storage import InMemoryStore\n",
- "from langchain.vectorstores import Chroma\n",
  "from langchain_community.embeddings import OpenAIEmbeddings\n",
+ "from langchain_community.vectorstores import Chroma\n",
  "from langchain_core.documents import Document\n",
  "\n",
  "\n",
diff --git a/cookbook/Multi_modal_RAG_google.ipynb b/cookbook/Multi_modal_RAG_google.ipynb
index 8c8ea6dd71..e2b88b5317 100644
--- a/cookbook/Multi_modal_RAG_google.ipynb
+++ b/cookbook/Multi_modal_RAG_google.ipynb
@@ -93,7 +93,7 @@
  "metadata": {},
  "outputs": [],
  "source": [
- "from langchain.document_loaders import PyPDFLoader\n",
+ "from langchain_community.document_loaders import PyPDFLoader\n",
  "\n",
  "loader = PyPDFLoader(\"./cj/cj.pdf\")\n",
  "docs = loader.load()\n",
@@ -344,8 +344,8 @@
  "\n",
  "from langchain.retrievers.multi_vector import MultiVectorRetriever\n",
  "from langchain.storage import InMemoryStore\n",
- "from langchain.vectorstores import Chroma\n",
  "from langchain_community.embeddings import VertexAIEmbeddings\n",
+ "from langchain_community.vectorstores import Chroma\n",
  "from langchain_core.documents import Document\n",
  "\n",
  "\n",
diff --git a/cookbook/Semi_Structured_RAG.ipynb b/cookbook/Semi_Structured_RAG.ipynb
index e9615c0c68..0a9117337d 100644
--- a/cookbook/Semi_Structured_RAG.ipynb
+++ b/cookbook/Semi_Structured_RAG.ipynb
@@ -320,8 +320,8 @@
  "\n",
  "from langchain.retrievers.multi_vector import MultiVectorRetriever\n",
  "from langchain.storage import InMemoryStore\n",
- "from langchain.vectorstores import Chroma\n",
  "from langchain_community.embeddings import OpenAIEmbeddings\n",
+ "from langchain_community.vectorstores import Chroma\n",
  "from langchain_core.documents import Document\n",
  "\n",
  "# The vectorstore to use to index the child chunks\n",
diff --git a/cookbook/Semi_structured_and_multi_modal_RAG.ipynb b/cookbook/Semi_structured_and_multi_modal_RAG.ipynb
index e6ba451522..ffcf351d09 100644
--- a/cookbook/Semi_structured_and_multi_modal_RAG.ipynb
+++ b/cookbook/Semi_structured_and_multi_modal_RAG.ipynb
@@ -375,8 +375,8 @@
  "\n",
  "from langchain.retrievers.multi_vector import MultiVectorRetriever\n",
  "from langchain.storage import InMemoryStore\n",
- "from langchain.vectorstores import Chroma\n",
  "from langchain_community.embeddings import OpenAIEmbeddings\n",
+ "from langchain_community.vectorstores import Chroma\n",
  "from langchain_core.documents import Document\n",
  "\n",
  "# The vectorstore to use to index the child chunks\n",
diff --git a/cookbook/Semi_structured_multi_modal_RAG_LLaMA2.ipynb b/cookbook/Semi_structured_multi_modal_RAG_LLaMA2.ipynb
index 7dd9ced8a8..2a57c329bf 100644
--- a/cookbook/Semi_structured_multi_modal_RAG_LLaMA2.ipynb
+++ b/cookbook/Semi_structured_multi_modal_RAG_LLaMA2.ipynb
@@ -378,8 +378,8 @@
  "\n",
  "from langchain.retrievers.multi_vector import MultiVectorRetriever\n",
  "from langchain.storage import InMemoryStore\n",
- "from langchain.vectorstores import Chroma\n",
  "from langchain_community.embeddings import GPT4AllEmbeddings\n",
+ "from langchain_community.vectorstores import Chroma\n",
  "from langchain_core.documents import Document\n",
  "\n",
  "# The vectorstore to use to index the child chunks\n",
diff --git a/cookbook/advanced_rag_eval.ipynb b/cookbook/advanced_rag_eval.ipynb
index 8e286a1735..1f8d84c41b 100644
--- a/cookbook/advanced_rag_eval.ipynb
+++ b/cookbook/advanced_rag_eval.ipynb
@@ -62,7 +62,7 @@
  "path = \"/Users/rlm/Desktop/cpi/\"\n",
  "\n",
  "# Load\n",
- "from langchain.document_loaders import PyPDFLoader\n",
+ "from langchain_community.document_loaders import PyPDFLoader\n",
  "\n",
  "loader = PyPDFLoader(path + \"cpi.pdf\")\n",
  "pdf_pages = loader.load()\n",
@@ -132,8 +132,8 @@
  "metadata": {},
  "outputs": [],
  "source": [
- "from langchain.vectorstores import Chroma\n",
  "from langchain_community.embeddings import OpenAIEmbeddings\n",
+ "from langchain_community.vectorstores import Chroma\n",
  "\n",
  "baseline = Chroma.from_texts(\n",
  "    texts=all_splits_pypdf_texts,\n",
diff --git a/cookbook/agent_vectorstore.ipynb b/cookbook/agent_vectorstore.ipynb
index 38f281e6d2..6b9a88e448 100644
--- a/cookbook/agent_vectorstore.ipynb
+++ b/cookbook/agent_vectorstore.ipynb
@@ -29,9 +29,9 @@
  "source": [
  "from langchain.chains import RetrievalQA\n",
  "from langchain.text_splitter import CharacterTextSplitter\n",
- "from langchain.vectorstores import Chroma\n",
  "from langchain_community.embeddings.openai import OpenAIEmbeddings\n",
  "from langchain_community.llms import OpenAI\n",
+ "from langchain_community.vectorstores import Chroma\n",
  "\n",
  "llm = OpenAI(temperature=0)"
 ]
@@ -69,7 +69,7 @@
  }
 ],
 "source": [
- "from langchain.document_loaders import TextLoader\n",
+ "from langchain_community.document_loaders import TextLoader\n",
  "\n",
  "loader = TextLoader(doc_path)\n",
  "documents = loader.load()\n",
@@ -99,7 +99,7 @@
  "metadata": {},
  "outputs": [],
  "source": [
- "from langchain.document_loaders import WebBaseLoader"
+ "from langchain_community.document_loaders import WebBaseLoader"
 ]
 },
 {
diff --git a/cookbook/autogpt/autogpt.ipynb b/cookbook/autogpt/autogpt.ipynb
index 6d1af21ef9..40bafecaec 100644
--- a/cookbook/autogpt/autogpt.ipynb
+++ b/cookbook/autogpt/autogpt.ipynb
@@ -62,8 +62,8 @@
  "outputs": [],
  "source": [
  "from langchain.docstore import InMemoryDocstore\n",
- "from langchain.vectorstores import FAISS\n",
- "from langchain_community.embeddings import OpenAIEmbeddings"
+ "from langchain_community.embeddings import OpenAIEmbeddings\n",
+ "from langchain_community.vectorstores import FAISS"
 ]
 },
 {
@@ -167,7 +167,7 @@
  },
  "outputs": [],
  "source": [
- "from langchain.memory.chat_message_histories import FileChatMessageHistory\n",
+ "from langchain_community.chat_message_histories import FileChatMessageHistory\n",
  "\n",
  "agent = AutoGPT.from_llm_and_tools(\n",
  "    ai_name=\"Tom\",\n",
diff --git a/cookbook/autogpt/marathon_times.ipynb b/cookbook/autogpt/marathon_times.ipynb
index 9141a2c312..d1f616f4d7 100644
--- a/cookbook/autogpt/marathon_times.ipynb
+++ b/cookbook/autogpt/marathon_times.ipynb
@@ -311,8 +311,8 @@
  "# Memory\n",
  "import faiss\n",
  "from langchain.docstore import InMemoryDocstore\n",
- "from langchain.vectorstores import FAISS\n",
  "from langchain_community.embeddings import OpenAIEmbeddings\n",
+ "from langchain_community.vectorstores import FAISS\n",
  "\n",
  "embeddings_model = OpenAIEmbeddings()\n",
  "embedding_size = 1536\n",
diff --git a/cookbook/baby_agi.ipynb b/cookbook/baby_agi.ipynb
index de8ba80e70..9583eadba6 100644
--- a/cookbook/baby_agi.ipynb
+++ b/cookbook/baby_agi.ipynb
@@ -54,7 +54,7 @@
  "outputs": [],
  "source": [
  "from langchain.docstore import InMemoryDocstore\n",
- "from langchain.vectorstores import FAISS"
+ "from langchain_community.vectorstores import FAISS"
 ]
 },
 {
diff --git a/cookbook/baby_agi_with_agent.ipynb b/cookbook/baby_agi_with_agent.ipynb
index 8d4f13c1b4..5f55b1ce7d 100644
--- a/cookbook/baby_agi_with_agent.ipynb
+++ b/cookbook/baby_agi_with_agent.ipynb
@@ -63,7 +63,7 @@
  "%pip install faiss-cpu > /dev/null\n",
  "%pip install google-search-results > /dev/null\n",
  "from langchain.docstore import InMemoryDocstore\n",
- "from langchain.vectorstores import FAISS"
+ "from langchain_community.vectorstores import FAISS"
 ]
 },
 {
diff --git a/cookbook/code-analysis-deeplake.ipynb b/cookbook/code-analysis-deeplake.ipynb
index 65c5babe7e..4b5ea3ae4c 100644
--- a/cookbook/code-analysis-deeplake.ipynb
+++ b/cookbook/code-analysis-deeplake.ipynb
@@ -23,9 +23,9 @@
  "metadata": {},
  "source": [
  "1. Prepare data:\n",
- " 1. Upload all python project files using the `langchain.document_loaders.TextLoader`. We will call these files the **documents**.\n",
+ " 1. Upload all python project files using the `langchain_community.document_loaders.TextLoader`. We will call these files the **documents**.\n",
  " 2. Split all documents to chunks using the `langchain.text_splitter.CharacterTextSplitter`.\n",
- " 3. Embed chunks and upload them into the DeepLake using `langchain.embeddings.openai.OpenAIEmbeddings` and `langchain.vectorstores.DeepLake`\n",
+ " 3. Embed chunks and upload them into the DeepLake using `langchain.embeddings.openai.OpenAIEmbeddings` and `langchain_community.vectorstores.DeepLake`\n",
  "2. Question-Answering:\n",
  " 1. Build a chain from `langchain.chat_models.ChatOpenAI` and `langchain.chains.ConversationalRetrievalChain`\n",
  " 2. Prepare questions.\n",
@@ -166,7 +166,7 @@
  }
 ],
 "source": [
- "from langchain.document_loaders import TextLoader\n",
+ "from langchain_community.document_loaders import TextLoader\n",
  "\n",
  "root_dir = \"../../../../../../libs\"\n",
  "\n",
@@ -706,7 +706,7 @@
 {
 "data": {
 "text/plain": [
- ""
+ ""
 ]
 },
 "execution_count": 15,
@@ -715,7 +715,7 @@
  }
 ],
 "source": [
- "from langchain.vectorstores import DeepLake\n",
+ "from langchain_community.vectorstores import DeepLake\n",
  "\n",
  "username = \"\"\n",
  "\n",
@@ -740,7 +740,7 @@
  "metadata": {},
  "outputs": [],
  "source": [
- "# from langchain.vectorstores import DeepLake\n",
+ "# from langchain_community.vectorstores import DeepLake\n",
  "\n",
  "# db = DeepLake.from_documents(\n",
  "#    texts, embeddings, dataset_path=f\"hub://{}/langchain-code\", runtime={\"tensor_db\": True}\n",
diff --git a/cookbook/custom_agent_with_plugin_retrieval.ipynb b/cookbook/custom_agent_with_plugin_retrieval.ipynb
index 8113d5fcd7..d8795d7631 100644
--- a/cookbook/custom_agent_with_plugin_retrieval.ipynb
+++ b/cookbook/custom_agent_with_plugin_retrieval.ipynb
@@ -115,8 +115,8 @@
  "outputs": [],
  "source": [
  "from langchain.schema import Document\n",
- "from langchain.vectorstores import FAISS\n",
- "from langchain_community.embeddings import OpenAIEmbeddings"
+ "from langchain_community.embeddings import OpenAIEmbeddings\n",
+ "from langchain_community.vectorstores import FAISS"
 ]
 },
 {
diff --git a/cookbook/custom_agent_with_plugin_retrieval_using_plugnplai.ipynb b/cookbook/custom_agent_with_plugin_retrieval_using_plugnplai.ipynb
index ece5f08199..39b677bb81 100644
--- a/cookbook/custom_agent_with_plugin_retrieval_using_plugnplai.ipynb
+++ b/cookbook/custom_agent_with_plugin_retrieval_using_plugnplai.ipynb
@@ -139,8 +139,8 @@
  "outputs": [],
  "source": [
  "from langchain.schema import Document\n",
- "from langchain.vectorstores import FAISS\n",
- "from langchain_community.embeddings import OpenAIEmbeddings"
+ "from langchain_community.embeddings import OpenAIEmbeddings\n",
+ "from langchain_community.vectorstores import FAISS"
 ]
 },
 {
diff --git a/cookbook/custom_agent_with_tool_retrieval.ipynb b/cookbook/custom_agent_with_tool_retrieval.ipynb
index d9259749dd..5e5c08d3f3 100644
--- a/cookbook/custom_agent_with_tool_retrieval.ipynb
+++ b/cookbook/custom_agent_with_tool_retrieval.ipynb
@@ -104,8 +104,8 @@
  "outputs": [],
  "source": [
  "from langchain.schema import Document\n",
- "from langchain.vectorstores import FAISS\n",
- "from langchain_community.embeddings import OpenAIEmbeddings"
+ "from langchain_community.embeddings import OpenAIEmbeddings\n",
+ "from langchain_community.vectorstores import FAISS"
 ]
 },
 {
diff --git a/cookbook/deeplake_semantic_search_over_chat.ipynb b/cookbook/deeplake_semantic_search_over_chat.ipynb
index 6c146cfc06..042cdf7399 100644
--- a/cookbook/deeplake_semantic_search_over_chat.ipynb
+++ b/cookbook/deeplake_semantic_search_over_chat.ipynb
@@ -56,9 +56,9 @@
  "    CharacterTextSplitter,\n",
  "    RecursiveCharacterTextSplitter,\n",
  ")\n",
- "from langchain.vectorstores import DeepLake\n",
  "from langchain_community.embeddings.openai import OpenAIEmbeddings\n",
  "from langchain_community.llms import OpenAI\n",
+ "from langchain_community.vectorstores import DeepLake\n",
  "\n",
  "os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"OpenAI API Key:\")\n",
  "activeloop_token = getpass.getpass(\"Activeloop Token:\")\n",
diff --git a/cookbook/docugami_xml_kg_rag.ipynb b/cookbook/docugami_xml_kg_rag.ipynb
index 6610e82d18..2a9837ddec 100644
--- a/cookbook/docugami_xml_kg_rag.ipynb
+++ b/cookbook/docugami_xml_kg_rag.ipynb
@@ -547,8 +547,8 @@
  "\n",
  "from langchain.retrievers.multi_vector import MultiVectorRetriever\n",
  "from langchain.storage import InMemoryStore\n",
- "from langchain.vectorstores.chroma import Chroma\n",
  "from langchain_community.embeddings import OpenAIEmbeddings\n",
+ "from langchain_community.vectorstores.chroma import Chroma\n",
  "from langchain_core.documents import Document\n",
  "\n",
  "\n",
diff --git a/cookbook/generative_agents_interactive_simulacra_of_human_behavior.ipynb b/cookbook/generative_agents_interactive_simulacra_of_human_behavior.ipynb
index c28ec71446..f7570fd7f2 100644
--- a/cookbook/generative_agents_interactive_simulacra_of_human_behavior.ipynb
+++ b/cookbook/generative_agents_interactive_simulacra_of_human_behavior.ipynb
@@ -49,9 +49,9 @@
  "\n",
  "from langchain.docstore import InMemoryDocstore\n",
  "from langchain.retrievers import TimeWeightedVectorStoreRetriever\n",
- "from langchain.vectorstores import FAISS\n",
  "from langchain_community.chat_models import ChatOpenAI\n",
  "from langchain_community.embeddings import OpenAIEmbeddings\n",
+ "from langchain_community.vectorstores import FAISS\n",
  "from termcolor import colored"
 ]
 },
diff --git a/cookbook/hypothetical_document_embeddings.ipynb b/cookbook/hypothetical_document_embeddings.ipynb
index b729675dd1..ea997869ad 100644
--- a/cookbook/hypothetical_document_embeddings.ipynb
+++ b/cookbook/hypothetical_document_embeddings.ipynb
@@ -172,7 +172,7 @@
  "outputs": [],
  "source": [
  "from langchain.text_splitter import CharacterTextSplitter\n",
- "from langchain.vectorstores import Chroma\n",
+ "from langchain_community.vectorstores import Chroma\n",
  "\n",
  "with open(\"../../state_of_the_union.txt\") as f:\n",
  "    state_of_the_union = f.read()\n",
diff --git a/cookbook/multi_modal_RAG_chroma.ipynb b/cookbook/multi_modal_RAG_chroma.ipynb
index c4305d64ae..17d49ffe8a 100644
--- a/cookbook/multi_modal_RAG_chroma.ipynb
+++ b/cookbook/multi_modal_RAG_chroma.ipynb
@@ -187,7 +187,7 @@
  "\n",
  "import chromadb\n",
  "import numpy as np\n",
- "from langchain.vectorstores import Chroma\n",
+ "from langchain_community.vectorstores import Chroma\n",
  "from langchain_experimental.open_clip import OpenCLIPEmbeddings\n",
  "from PIL import Image as _PILImage\n",
  "\n",
diff --git a/cookbook/openai_functions_retrieval_qa.ipynb b/cookbook/openai_functions_retrieval_qa.ipynb
index 694ae74793..c214377e79 100644
--- a/cookbook/openai_functions_retrieval_qa.ipynb
+++ b/cookbook/openai_functions_retrieval_qa.ipynb
@@ -20,10 +20,10 @@
  "outputs": [],
  "source": [
  "from langchain.chains import RetrievalQA\n",
- "from langchain.document_loaders import TextLoader\n",
  "from langchain.text_splitter import CharacterTextSplitter\n",
- "from langchain.vectorstores import Chroma\n",
- "from langchain_community.embeddings.openai import OpenAIEmbeddings"
+ "from langchain_community.document_loaders import TextLoader\n",
+ "from langchain_community.embeddings.openai import OpenAIEmbeddings\n",
+ "from langchain_community.vectorstores import Chroma"
 ]
 },
 {
diff --git a/cookbook/qianfan_baidu_elasticesearch_RAG.ipynb b/cookbook/qianfan_baidu_elasticesearch_RAG.ipynb
index 2446c50a46..082c12eacf 100644
--- a/cookbook/qianfan_baidu_elasticesearch_RAG.ipynb
+++ b/cookbook/qianfan_baidu_elasticesearch_RAG.ipynb
@@ -59,11 +59,13 @@
  "from baidubce.auth.bce_credentials import BceCredentials\n",
  "from baidubce.bce_client_configuration import BceClientConfiguration\n",
  "from langchain.chains.retrieval_qa import RetrievalQA\n",
- "from langchain.document_loaders.baiducloud_bos_directory import BaiduBOSDirectoryLoader\n",
  "from langchain.text_splitter import RecursiveCharacterTextSplitter\n",
- "from langchain.vectorstores import BESVectorStore\n",
+ "from langchain_community.document_loaders.baiducloud_bos_directory import (\n",
+ "    BaiduBOSDirectoryLoader,\n",
+ ")\n",
  "from langchain_community.embeddings.huggingface import HuggingFaceEmbeddings\n",
- "from langchain_community.llms.baidu_qianfan_endpoint import QianfanLLMEndpoint"
+ "from langchain_community.llms.baidu_qianfan_endpoint import QianfanLLMEndpoint\n",
+ "from langchain_community.vectorstores import BESVectorStore"
 ]
 },
 {
diff --git a/cookbook/rag_fusion.ipynb b/cookbook/rag_fusion.ipynb
index 99d247128e..a340e97ed0 100644
--- a/cookbook/rag_fusion.ipynb
+++ b/cookbook/rag_fusion.ipynb
@@ -30,8 +30,8 @@
  "outputs": [],
  "source": [
  "import pinecone\n",
- "from langchain.vectorstores import Pinecone\n",
  "from langchain_community.embeddings import OpenAIEmbeddings\n",
+ "from langchain_community.vectorstores import Pinecone\n",
  "\n",
  "pinecone.init(api_key=\"...\", environment=\"...\")"
 ]
diff --git a/cookbook/sales_agent_with_context.ipynb b/cookbook/sales_agent_with_context.ipynb
index 48baa6f75c..11cb7afd8b 100644
--- a/cookbook/sales_agent_with_context.ipynb
+++ b/cookbook/sales_agent_with_context.ipynb
@@ -53,10 +53,10 @@
  "from langchain.prompts.base import StringPromptTemplate\n",
  "from langchain.schema import AgentAction, AgentFinish\n",
  "from langchain.text_splitter import CharacterTextSplitter\n",
- "from langchain.vectorstores import Chroma\n",
  "from langchain_community.chat_models import ChatOpenAI\n",
  "from langchain_community.embeddings.openai import OpenAIEmbeddings\n",
  "from langchain_community.llms import BaseLLM, OpenAI\n",
+ "from langchain_community.vectorstores import Chroma\n",
  "from pydantic import BaseModel, Field"
 ]
 },
diff --git a/cookbook/self_query_hotel_search.ipynb b/cookbook/self_query_hotel_search.ipynb
index a3b2f20d2f..a349bd7f9b 100644
--- a/cookbook/self_query_hotel_search.ipynb
+++ b/cookbook/self_query_hotel_search.ipynb
@@ -1083,8 +1083,8 @@
  "metadata": {},
  "outputs": [],
  "source": [
- "from langchain.vectorstores import ElasticsearchStore\n",
  "from langchain_community.embeddings import OpenAIEmbeddings\n",
+ "from langchain_community.vectorstores import ElasticsearchStore\n",
  "\n",
  "embeddings = OpenAIEmbeddings()"
 ]
diff --git a/cookbook/sql_db_qa.mdx b/cookbook/sql_db_qa.mdx
index edc96480f2..c3299c3888 100644
--- a/cookbook/sql_db_qa.mdx
+++ b/cookbook/sql_db_qa.mdx
@@ -996,7 +996,7 @@ from langchain.prompts import FewShotPromptTemplate, PromptTemplate
 from langchain.chains.sql_database.prompt import _sqlite_prompt, PROMPT_SUFFIX
 from langchain_community.embeddings.huggingface import HuggingFaceEmbeddings
 from langchain.prompts.example_selector.semantic_similarity import SemanticSimilarityExampleSelector
-from langchain.vectorstores import Chroma
+from langchain_community.vectorstores import Chroma
 
 example_prompt = PromptTemplate(
     input_variables=["table_info", "input", "sql_cmd", "sql_result", "answer"],
diff --git a/cookbook/twitter-the-algorithm-analysis-deeplake.ipynb b/cookbook/twitter-the-algorithm-analysis-deeplake.ipynb
index 23e07c78f4..28942b32b4 100644
--- a/cookbook/twitter-the-algorithm-analysis-deeplake.ipynb
+++ b/cookbook/twitter-the-algorithm-analysis-deeplake.ipynb
@@ -37,8 +37,8 @@
  "import getpass\n",
  "import os\n",
  "\n",
- "from langchain.vectorstores import DeepLake\n",
  "from langchain_community.embeddings.openai import OpenAIEmbeddings\n",
+ "from langchain_community.vectorstores import DeepLake\n",
  "\n",
  "os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"OpenAI API Key:\")\n",
  "activeloop_token = getpass.getpass(\"Activeloop Token:\")\n",
@@ -110,7 +110,7 @@
  "source": [
  "import os\n",
  "\n",
- "from langchain.document_loaders import TextLoader\n",
+ "from langchain_community.document_loaders import TextLoader\n",
  "\n",
  "root_dir = \"./the-algorithm\"\n",
  "docs = []\n",
diff --git a/docs/docs/_templates/integration.mdx b/docs/docs/_templates/integration.mdx
index 1ef74269cd..234c8cc09e 100644
--- a/docs/docs/_templates/integration.mdx
+++ b/docs/docs/_templates/integration.mdx
@@ -56,5 +56,5 @@ from langchain_community.chat_models import integration_class_REPLACE_ME
 See a [usage example](/docs/integrations/document_loaders/INCLUDE_REAL_NAME).
 
 ```python
-from langchain.document_loaders import integration_class_REPLACE_ME
+from langchain_community.document_loaders import integration_class_REPLACE_ME
 ```
diff --git a/docs/docs/expression_language/cookbook/retrieval.ipynb b/docs/docs/expression_language/cookbook/retrieval.ipynb
index 7bc6acd3e5..6cca1d011a 100644
--- a/docs/docs/expression_language/cookbook/retrieval.ipynb
+++ b/docs/docs/expression_language/cookbook/retrieval.ipynb
@@ -39,9 +39,9 @@
  "from operator import itemgetter\n",
  "\n",
  "from langchain.prompts import ChatPromptTemplate\n",
- "from langchain.vectorstores import FAISS\n",
  "from langchain_community.chat_models import ChatOpenAI\n",
  "from langchain_community.embeddings import OpenAIEmbeddings\n",
+ "from langchain_community.vectorstores import FAISS\n",
  "from langchain_core.output_parsers import StrOutputParser\n",
  "from langchain_core.runnables import RunnableLambda, RunnablePassthrough"
 ]
diff --git a/docs/docs/expression_language/get_started.ipynb b/docs/docs/expression_language/get_started.ipynb
index 63c62052e4..d44947de19 100644
--- a/docs/docs/expression_language/get_started.ipynb
+++ b/docs/docs/expression_language/get_started.ipynb
@@ -325,9 +325,9 @@
  "# pip install langchain docarray tiktoken\n",
  "\n",
  "from langchain.prompts import ChatPromptTemplate\n",
- "from langchain.vectorstores import DocArrayInMemorySearch\n",
  "from langchain_community.chat_models import ChatOpenAI\n",
  "from langchain_community.embeddings import OpenAIEmbeddings\n",
+ "from langchain_community.vectorstores import DocArrayInMemorySearch\n",
  "from langchain_core.output_parsers import StrOutputParser\n",
  "from langchain_core.runnables import RunnableParallel, RunnablePassthrough\n",
  "\n",
diff --git a/docs/docs/expression_language/how_to/map.ipynb b/docs/docs/expression_language/how_to/map.ipynb
index 957f3fc3da..dd3f15aca9 100644
--- a/docs/docs/expression_language/how_to/map.ipynb
+++ b/docs/docs/expression_language/how_to/map.ipynb
@@ -45,9 +45,9 @@
 ],
 "source": [
  "from langchain.prompts import ChatPromptTemplate\n",
- "from langchain.vectorstores import FAISS\n",
  "from langchain_community.chat_models import ChatOpenAI\n",
  "from langchain_community.embeddings import OpenAIEmbeddings\n",
+ "from langchain_community.vectorstores import FAISS\n",
  "from langchain_core.output_parsers import StrOutputParser\n",
  "from langchain_core.runnables import RunnablePassthrough\n",
  "\n",
@@ -129,9 +129,9 @@
  "from operator import itemgetter\n",
  "\n",
  "from langchain.prompts import ChatPromptTemplate\n",
- "from langchain.vectorstores import FAISS\n",
  "from langchain_community.chat_models import ChatOpenAI\n",
  "from langchain_community.embeddings import OpenAIEmbeddings\n",
+ "from langchain_community.vectorstores import FAISS\n",
  "from langchain_core.output_parsers import StrOutputParser\n",
  "from langchain_core.runnables import RunnablePassthrough\n",
  "\n",
diff --git a/docs/docs/expression_language/how_to/message_history.ipynb b/docs/docs/expression_language/how_to/message_history.ipynb
index f18796731a..929c2c53d2 100644
--- a/docs/docs/expression_language/how_to/message_history.ipynb
+++ b/docs/docs/expression_language/how_to/message_history.ipynb
@@ -131,8 +131,8 @@
  "source": [
  "from typing import Optional\n",
  "\n",
- "from langchain.memory.chat_message_histories import RedisChatMessageHistory\n",
  "from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder\n",
+ "from langchain_community.chat_message_histories import RedisChatMessageHistory\n",
  "from langchain_community.chat_models import ChatAnthropic\n",
  "from langchain_core.chat_history import BaseChatMessageHistory\n",
  "from langchain_core.runnables.history import RunnableWithMessageHistory"
diff --git a/docs/docs/expression_language/how_to/passthrough.ipynb b/docs/docs/expression_language/how_to/passthrough.ipynb
index 54801656ea..2399eb338f 100644
--- a/docs/docs/expression_language/how_to/passthrough.ipynb
+++ b/docs/docs/expression_language/how_to/passthrough.ipynb
@@ -98,9 +98,9 @@
 ],
 "source": [
  "from langchain.prompts import ChatPromptTemplate\n",
- "from langchain.vectorstores import FAISS\n",
  "from langchain_community.chat_models import ChatOpenAI\n",
  "from langchain_community.embeddings import OpenAIEmbeddings\n",
+ "from langchain_community.vectorstores import FAISS\n",
  "from langchain_core.output_parsers import StrOutputParser\n",
  "from langchain_core.runnables import RunnablePassthrough\n",
  "\n",
diff --git a/docs/docs/expression_language/interface.ipynb b/docs/docs/expression_language/interface.ipynb
index b58d1811c7..1d0a92c7ec 100644
--- a/docs/docs/expression_language/interface.ipynb
+++ b/docs/docs/expression_language/interface.ipynb
@@ -659,8 +659,8 @@
  }
 ],
 "source": [
- "from langchain.vectorstores import FAISS\n",
  "from langchain_community.embeddings import OpenAIEmbeddings\n",
+ "from langchain_community.vectorstores import FAISS\n",
  "from langchain_core.output_parsers import StrOutputParser\n",
  "from langchain_core.runnables import RunnablePassthrough\n",
  "\n",
diff --git a/docs/docs/guides/privacy/presidio_data_anonymization/qa_privacy_protection.ipynb b/docs/docs/guides/privacy/presidio_data_anonymization/qa_privacy_protection.ipynb
index 6aefe9d341..728ef65cd9 100644
--- a/docs/docs/guides/privacy/presidio_data_anonymization/qa_privacy_protection.ipynb
+++ b/docs/docs/guides/privacy/presidio_data_anonymization/qa_privacy_protection.ipynb
@@ -638,8 +638,8 @@
  "outputs": [],
  "source": [
  "from langchain.text_splitter import RecursiveCharacterTextSplitter\n",
- "from langchain.vectorstores import FAISS\n",
  "from langchain_community.embeddings.openai import OpenAIEmbeddings\n",
+ "from langchain_community.vectorstores import FAISS\n",
  "\n",
  "# 2. Load the data: In our case data's already loaded\n",
  "# 3. Anonymize the data before indexing\n",
diff --git a/docs/docs/integrations/callbacks/confident.ipynb b/docs/docs/integrations/callbacks/confident.ipynb
index 758177dbe7..6ef644b33e 100644
--- a/docs/docs/integrations/callbacks/confident.ipynb
+++ b/docs/docs/integrations/callbacks/confident.ipynb
@@ -215,11 +215,11 @@
  "source": [
  "import requests\n",
  "from langchain.chains import RetrievalQA\n",
- "from langchain.document_loaders import TextLoader\n",
  "from langchain.text_splitter import CharacterTextSplitter\n",
- "from langchain.vectorstores import Chroma\n",
+ "from langchain_community.document_loaders import TextLoader\n",
  "from langchain_community.embeddings.openai import OpenAIEmbeddings\n",
  "from langchain_community.llms import OpenAI\n",
+ "from langchain_community.vectorstores import Chroma\n",
  "\n",
  "text_file_url = \"https://raw.githubusercontent.com/hwchase17/chat-your-data/master/state_of_the_union.txt\"\n",
  "\n",
diff --git a/docs/docs/integrations/callbacks/infino.ipynb b/docs/docs/integrations/callbacks/infino.ipynb
index 03553c2a28..a148c88234 100644
--- a/docs/docs/integrations/callbacks/infino.ipynb
+++ b/docs/docs/integrations/callbacks/infino.ipynb
@@ -316,8 +316,8 @@
  "# os.environ[\"OPENAI_API_KEY\"] = \"YOUR_API_KEY\"\n",
  "\n",
  "from langchain.chains.summarize import load_summarize_chain\n",
- "from langchain.document_loaders import WebBaseLoader\n",
  "from langchain_community.chat_models import ChatOpenAI\n",
+ "from langchain_community.document_loaders import WebBaseLoader\n",
  "\n",
  "# Create callback handler. This logs latency, errors, token usage, prompts, as well as prompt responses to Infino.\n",
  "handler = InfinoCallbackHandler(\n",
diff --git a/docs/docs/integrations/chat_loaders/discord.ipynb b/docs/docs/integrations/chat_loaders/discord.ipynb
index f6e0ef8948..d01c5068a2 100644
--- a/docs/docs/integrations/chat_loaders/discord.ipynb
+++ b/docs/docs/integrations/chat_loaders/discord.ipynb
@@ -79,8 +79,8 @@
  "import re\n",
  "from typing import Iterator, List\n",
  "\n",
- "from langchain.chat_loaders import base as chat_loaders\n",
  "from langchain.schema import BaseMessage, HumanMessage\n",
+ "from langchain_community.chat_loaders import base as chat_loaders\n",
  "\n",
  "logger = logging.getLogger()\n",
  "\n",
@@ -216,8 +216,8 @@
  "source": [
  "from typing import List\n",
  "\n",
- "from langchain.chat_loaders.base import ChatSession\n",
- "from langchain.chat_loaders.utils import (\n",
+ "from langchain_community.chat_loaders.base import ChatSession\n",
+ "from langchain_community.chat_loaders.utils import (\n",
  "    map_ai_messages,\n",
  "    merge_chat_runs,\n",
  ")\n",
diff --git a/docs/docs/integrations/chat_loaders/facebook.ipynb b/docs/docs/integrations/chat_loaders/facebook.ipynb
index 9062b8cc09..e5047cab7c 100644
--- a/docs/docs/integrations/chat_loaders/facebook.ipynb
+++ b/docs/docs/integrations/chat_loaders/facebook.ipynb
@@ -106,7 +106,7 @@
  "metadata": {},
  "outputs": [],
  "source": [
- "from langchain.chat_loaders.facebook_messenger import (\n",
+ "from langchain_community.chat_loaders.facebook_messenger import (\n",
  "    FolderFacebookMessengerChatLoader,\n",
  "    SingleFileFacebookMessengerChatLoader,\n",
  ")"
@@ -201,7 +201,7 @@
  "metadata": {},
  "outputs": [],
  "source": [
- "from langchain.chat_loaders.utils import (\n",
+ "from langchain_community.chat_loaders.utils import (\n",
  "    map_ai_messages,\n",
  "    merge_chat_runs,\n",
  ")"
diff --git a/docs/docs/integrations/chat_loaders/gmail.ipynb b/docs/docs/integrations/chat_loaders/gmail.ipynb
index 914eea401e..22b5ba292c 100644
--- a/docs/docs/integrations/chat_loaders/gmail.ipynb
+++ b/docs/docs/integrations/chat_loaders/gmail.ipynb
@@ -73,7 +73,7 @@
  "metadata": {},
  "outputs": [],
  "source": [
- "from langchain.chat_loaders.gmail import GMailLoader"
+ "from langchain_community.chat_loaders.gmail import GMailLoader"
 ]
 },
 {
@@ -125,7 +125,7 @@
  "metadata": {},
  "outputs": [],
  "source": [
- "from langchain.chat_loaders.utils import (\n",
+ "from langchain_community.chat_loaders.utils import (\n",
  "    map_ai_messages,\n",
  ")"
 ]
diff --git a/docs/docs/integrations/chat_loaders/imessage.ipynb b/docs/docs/integrations/chat_loaders/imessage.ipynb
index b69ff54e70..62963aea12 100644
--- a/docs/docs/integrations/chat_loaders/imessage.ipynb
+++ b/docs/docs/integrations/chat_loaders/imessage.ipynb
@@ -80,7 +80,7 @@
  "metadata": {},
  "outputs": [],
  "source": [
- "from langchain.chat_loaders.imessage import IMessageChatLoader"
+ "from langchain_community.chat_loaders.imessage import IMessageChatLoader"
 ]
 },
 {
@@ -116,8 +116,8 @@
  "source": [
  "from typing import List\n",
  "\n",
- "from langchain.chat_loaders.base import ChatSession\n",
- "from langchain.chat_loaders.utils import (\n",
+ "from langchain_community.chat_loaders.base import ChatSession\n",
+ "from langchain_community.chat_loaders.utils import (\n",
  "    map_ai_messages,\n",
  "    merge_chat_runs,\n",
  ")\n",
diff --git a/docs/docs/integrations/chat_loaders/langsmith_dataset.ipynb b/docs/docs/integrations/chat_loaders/langsmith_dataset.ipynb
index cc7995ba07..6e0fb2310a 100644
--- a/docs/docs/integrations/chat_loaders/langsmith_dataset.ipynb
+++ b/docs/docs/integrations/chat_loaders/langsmith_dataset.ipynb
@@ -128,7 +128,7 @@
  "metadata": {},
  "outputs": [],
  "source": [
- "from langchain.chat_loaders.langsmith import LangSmithDatasetChatLoader\n",
+ "from langchain_community.chat_loaders.langsmith import LangSmithDatasetChatLoader\n",
  "\n",
  "loader = LangSmithDatasetChatLoader(dataset_name=dataset_name)\n",
  "\n",
diff --git a/docs/docs/integrations/chat_loaders/langsmith_llm_runs.ipynb b/docs/docs/integrations/chat_loaders/langsmith_llm_runs.ipynb
index 77349e664d..bc8f9c0c7e 100644
--- a/docs/docs/integrations/chat_loaders/langsmith_llm_runs.ipynb
+++ b/docs/docs/integrations/chat_loaders/langsmith_llm_runs.ipynb
@@ -263,7 +263,7 @@
  "metadata": {},
  "outputs": [],
  "source": [
- "from langchain.chat_loaders.langsmith import LangSmithRunChatLoader\n",
+ "from langchain_community.chat_loaders.langsmith import LangSmithRunChatLoader\n",
  "\n",
  "loader = LangSmithRunChatLoader(runs=llm_runs)\n",
  "\n",
diff --git a/docs/docs/integrations/chat_loaders/slack.ipynb b/docs/docs/integrations/chat_loaders/slack.ipynb
index 0a68c1cd52..bae9ebf886 100644
--- a/docs/docs/integrations/chat_loaders/slack.ipynb
+++ b/docs/docs/integrations/chat_loaders/slack.ipynb
@@ -53,7 +53,7 @@
  "metadata": {},
  "outputs": [],
  "source": [
- "from langchain.chat_loaders.slack import SlackChatLoader"
+ "from langchain_community.chat_loaders.slack import SlackChatLoader"
 ]
 },
 {
@@ -87,8 +87,8 @@
  "source": [
  "from typing import List\n",
  "\n",
- "from langchain.chat_loaders.base import ChatSession\n",
- "from langchain.chat_loaders.utils import (\n",
+ "from langchain_community.chat_loaders.base import ChatSession\n",
+ "from langchain_community.chat_loaders.utils import (\n",
  "    map_ai_messages,\n",
  "    merge_chat_runs,\n",
  ")\n",
diff --git a/docs/docs/integrations/chat_loaders/telegram.ipynb b/docs/docs/integrations/chat_loaders/telegram.ipynb
index d6b908f4af..233eb297cd 100644
--- a/docs/docs/integrations/chat_loaders/telegram.ipynb
+++ b/docs/docs/integrations/chat_loaders/telegram.ipynb
@@ -102,7 +102,7 @@
  "metadata": {},
  "outputs": [],
  "source": [
- "from langchain.chat_loaders.telegram import TelegramChatLoader"
+ "from langchain_community.chat_loaders.telegram import TelegramChatLoader"
 ]
 },
 {
@@ -136,8 +136,8 @@
  "source": [
  "from typing import List\n",
  "\n",
- "from langchain.chat_loaders.base import ChatSession\n",
- "from langchain.chat_loaders.utils import (\n",
+ "from langchain_community.chat_loaders.base import ChatSession\n",
+ "from langchain_community.chat_loaders.utils import (\n",
  "    map_ai_messages,\n",
  "    merge_chat_runs,\n",
  ")\n",
diff --git a/docs/docs/integrations/chat_loaders/wechat.ipynb b/docs/docs/integrations/chat_loaders/wechat.ipynb
index c586b2dc36..db0fd8b000 100644
--- a/docs/docs/integrations/chat_loaders/wechat.ipynb
+++ b/docs/docs/integrations/chat_loaders/wechat.ipynb
@@ -78,8 +78,8 @@
  "import re\n",
  "from typing import Iterator, List\n",
  "\n",
- "from langchain.chat_loaders import base as chat_loaders\n",
  "from langchain.schema import BaseMessage, HumanMessage\n",
+ "from langchain_community.chat_loaders import base as chat_loaders\n",
  "\n",
  "logger = logging.getLogger()\n",
  "\n",
@@ -209,8 +209,8 @@
  "source": [
  "from typing import List\n",
  "\n",
- "from langchain.chat_loaders.base import ChatSession\n",
- "from langchain.chat_loaders.utils import (\n",
+ "from langchain_community.chat_loaders.base import ChatSession\n",
+ "from langchain_community.chat_loaders.utils import (\n",
  "    map_ai_messages,\n",
  "    merge_chat_runs,\n",
  ")\n",
diff --git a/docs/docs/integrations/chat_loaders/whatsapp.ipynb b/docs/docs/integrations/chat_loaders/whatsapp.ipynb
index 9c59486bc5..80215a95f9 100644
--- a/docs/docs/integrations/chat_loaders/whatsapp.ipynb
+++ b/docs/docs/integrations/chat_loaders/whatsapp.ipynb
@@ -74,7 +74,7 @@
  "metadata": {},
  "outputs": [],
  "source": [
- "from langchain.chat_loaders.whatsapp import WhatsAppChatLoader"
+ "from langchain_community.chat_loaders.whatsapp import WhatsAppChatLoader"
 ]
 },
 {
@@ -126,8 +126,8 @@
  "source": [
  "from typing import List\n",
  "\n",
- "from langchain.chat_loaders.base import ChatSession\n",
- "from langchain.chat_loaders.utils import (\n",
+ "from langchain_community.chat_loaders.base import ChatSession\n",
+ "from langchain_community.chat_loaders.utils import (\n",
  "    map_ai_messages,\n",
  "    merge_chat_runs,\n",
  ")\n",
diff --git a/docs/docs/integrations/document_loaders/acreom.ipynb b/docs/docs/integrations/document_loaders/acreom.ipynb
index 756ece6a32..0339337fbf 100644
--- a/docs/docs/integrations/document_loaders/acreom.ipynb
+++ b/docs/docs/integrations/document_loaders/acreom.ipynb
@@ -27,7 +27,7 @@
  "metadata": {},
  "outputs": [],
  "source": [
- "from langchain.document_loaders import AcreomLoader"
+ "from langchain_community.document_loaders import AcreomLoader"
 ]
 },
 {
diff --git a/docs/docs/integrations/document_loaders/airbyte_cdk.ipynb b/docs/docs/integrations/document_loaders/airbyte_cdk.ipynb
index 46f92b2f85..35ec339fb1 100644
--- a/docs/docs/integrations/document_loaders/airbyte_cdk.ipynb
+++ b/docs/docs/integrations/document_loaders/airbyte_cdk.ipynb
@@ -98,7 +98,7 @@
  "metadata": {},
  "outputs": [],
  "source": [
- "from langchain.document_loaders.airbyte import AirbyteCDKLoader\n",
+ "from langchain_community.document_loaders.airbyte import AirbyteCDKLoader\n",
  "from source_github.source import SourceGithub # plug in your own source here\n",
  "\n",
  "config = {\n",
diff --git 
a/docs/docs/integrations/document_loaders/airbyte_gong.ipynb b/docs/docs/integrations/document_loaders/airbyte_gong.ipynb index 00679a764f..764f5f1005 100644 --- a/docs/docs/integrations/document_loaders/airbyte_gong.ipynb +++ b/docs/docs/integrations/document_loaders/airbyte_gong.ipynb @@ -85,7 +85,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders.airbyte import AirbyteGongLoader\n", + "from langchain_community.document_loaders.airbyte import AirbyteGongLoader\n", "\n", "config = {\n", " # your gong configuration\n", diff --git a/docs/docs/integrations/document_loaders/airbyte_hubspot.ipynb b/docs/docs/integrations/document_loaders/airbyte_hubspot.ipynb index 3b5d6c0707..fe7c6ad276 100644 --- a/docs/docs/integrations/document_loaders/airbyte_hubspot.ipynb +++ b/docs/docs/integrations/document_loaders/airbyte_hubspot.ipynb @@ -87,7 +87,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders.airbyte import AirbyteHubspotLoader\n", + "from langchain_community.document_loaders.airbyte import AirbyteHubspotLoader\n", "\n", "config = {\n", " # your hubspot configuration\n", diff --git a/docs/docs/integrations/document_loaders/airbyte_json.ipynb b/docs/docs/integrations/document_loaders/airbyte_json.ipynb index 499916c49b..3847e720ba 100644 --- a/docs/docs/integrations/document_loaders/airbyte_json.ipynb +++ b/docs/docs/integrations/document_loaders/airbyte_json.ipynb @@ -54,7 +54,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders import AirbyteJSONLoader" + "from langchain_community.document_loaders import AirbyteJSONLoader" ] }, { diff --git a/docs/docs/integrations/document_loaders/airbyte_salesforce.ipynb b/docs/docs/integrations/document_loaders/airbyte_salesforce.ipynb index b336995269..6c6cad3f15 100644 --- a/docs/docs/integrations/document_loaders/airbyte_salesforce.ipynb +++ b/docs/docs/integrations/document_loaders/airbyte_salesforce.ipynb @@ -92,7 +92,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders.airbyte import AirbyteSalesforceLoader\n", + "from langchain_community.document_loaders.airbyte import AirbyteSalesforceLoader\n", "\n", "config = {\n", " # your salesforce configuration\n", diff --git a/docs/docs/integrations/document_loaders/airbyte_shopify.ipynb b/docs/docs/integrations/document_loaders/airbyte_shopify.ipynb index 5298ae7231..2a4d34cc30 100644 --- a/docs/docs/integrations/document_loaders/airbyte_shopify.ipynb +++ b/docs/docs/integrations/document_loaders/airbyte_shopify.ipynb @@ -88,7 +88,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders.airbyte import AirbyteShopifyLoader\n", + "from langchain_community.document_loaders.airbyte import AirbyteShopifyLoader\n", "\n", "config = {\n", " # your shopify configuration\n", diff --git a/docs/docs/integrations/document_loaders/airbyte_stripe.ipynb b/docs/docs/integrations/document_loaders/airbyte_stripe.ipynb index 17fde5a1bd..e14819fd63 100644 --- a/docs/docs/integrations/document_loaders/airbyte_stripe.ipynb +++ b/docs/docs/integrations/document_loaders/airbyte_stripe.ipynb @@ -85,7 +85,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders.airbyte import AirbyteStripeLoader\n", + "from langchain_community.document_loaders.airbyte import AirbyteStripeLoader\n", "\n", "config = {\n", " # your stripe configuration\n", diff --git a/docs/docs/integrations/document_loaders/airbyte_typeform.ipynb 
b/docs/docs/integrations/document_loaders/airbyte_typeform.ipynb index ffc01e9b59..04296f348d 100644 --- a/docs/docs/integrations/document_loaders/airbyte_typeform.ipynb +++ b/docs/docs/integrations/document_loaders/airbyte_typeform.ipynb @@ -88,7 +88,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders.airbyte import AirbyteTypeformLoader\n", + "from langchain_community.document_loaders.airbyte import AirbyteTypeformLoader\n", "\n", "config = {\n", " # your typeform configuration\n", diff --git a/docs/docs/integrations/document_loaders/airbyte_zendesk_support.ipynb b/docs/docs/integrations/document_loaders/airbyte_zendesk_support.ipynb index 5c2fd6049a..17ef38b377 100644 --- a/docs/docs/integrations/document_loaders/airbyte_zendesk_support.ipynb +++ b/docs/docs/integrations/document_loaders/airbyte_zendesk_support.ipynb @@ -89,7 +89,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders.airbyte import AirbyteZendeskSupportLoader\n", + "from langchain_community.document_loaders.airbyte import AirbyteZendeskSupportLoader\n", "\n", "config = {\n", " # your zendesk-support configuration\n", diff --git a/docs/docs/integrations/document_loaders/airtable.ipynb b/docs/docs/integrations/document_loaders/airtable.ipynb index 0ac03425d0..caa56f20e8 100644 --- a/docs/docs/integrations/document_loaders/airtable.ipynb +++ b/docs/docs/integrations/document_loaders/airtable.ipynb @@ -25,7 +25,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders import AirtableLoader" + "from langchain_community.document_loaders import AirtableLoader" ] }, { diff --git a/docs/docs/integrations/document_loaders/alibaba_cloud_maxcompute.ipynb b/docs/docs/integrations/document_loaders/alibaba_cloud_maxcompute.ipynb index 2ffd02203f..505bc0e1bc 100644 --- a/docs/docs/integrations/document_loaders/alibaba_cloud_maxcompute.ipynb +++ b/docs/docs/integrations/document_loaders/alibaba_cloud_maxcompute.ipynb @@ -58,7 +58,7 @@ }, "outputs": [], "source": [ - "from langchain.document_loaders import MaxComputeLoader" + "from langchain_community.document_loaders import MaxComputeLoader" ] }, { diff --git a/docs/docs/integrations/document_loaders/amazon_textract.ipynb b/docs/docs/integrations/document_loaders/amazon_textract.ipynb index 208d0cbec2..781121c35f 100644 --- a/docs/docs/integrations/document_loaders/amazon_textract.ipynb +++ b/docs/docs/integrations/document_loaders/amazon_textract.ipynb @@ -60,7 +60,7 @@ }, "outputs": [], "source": [ - "from langchain.document_loaders import AmazonTextractPDFLoader\n", + "from langchain_community.document_loaders import AmazonTextractPDFLoader\n", "\n", "loader = AmazonTextractPDFLoader(\"example_data/alejandro_rosalez_sample-small.jpeg\")\n", "documents = loader.load()" @@ -116,7 +116,7 @@ }, "outputs": [], "source": [ - "from langchain.document_loaders import AmazonTextractPDFLoader\n", + "from langchain_community.document_loaders import AmazonTextractPDFLoader\n", "\n", "loader = AmazonTextractPDFLoader(\n", " \"https://amazon-textract-public-content.s3.us-east-2.amazonaws.com/langchain/alejandro_rosalez_sample_1.jpg\"\n", diff --git a/docs/docs/integrations/document_loaders/apify_dataset.ipynb b/docs/docs/integrations/document_loaders/apify_dataset.ipynb index 3dc3cc99fe..48d9492797 100644 --- a/docs/docs/integrations/document_loaders/apify_dataset.ipynb +++ b/docs/docs/integrations/document_loaders/apify_dataset.ipynb @@ -40,8 +40,8 @@ "metadata": {}, "outputs": [], "source": [ - "from 
langchain.document_loaders import ApifyDatasetLoader\n", - "from langchain.document_loaders.base import Document" + "from langchain_community.document_loaders import ApifyDatasetLoader\n", + "from langchain_community.document_loaders.base import Document" ] }, { @@ -101,8 +101,8 @@ "outputs": [], "source": [ "from langchain.docstore.document import Document\n", - "from langchain.document_loaders import ApifyDatasetLoader\n", - "from langchain.indexes import VectorstoreIndexCreator" + "from langchain.indexes import VectorstoreIndexCreator\n", + "from langchain_community.document_loaders import ApifyDatasetLoader" ] }, { diff --git a/docs/docs/integrations/document_loaders/arcgis.ipynb b/docs/docs/integrations/document_loaders/arcgis.ipynb index d9fe938025..bfe427366c 100644 --- a/docs/docs/integrations/document_loaders/arcgis.ipynb +++ b/docs/docs/integrations/document_loaders/arcgis.ipynb @@ -7,7 +7,7 @@ "source": [ "# ArcGIS\n", "\n", - "This notebook demonstrates the use of the `langchain.document_loaders.ArcGISLoader` class.\n", + "This notebook demonstrates the use of the `langchain_community.document_loaders.ArcGISLoader` class.\n", "\n", "You will need to install the ArcGIS API for Python `arcgis` and, optionally, `bs4.BeautifulSoup`.\n", "\n", @@ -21,7 +21,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders import ArcGISLoader\n", + "from langchain_community.document_loaders import ArcGISLoader\n", "\n", "URL = \"https://maps1.vcgov.org/arcgis/rest/services/Beaches/MapServer/7\"\n", "loader = ArcGISLoader(URL)\n", diff --git a/docs/docs/integrations/document_loaders/arxiv.ipynb b/docs/docs/integrations/document_loaders/arxiv.ipynb index 8ec6972759..3e71ac731c 100644 --- a/docs/docs/integrations/document_loaders/arxiv.ipynb +++ b/docs/docs/integrations/document_loaders/arxiv.ipynb @@ -88,7 +88,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders import ArxivLoader" + "from langchain_community.document_loaders import ArxivLoader" ] }, { diff --git a/docs/docs/integrations/document_loaders/assemblyai.ipynb b/docs/docs/integrations/document_loaders/assemblyai.ipynb index cb30658ff4..c217bcac60 100644 --- a/docs/docs/integrations/document_loaders/assemblyai.ipynb +++ b/docs/docs/integrations/document_loaders/assemblyai.ipynb @@ -53,7 +53,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders import AssemblyAIAudioTranscriptLoader\n", + "from langchain_community.document_loaders import AssemblyAIAudioTranscriptLoader\n", "\n", "audio_file = \"https://storage.googleapis.com/aai-docs-samples/nbc.mp3\"\n", "# or a local file path: audio_file = \"./nbc.mp3\"\n", @@ -148,7 +148,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders.assemblyai import TranscriptFormat\n", + "from langchain_community.document_loaders.assemblyai import TranscriptFormat\n", "\n", "loader = AssemblyAIAudioTranscriptLoader(\n", " file_path=\"./your_file.mp3\",\n", diff --git a/docs/docs/integrations/document_loaders/async_chromium.ipynb b/docs/docs/integrations/document_loaders/async_chromium.ipynb index 1b4db6b25d..94b9c61742 100644 --- a/docs/docs/integrations/document_loaders/async_chromium.ipynb +++ b/docs/docs/integrations/document_loaders/async_chromium.ipynb @@ -45,7 +45,7 @@ } ], "source": [ - "from langchain.document_loaders import AsyncChromiumLoader\n", + "from langchain_community.document_loaders import AsyncChromiumLoader\n", "\n", "urls = [\"https://www.wsj.com\"]\n", "loader = 
AsyncChromiumLoader(urls)\n", @@ -71,7 +71,7 @@ } ], "source": [ - "from langchain.document_transformers import Html2TextTransformer\n", + "from langchain_community.document_transformers import Html2TextTransformer\n", "\n", "html2text = Html2TextTransformer()\n", "docs_transformed = html2text.transform_documents(docs)\n", diff --git a/docs/docs/integrations/document_loaders/async_html.ipynb b/docs/docs/integrations/document_loaders/async_html.ipynb index 8a9786a08f..95bb82efaa 100644 --- a/docs/docs/integrations/document_loaders/async_html.ipynb +++ b/docs/docs/integrations/document_loaders/async_html.ipynb @@ -17,7 +17,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders import AsyncHtmlLoader" + "from langchain_community.document_loaders import AsyncHtmlLoader" ] }, { diff --git a/docs/docs/integrations/document_loaders/aws_s3_directory.ipynb b/docs/docs/integrations/document_loaders/aws_s3_directory.ipynb index 5b112dbc48..734112eb0b 100644 --- a/docs/docs/integrations/document_loaders/aws_s3_directory.ipynb +++ b/docs/docs/integrations/document_loaders/aws_s3_directory.ipynb @@ -35,7 +35,7 @@ }, "outputs": [], "source": [ - "from langchain.document_loaders import S3DirectoryLoader" + "from langchain_community.document_loaders import S3DirectoryLoader" ] }, { diff --git a/docs/docs/integrations/document_loaders/aws_s3_file.ipynb b/docs/docs/integrations/document_loaders/aws_s3_file.ipynb index 66dfad695d..a42ae9d6c3 100644 --- a/docs/docs/integrations/document_loaders/aws_s3_file.ipynb +++ b/docs/docs/integrations/document_loaders/aws_s3_file.ipynb @@ -21,7 +21,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders import S3FileLoader" + "from langchain_community.document_loaders import S3FileLoader" ] }, { diff --git a/docs/docs/integrations/document_loaders/azlyrics.ipynb b/docs/docs/integrations/document_loaders/azlyrics.ipynb index 48056751a1..e39602a01d 100644 --- a/docs/docs/integrations/document_loaders/azlyrics.ipynb +++ b/docs/docs/integrations/document_loaders/azlyrics.ipynb @@ -19,7 +19,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders import AZLyricsLoader" + "from langchain_community.document_loaders import AZLyricsLoader" ] }, { diff --git a/docs/docs/integrations/document_loaders/azure_ai_data.ipynb b/docs/docs/integrations/document_loaders/azure_ai_data.ipynb index 93ab36edbb..45750dcc7a 100644 --- a/docs/docs/integrations/document_loaders/azure_ai_data.ipynb +++ b/docs/docs/integrations/document_loaders/azure_ai_data.ipynb @@ -39,7 +39,7 @@ "source": [ "from azure.ai.resources.client import AIClient\n", "from azure.identity import DefaultAzureCredential\n", - "from langchain.document_loaders import AzureAIDataLoader" + "from langchain_community.document_loaders import AzureAIDataLoader" ] }, { diff --git a/docs/docs/integrations/document_loaders/azure_blob_storage_container.ipynb b/docs/docs/integrations/document_loaders/azure_blob_storage_container.ipynb index 3fd7786a99..ffa32dc2d5 100644 --- a/docs/docs/integrations/document_loaders/azure_blob_storage_container.ipynb +++ b/docs/docs/integrations/document_loaders/azure_blob_storage_container.ipynb @@ -39,7 +39,7 @@ }, "outputs": [], "source": [ - "from langchain.document_loaders import AzureBlobStorageContainerLoader" + "from langchain_community.document_loaders import AzureBlobStorageContainerLoader" ] }, { diff --git a/docs/docs/integrations/document_loaders/azure_blob_storage_file.ipynb 
b/docs/docs/integrations/document_loaders/azure_blob_storage_file.ipynb index 9fbf827203..1620ffc0d2 100644 --- a/docs/docs/integrations/document_loaders/azure_blob_storage_file.ipynb +++ b/docs/docs/integrations/document_loaders/azure_blob_storage_file.ipynb @@ -31,7 +31,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders import AzureBlobStorageFileLoader" + "from langchain_community.document_loaders import AzureBlobStorageFileLoader" ] }, { diff --git a/docs/docs/integrations/document_loaders/bibtex.ipynb b/docs/docs/integrations/document_loaders/bibtex.ipynb index 8d4175490e..e30b2c4034 100644 --- a/docs/docs/integrations/document_loaders/bibtex.ipynb +++ b/docs/docs/integrations/document_loaders/bibtex.ipynb @@ -65,7 +65,7 @@ }, "outputs": [], "source": [ - "from langchain.document_loaders import BibtexLoader" + "from langchain_community.document_loaders import BibtexLoader" ] }, { diff --git a/docs/docs/integrations/document_loaders/bilibili.ipynb b/docs/docs/integrations/document_loaders/bilibili.ipynb index fc6b3dc386..beb44667e3 100644 --- a/docs/docs/integrations/document_loaders/bilibili.ipynb +++ b/docs/docs/integrations/document_loaders/bilibili.ipynb @@ -35,7 +35,7 @@ }, "outputs": [], "source": [ - "from langchain.document_loaders import BiliBiliLoader" + "from langchain_community.document_loaders import BiliBiliLoader" ] }, { diff --git a/docs/docs/integrations/document_loaders/blackboard.ipynb b/docs/docs/integrations/document_loaders/blackboard.ipynb index c6580cc795..ff72140aad 100644 --- a/docs/docs/integrations/document_loaders/blackboard.ipynb +++ b/docs/docs/integrations/document_loaders/blackboard.ipynb @@ -23,7 +23,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders import BlackboardLoader\n", + "from langchain_community.document_loaders import BlackboardLoader\n", "\n", "loader = BlackboardLoader(\n", " blackboard_course_url=\"https://blackboard.example.com/webapps/blackboard/execute/announcement?method=search&context=course_entry&course_id=_123456_1\",\n", diff --git a/docs/docs/integrations/document_loaders/blockchain.ipynb b/docs/docs/integrations/document_loaders/blockchain.ipynb index e87b1927c1..d0850311f2 100644 --- a/docs/docs/integrations/document_loaders/blockchain.ipynb +++ b/docs/docs/integrations/document_loaders/blockchain.ipynb @@ -80,7 +80,7 @@ }, "outputs": [], "source": [ - "from langchain.document_loaders.blockchain import (\n", + "from langchain_community.document_loaders.blockchain import (\n", " BlockchainDocumentLoader,\n", " BlockchainType,\n", ")\n", diff --git a/docs/docs/integrations/document_loaders/brave_search.ipynb b/docs/docs/integrations/document_loaders/brave_search.ipynb index 11a819d748..7dfc633311 100644 --- a/docs/docs/integrations/document_loaders/brave_search.ipynb +++ b/docs/docs/integrations/document_loaders/brave_search.ipynb @@ -48,7 +48,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders import BraveSearchLoader" + "from langchain_community.document_loaders import BraveSearchLoader" ] }, { diff --git a/docs/docs/integrations/document_loaders/browserless.ipynb b/docs/docs/integrations/document_loaders/browserless.ipynb index 382a60533c..18753fb576 100644 --- a/docs/docs/integrations/document_loaders/browserless.ipynb +++ b/docs/docs/integrations/document_loaders/browserless.ipynb @@ -18,7 +18,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders import BrowserlessLoader" + "from 
langchain_community.document_loaders import BrowserlessLoader" ] }, { diff --git a/docs/docs/integrations/document_loaders/chatgpt_loader.ipynb b/docs/docs/integrations/document_loaders/chatgpt_loader.ipynb index 1593426125..87902e1536 100644 --- a/docs/docs/integrations/document_loaders/chatgpt_loader.ipynb +++ b/docs/docs/integrations/document_loaders/chatgpt_loader.ipynb @@ -22,7 +22,7 @@ }, "outputs": [], "source": [ - "from langchain.document_loaders.chatgpt import ChatGPTLoader" + "from langchain_community.document_loaders.chatgpt import ChatGPTLoader" ] }, { diff --git a/docs/docs/integrations/document_loaders/college_confidential.ipynb b/docs/docs/integrations/document_loaders/college_confidential.ipynb index f39cd1c15a..09388e2565 100644 --- a/docs/docs/integrations/document_loaders/college_confidential.ipynb +++ b/docs/docs/integrations/document_loaders/college_confidential.ipynb @@ -19,7 +19,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders import CollegeConfidentialLoader" + "from langchain_community.document_loaders import CollegeConfidentialLoader" ] }, { diff --git a/docs/docs/integrations/document_loaders/concurrent.ipynb b/docs/docs/integrations/document_loaders/concurrent.ipynb index a7c5dc627b..28b32f8bf8 100644 --- a/docs/docs/integrations/document_loaders/concurrent.ipynb +++ b/docs/docs/integrations/document_loaders/concurrent.ipynb @@ -17,7 +17,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders import ConcurrentLoader" + "from langchain_community.document_loaders import ConcurrentLoader" ] }, { diff --git a/docs/docs/integrations/document_loaders/confluence.ipynb b/docs/docs/integrations/document_loaders/confluence.ipynb index 2b4cb27e1b..27b8298a66 100644 --- a/docs/docs/integrations/document_loaders/confluence.ipynb +++ b/docs/docs/integrations/document_loaders/confluence.ipynb @@ -67,7 +67,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders import ConfluenceLoader\n", + "from langchain_community.document_loaders import ConfluenceLoader\n", "\n", "loader = ConfluenceLoader(\n", " url=\"https://yoursite.atlassian.com/wiki\", username=\"me\", api_key=\"12345\"\n", @@ -93,7 +93,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders import ConfluenceLoader\n", + "from langchain_community.document_loaders import ConfluenceLoader\n", "\n", "loader = ConfluenceLoader(url=\"https://yoursite.atlassian.com/wiki\", token=\"12345\")\n", "documents = loader.load(\n", diff --git a/docs/docs/integrations/document_loaders/conll-u.ipynb b/docs/docs/integrations/document_loaders/conll-u.ipynb index e3f495ab64..6d7276eebc 100644 --- a/docs/docs/integrations/document_loaders/conll-u.ipynb +++ b/docs/docs/integrations/document_loaders/conll-u.ipynb @@ -24,7 +24,7 @@ }, "outputs": [], "source": [ - "from langchain.document_loaders import CoNLLULoader" + "from langchain_community.document_loaders import CoNLLULoader" ] }, { diff --git a/docs/docs/integrations/document_loaders/couchbase.ipynb b/docs/docs/integrations/document_loaders/couchbase.ipynb index f4f0d5c36c..4ad1ebc7b0 100644 --- a/docs/docs/integrations/document_loaders/couchbase.ipynb +++ b/docs/docs/integrations/document_loaders/couchbase.ipynb @@ -45,7 +45,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders.couchbase import CouchbaseLoader\n", + "from langchain_community.document_loaders.couchbase import CouchbaseLoader\n", "\n", "connection_string = 
\"couchbase://localhost\" # valid Couchbase connection string\n", "db_username = (\n", diff --git a/docs/docs/integrations/document_loaders/csv.ipynb b/docs/docs/integrations/document_loaders/csv.ipynb index 877adb2c2a..3c1424bb1b 100644 --- a/docs/docs/integrations/document_loaders/csv.ipynb +++ b/docs/docs/integrations/document_loaders/csv.ipynb @@ -22,7 +22,7 @@ }, "outputs": [], "source": [ - "from langchain.document_loaders.csv_loader import CSVLoader" + "from langchain_community.document_loaders.csv_loader import CSVLoader" ] }, { @@ -167,7 +167,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders.csv_loader import UnstructuredCSVLoader" + "from langchain_community.document_loaders.csv_loader import UnstructuredCSVLoader" ] }, { diff --git a/docs/docs/integrations/document_loaders/cube_semantic.ipynb b/docs/docs/integrations/document_loaders/cube_semantic.ipynb index ace54a1364..9f8c840447 100644 --- a/docs/docs/integrations/document_loaders/cube_semantic.ipynb +++ b/docs/docs/integrations/document_loaders/cube_semantic.ipynb @@ -79,7 +79,7 @@ "outputs": [], "source": [ "import jwt\n", - "from langchain.document_loaders import CubeSemanticLoader\n", + "from langchain_community.document_loaders import CubeSemanticLoader\n", "\n", "api_url = \"https://api-example.gcp-us-central1.cubecloudapp.dev/cubejs-api/v1/meta\"\n", "cubejs_api_secret = \"api-secret-here\"\n", diff --git a/docs/docs/integrations/document_loaders/datadog_logs.ipynb b/docs/docs/integrations/document_loaders/datadog_logs.ipynb index 7fb3e4ec31..52343c552e 100644 --- a/docs/docs/integrations/document_loaders/datadog_logs.ipynb +++ b/docs/docs/integrations/document_loaders/datadog_logs.ipynb @@ -18,7 +18,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders import DatadogLogsLoader" + "from langchain_community.document_loaders import DatadogLogsLoader" ] }, { diff --git a/docs/docs/integrations/document_loaders/diffbot.ipynb b/docs/docs/integrations/document_loaders/diffbot.ipynb index 6b17407436..34e9eb9f78 100644 --- a/docs/docs/integrations/document_loaders/diffbot.ipynb +++ b/docs/docs/integrations/document_loaders/diffbot.ipynb @@ -45,7 +45,7 @@ "source": [ "import os\n", "\n", - "from langchain.document_loaders import DiffbotLoader\n", + "from langchain_community.document_loaders import DiffbotLoader\n", "\n", "loader = DiffbotLoader(urls=urls, api_token=os.environ.get(\"DIFFBOT_API_TOKEN\"))" ] diff --git a/docs/docs/integrations/document_loaders/discord.ipynb b/docs/docs/integrations/document_loaders/discord.ipynb index 260531db5a..36b8fb7700 100644 --- a/docs/docs/integrations/document_loaders/discord.ipynb +++ b/docs/docs/integrations/document_loaders/discord.ipynb @@ -52,7 +52,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders.discord import DiscordChatLoader" + "from langchain_community.document_loaders.discord import DiscordChatLoader" ] }, { diff --git a/docs/docs/integrations/document_loaders/docugami.ipynb b/docs/docs/integrations/document_loaders/docugami.ipynb index 48edcb2e0d..b50cbd5db2 100644 --- a/docs/docs/integrations/document_loaders/docugami.ipynb +++ b/docs/docs/integrations/document_loaders/docugami.ipynb @@ -56,7 +56,7 @@ "source": [ "import os\n", "\n", - "from langchain.document_loaders import DocugamiLoader" + "from langchain_community.document_loaders import DocugamiLoader" ] }, { @@ -211,9 +211,9 @@ "outputs": [], "source": [ "from langchain.chains import RetrievalQA\n", - "from 
langchain.vectorstores.chroma import Chroma\n", "from langchain_community.embeddings import OpenAIEmbeddings\n", "from langchain_community.llms.openai import OpenAI\n", + "from langchain_community.vectorstores.chroma import Chroma\n", "\n", "embedding = OpenAIEmbeddings()\n", "vectordb = Chroma.from_documents(documents=chunks, embedding=embedding)\n", @@ -366,7 +366,7 @@ "source": [ "from langchain.chains.query_constructor.schema import AttributeInfo\n", "from langchain.retrievers.self_query.base import SelfQueryRetriever\n", - "from langchain.vectorstores.chroma import Chroma\n", + "from langchain_community.vectorstores.chroma import Chroma\n", "\n", "EXCLUDE_KEYS = [\"id\", \"xpath\", \"structure\"]\n", "metadata_field_info = [\n", @@ -471,7 +471,7 @@ "source": [ "from typing import Dict, List\n", "\n", - "from langchain.document_loaders import DocugamiLoader\n", + "from langchain_community.document_loaders import DocugamiLoader\n", "from langchain_core.documents import Document\n", "\n", "loader = DocugamiLoader(docset_id=\"zo954yqy53wp\")\n", @@ -541,8 +541,8 @@ "source": [ "from langchain.retrievers.multi_vector import MultiVectorRetriever, SearchType\n", "from langchain.storage import InMemoryStore\n", - "from langchain.vectorstores.chroma import Chroma\n", "from langchain_community.embeddings import OpenAIEmbeddings\n", + "from langchain_community.vectorstores.chroma import Chroma\n", "\n", "# The vectorstore to use to index the child chunks\n", "vectorstore = Chroma(collection_name=\"big2small\", embedding_function=OpenAIEmbeddings())\n", diff --git a/docs/docs/integrations/document_loaders/docusaurus.ipynb b/docs/docs/integrations/document_loaders/docusaurus.ipynb index 0ffa9a0b1d..1fb7b8d3f7 100644 --- a/docs/docs/integrations/document_loaders/docusaurus.ipynb +++ b/docs/docs/integrations/document_loaders/docusaurus.ipynb @@ -16,7 +16,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders import DocusaurusLoader" + "from langchain_community.document_loaders import DocusaurusLoader" ] }, { @@ -135,7 +135,7 @@ { "data": { "text/plain": [ - "Document(page_content='\\n\\n\\n\\n\\nSitemap | 🦜️🔗 Langchain\\n\\n\\n\\n\\n\\n\\nSkip to main content🦜️🔗 LangChainDocsUse casesIntegrationsAPICommunityChat our docsLangSmithJS/TS DocsSearchCTRLKProvidersAnthropicAWSGoogleMicrosoftOpenAIMoreComponentsLLMsChat modelsDocument loadersacreomAirbyte CDKAirbyte GongAirbyte HubspotAirbyte JSONAirbyte SalesforceAirbyte ShopifyAirbyte StripeAirbyte TypeformAirbyte Zendesk SupportAirtableAlibaba Cloud MaxComputeApify DatasetArcGISArxivAssemblyAI Audio TranscriptsAsync ChromiumAsyncHtmlAWS S3 DirectoryAWS S3 FileAZLyricsAzure Blob Storage ContainerAzure Blob Storage FileAzure Document IntelligenceBibTeXBiliBiliBlackboardBlockchainBrave SearchBrowserlessChatGPT DataCollege ConfidentialConcurrent LoaderConfluenceCoNLL-UCopy PasteCSVCube Semantic LayerDatadog LogsDiffbotDiscordDocugamiDropboxDuckDBEmailEmbaasEPubEtherscanEverNoteexample_dataMicrosoft ExcelFacebook ChatFaunaFigmaGeopandasGitGitBookGitHubGoogle BigQueryGoogle Cloud Storage DirectoryGoogle Cloud Storage FileGoogle DriveGrobidGutenbergHacker NewsHuawei OBS DirectoryHuawei OBS FileHuggingFace datasetiFixitImagesImage captionsIMSDbIuguJoplinJupyter NotebookLarkSuite (FeiShu)MastodonMediaWiki DumpMerge Documents LoadermhtmlMicrosoft OneDriveMicrosoft PowerPointMicrosoft SharePointMicrosoft WordModern TreasuryMongoDBNews URLNotion DB 1/2Notion DB 2/2NucliaObsidianOpen Document Format (ODT)Open City DataOrg-modePandas 
DataFrameAmazon TextractPolars DataFramePsychicPubMedPySparkReadTheDocs DocumentationRecursive URLRedditRoamRocksetrspaceRSS FeedsRSTSitemapSlackSnowflakeSource CodeSpreedlyStripeSubtitleTelegramTencent COS DirectoryTencent COS FileTensorFlow Datasets2MarkdownTOMLTrelloTSVTwitterUnstructured FileURLWeatherWebBaseLoaderWhatsApp ChatWikipediaXMLXorbits Pandas DataFrameYouTube audioYouTube transcriptsDocument transformersText embedding modelsVector storesRetrieversToolsAgents and toolkitsMemoryCallbacksChat loadersComponentsDocument loadersSitemapOn this pageSitemapExtends from the WebBaseLoader, SitemapLoader loads a sitemap from a given URL, and then scrape and load all pages in the sitemap, returning each page as a Document.The scraping is done concurrently. There are reasonable limits to concurrent requests, defaulting to 2 per second. If you aren\\'t concerned about being a good citizen, or you control the scrapped server, or don\\'t care about load. Note, while this will speed up the scraping process, but it may cause the server to block you. Be careful!pip install nest_asyncio Requirement already satisfied: nest_asyncio in /Users/tasp/Code/projects/langchain/.venv/lib/python3.10/site-packages (1.5.6) [notice] A new release of pip available: 22.3.1 -> 23.0.1 [notice] To update, run: pip install --upgrade pip# fixes a bug with asyncio and jupyterimport nest_asyncionest_asyncio.apply()from langchain.document_loaders.sitemap import SitemapLoadersitemap_loader = SitemapLoader(web_path=\"https://langchain.readthedocs.io/sitemap.xml\")docs = sitemap_loader.load()You can change the requests_per_second parameter to increase the max concurrent requests. and use requests_kwargs to pass kwargs when send requests.sitemap_loader.requests_per_second = 2# Optional: avoid `[SSL: CERTIFICATE_VERIFY_FAILED]` issuesitemap_loader.requests_kwargs = {\"verify\": False}docs[0] Document(page_content=\\'\\\\n\\\\n\\\\n\\\\n\\\\n\\\\nWelcome to LangChain — 🦜🔗 LangChain 0.0.123\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\nSkip to main content\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\nCtrl+K\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n🦜🔗 LangChain 0.0.123\\\\n\\\\n\\\\n\\\\nGetting Started\\\\n\\\\nQuickstart Guide\\\\n\\\\nModules\\\\n\\\\nPrompt Templates\\\\nGetting Started\\\\nKey Concepts\\\\nHow-To Guides\\\\nCreate a custom prompt template\\\\nCreate a custom example selector\\\\nProvide few shot examples to a prompt\\\\nPrompt Serialization\\\\nExample Selectors\\\\nOutput Parsers\\\\n\\\\n\\\\nReference\\\\nPromptTemplates\\\\nExample Selector\\\\n\\\\n\\\\n\\\\n\\\\nLLMs\\\\nGetting Started\\\\nKey Concepts\\\\nHow-To Guides\\\\nGeneric Functionality\\\\nCustom LLM\\\\nFake LLM\\\\nLLM Caching\\\\nLLM Serialization\\\\nToken Usage Tracking\\\\n\\\\n\\\\nIntegrations\\\\nAI21\\\\nAleph Alpha\\\\nAnthropic\\\\nAzure OpenAI LLM Example\\\\nBanana\\\\nCerebriumAI LLM Example\\\\nCohere\\\\nDeepInfra LLM Example\\\\nForefrontAI LLM Example\\\\nGooseAI LLM Example\\\\nHugging Face Hub\\\\nManifest\\\\nModal\\\\nOpenAI\\\\nPetals LLM Example\\\\nPromptLayer OpenAI\\\\nSageMakerEndpoint\\\\nSelf-Hosted Models via Runhouse\\\\nStochasticAI\\\\nWriter\\\\n\\\\n\\\\nAsync API for LLM\\\\nStreaming with LLMs\\\\n\\\\n\\\\nReference\\\\n\\\\n\\\\nDocument Loaders\\\\nKey Concepts\\\\nHow To 
Guides\\\\nCoNLL-U\\\\nAirbyte JSON\\\\nAZLyrics\\\\nBlackboard\\\\nCollege Confidential\\\\nCopy Paste\\\\nCSV Loader\\\\nDirectory Loader\\\\nEmail\\\\nEverNote\\\\nFacebook Chat\\\\nFigma\\\\nGCS Directory\\\\nGCS File Storage\\\\nGitBook\\\\nGoogle Drive\\\\nGutenberg\\\\nHacker News\\\\nHTML\\\\niFixit\\\\nImages\\\\nIMSDb\\\\nMarkdown\\\\nNotebook\\\\nNotion\\\\nObsidian\\\\nPDF\\\\nPowerPoint\\\\nReadTheDocs Documentation\\\\nRoam\\\\ns3 Directory\\\\ns3 File\\\\nSubtitle Files\\\\nTelegram\\\\nUnstructured File Loader\\\\nURL\\\\nWeb Base\\\\nWord Documents\\\\nYouTube\\\\n\\\\n\\\\n\\\\n\\\\nUtils\\\\nKey Concepts\\\\nGeneric Utilities\\\\nBash\\\\nBing Search\\\\nGoogle Search\\\\nGoogle Serper API\\\\nIFTTT WebHooks\\\\nPython REPL\\\\nRequests\\\\nSearxNG Search API\\\\nSerpAPI\\\\nWolfram Alpha\\\\nZapier Natural Language Actions API\\\\n\\\\n\\\\nReference\\\\nPython REPL\\\\nSerpAPI\\\\nSearxNG Search\\\\nDocstore\\\\nText Splitter\\\\nEmbeddings\\\\nVectorStores\\\\n\\\\n\\\\n\\\\n\\\\nIndexes\\\\nGetting Started\\\\nKey Concepts\\\\nHow To Guides\\\\nEmbeddings\\\\nHypothetical Document Embeddings\\\\nText Splitter\\\\nVectorStores\\\\nAtlasDB\\\\nChroma\\\\nDeep Lake\\\\nElasticSearch\\\\nFAISS\\\\nMilvus\\\\nOpenSearch\\\\nPGVector\\\\nPinecone\\\\nQdrant\\\\nRedis\\\\nWeaviate\\\\nChatGPT Plugin Retriever\\\\nVectorStore Retriever\\\\nAnalyze Document\\\\nChat Index\\\\nGraph QA\\\\nQuestion Answering with Sources\\\\nQuestion Answering\\\\nSummarization\\\\nRetrieval Question/Answering\\\\nRetrieval Question Answering with Sources\\\\nVector DB Text Generation\\\\n\\\\n\\\\n\\\\n\\\\nChains\\\\nGetting Started\\\\nHow-To Guides\\\\nGeneric Chains\\\\nLoading from LangChainHub\\\\nLLM Chain\\\\nSequential Chains\\\\nSerialization\\\\nTransformation Chain\\\\n\\\\n\\\\nUtility Chains\\\\nAPI Chains\\\\nSelf-Critique Chain with Constitutional AI\\\\nBashChain\\\\nLLMCheckerChain\\\\nLLM Math\\\\nLLMRequestsChain\\\\nLLMSummarizationCheckerChain\\\\nModeration\\\\nPAL\\\\nSQLite example\\\\n\\\\n\\\\nAsync API for Chain\\\\n\\\\n\\\\nKey Concepts\\\\nReference\\\\n\\\\n\\\\nAgents\\\\nGetting Started\\\\nKey Concepts\\\\nHow-To Guides\\\\nAgents and Vectorstores\\\\nAsync API for Agent\\\\nConversation Agent (for Chat Models)\\\\nChatGPT Plugins\\\\nCustom Agent\\\\nDefining Custom Tools\\\\nHuman as a tool\\\\nIntermediate Steps\\\\nLoading from LangChainHub\\\\nMax Iterations\\\\nMulti Input Tools\\\\nSearch Tools\\\\nSerialization\\\\nAdding SharedMemory to an Agent and its Tools\\\\nCSV Agent\\\\nJSON Agent\\\\nOpenAPI Agent\\\\nPandas Dataframe Agent\\\\nPython Agent\\\\nSQL Database Agent\\\\nVectorstore Agent\\\\nMRKL\\\\nMRKL Chat\\\\nReAct\\\\nSelf Ask With Search\\\\n\\\\n\\\\nReference\\\\n\\\\n\\\\nMemory\\\\nGetting Started\\\\nKey Concepts\\\\nHow-To Guides\\\\nConversationBufferMemory\\\\nConversationBufferWindowMemory\\\\nEntity Memory\\\\nConversation Knowledge Graph Memory\\\\nConversationSummaryMemory\\\\nConversationSummaryBufferMemory\\\\nConversationTokenBufferMemory\\\\nAdding Memory To an LLMChain\\\\nAdding Memory to a Multi-Input Chain\\\\nAdding Memory to an Agent\\\\nChatGPT Clone\\\\nConversation Agent\\\\nConversational Memory Customization\\\\nCustom Memory\\\\nMultiple Memory\\\\n\\\\n\\\\n\\\\n\\\\nChat\\\\nGetting Started\\\\nKey Concepts\\\\nHow-To Guides\\\\nAgent\\\\nChat Vector DB\\\\nFew Shot Examples\\\\nMemory\\\\nPromptLayer ChatOpenAI\\\\nStreaming\\\\nRetrieval Question/Answering\\\\nRetrieval Question Answering with 
Sources\\\\n\\\\n\\\\n\\\\n\\\\n\\\\nUse Cases\\\\n\\\\nAgents\\\\nChatbots\\\\nGenerate Examples\\\\nData Augmented Generation\\\\nQuestion Answering\\\\nSummarization\\\\nQuerying Tabular Data\\\\nExtraction\\\\nEvaluation\\\\nAgent Benchmarking: Search + Calculator\\\\nAgent VectorDB Question Answering Benchmarking\\\\nBenchmarking Template\\\\nData Augmented Question Answering\\\\nUsing Hugging Face Datasets\\\\nLLM Math\\\\nQuestion Answering Benchmarking: Paul Graham Essay\\\\nQuestion Answering Benchmarking: State of the Union Address\\\\nQA Generation\\\\nQuestion Answering\\\\nSQL Question Answering Benchmarking: Chinook\\\\n\\\\n\\\\nModel Comparison\\\\n\\\\nReference\\\\n\\\\nInstallation\\\\nIntegrations\\\\nAPI References\\\\nPrompts\\\\nPromptTemplates\\\\nExample Selector\\\\n\\\\n\\\\nUtilities\\\\nPython REPL\\\\nSerpAPI\\\\nSearxNG Search\\\\nDocstore\\\\nText Splitter\\\\nEmbeddings\\\\nVectorStores\\\\n\\\\n\\\\nChains\\\\nAgents\\\\n\\\\n\\\\n\\\\nEcosystem\\\\n\\\\nLangChain Ecosystem\\\\nAI21 Labs\\\\nAtlasDB\\\\nBanana\\\\nCerebriumAI\\\\nChroma\\\\nCohere\\\\nDeepInfra\\\\nDeep Lake\\\\nForefrontAI\\\\nGoogle Search Wrapper\\\\nGoogle Serper Wrapper\\\\nGooseAI\\\\nGraphsignal\\\\nHazy Research\\\\nHelicone\\\\nHugging Face\\\\nMilvus\\\\nModal\\\\nNLPCloud\\\\nOpenAI\\\\nOpenSearch\\\\nPetals\\\\nPGVector\\\\nPinecone\\\\nPromptLayer\\\\nQdrant\\\\nRunhouse\\\\nSearxNG Search API\\\\nSerpAPI\\\\nStochasticAI\\\\nUnstructured\\\\nWeights & Biases\\\\nWeaviate\\\\nWolfram Alpha Wrapper\\\\nWriter\\\\n\\\\n\\\\n\\\\nAdditional Resources\\\\n\\\\nLangChainHub\\\\nGlossary\\\\nLangChain Gallery\\\\nDeployments\\\\nTracing\\\\nDiscord\\\\nProduction Support\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n.rst\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n.pdf\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\nWelcome to LangChain\\\\n\\\\n\\\\n\\\\n\\\\n Contents \\\\n\\\\n\\\\n\\\\nGetting Started\\\\nModules\\\\nUse Cases\\\\nReference Docs\\\\nLangChain Ecosystem\\\\nAdditional Resources\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\nWelcome to LangChain#\\\\nLarge language models (LLMs) are emerging as a transformative technology, enabling\\\\ndevelopers to build applications that they previously could not.\\\\nBut using these LLMs in isolation is often not enough to\\\\ncreate a truly powerful app - the real power comes when you are able to\\\\ncombine them with other sources of computation or knowledge.\\\\nThis library is aimed at assisting in the development of those types of applications. 
Common examples of these types of applications include:\\\\n❓ Question Answering over specific documents\\\\n\\\\nDocumentation\\\\nEnd-to-end Example: Question Answering over Notion Database\\\\n\\\\n💬 Chatbots\\\\n\\\\nDocumentation\\\\nEnd-to-end Example: Chat-LangChain\\\\n\\\\n🤖 Agents\\\\n\\\\nDocumentation\\\\nEnd-to-end Example: GPT+WolframAlpha\\\\n\\\\n\\\\nGetting Started#\\\\nCheckout the below guide for a walkthrough of how to get started using LangChain to create an Language Model application.\\\\n\\\\nGetting Started Documentation\\\\n\\\\n\\\\n\\\\n\\\\n\\\\nModules#\\\\nThere are several main modules that LangChain provides support for.\\\\nFor each module we provide some examples to get started, how-to guides, reference docs, and conceptual guides.\\\\nThese modules are, in increasing order of complexity:\\\\n\\\\nPrompts: This includes prompt management, prompt optimization, and prompt serialization.\\\\nLLMs: This includes a generic interface for all LLMs, and common utilities for working with LLMs.\\\\nDocument Loaders: This includes a standard interface for loading documents, as well as specific integrations to all types of text data sources.\\\\nUtils: Language models are often more powerful when interacting with other sources of knowledge or computation. This can include Python REPLs, embeddings, search engines, and more. LangChain provides a large collection of common utils to use in your application.\\\\nChains: Chains go beyond just a single LLM call, and are sequences of calls (whether to an LLM or a different utility). LangChain provides a standard interface for chains, lots of integrations with other tools, and end-to-end chains for common applications.\\\\nIndexes: Language models are often more powerful when combined with your own text data - this module covers best practices for doing exactly that.\\\\nAgents: Agents involve an LLM making decisions about which Actions to take, taking that Action, seeing an Observation, and repeating that until done. LangChain provides a standard interface for agents, a selection of agents to choose from, and examples of end to end agents.\\\\nMemory: Memory is the concept of persisting state between calls of a chain/agent. LangChain provides a standard interface for memory, a collection of memory implementations, and examples of chains/agents that use memory.\\\\nChat: Chat models are a variation on Language Models that expose a different API - rather than working with raw text, they work with messages. LangChain provides a standard interface for working with them and doing all the same things as above.\\\\n\\\\n\\\\n\\\\n\\\\n\\\\nUse Cases#\\\\nThe above modules can be used in a variety of ways. LangChain also provides guidance and assistance in this. Below are some of the common use cases LangChain supports.\\\\n\\\\nAgents: Agents are systems that use a language model to interact with other tools. These can be used to do more grounded question/answering, interact with APIs, or even take actions.\\\\nChatbots: Since language models are good at producing text, that makes them ideal for creating chatbots.\\\\nData Augmented Generation: Data Augmented Generation involves specific types of chains that first interact with an external datasource to fetch data to use in the generation step. 
Examples of this include summarization of long pieces of text and question/answering over specific data sources.\\\\nQuestion Answering: Answering questions over specific documents, only utilizing the information in those documents to construct an answer. A type of Data Augmented Generation.\\\\nSummarization: Summarizing longer documents into shorter, more condensed chunks of information. A type of Data Augmented Generation.\\\\nQuerying Tabular Data: If you want to understand how to use LLMs to query data that is stored in a tabular format (csvs, SQL, dataframes, etc) you should read this page.\\\\nEvaluation: Generative models are notoriously hard to evaluate with traditional metrics. One new way of evaluating them is using language models themselves to do the evaluation. LangChain provides some prompts/chains for assisting in this.\\\\nGenerate similar examples: Generating similar examples to a given input. This is a common use case for many applications, and LangChain provides some prompts/chains for assisting in this.\\\\nCompare models: Experimenting with different prompts, models, and chains is a big part of developing the best possible application. The ModelLaboratory makes it easy to do so.\\\\n\\\\n\\\\n\\\\n\\\\n\\\\nReference Docs#\\\\nAll of LangChain’s reference documentation, in one place. Full documentation on all methods, classes, installation methods, and integration setups for LangChain.\\\\n\\\\nReference Documentation\\\\n\\\\n\\\\n\\\\n\\\\n\\\\nLangChain Ecosystem#\\\\nGuides for how other companies/products can be used with LangChain\\\\n\\\\nLangChain Ecosystem\\\\n\\\\n\\\\n\\\\n\\\\n\\\\nAdditional Resources#\\\\nAdditional collection of resources we think may be useful as you develop your application!\\\\n\\\\nLangChainHub: The LangChainHub is a place to share and explore other prompts, chains, and agents.\\\\nGlossary: A glossary of all related terms, papers, methods, etc. Whether implemented in LangChain or not!\\\\nGallery: A collection of our favorite projects that use LangChain. Useful for finding inspiration or seeing how things were done in other applications.\\\\nDeployments: A collection of instructions, code snippets, and template repositories for deploying LangChain apps.\\\\nDiscord: Join us on our Discord to discuss all things LangChain!\\\\nTracing: A guide on using tracing in LangChain to visualize the execution of chains and agents.\\\\nProduction Support: As you move your LangChains into production, we’d love to offer more comprehensive support. Please fill out this form and we’ll set up a dedicated support Slack channel.\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\nnext\\\\nQuickstart Guide\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n Contents\\\\n \\\\n\\\\n\\\\nGetting Started\\\\nModules\\\\nUse Cases\\\\nReference Docs\\\\nLangChain Ecosystem\\\\nAdditional Resources\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\nBy Harrison Chase\\\\n\\\\n\\\\n\\\\n\\\\n \\\\n © Copyright 2023, Harrison Chase.\\\\n \\\\n\\\\n\\\\n\\\\n\\\\n Last updated on Mar 24, 2023.\\\\n \\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\', lookup_str=\\'\\', metadata={\\'source\\': \\'https://python.langchain.com/en/stable/\\', \\'loc\\': \\'https://python.langchain.com/en/stable/\\', \\'lastmod\\': \\'2023-03-24T19:30:54.647430+00:00\\', \\'changefreq\\': \\'weekly\\', \\'priority\\': \\'1\\'}, lookup_index=0)Filtering sitemap URLs\\u200bSitemaps can be massive files, with thousands of URLs. 
Often you don\\'t need every single one of them. You can filter the URLs by passing a list of strings or regex patterns to the url_filter parameter. Only URLs that match one of the patterns will be loaded.loader = SitemapLoader( \"https://langchain.readthedocs.io/sitemap.xml\", filter_urls=[\"https://python.langchain.com/en/latest/\"],)documents = loader.load()documents[0] Document(page_content=\\'\\\\n\\\\n\\\\n\\\\n\\\\n\\\\nWelcome to LangChain — 🦜🔗 LangChain 0.0.123\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\nSkip to main content\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\nCtrl+K\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n🦜🔗 LangChain 0.0.123\\\\n\\\\n\\\\n\\\\nGetting Started\\\\n\\\\nQuickstart Guide\\\\n\\\\nModules\\\\n\\\\nModels\\\\nLLMs\\\\nGetting Started\\\\nGeneric Functionality\\\\nHow to use the async API for LLMs\\\\nHow to write a custom LLM wrapper\\\\nHow (and why) to use the fake LLM\\\\nHow to cache LLM calls\\\\nHow to serialize LLM classes\\\\nHow to stream LLM responses\\\\nHow to track token usage\\\\n\\\\n\\\\nIntegrations\\\\nAI21\\\\nAleph Alpha\\\\nAnthropic\\\\nAzure OpenAI LLM Example\\\\nBanana\\\\nCerebriumAI LLM Example\\\\nCohere\\\\nDeepInfra LLM Example\\\\nForefrontAI LLM Example\\\\nGooseAI LLM Example\\\\nHugging Face Hub\\\\nManifest\\\\nModal\\\\nOpenAI\\\\nPetals LLM Example\\\\nPromptLayer OpenAI\\\\nSageMakerEndpoint\\\\nSelf-Hosted Models via Runhouse\\\\nStochasticAI\\\\nWriter\\\\n\\\\n\\\\nReference\\\\n\\\\n\\\\nChat Models\\\\nGetting Started\\\\nHow-To Guides\\\\nHow to use few shot examples\\\\nHow to stream responses\\\\n\\\\n\\\\nIntegrations\\\\nAzure\\\\nOpenAI\\\\nPromptLayer ChatOpenAI\\\\n\\\\n\\\\n\\\\n\\\\nText Embedding Models\\\\nAzureOpenAI\\\\nCohere\\\\nFake Embeddings\\\\nHugging Face Hub\\\\nInstructEmbeddings\\\\nOpenAI\\\\nSageMaker Endpoint Embeddings\\\\nSelf Hosted Embeddings\\\\nTensorflowHub\\\\n\\\\n\\\\n\\\\n\\\\nPrompts\\\\nPrompt Templates\\\\nGetting Started\\\\nHow-To Guides\\\\nHow to create a custom prompt template\\\\nHow to create a prompt template that uses few shot examples\\\\nHow to work with partial Prompt Templates\\\\nHow to serialize prompts\\\\n\\\\n\\\\nReference\\\\nPromptTemplates\\\\nExample Selector\\\\n\\\\n\\\\n\\\\n\\\\nChat Prompt Template\\\\nExample Selectors\\\\nHow to create a custom example selector\\\\nLengthBased ExampleSelector\\\\nMaximal Marginal Relevance ExampleSelector\\\\nNGram Overlap ExampleSelector\\\\nSimilarity ExampleSelector\\\\n\\\\n\\\\nOutput Parsers\\\\nOutput Parsers\\\\nCommaSeparatedListOutputParser\\\\nOutputFixingParser\\\\nPydanticOutputParser\\\\nRetryOutputParser\\\\nStructured Output Parser\\\\n\\\\n\\\\n\\\\n\\\\nIndexes\\\\nGetting Started\\\\nDocument Loaders\\\\nCoNLL-U\\\\nAirbyte JSON\\\\nAZLyrics\\\\nBlackboard\\\\nCollege Confidential\\\\nCopy Paste\\\\nCSV Loader\\\\nDirectory Loader\\\\nEmail\\\\nEverNote\\\\nFacebook Chat\\\\nFigma\\\\nGCS Directory\\\\nGCS File Storage\\\\nGitBook\\\\nGoogle Drive\\\\nGutenberg\\\\nHacker News\\\\nHTML\\\\niFixit\\\\nImages\\\\nIMSDb\\\\nMarkdown\\\\nNotebook\\\\nNotion\\\\nObsidian\\\\nPDF\\\\nPowerPoint\\\\nReadTheDocs Documentation\\\\nRoam\\\\ns3 Directory\\\\ns3 File\\\\nSubtitle Files\\\\nTelegram\\\\nUnstructured File Loader\\\\nURL\\\\nWeb Base\\\\nWord 
Documents\\\\nYouTube\\\\n\\\\n\\\\nText Splitters\\\\nGetting Started\\\\nCharacter Text Splitter\\\\nHuggingFace Length Function\\\\nLatex Text Splitter\\\\nMarkdown Text Splitter\\\\nNLTK Text Splitter\\\\nPython Code Text Splitter\\\\nRecursiveCharacterTextSplitter\\\\nSpacy Text Splitter\\\\ntiktoken (OpenAI) Length Function\\\\nTiktokenText Splitter\\\\n\\\\n\\\\nVectorstores\\\\nGetting Started\\\\nAtlasDB\\\\nChroma\\\\nDeep Lake\\\\nElasticSearch\\\\nFAISS\\\\nMilvus\\\\nOpenSearch\\\\nPGVector\\\\nPinecone\\\\nQdrant\\\\nRedis\\\\nWeaviate\\\\n\\\\n\\\\nRetrievers\\\\nChatGPT Plugin Retriever\\\\nVectorStore Retriever\\\\n\\\\n\\\\n\\\\n\\\\nMemory\\\\nGetting Started\\\\nHow-To Guides\\\\nConversationBufferMemory\\\\nConversationBufferWindowMemory\\\\nEntity Memory\\\\nConversation Knowledge Graph Memory\\\\nConversationSummaryMemory\\\\nConversationSummaryBufferMemory\\\\nConversationTokenBufferMemory\\\\nHow to add Memory to an LLMChain\\\\nHow to add memory to a Multi-Input Chain\\\\nHow to add Memory to an Agent\\\\nHow to customize conversational memory\\\\nHow to create a custom Memory class\\\\nHow to use multiple memroy classes in the same chain\\\\n\\\\n\\\\n\\\\n\\\\nChains\\\\nGetting Started\\\\nHow-To Guides\\\\nAsync API for Chain\\\\nLoading from LangChainHub\\\\nLLM Chain\\\\nSequential Chains\\\\nSerialization\\\\nTransformation Chain\\\\nAnalyze Document\\\\nChat Index\\\\nGraph QA\\\\nHypothetical Document Embeddings\\\\nQuestion Answering with Sources\\\\nQuestion Answering\\\\nSummarization\\\\nRetrieval Question/Answering\\\\nRetrieval Question Answering with Sources\\\\nVector DB Text Generation\\\\nAPI Chains\\\\nSelf-Critique Chain with Constitutional AI\\\\nBashChain\\\\nLLMCheckerChain\\\\nLLM Math\\\\nLLMRequestsChain\\\\nLLMSummarizationCheckerChain\\\\nModeration\\\\nPAL\\\\nSQLite example\\\\n\\\\n\\\\nReference\\\\n\\\\n\\\\nAgents\\\\nGetting Started\\\\nTools\\\\nGetting Started\\\\nDefining Custom Tools\\\\nMulti Input Tools\\\\nBash\\\\nBing Search\\\\nChatGPT Plugins\\\\nGoogle Search\\\\nGoogle Serper API\\\\nHuman as a tool\\\\nIFTTT WebHooks\\\\nPython REPL\\\\nRequests\\\\nSearch Tools\\\\nSearxNG Search API\\\\nSerpAPI\\\\nWolfram Alpha\\\\nZapier Natural Language Actions API\\\\n\\\\n\\\\nAgents\\\\nAgent Types\\\\nCustom Agent\\\\nConversation Agent (for Chat Models)\\\\nConversation Agent\\\\nMRKL\\\\nMRKL Chat\\\\nReAct\\\\nSelf Ask With Search\\\\n\\\\n\\\\nToolkits\\\\nCSV Agent\\\\nJSON Agent\\\\nOpenAPI Agent\\\\nPandas Dataframe Agent\\\\nPython Agent\\\\nSQL Database Agent\\\\nVectorstore Agent\\\\n\\\\n\\\\nAgent Executors\\\\nHow to combine agents and vectorstores\\\\nHow to use the async API for Agents\\\\nHow to create ChatGPT Clone\\\\nHow to access intermediate steps\\\\nHow to cap the max number of iterations\\\\nHow to add SharedMemory to an Agent and its Tools\\\\n\\\\n\\\\n\\\\n\\\\n\\\\nUse Cases\\\\n\\\\nPersonal Assistants\\\\nQuestion Answering over Docs\\\\nChatbots\\\\nQuerying Tabular Data\\\\nInteracting with APIs\\\\nSummarization\\\\nExtraction\\\\nEvaluation\\\\nAgent Benchmarking: Search + Calculator\\\\nAgent VectorDB Question Answering Benchmarking\\\\nBenchmarking Template\\\\nData Augmented Question Answering\\\\nUsing Hugging Face Datasets\\\\nLLM Math\\\\nQuestion Answering Benchmarking: Paul Graham Essay\\\\nQuestion Answering Benchmarking: State of the Union Address\\\\nQA Generation\\\\nQuestion Answering\\\\nSQL Question Answering Benchmarking: 
Chinook\\\\n\\\\n\\\\n\\\\nReference\\\\n\\\\nInstallation\\\\nIntegrations\\\\nAPI References\\\\nPrompts\\\\nPromptTemplates\\\\nExample Selector\\\\n\\\\n\\\\nUtilities\\\\nPython REPL\\\\nSerpAPI\\\\nSearxNG Search\\\\nDocstore\\\\nText Splitter\\\\nEmbeddings\\\\nVectorStores\\\\n\\\\n\\\\nChains\\\\nAgents\\\\n\\\\n\\\\n\\\\nEcosystem\\\\n\\\\nLangChain Ecosystem\\\\nAI21 Labs\\\\nAtlasDB\\\\nBanana\\\\nCerebriumAI\\\\nChroma\\\\nCohere\\\\nDeepInfra\\\\nDeep Lake\\\\nForefrontAI\\\\nGoogle Search Wrapper\\\\nGoogle Serper Wrapper\\\\nGooseAI\\\\nGraphsignal\\\\nHazy Research\\\\nHelicone\\\\nHugging Face\\\\nMilvus\\\\nModal\\\\nNLPCloud\\\\nOpenAI\\\\nOpenSearch\\\\nPetals\\\\nPGVector\\\\nPinecone\\\\nPromptLayer\\\\nQdrant\\\\nRunhouse\\\\nSearxNG Search API\\\\nSerpAPI\\\\nStochasticAI\\\\nUnstructured\\\\nWeights & Biases\\\\nWeaviate\\\\nWolfram Alpha Wrapper\\\\nWriter\\\\n\\\\n\\\\n\\\\nAdditional Resources\\\\n\\\\nLangChainHub\\\\nGlossary\\\\nLangChain Gallery\\\\nDeployments\\\\nTracing\\\\nDiscord\\\\nProduction Support\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n.rst\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n.pdf\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\nWelcome to LangChain\\\\n\\\\n\\\\n\\\\n\\\\n Contents \\\\n\\\\n\\\\n\\\\nGetting Started\\\\nModules\\\\nUse Cases\\\\nReference Docs\\\\nLangChain Ecosystem\\\\nAdditional Resources\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\nWelcome to LangChain#\\\\nLangChain is a framework for developing applications powered by language models. We believe that the most powerful and differentiated applications will not only call out to a language model via an API, but will also:\\\\n\\\\nBe data-aware: connect a language model to other sources of data\\\\nBe agentic: allow a language model to interact with its environment\\\\n\\\\nThe LangChain framework is designed with the above principles in mind.\\\\nThis is the Python specific portion of the documentation. For a purely conceptual guide to LangChain, see here. For the JavaScript documentation, see here.\\\\n\\\\nGetting Started#\\\\nCheckout the below guide for a walkthrough of how to get started using LangChain to create an Language Model application.\\\\n\\\\nGetting Started Documentation\\\\n\\\\n\\\\n\\\\n\\\\n\\\\nModules#\\\\nThere are several main modules that LangChain provides support for.\\\\nFor each module we provide some examples to get started, how-to guides, reference docs, and conceptual guides.\\\\nThese modules are, in increasing order of complexity:\\\\n\\\\nModels: The various model types and model integrations LangChain supports.\\\\nPrompts: This includes prompt management, prompt optimization, and prompt serialization.\\\\nMemory: Memory is the concept of persisting state between calls of a chain/agent. LangChain provides a standard interface for memory, a collection of memory implementations, and examples of chains/agents that use memory.\\\\nIndexes: Language models are often more powerful when combined with your own text data - this module covers best practices for doing exactly that.\\\\nChains: Chains go beyond just a single LLM call, and are sequences of calls (whether to an LLM or a different utility). 
LangChain provides a standard interface for chains, lots of integrations with other tools, and end-to-end chains for common applications.\\\\nAgents: Agents involve an LLM making decisions about which Actions to take, taking that Action, seeing an Observation, and repeating that until done. LangChain provides a standard interface for agents, a selection of agents to choose from, and examples of end to end agents.\\\\n\\\\n\\\\n\\\\n\\\\n\\\\nUse Cases#\\\\nThe above modules can be used in a variety of ways. LangChain also provides guidance and assistance in this. Below are some of the common use cases LangChain supports.\\\\n\\\\nPersonal Assistants: The main LangChain use case. Personal assistants need to take actions, remember interactions, and have knowledge about your data.\\\\nQuestion Answering: The second big LangChain use case. Answering questions over specific documents, only utilizing the information in those documents to construct an answer.\\\\nChatbots: Since language models are good at producing text, that makes them ideal for creating chatbots.\\\\nQuerying Tabular Data: If you want to understand how to use LLMs to query data that is stored in a tabular format (csvs, SQL, dataframes, etc) you should read this page.\\\\nInteracting with APIs: Enabling LLMs to interact with APIs is extremely powerful in order to give them more up-to-date information and allow them to take actions.\\\\nExtraction: Extract structured information from text.\\\\nSummarization: Summarizing longer documents into shorter, more condensed chunks of information. A type of Data Augmented Generation.\\\\nEvaluation: Generative models are notoriously hard to evaluate with traditional metrics. One new way of evaluating them is using language models themselves to do the evaluation. LangChain provides some prompts/chains for assisting in this.\\\\n\\\\n\\\\n\\\\n\\\\n\\\\nReference Docs#\\\\nAll of LangChain’s reference documentation, in one place. Full documentation on all methods, classes, installation methods, and integration setups for LangChain.\\\\n\\\\nReference Documentation\\\\n\\\\n\\\\n\\\\n\\\\n\\\\nLangChain Ecosystem#\\\\nGuides for how other companies/products can be used with LangChain\\\\n\\\\nLangChain Ecosystem\\\\n\\\\n\\\\n\\\\n\\\\n\\\\nAdditional Resources#\\\\nAdditional collection of resources we think may be useful as you develop your application!\\\\n\\\\nLangChainHub: The LangChainHub is a place to share and explore other prompts, chains, and agents.\\\\nGlossary: A glossary of all related terms, papers, methods, etc. Whether implemented in LangChain or not!\\\\nGallery: A collection of our favorite projects that use LangChain. Useful for finding inspiration or seeing how things were done in other applications.\\\\nDeployments: A collection of instructions, code snippets, and template repositories for deploying LangChain apps.\\\\nTracing: A guide on using tracing in LangChain to visualize the execution of chains and agents.\\\\nModel Laboratory: Experimenting with different prompts, models, and chains is a big part of developing the best possible application. The ModelLaboratory makes it easy to do so.\\\\nDiscord: Join us on our Discord to discuss all things LangChain!\\\\nProduction Support: As you move your LangChains into production, we’d love to offer more comprehensive support. 
Please fill out this form and we’ll set up a dedicated support Slack channel.\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\nnext\\\\nQuickstart Guide\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n Contents\\\\n \\\\n\\\\n\\\\nGetting Started\\\\nModules\\\\nUse Cases\\\\nReference Docs\\\\nLangChain Ecosystem\\\\nAdditional Resources\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\nBy Harrison Chase\\\\n\\\\n\\\\n\\\\n\\\\n \\\\n © Copyright 2023, Harrison Chase.\\\\n \\\\n\\\\n\\\\n\\\\n\\\\n Last updated on Mar 27, 2023.\\\\n \\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\\\n\\', lookup_str=\\'\\', metadata={\\'source\\': \\'https://python.langchain.com/en/latest/\\', \\'loc\\': \\'https://python.langchain.com/en/latest/\\', \\'lastmod\\': \\'2023-03-27T22:50:49.790324+00:00\\', \\'changefreq\\': \\'daily\\', \\'priority\\': \\'0.9\\'}, lookup_index=0)Add custom scraping rules\\u200bThe SitemapLoader uses beautifulsoup4 for the scraping process, and it scrapes every element on the page by default. The SitemapLoader constructor accepts a custom scraping function. This feature can be helpful to tailor the scraping process to your specific needs; for example, you might want to avoid scraping headers or navigation elements. The following example shows how to develop and use a custom function to avoid navigation and header elements.Import the beautifulsoup4 library and define the custom function.pip install beautifulsoup4from bs4 import BeautifulSoupdef remove_nav_and_header_elements(content: BeautifulSoup) -> str: # Find all \\'nav\\' and \\'header\\' elements in the BeautifulSoup object nav_elements = content.find_all(\"nav\") header_elements = content.find_all(\"header\") # Remove each \\'nav\\' and \\'header\\' element from the BeautifulSoup object for element in nav_elements + header_elements: element.decompose() return str(content.get_text())Add your custom function to the SitemapLoader object.loader = SitemapLoader( \"https://langchain.readthedocs.io/sitemap.xml\", filter_urls=[\"https://python.langchain.com/en/latest/\"], parsing_function=remove_nav_and_header_elements,)Local Sitemap\\u200bThe sitemap loader can also be used to load local files.sitemap_loader = SitemapLoader(web_path=\"example_data/sitemap.xml\", is_local=True)docs = sitemap_loader.load() Fetching pages: 100%|####################################################################################################################################| 3/3 [00:00<00:00, 3.91it/s]PreviousRSTNextSlackFiltering sitemap URLsAdd custom scraping rulesLocal SitemapCommunityDiscordTwitterGitHubPythonJS/TSMoreHomepageBlogCopyright © 2023 LangChain, Inc.\\n\\n\\n\\n', metadata={'source': 'https://python.langchain.com/docs/integrations/document_loaders/sitemap', 'loc': 'https://python.langchain.com/docs/integrations/document_loaders/sitemap', 'changefreq': 'weekly', 'priority': '0.5'})" + "Document(page_content='\\n\\n\\n\\n\\nSitemap | 🦜️🔗 Langchain\\n\\n\\n\\n\\n\\n\\nSkip to main content🦜️🔗 LangChainDocsUse casesIntegrationsAPICommunityChat our docsLangSmithJS/TS DocsSearchCTRLKProvidersAnthropicAWSGoogleMicrosoftOpenAIMoreComponentsLLMsChat modelsDocument loadersacreomAirbyte CDKAirbyte GongAirbyte HubspotAirbyte JSONAirbyte SalesforceAirbyte ShopifyAirbyte StripeAirbyte TypeformAirbyte Zendesk SupportAirtableAlibaba Cloud MaxComputeApify DatasetArcGISArxivAssemblyAI Audio TranscriptsAsync ChromiumAsyncHtmlAWS S3 DirectoryAWS S3 FileAZLyricsAzure Blob Storage ContainerAzure Blob Storage 
+ "Document(page_content='\\n\\n\\n\\n\\nSitemap | 🦜️🔗 Langchain\\n\\n\\n\\n\\n\\n\\nSkip to main content … [docs site navigation truncated] … Document loadersSitemapOn this pageSitemapExtending WebBaseLoader, SitemapLoader loads a sitemap from a given URL, and then scrapes and loads all pages in the sitemap, returning each page as a Document.The scraping is done concurrently, with a reasonable default limit of 2 requests per second. You can raise this limit if you aren\\'t concerned about being a good citizen, if you control the server you are scraping, or if load doesn\\'t matter to you. Note that while a higher limit will speed up the scraping process, it may cause the server to block you. Be careful!pip install nest_asyncio    Requirement already satisfied: nest_asyncio in /Users/tasp/Code/projects/langchain/.venv/lib/python3.10/site-packages (1.5.6)    [notice] A new release of pip available: 22.3.1 -> 23.0.1    [notice] To update, run: pip install --upgrade pip# fixes a bug with asyncio and jupyterimport nest_asyncionest_asyncio.apply()from langchain_community.document_loaders.sitemap import SitemapLoadersitemap_loader = SitemapLoader(web_path=\"https://langchain.readthedocs.io/sitemap.xml\")docs = sitemap_loader.load()You can change the requests_per_second parameter to increase the max concurrent requests, and use requests_kwargs to pass kwargs when sending requests.sitemap_loader.requests_per_second = 2# Optional: avoid `[SSL: CERTIFICATE_VERIFY_FAILED]` issuesitemap_loader.requests_kwargs = {\"verify\": False}docs[0] Document(page_content=\\'… [scraped \"Welcome to LangChain\" page truncated] …\\', lookup_str=\\'\\', metadata={\\'source\\': \\'https://python.langchain.com/en/stable/\\', \\'loc\\': \\'https://python.langchain.com/en/stable/\\', \\'lastmod\\': \\'2023-03-24T19:30:54.647430+00:00\\', \\'changefreq\\': \\'weekly\\', \\'priority\\': \\'1\\'}, lookup_index=0)
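Each loaded page carries the sitemap metadata shown in the output above (`loc`, `lastmod`, `changefreq`, `priority`), which you can also use to post-filter or rank documents after loading. A minimal sketch, not from the original notebook, assuming `docs` is the list returned by `sitemap_loader.load()` above:

```python
# Sketch: rank loaded pages by their sitemap priority and recency.
# Assumes `docs` is the list of Documents returned by sitemap_loader.load()
# and that each Document carries the 'priority', 'lastmod', and 'loc'
# metadata fields shown in the output above.
high_priority_docs = [
    doc for doc in docs
    if float(doc.metadata.get("priority", 0)) >= 0.9
]

# Sort so the most recently modified pages come first.
high_priority_docs.sort(key=lambda doc: doc.metadata.get("lastmod", ""), reverse=True)

for doc in high_priority_docs[:5]:
    print(doc.metadata["loc"], doc.metadata["lastmod"])
```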
Filtering sitemap URLs\u200bSitemaps can be massive files, with thousands of URLs. Often you don\\'t need every single one of them. You can filter the URLs by passing a list of strings or regex patterns to the filter_urls parameter. Only URLs that match one of the patterns will be loaded.loader = SitemapLoader(    \"https://langchain.readthedocs.io/sitemap.xml\",    filter_urls=[\"https://python.langchain.com/en/latest/\"],)documents = loader.load()documents[0] Document(page_content=\\'… [scraped \"Welcome to LangChain\" page truncated] …\\', lookup_str=\\'\\', metadata={\\'source\\': \\'https://python.langchain.com/en/latest/\\', \\'loc\\': \\'https://python.langchain.com/en/latest/\\', \\'lastmod\\': \\'2023-03-27T22:50:49.790324+00:00\\', \\'changefreq\\': \\'daily\\', \\'priority\\': \\'0.9\\'}, lookup_index=0)
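Since `filter_urls` accepts regex patterns as well as plain string prefixes, a pattern can select a whole subtree of a site. A sketch under that assumption; the pattern below is illustrative, not from the original notebook:

```python
from langchain_community.document_loaders.sitemap import SitemapLoader

# Illustrative sketch: filter_urls entries are matched as regular
# expressions, so one pattern can select an entire docs subtree.
# The pattern below is a hypothetical example.
loader = SitemapLoader(
    "https://langchain.readthedocs.io/sitemap.xml",
    filter_urls=[r"https://python\.langchain\.com/en/latest/modules/.*"],
)
documents = loader.load()
print(len(documents))
```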
Add custom scraping rules\u200bThe SitemapLoader uses beautifulsoup4 for the scraping process, and it scrapes every element on the page by default. The SitemapLoader constructor accepts a custom scraping function. This can be helpful for tailoring the scraping process to your specific needs; for example, you might want to avoid scraping headers or navigation elements. The following example shows how to develop and use a custom function that removes navigation and header elements.Import the beautifulsoup4 library and define the custom function.pip install beautifulsoup4from bs4 import BeautifulSoupdef remove_nav_and_header_elements(content: BeautifulSoup) -> str:    # Find all \\'nav\\' and \\'header\\' elements in the BeautifulSoup object    nav_elements = content.find_all(\"nav\")    header_elements = content.find_all(\"header\")    # Remove each \\'nav\\' and \\'header\\' element from the BeautifulSoup object    for element in nav_elements + header_elements:        element.decompose()    return str(content.get_text())Add your custom function to the SitemapLoader object.loader = SitemapLoader(    \"https://langchain.readthedocs.io/sitemap.xml\",    filter_urls=[\"https://python.langchain.com/en/latest/\"],    parsing_function=remove_nav_and_header_elements,)Local Sitemap\u200bThe sitemap loader can also be used to load local files.sitemap_loader = SitemapLoader(web_path=\"example_data/sitemap.xml\", is_local=True)docs = sitemap_loader.load()    Fetching pages: 100%|##########| 3/3 [00:00<00:00, 3.91it/s]… [page footer and navigation truncated] …\\n\\n\\n\\n', metadata={'source': 'https://python.langchain.com/docs/integrations/document_loaders/sitemap', 'loc': 'https://python.langchain.com/docs/integrations/document_loaders/sitemap', 'changefreq': 'weekly', 'priority': '0.5'})"
   ]
  },
  "execution_count": 16,
diff --git a/docs/docs/integrations/document_loaders/dropbox.ipynb b/docs/docs/integrations/document_loaders/dropbox.ipynb
index 83a785cc44..4c73399425 100644
--- a/docs/docs/integrations/document_loaders/dropbox.ipynb
+++ b/docs/docs/integrations/document_loaders/dropbox.ipynb
@@ -56,7 +56,7 @@
  "metadata": {},
  "outputs": [],
  "source": [
- "from langchain.document_loaders import DropboxLoader"
+ "from langchain_community.document_loaders import DropboxLoader"
 ]
 },
 {
diff --git a/docs/docs/integrations/document_loaders/duckdb.ipynb b/docs/docs/integrations/document_loaders/duckdb.ipynb
index 722b40fd8b..416029e1cd 100644
--- a/docs/docs/integrations/document_loaders/duckdb.ipynb
+++ b/docs/docs/integrations/document_loaders/duckdb.ipynb
@@ -30,7 +30,7 @@
 },
 "outputs": [],
 "source": [
- "from langchain.document_loaders import DuckDBLoader"
+ "from langchain_community.document_loaders import DuckDBLoader"
 ]
 },
 {
diff --git a/docs/docs/integrations/document_loaders/email.ipynb b/docs/docs/integrations/document_loaders/email.ipynb
index 09eedd2e75..b2b29b3bc3 100644
--- a/docs/docs/integrations/document_loaders/email.ipynb
+++ b/docs/docs/integrations/document_loaders/email.ipynb
@@ -39,7 +39,7 @@
 },
 "outputs": [],
 "source": [
- "from langchain.document_loaders import UnstructuredEmailLoader"
+ "from langchain_community.document_loaders import UnstructuredEmailLoader"
 ]
 },
 {
@@ -220,7 +220,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
- "from langchain.document_loaders import OutlookMessageLoader"
+ "from langchain_community.document_loaders import OutlookMessageLoader"
 ]
 },
 {
diff --git a/docs/docs/integrations/document_loaders/epub.ipynb b/docs/docs/integrations/document_loaders/epub.ipynb
index 7866017149..0884e55122 100644
--- a/docs/docs/integrations/document_loaders/epub.ipynb
+++ b/docs/docs/integrations/document_loaders/epub.ipynb
@@ -31,7 +31,7 @@
 },
 "outputs": [],
 "source": [
- "from langchain.document_loaders import UnstructuredEPubLoader"
+ "from langchain_community.document_loaders import UnstructuredEPubLoader"
 ]
 },
 {
diff --git a/docs/docs/integrations/document_loaders/etherscan.ipynb b/docs/docs/integrations/document_loaders/etherscan.ipynb
index 6a186f0f69..15fb7021d3 100644
--- a/docs/docs/integrations/document_loaders/etherscan.ipynb
+++ b/docs/docs/integrations/document_loaders/etherscan.ipynb
@@ -87,7 +87,7 @@
 "source": [
 "import os\n",
 "\n",
- "from langchain.document_loaders import EtherscanLoader"
+ "from langchain_community.document_loaders import EtherscanLoader"
 ]
 },
 {
diff --git a/docs/docs/integrations/document_loaders/evernote.ipynb b/docs/docs/integrations/document_loaders/evernote.ipynb
index ff9f1477f0..30798ab70c 100644
--- a/docs/docs/integrations/document_loaders/evernote.ipynb
+++ b/docs/docs/integrations/document_loaders/evernote.ipynb
@@ -51,7 +51,7 @@
 }
 ],
 "source": [
- "from langchain.document_loaders import EverNoteLoader\n",
+ "from langchain_community.document_loaders import EverNoteLoader\n",
 "\n",
 "# By default all notes are combined into a single Document\n",
 "loader = EverNoteLoader(\"example_data/testing.enex\")\n",
diff --git a/docs/docs/integrations/document_loaders/facebook_chat.ipynb b/docs/docs/integrations/document_loaders/facebook_chat.ipynb
index c65acfab91..8ebb5287b9 100644
--- a/docs/docs/integrations/document_loaders/facebook_chat.ipynb
+++ b/docs/docs/integrations/document_loaders/facebook_chat.ipynb
@@ -28,7 +28,7 @@
 },
 "outputs": [],
 "source": [
- "from langchain.document_loaders import FacebookChatLoader"
+ "from langchain_community.document_loaders import FacebookChatLoader"
 ]
 },
 {
diff --git a/docs/docs/integrations/document_loaders/fauna.ipynb b/docs/docs/integrations/document_loaders/fauna.ipynb
index 1c621a2465..5601d54f47 100644
--- a/docs/docs/integrations/document_loaders/fauna.ipynb
+++ b/docs/docs/integrations/document_loaders/fauna.ipynb
@@ -35,7 +35,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
- "from langchain.document_loaders.fauna import FaunaLoader\n",
+ "from langchain_community.document_loaders.fauna import FaunaLoader\n",
 "\n",
 "secret = \"\"\n",
 "query = \"Item.all()\" # Fauna query. Assumes that the collection is called \"Item\"\n",
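All of the hunks above follow the same pattern: the loader classes themselves are unchanged, only their import path moves from `langchain.document_loaders` to `langchain_community.document_loaders`. For downstream code that has to run against both old and new installs, a temporary compatibility shim is one option. A sketch, not part of this patch:

```python
# Compatibility sketch (not part of this patch): prefer the new
# langchain_community import path, falling back to the legacy
# langchain.document_loaders path on older installs.
try:
    from langchain_community.document_loaders import EverNoteLoader
except ImportError:
    from langchain.document_loaders import EverNoteLoader

# Usage is identical under either import path.
loader = EverNoteLoader("example_data/testing.enex")
docs = loader.load()
```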
diff --git a/docs/docs/integrations/document_loaders/figma.ipynb b/docs/docs/integrations/document_loaders/figma.ipynb
index 1b6db34f2a..3e078ce984 100644
--- a/docs/docs/integrations/document_loaders/figma.ipynb
+++ b/docs/docs/integrations/document_loaders/figma.ipynb
@@ -23,14 +23,14 @@
 "source": [
 "import os\n",
 "\n",
- "from langchain.document_loaders.figma import FigmaFileLoader\n",
 "from langchain.indexes import VectorstoreIndexCreator\n",
 "from langchain.prompts.chat import (\n",
 " ChatPromptTemplate,\n",
 " HumanMessagePromptTemplate,\n",
 " SystemMessagePromptTemplate,\n",
 ")\n",
- "from langchain_community.chat_models import ChatOpenAI"
+ "from langchain_community.chat_models import ChatOpenAI\n",
+ "from langchain_community.document_loaders.figma import FigmaFileLoader"
 ]
 },
 {
diff --git a/docs/docs/integrations/document_loaders/geopandas.ipynb b/docs/docs/integrations/document_loaders/geopandas.ipynb
index af147ebcb8..ed46dd0276 100644
--- a/docs/docs/integrations/document_loaders/geopandas.ipynb
+++ b/docs/docs/integrations/document_loaders/geopandas.ipynb
@@ -39,7 +39,7 @@
 "\n",
 "import geopandas as gpd\n",
 "import pandas as pd\n",
- "from langchain.document_loaders import OpenCityDataLoader"
+ "from langchain_community.document_loaders import OpenCityDataLoader"
 ]
 },
 {
@@ -137,7 +137,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
- "from langchain.document_loaders import GeoDataFrameLoader\n",
+ "from langchain_community.document_loaders import GeoDataFrameLoader\n",
 "\n",
 "loader = GeoDataFrameLoader(data_frame=gdf, page_content_column=\"geometry\")\n",
 "docs = loader.load()"
diff --git a/docs/docs/integrations/document_loaders/git.ipynb b/docs/docs/integrations/document_loaders/git.ipynb
index 32a233c767..b520484c1b 100644
--- a/docs/docs/integrations/document_loaders/git.ipynb
+++ b/docs/docs/integrations/document_loaders/git.ipynb
@@ -53,7 +53,7 @@
 },
 "outputs": [],
 "source": [
- "from langchain.document_loaders import GitLoader"
+ "from langchain_community.document_loaders import GitLoader"
 ]
 },
 {
@@ -113,7 +113,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
- "from langchain.document_loaders import GitLoader"
+ "from langchain_community.document_loaders import GitLoader"
 ]
 },
 {
@@ -171,7 +171,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
- "from langchain.document_loaders import GitLoader\n",
+ "from langchain_community.document_loaders import GitLoader\n",
 "\n",
 "# e.g. loading only python files\n",
 "loader = GitLoader(\n",
diff --git a/docs/docs/integrations/document_loaders/gitbook.ipynb b/docs/docs/integrations/document_loaders/gitbook.ipynb
index 390e2b3533..8f9ec807df 100644
--- a/docs/docs/integrations/document_loaders/gitbook.ipynb
+++ b/docs/docs/integrations/document_loaders/gitbook.ipynb
@@ -19,7 +19,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
- "from langchain.document_loaders import GitbookLoader"
+ "from langchain_community.document_loaders import GitbookLoader"
 ]
 },
 {
diff --git a/docs/docs/integrations/document_loaders/github.ipynb b/docs/docs/integrations/document_loaders/github.ipynb
index 3d194bb803..9ee4627cbf 100644
--- a/docs/docs/integrations/document_loaders/github.ipynb
+++ b/docs/docs/integrations/document_loaders/github.ipynb
@@ -52,7 +52,7 @@
 },
 "outputs": [],
 "source": [
- "from langchain.document_loaders import GitHubIssuesLoader"
+ "from langchain_community.document_loaders import GitHubIssuesLoader"
 ]
 },
 {
diff --git a/docs/docs/integrations/document_loaders/google_bigquery.ipynb b/docs/docs/integrations/document_loaders/google_bigquery.ipynb
index 4b79e879fd..1408cba708 100644
--- a/docs/docs/integrations/document_loaders/google_bigquery.ipynb
+++ b/docs/docs/integrations/document_loaders/google_bigquery.ipynb
@@ -31,7 +31,7 @@
 },
 "outputs": [],
 "source": [
- "from langchain.document_loaders import BigQueryLoader"
+ "from langchain_community.document_loaders import BigQueryLoader"
 ]
 },
 {
diff --git a/docs/docs/integrations/document_loaders/google_cloud_storage_directory.ipynb b/docs/docs/integrations/document_loaders/google_cloud_storage_directory.ipynb
index bb468d98a4..871f3589bc 100644
--- a/docs/docs/integrations/document_loaders/google_cloud_storage_directory.ipynb
+++ b/docs/docs/integrations/document_loaders/google_cloud_storage_directory.ipynb
@@ -31,7 +31,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
- "from langchain.document_loaders import GCSDirectoryLoader"
+ "from langchain_community.document_loaders import GCSDirectoryLoader"
 ]
 },
 {
diff --git a/docs/docs/integrations/document_loaders/google_cloud_storage_file.ipynb b/docs/docs/integrations/document_loaders/google_cloud_storage_file.ipynb
index 3a87256cc8..e062f2badc 100644
--- a/docs/docs/integrations/document_loaders/google_cloud_storage_file.ipynb
+++ b/docs/docs/integrations/document_loaders/google_cloud_storage_file.ipynb
@@ -31,7 +31,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
- "from langchain.document_loaders import GCSFileLoader"
+ "from langchain_community.document_loaders import GCSFileLoader"
 ]
 },
 {
@@ -88,7 +88,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
- "from langchain.document_loaders import PyPDFLoader\n",
+ "from langchain_community.document_loaders import PyPDFLoader\n",
 "\n",
 "\n",
 "def load_pdf(file_path):\n",
diff --git a/docs/docs/integrations/document_loaders/google_drive.ipynb b/docs/docs/integrations/document_loaders/google_drive.ipynb
index 1bd843830c..eff942b62d 100644
--- a/docs/docs/integrations/document_loaders/google_drive.ipynb
+++ b/docs/docs/integrations/document_loaders/google_drive.ipynb
@@ -48,7 +48,7 @@
 },
 "outputs": [],
 "source": [
- "from langchain.document_loaders import GoogleDriveLoader"
+ "from langchain_community.document_loaders import GoogleDriveLoader"
 ]
 },
 {
@@ -119,7 +119,10 @@
 "metadata": {},
 "outputs": [],
 "source": [
- "from langchain.document_loaders import GoogleDriveLoader, UnstructuredFileIOLoader"
+ "from langchain_community.document_loaders import (\n",
+ "    GoogleDriveLoader,\n",
+ "    UnstructuredFileIOLoader,\n",
+ ")"
 ]
 },
 {
@@ -215,7 +218,7 @@
 "source": [
 "## Extended usage\n",
 "An external component can manage the complexity of Google Drive: `langchain-googledrive`.\n",
- "It's compatible with the `langchain.document_loaders.GoogleDriveLoader` and can be used\n",
+ "It's compatible with the `langchain_community.document_loaders.GoogleDriveLoader` and can be used\n",
 "in its place.\n",
 "\n",
 "To be compatible with containers, the authentication uses an environment variable `GOOGLE_ACCOUNT_FILE` pointing to the credential file (for user or service)."
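The "Extended usage" note above is terse; concretely, the drop-in replacement would look something like the sketch below. The `langchain_googledrive` import path and the folder ID are assumptions based on that note, not part of this patch:

```python
import os

# Sketch based on the "Extended usage" note above; the import path and
# folder ID are assumptions, not part of this patch.
# Point GOOGLE_ACCOUNT_FILE at a user or service-account credential file
# so the loader also works inside containers.
os.environ["GOOGLE_ACCOUNT_FILE"] = "/path/to/credentials.json"

# Assumed drop-in replacement for
# langchain_community.document_loaders.GoogleDriveLoader.
from langchain_googledrive.document_loaders import GoogleDriveLoader

loader = GoogleDriveLoader(folder_id="<your-folder-id>")
docs = loader.load()
```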
diff --git a/docs/docs/integrations/document_loaders/google_speech_to_text.ipynb b/docs/docs/integrations/document_loaders/google_speech_to_text.ipynb
index 34e36ed51d..5e22f0b680 100644
--- a/docs/docs/integrations/document_loaders/google_speech_to_text.ipynb
+++ b/docs/docs/integrations/document_loaders/google_speech_to_text.ipynb
@@ -52,7 +52,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
- "from langchain.document_loaders import GoogleSpeechToTextLoader\n",
+ "from langchain_community.document_loaders import GoogleSpeechToTextLoader\n",
 "\n",
 "project_id = \"\"\n",
 "file_path = \"gs://cloud-samples-data/speech/audio.flac\"\n",
@@ -152,7 +152,7 @@
 " RecognitionConfig,\n",
 " RecognitionFeatures,\n",
 ")\n",
- "from langchain.document_loaders import GoogleSpeechToTextLoader\n",
+ "from langchain_community.document_loaders import GoogleSpeechToTextLoader\n",
 "\n",
 "project_id = \"\"\n",
 "location = \"global\"\n",
diff --git a/docs/docs/integrations/document_loaders/grobid.ipynb b/docs/docs/integrations/document_loaders/grobid.ipynb
index 31b9378d7c..d385f40875 100644
--- a/docs/docs/integrations/document_loaders/grobid.ipynb
+++ b/docs/docs/integrations/document_loaders/grobid.ipynb
@@ -36,8 +36,8 @@
 "metadata": {},
 "outputs": [],
 "source": [
- "from langchain.document_loaders.generic import GenericLoader\n",
- "from langchain.document_loaders.parsers import GrobidParser"
+ "from langchain_community.document_loaders.generic import GenericLoader\n",
+ "from langchain_community.document_loaders.parsers import GrobidParser"
 ]
 },
 {
diff --git a/docs/docs/integrations/document_loaders/gutenberg.ipynb b/docs/docs/integrations/document_loaders/gutenberg.ipynb
index 6cf34ed21e..10505f2fc7 100644
--- a/docs/docs/integrations/document_loaders/gutenberg.ipynb
+++ b/docs/docs/integrations/document_loaders/gutenberg.ipynb
@@ -21,7 +21,7 @@
 },
 "outputs": [],
 "source": [
- "from langchain.document_loaders import GutenbergLoader"
+ "from langchain_community.document_loaders import GutenbergLoader"
 ]
 },
 {
diff --git a/docs/docs/integrations/document_loaders/hacker_news.ipynb b/docs/docs/integrations/document_loaders/hacker_news.ipynb
index 578d2ae502..c9380d7192 100644
--- a/docs/docs/integrations/document_loaders/hacker_news.ipynb
+++ b/docs/docs/integrations/document_loaders/hacker_news.ipynb
@@ -21,7 +21,7 @@
 },
 "outputs": [],
 "source": [
- "from langchain.document_loaders import HNLoader"
+ "from langchain_community.document_loaders import HNLoader"
 ]
 },
 {
diff --git a/docs/docs/integrations/document_loaders/huawei_obs_directory.ipynb b/docs/docs/integrations/document_loaders/huawei_obs_directory.ipynb
index b6cf8c544b..e05266cac7 100644
--- a/docs/docs/integrations/document_loaders/huawei_obs_directory.ipynb
+++ b/docs/docs/integrations/document_loaders/huawei_obs_directory.ipynb
@@ -27,7 +27,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
- "from langchain.document_loaders import OBSDirectoryLoader"
+ "from langchain_community.document_loaders import OBSDirectoryLoader"
 ]
 },
 {
diff --git a/docs/docs/integrations/document_loaders/huawei_obs_file.ipynb b/docs/docs/integrations/document_loaders/huawei_obs_file.ipynb
index bed63582e6..7cd89eec89 100644
--- a/docs/docs/integrations/document_loaders/huawei_obs_file.ipynb
+++ b/docs/docs/integrations/document_loaders/huawei_obs_file.ipynb
@@ -27,7 +27,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
- "from langchain.document_loaders.obs_file import OBSFileLoader"
+ "from langchain_community.document_loaders.obs_file import OBSFileLoader"
 ]
 },
 {
diff --git a/docs/docs/integrations/document_loaders/hugging_face_dataset.ipynb b/docs/docs/integrations/document_loaders/hugging_face_dataset.ipynb
index 4decf5a3fb..c6dfa964d0 100644
--- a/docs/docs/integrations/document_loaders/hugging_face_dataset.ipynb
+++ b/docs/docs/integrations/document_loaders/hugging_face_dataset.ipynb
@@ -21,7 +21,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
- "from langchain.document_loaders import HuggingFaceDatasetLoader"
+ "from langchain_community.document_loaders import HuggingFaceDatasetLoader"
 ]
 },
 {
@@ -99,8 +99,10 @@
 "metadata": {},
 "outputs": [],
 "source": [
- "from langchain.document_loaders.hugging_face_dataset import HuggingFaceDatasetLoader\n",
- "from langchain.indexes import VectorstoreIndexCreator"
+ "from langchain.indexes import VectorstoreIndexCreator\n",
+ "from langchain_community.document_loaders.hugging_face_dataset import (\n",
+ "    HuggingFaceDatasetLoader,\n",
+ ")"
 ]
 },
 {
diff --git a/docs/docs/integrations/document_loaders/ifixit.ipynb b/docs/docs/integrations/document_loaders/ifixit.ipynb
index 01f0985625..41b5ada26d 100644
--- a/docs/docs/integrations/document_loaders/ifixit.ipynb
+++ b/docs/docs/integrations/document_loaders/ifixit.ipynb
@@ -17,7 +17,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
- "from langchain.document_loaders import IFixitLoader"
+ "from langchain_community.document_loaders import IFixitLoader"
 ]
 },
 {
diff --git a/docs/docs/integrations/document_loaders/image.ipynb b/docs/docs/integrations/document_loaders/image.ipynb
index e09f2fe7e3..986a8b24b9 100644
--- a/docs/docs/integrations/document_loaders/image.ipynb
+++ b/docs/docs/integrations/document_loaders/image.ipynb
@@ -39,7 +39,7 @@
 },
 "outputs": [],
 "source": [
- "from langchain.document_loaders.image import UnstructuredImageLoader"
+ "from langchain_community.document_loaders.image import UnstructuredImageLoader"
 ]
 },
 {
diff --git a/docs/docs/integrations/document_loaders/image_captions.ipynb b/docs/docs/integrations/document_loaders/image_captions.ipynb
index fa11c91afa..b0221108b9 100644
--- a/docs/docs/integrations/document_loaders/image_captions.ipynb
+++ b/docs/docs/integrations/document_loaders/image_captions.ipynb
@@ -35,7 +35,7 @@
 },
 "outputs": [],
 "source": [
- "from langchain.document_loaders import ImageCaptionLoader"
+ "from langchain_community.document_loaders import ImageCaptionLoader"
 ]
 },
 {
diff --git a/docs/docs/integrations/document_loaders/imsdb.ipynb b/docs/docs/integrations/document_loaders/imsdb.ipynb
index de6866687d..fb0c499ae9 100644
--- a/docs/docs/integrations/document_loaders/imsdb.ipynb
+++ b/docs/docs/integrations/document_loaders/imsdb.ipynb
@@ -21,7 +21,7 @@
 },
 "outputs": [],
 "source": [
- "from langchain.document_loaders import IMSDbLoader"
+ "from langchain_community.document_loaders import IMSDbLoader"
 ]
 },
 {
7bd4b30165..4f3ba7233f 100644 --- a/docs/docs/integrations/document_loaders/iugu.ipynb +++ b/docs/docs/integrations/document_loaders/iugu.ipynb @@ -18,8 +18,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders import IuguLoader\n", - "from langchain.indexes import VectorstoreIndexCreator" + "from langchain.indexes import VectorstoreIndexCreator\n", + "from langchain_community.document_loaders import IuguLoader" ] }, { diff --git a/docs/docs/integrations/document_loaders/joplin.ipynb b/docs/docs/integrations/document_loaders/joplin.ipynb index cd2d7725f5..5779ae9333 100644 --- a/docs/docs/integrations/document_loaders/joplin.ipynb +++ b/docs/docs/integrations/document_loaders/joplin.ipynb @@ -33,7 +33,7 @@ }, "outputs": [], "source": [ - "from langchain.document_loaders import JoplinLoader" + "from langchain_community.document_loaders import JoplinLoader" ] }, { diff --git a/docs/docs/integrations/document_loaders/jupyter_notebook.ipynb b/docs/docs/integrations/document_loaders/jupyter_notebook.ipynb index ee2b60e1a9..e8533803a7 100644 --- a/docs/docs/integrations/document_loaders/jupyter_notebook.ipynb +++ b/docs/docs/integrations/document_loaders/jupyter_notebook.ipynb @@ -19,7 +19,7 @@ }, "outputs": [], "source": [ - "from langchain.document_loaders import NotebookLoader" + "from langchain_community.document_loaders import NotebookLoader" ] }, { @@ -62,7 +62,7 @@ { "data": { "text/plain": [ - "[Document(page_content='\\'markdown\\' cell: \\'[\\'# Notebook\\', \\'\\', \\'This notebook covers how to load data from an .html notebook into a format suitable by LangChain.\\']\\'\\n\\n \\'code\\' cell: \\'[\\'from langchain.document_loaders import NotebookLoader\\']\\'\\n\\n \\'code\\' cell: \\'[\\'loader = NotebookLoader(\"example_data/notebook.html\")\\']\\'\\n\\n \\'markdown\\' cell: \\'[\\'`NotebookLoader.load()` loads the `.html` notebook file into a `Document` object.\\', \\'\\', \\'**Parameters**:\\', \\'\\', \\'* `include_outputs` (bool): whether to include cell outputs in the resulting document (default is False).\\', \\'* `max_output_length` (int): the maximum number of characters to include from each cell output (default is 10).\\', \\'* `remove_newline` (bool): whether to remove newline characters from the cell sources and outputs (default is False).\\', \\'* `traceback` (bool): whether to include full traceback (default is False).\\']\\'\\n\\n \\'code\\' cell: \\'[\\'loader.load(include_outputs=True, max_output_length=20, remove_newline=True)\\']\\'\\n\\n', metadata={'source': 'example_data/notebook.html'})]" + "[Document(page_content='\\'markdown\\' cell: \\'[\\'# Notebook\\', \\'\\', \\'This notebook covers how to load data from an .html notebook into a format suitable by LangChain.\\']\\'\\n\\n \\'code\\' cell: \\'[\\'from langchain_community.document_loaders import NotebookLoader\\']\\'\\n\\n \\'code\\' cell: \\'[\\'loader = NotebookLoader(\"example_data/notebook.html\")\\']\\'\\n\\n \\'markdown\\' cell: \\'[\\'`NotebookLoader.load()` loads the `.html` notebook file into a `Document` object.\\', \\'\\', \\'**Parameters**:\\', \\'\\', \\'* `include_outputs` (bool): whether to include cell outputs in the resulting document (default is False).\\', \\'* `max_output_length` (int): the maximum number of characters to include from each cell output (default is 10).\\', \\'* `remove_newline` (bool): whether to remove newline characters from the cell sources and outputs (default is False).\\', \\'* `traceback` (bool): whether to include full traceback 
(default is False).\\']\\'\\n\\n \\'code\\' cell: \\'[\\'loader.load(include_outputs=True, max_output_length=20, remove_newline=True)\\']\\'\\n\\n', metadata={'source': 'example_data/notebook.html'})]" ] }, "execution_count": 3, diff --git a/docs/docs/integrations/document_loaders/lakefs.ipynb b/docs/docs/integrations/document_loaders/lakefs.ipynb index 4db56ae0da..c3bb31a7ee 100644 --- a/docs/docs/integrations/document_loaders/lakefs.ipynb +++ b/docs/docs/integrations/document_loaders/lakefs.ipynb @@ -28,7 +28,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders import LakeFSLoader" + "from langchain_community.document_loaders import LakeFSLoader" ] }, { diff --git a/docs/docs/integrations/document_loaders/larksuite.ipynb b/docs/docs/integrations/document_loaders/larksuite.ipynb index 1ecdc762d8..2f5f07ed96 100644 --- a/docs/docs/integrations/document_loaders/larksuite.ipynb +++ b/docs/docs/integrations/document_loaders/larksuite.ipynb @@ -30,7 +30,7 @@ "source": [ "from getpass import getpass\n", "\n", - "from langchain.document_loaders.larksuite import LarkSuiteDocLoader\n", + "from langchain_community.document_loaders.larksuite import LarkSuiteDocLoader\n", "\n", "DOMAIN = input(\"larksuite domain\")\n", "ACCESS_TOKEN = getpass(\"larksuite tenant_access_token or user_access_token\")\n", diff --git a/docs/docs/integrations/document_loaders/mastodon.ipynb b/docs/docs/integrations/document_loaders/mastodon.ipynb index bcf5da4d28..7f9bd101de 100644 --- a/docs/docs/integrations/document_loaders/mastodon.ipynb +++ b/docs/docs/integrations/document_loaders/mastodon.ipynb @@ -23,7 +23,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders import MastodonTootsLoader" + "from langchain_community.document_loaders import MastodonTootsLoader" ] }, { diff --git a/docs/docs/integrations/document_loaders/mediawikidump.ipynb b/docs/docs/integrations/document_loaders/mediawikidump.ipynb index 917bac95df..c4b445ba35 100644 --- a/docs/docs/integrations/document_loaders/mediawikidump.ipynb +++ b/docs/docs/integrations/document_loaders/mediawikidump.ipynb @@ -38,7 +38,7 @@ }, "outputs": [], "source": [ - "from langchain.document_loaders import MWDumpLoader" + "from langchain_community.document_loaders import MWDumpLoader" ] }, { diff --git a/docs/docs/integrations/document_loaders/merge_doc.ipynb b/docs/docs/integrations/document_loaders/merge_doc.ipynb index 2cf0d55d72..d66250b434 100644 --- a/docs/docs/integrations/document_loaders/merge_doc.ipynb +++ b/docs/docs/integrations/document_loaders/merge_doc.ipynb @@ -17,7 +17,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders import WebBaseLoader\n", + "from langchain_community.document_loaders import WebBaseLoader\n", "\n", "loader_web = WebBaseLoader(\n", " \"https://github.com/basecamp/handbook/blob/master/37signals-is-you.md\"\n", @@ -31,7 +31,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders import PyPDFLoader\n", + "from langchain_community.document_loaders import PyPDFLoader\n", "\n", "loader_pdf = PyPDFLoader(\"../MachineLearning-Lecture01.pdf\")" ] @@ -43,7 +43,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders.merge import MergedDataLoader\n", + "from langchain_community.document_loaders.merge import MergedDataLoader\n", "\n", "loader_all = MergedDataLoader(loaders=[loader_web, loader_pdf])" ] diff --git a/docs/docs/integrations/document_loaders/mhtml.ipynb 
b/docs/docs/integrations/document_loaders/mhtml.ipynb index afad82a051..0bd7f53d3c 100644 --- a/docs/docs/integrations/document_loaders/mhtml.ipynb +++ b/docs/docs/integrations/document_loaders/mhtml.ipynb @@ -17,7 +17,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders import MHTMLLoader" + "from langchain_community.document_loaders import MHTMLLoader" ] }, { diff --git a/docs/docs/integrations/document_loaders/microsoft_excel.ipynb b/docs/docs/integrations/document_loaders/microsoft_excel.ipynb index 7be5044bd0..9a731cc10b 100644 --- a/docs/docs/integrations/document_loaders/microsoft_excel.ipynb +++ b/docs/docs/integrations/document_loaders/microsoft_excel.ipynb @@ -17,7 +17,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders import UnstructuredExcelLoader" + "from langchain_community.document_loaders import UnstructuredExcelLoader" ] }, { diff --git a/docs/docs/integrations/document_loaders/microsoft_onedrive.ipynb b/docs/docs/integrations/document_loaders/microsoft_onedrive.ipynb index 3a18c047a2..b42c141f8f 100644 --- a/docs/docs/integrations/document_loaders/microsoft_onedrive.ipynb +++ b/docs/docs/integrations/document_loaders/microsoft_onedrive.ipynb @@ -38,7 +38,7 @@ "\n", "\n", "```python\n", - "from langchain.document_loaders.onedrive import OneDriveLoader\n", + "from langchain_community.document_loaders.onedrive import OneDriveLoader\n", "\n", "loader = OneDriveLoader(drive_id=\"YOUR DRIVE ID\")\n", "```\n", @@ -46,7 +46,7 @@ "Once authentication is complete, the loader will store a token (`o365_token.txt`) in the `~/.credentials/` folder. This token can be used later to authenticate without the copy/paste steps explained earlier. To use this token for authentication, you need to change the `auth_with_token` parameter to True in the instantiation of the loader.\n", "\n", "```python\n", - "from langchain.document_loaders.onedrive import OneDriveLoader\n", + "from langchain_community.document_loaders.onedrive import OneDriveLoader\n", "\n", "loader = OneDriveLoader(drive_id=\"YOUR DRIVE ID\", auth_with_token=True)\n", "```\n", @@ -59,7 +59,7 @@ "\n", "\n", "```python\n", - "from langchain.document_loaders.onedrive import OneDriveLoader\n", + "from langchain_community.document_loaders.onedrive import OneDriveLoader\n", "\n", "loader = OneDriveLoader(drive_id=\"YOUR DRIVE ID\", folder_path=\"Documents/clients\", auth_with_token=True)\n", "documents = loader.load()\n", @@ -73,7 +73,7 @@ "\n", "\n", "```python\n", - "from langchain.document_loaders.onedrive import OneDriveLoader\n", + "from langchain_community.document_loaders.onedrive import OneDriveLoader\n", "\n", "loader = OneDriveLoader(drive_id=\"YOUR DRIVE ID\", object_ids=[\"ID_1\", \"ID_2\"], auth_with_token=True)\n", "documents = loader.load()\n",
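Each hunk above changes only the import path; the class itself now lives in `langchain_community`. On the 0.1 release line the legacy `langchain.document_loaders` paths are expected to remain as thin re-exports of the community classes, so the old imports keep working. A minimal sanity check of that assumption (not part of this patch):

```python
# Minimal sketch, assuming langchain>=0.1 with langchain-community installed,
# where the legacy module simply re-exports the community implementation.
from langchain.document_loaders.onedrive import OneDriveLoader as LegacyOneDriveLoader
from langchain_community.document_loaders.onedrive import OneDriveLoader

# If the shim re-exports, both names point at the same class object.
assert LegacyOneDriveLoader is OneDriveLoader
```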
diff --git a/docs/docs/integrations/document_loaders/microsoft_onenote.ipynb b/docs/docs/integrations/document_loaders/microsoft_onenote.ipynb index c5dcfd037b..a6a194fa19 100644 --- a/docs/docs/integrations/document_loaders/microsoft_onenote.ipynb +++ b/docs/docs/integrations/document_loaders/microsoft_onenote.ipynb @@ -35,7 +35,7 @@ "\n", "\n", "```python\n", - "from langchain.document_loaders.onenote import OneNoteLoader\n", + "from langchain_community.document_loaders.onenote import OneNoteLoader\n", "\n", "loader = OneNoteLoader(notebook_name=\"NOTEBOOK NAME\", section_name=\"SECTION NAME\", page_title=\"PAGE TITLE\")\n", "```\n", @@ -43,7 +43,7 @@ "Once authentication is complete, the loader will store a token (`onenote_graph_token.txt`) in the `~/.credentials/` folder. This token can be used later to authenticate without the copy/paste steps explained earlier. To use this token for authentication, you need to change the `auth_with_token` parameter to True in the instantiation of the loader.\n", "\n", "```python\n", - "from langchain.document_loaders.onenote import OneNoteLoader\n", + "from langchain_community.document_loaders.onenote import OneNoteLoader\n", "\n", "loader = OneNoteLoader(notebook_name=\"NOTEBOOK NAME\", section_name=\"SECTION NAME\", page_title=\"PAGE TITLE\", auth_with_token=True)\n", "```\n", @@ -51,7 +51,7 @@ "Alternatively, you can pass the token directly to the loader. This is useful when you want to authenticate with a token that was generated by another application. For instance, you can use the [Microsoft Graph Explorer](https://developer.microsoft.com/en-us/graph/graph-explorer) to generate a token and then pass it to the loader.\n", "\n", "```python\n", - "from langchain.document_loaders.onenote import OneNoteLoader\n", + "from langchain_community.document_loaders.onenote import OneNoteLoader\n", "\n", "loader = OneNoteLoader(notebook_name=\"NOTEBOOK NAME\", section_name=\"SECTION NAME\", page_title=\"PAGE TITLE\", access_token=\"TOKEN\")\n", "```\n", @@ -64,7 +64,7 @@ "\n", "\n", "```python\n", - "from langchain.document_loaders.onenote import OneNoteLoader\n", + "from langchain_community.document_loaders.onenote import OneNoteLoader\n", "\n", "loader = OneNoteLoader(section_name=\"Recipes\", auth_with_token=True)\n", "documents = loader.load()\n", @@ -78,7 +78,7 @@ "\n", "\n", "```python\n", - "from langchain.document_loaders.onenote import OneNoteLoader\n", + "from langchain_community.document_loaders.onenote import OneNoteLoader\n", "\n", "loader = OneNoteLoader(object_ids=[\"ID_1\", \"ID_2\"], auth_with_token=True)\n", "documents = loader.load()\n", diff --git a/docs/docs/integrations/document_loaders/microsoft_powerpoint.ipynb b/docs/docs/integrations/document_loaders/microsoft_powerpoint.ipynb index 380e758cf7..b8d6c6d05c 100644 --- a/docs/docs/integrations/document_loaders/microsoft_powerpoint.ipynb +++ b/docs/docs/integrations/document_loaders/microsoft_powerpoint.ipynb @@ -21,7 +21,7 @@ }, "outputs": [], "source": [ - "from langchain.document_loaders import UnstructuredPowerPointLoader" + "from langchain_community.document_loaders import UnstructuredPowerPointLoader" ] }, { diff --git a/docs/docs/integrations/document_loaders/microsoft_sharepoint.ipynb b/docs/docs/integrations/document_loaders/microsoft_sharepoint.ipynb index 434a03b93b..a525008e38 100644 --- a/docs/docs/integrations/document_loaders/microsoft_sharepoint.ipynb +++ b/docs/docs/integrations/document_loaders/microsoft_sharepoint.ipynb @@ -39,7 +39,7 @@ "This loader uses an authentication called [*on behalf of a user*](https://learn.microsoft.com/en-us/graph/auth-v2-user?context=graph%2Fapi%2F1.0&view=graph-rest-1.0). It is a two-step authentication with user consent. When you instantiate the loader, it will print a URL. The user must visit this URL and give consent to the application on the required permissions. Then the user must copy the resulting page URL and paste it back into the console. 
The method will then return True if the login attempt was successful.\n", "\n", "```python\n", - "from langchain.document_loaders.sharepoint import SharePointLoader\n", + "from langchain_community.document_loaders.sharepoint import SharePointLoader\n", "\n", "loader = SharePointLoader(document_library_id=\"YOUR DOCUMENT LIBRARY ID\")\n", "```\n", @@ -47,7 +47,7 @@ "Once authentication is complete, the loader will store a token (`o365_token.txt`) in the `~/.credentials/` folder. This token can be used later to authenticate without the copy/paste steps explained earlier. To use this token for authentication, you need to change the `auth_with_token` parameter to True in the instantiation of the loader.\n", "\n", "```python\n", - "from langchain.document_loaders.sharepoint import SharePointLoader\n", + "from langchain_community.document_loaders.sharepoint import SharePointLoader\n", "\n", "loader = SharePointLoader(document_library_id=\"YOUR DOCUMENT LIBRARY ID\", auth_with_token=True)\n", "```\n", @@ -59,7 +59,7 @@ "`SharePointLoader` can load documents from a specific folder within your Document Library. For instance, you may want to load all documents that are stored in the `Documents/marketing` folder within your Document Library.\n", "\n", "```python\n", - "from langchain.document_loaders.sharepoint import SharePointLoader\n", + "from langchain_community.document_loaders.sharepoint import SharePointLoader\n", "\n", "loader = SharePointLoader(document_library_id=\"YOUR DOCUMENT LIBRARY ID\", folder_path=\"Documents/marketing\", auth_with_token=True)\n", "documents = loader.load()\n", @@ -72,7 +72,7 @@ "For instance, to retrieve information about all objects that are stored in the `data/finance/` folder, you need to make a request to: `https://graph.microsoft.com/v1.0/drives//root:/data/finance:/children`. 
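As a rough sketch, that request can be issued with the `requests` package. The `DRIVE_ID` and `ACCESS_TOKEN` placeholders below are illustrative (the original text leaves the drive ID blank); the response's `value` array carries the object metadata:

```python
import requests

# Illustrative placeholders: substitute your document library's drive ID
# and a valid Microsoft Graph access token.
DRIVE_ID = "YOUR DRIVE ID"
ACCESS_TOKEN = "YOUR ACCESS TOKEN"

response = requests.get(
    f"https://graph.microsoft.com/v1.0/drives/{DRIVE_ID}/root:/data/finance:/children",
    headers={"Authorization": f"Bearer {ACCESS_TOKEN}"},
)
response.raise_for_status()

# Each entry in "value" describes one object; its "id" field is what
# SharePointLoader accepts in `object_ids`.
object_ids = [item["id"] for item in response.json()["value"]]
```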
Once you have the list of IDs that you are interested in, then you can instantiate the loader with the following parameters.\n", "\n", "```python\n", - "from langchain.document_loaders.sharepoint import SharePointLoader\n", + "from langchain_community.document_loaders.sharepoint import SharePointLoader\n", "\n", "loader = SharePointLoader(document_library_id=\"YOUR DOCUMENT LIBRARY ID\", object_ids=[\"ID_1\", \"ID_2\"], auth_with_token=True)\n", "documents = loader.load()\n", diff --git a/docs/docs/integrations/document_loaders/microsoft_word.ipynb b/docs/docs/integrations/document_loaders/microsoft_word.ipynb index 2caace2509..4186e3d190 100644 --- a/docs/docs/integrations/document_loaders/microsoft_word.ipynb +++ b/docs/docs/integrations/document_loaders/microsoft_word.ipynb @@ -39,7 +39,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders import Docx2txtLoader" + "from langchain_community.document_loaders import Docx2txtLoader" ] }, { @@ -98,7 +98,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders import UnstructuredWordDocumentLoader" + "from langchain_community.document_loaders import UnstructuredWordDocumentLoader" ] }, { diff --git a/docs/docs/integrations/document_loaders/modern_treasury.ipynb b/docs/docs/integrations/document_loaders/modern_treasury.ipynb index 7d9cbcc884..8856bf51b5 100644 --- a/docs/docs/integrations/document_loaders/modern_treasury.ipynb +++ b/docs/docs/integrations/document_loaders/modern_treasury.ipynb @@ -21,8 +21,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders import ModernTreasuryLoader\n", - "from langchain.indexes import VectorstoreIndexCreator" + "from langchain.indexes import VectorstoreIndexCreator\n", + "from langchain_community.document_loaders import ModernTreasuryLoader" ] }, { diff --git a/docs/docs/integrations/document_loaders/mongodb.ipynb b/docs/docs/integrations/document_loaders/mongodb.ipynb index 2a167dcd33..0672882301 100644 --- a/docs/docs/integrations/document_loaders/mongodb.ipynb +++ b/docs/docs/integrations/document_loaders/mongodb.ipynb @@ -75,7 +75,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders.mongodb import MongodbLoader" + "from langchain_community.document_loaders.mongodb import MongodbLoader" ] }, { diff --git a/docs/docs/integrations/document_loaders/news.ipynb b/docs/docs/integrations/document_loaders/news.ipynb index bc645de393..4ad0ec2d8c 100644 --- a/docs/docs/integrations/document_loaders/news.ipynb +++ b/docs/docs/integrations/document_loaders/news.ipynb @@ -22,7 +22,7 @@ }, "outputs": [], "source": [ - "from langchain.document_loaders import NewsURLLoader" + "from langchain_community.document_loaders import NewsURLLoader" ] }, { diff --git a/docs/docs/integrations/document_loaders/notion.ipynb b/docs/docs/integrations/document_loaders/notion.ipynb index 76e510de7e..1c81e3765c 100644 --- a/docs/docs/integrations/document_loaders/notion.ipynb +++ b/docs/docs/integrations/document_loaders/notion.ipynb @@ -37,7 +37,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders import NotionDirectoryLoader" + "from langchain_community.document_loaders import NotionDirectoryLoader" ] }, { diff --git a/docs/docs/integrations/document_loaders/notiondb.ipynb b/docs/docs/integrations/document_loaders/notiondb.ipynb index 93d8a04fd6..d612728f6a 100644 --- a/docs/docs/integrations/document_loaders/notiondb.ipynb +++ b/docs/docs/integrations/document_loaders/notiondb.ipynb @@ -91,7 
+91,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders import NotionDBLoader" + "from langchain_community.document_loaders import NotionDBLoader" ] }, { diff --git a/docs/docs/integrations/document_loaders/nuclia.ipynb b/docs/docs/integrations/document_loaders/nuclia.ipynb index b1c3c818da..7561eef8c6 100644 --- a/docs/docs/integrations/document_loaders/nuclia.ipynb +++ b/docs/docs/integrations/document_loaders/nuclia.ipynb @@ -73,7 +73,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders.nuclia import NucliaLoader\n", + "from langchain_community.document_loaders.nuclia import NucliaLoader\n", "\n", "loader = NucliaLoader(\"./interview.mp4\", nua)" ] diff --git a/docs/docs/integrations/document_loaders/obsidian.ipynb b/docs/docs/integrations/document_loaders/obsidian.ipynb index 6bd45ad883..cd74f7dc9c 100644 --- a/docs/docs/integrations/document_loaders/obsidian.ipynb +++ b/docs/docs/integrations/document_loaders/obsidian.ipynb @@ -26,7 +26,7 @@ }, "outputs": [], "source": [ - "from langchain.document_loaders import ObsidianLoader" + "from langchain_community.document_loaders import ObsidianLoader" ] }, { diff --git a/docs/docs/integrations/document_loaders/odt.ipynb b/docs/docs/integrations/document_loaders/odt.ipynb index d0fbbe1c1c..f36ace1da6 100644 --- a/docs/docs/integrations/document_loaders/odt.ipynb +++ b/docs/docs/integrations/document_loaders/odt.ipynb @@ -21,7 +21,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders import UnstructuredODTLoader" + "from langchain_community.document_loaders import UnstructuredODTLoader" ] }, { diff --git a/docs/docs/integrations/document_loaders/open_city_data.ipynb b/docs/docs/integrations/document_loaders/open_city_data.ipynb index 7a9f86c8d9..369ab149ac 100644 --- a/docs/docs/integrations/document_loaders/open_city_data.ipynb +++ b/docs/docs/integrations/document_loaders/open_city_data.ipynb @@ -43,7 +43,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders import OpenCityDataLoader" + "from langchain_community.document_loaders import OpenCityDataLoader" ] }, { diff --git a/docs/docs/integrations/document_loaders/org_mode.ipynb b/docs/docs/integrations/document_loaders/org_mode.ipynb index e8146a9eb5..39bab73945 100644 --- a/docs/docs/integrations/document_loaders/org_mode.ipynb +++ b/docs/docs/integrations/document_loaders/org_mode.ipynb @@ -24,7 +24,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders import UnstructuredOrgModeLoader" + "from langchain_community.document_loaders import UnstructuredOrgModeLoader" ] }, { diff --git a/docs/docs/integrations/document_loaders/pandas_dataframe.ipynb b/docs/docs/integrations/document_loaders/pandas_dataframe.ipynb index d415a26bea..9650619750 100644 --- a/docs/docs/integrations/document_loaders/pandas_dataframe.ipynb +++ b/docs/docs/integrations/document_loaders/pandas_dataframe.ipynb @@ -132,7 +132,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders import DataFrameLoader" + "from langchain_community.document_loaders import DataFrameLoader" ] }, { diff --git a/docs/docs/integrations/document_loaders/polars_dataframe.ipynb b/docs/docs/integrations/document_loaders/polars_dataframe.ipynb index 52936f1654..c47ad093d8 100644 --- a/docs/docs/integrations/document_loaders/polars_dataframe.ipynb +++ b/docs/docs/integrations/document_loaders/polars_dataframe.ipynb @@ -88,7 +88,7 @@ "metadata": {}, "outputs": [], "source": [ - 
"from langchain.document_loaders import PolarsDataFrameLoader" + "from langchain_community.document_loaders import PolarsDataFrameLoader" ] }, { diff --git a/docs/docs/integrations/document_loaders/psychic.ipynb b/docs/docs/integrations/document_loaders/psychic.ipynb index bb2b7cfe7d..79f75b488c 100644 --- a/docs/docs/integrations/document_loaders/psychic.ipynb +++ b/docs/docs/integrations/document_loaders/psychic.ipynb @@ -48,7 +48,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders import PsychicLoader\n", + "from langchain_community.document_loaders import PsychicLoader\n", "from psychicapi import ConnectorId\n", "\n", "# Create a document loader for google drive. We can also load from other connectors by setting the connector_id to the appropriate value e.g. ConnectorId.notion.value\n", @@ -79,9 +79,9 @@ "source": [ "from langchain.chains import RetrievalQAWithSourcesChain\n", "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain.vectorstores import Chroma\n", "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", - "from langchain_community.llms import OpenAI" + "from langchain_community.llms import OpenAI\n", + "from langchain_community.vectorstores import Chroma" ] }, { diff --git a/docs/docs/integrations/document_loaders/pubmed.ipynb b/docs/docs/integrations/document_loaders/pubmed.ipynb index 30b69e5640..3e71fe8d56 100644 --- a/docs/docs/integrations/document_loaders/pubmed.ipynb +++ b/docs/docs/integrations/document_loaders/pubmed.ipynb @@ -17,7 +17,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders import PubMedLoader" + "from langchain_community.document_loaders import PubMedLoader" ] }, { diff --git a/docs/docs/integrations/document_loaders/pyspark_dataframe.ipynb b/docs/docs/integrations/document_loaders/pyspark_dataframe.ipynb index 46b3c60692..9ba1eed4eb 100644 --- a/docs/docs/integrations/document_loaders/pyspark_dataframe.ipynb +++ b/docs/docs/integrations/document_loaders/pyspark_dataframe.ipynb @@ -61,7 +61,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders import PySparkDataFrameLoader" + "from langchain_community.document_loaders import PySparkDataFrameLoader" ] }, { diff --git a/docs/docs/integrations/document_loaders/quip.ipynb b/docs/docs/integrations/document_loaders/quip.ipynb index 16358041a6..74c3ad4d74 100644 --- a/docs/docs/integrations/document_loaders/quip.ipynb +++ b/docs/docs/integrations/document_loaders/quip.ipynb @@ -61,7 +61,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders import QuipLoader\n", + "from langchain_community.document_loaders import QuipLoader\n", "\n", "loader = QuipLoader(\n", " api_url=\"https://platform.quip.com\", access_token=\"change_me\", request_timeout=60\n", diff --git a/docs/docs/integrations/document_loaders/readthedocs_documentation.ipynb b/docs/docs/integrations/document_loaders/readthedocs_documentation.ipynb index 32d42b253c..b7e01eafcc 100644 --- a/docs/docs/integrations/document_loaders/readthedocs_documentation.ipynb +++ b/docs/docs/integrations/document_loaders/readthedocs_documentation.ipynb @@ -45,7 +45,7 @@ }, "outputs": [], "source": [ - "from langchain.document_loaders import ReadTheDocsLoader" + "from langchain_community.document_loaders import ReadTheDocsLoader" ] }, { diff --git a/docs/docs/integrations/document_loaders/recursive_url.ipynb b/docs/docs/integrations/document_loaders/recursive_url.ipynb index 59236302da..3573869cec 100644 --- 
a/docs/docs/integrations/document_loaders/recursive_url.ipynb +++ b/docs/docs/integrations/document_loaders/recursive_url.ipynb @@ -44,7 +44,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders.recursive_url_loader import RecursiveUrlLoader" + "from langchain_community.document_loaders.recursive_url_loader import RecursiveUrlLoader" ] }, { diff --git a/docs/docs/integrations/document_loaders/reddit.ipynb b/docs/docs/integrations/document_loaders/reddit.ipynb index 1b251bfd26..7defd93428 100644 --- a/docs/docs/integrations/document_loaders/reddit.ipynb +++ b/docs/docs/integrations/document_loaders/reddit.ipynb @@ -20,7 +20,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders import RedditPostsLoader" + "from langchain_community.document_loaders import RedditPostsLoader" ] }, { diff --git a/docs/docs/integrations/document_loaders/roam.ipynb b/docs/docs/integrations/document_loaders/roam.ipynb index 570f610141..2f140f7251 100644 --- a/docs/docs/integrations/document_loaders/roam.ipynb +++ b/docs/docs/integrations/document_loaders/roam.ipynb @@ -34,7 +34,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders import RoamLoader" + "from langchain_community.document_loaders import RoamLoader" ] }, { diff --git a/docs/docs/integrations/document_loaders/rockset.ipynb b/docs/docs/integrations/document_loaders/rockset.ipynb index a9256f1379..922c373cf6 100644 --- a/docs/docs/integrations/document_loaders/rockset.ipynb +++ b/docs/docs/integrations/document_loaders/rockset.ipynb @@ -50,7 +50,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders import RocksetLoader\n", + "from langchain_community.document_loaders import RocksetLoader\n", "from rockset import Regions, RocksetClient, models\n", "\n", "loader = RocksetLoader(\n", @@ -143,7 +143,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders import RocksetLoader\n", + "from langchain_community.document_loaders import RocksetLoader\n", "from rockset import Regions, RocksetClient, models\n", "\n", "loader = RocksetLoader(\n", diff --git a/docs/docs/integrations/document_loaders/rspace.ipynb b/docs/docs/integrations/document_loaders/rspace.ipynb index 4f2009ae46..2edefc6219 100644 --- a/docs/docs/integrations/document_loaders/rspace.ipynb +++ b/docs/docs/integrations/document_loaders/rspace.ipynb @@ -48,7 +48,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders.rspace import RSpaceLoader" + "from langchain_community.document_loaders.rspace import RSpaceLoader" ] }, { diff --git a/docs/docs/integrations/document_loaders/rss.ipynb b/docs/docs/integrations/document_loaders/rss.ipynb index 3b6744f0ee..2e18751e38 100644 --- a/docs/docs/integrations/document_loaders/rss.ipynb +++ b/docs/docs/integrations/document_loaders/rss.ipynb @@ -29,7 +29,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders import RSSFeedLoader" + "from langchain_community.document_loaders import RSSFeedLoader" ] }, { diff --git a/docs/docs/integrations/document_loaders/rst.ipynb b/docs/docs/integrations/document_loaders/rst.ipynb index a88bb7f9c4..f7cff53fac 100644 --- a/docs/docs/integrations/document_loaders/rst.ipynb +++ b/docs/docs/integrations/document_loaders/rst.ipynb @@ -24,7 +24,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders import UnstructuredRSTLoader" + "from langchain_community.document_loaders import UnstructuredRSTLoader" ] }, { diff 
--git a/docs/docs/integrations/document_loaders/sitemap.ipynb b/docs/docs/integrations/document_loaders/sitemap.ipynb index 284857953a..56e5ced981 100644 --- a/docs/docs/integrations/document_loaders/sitemap.ipynb +++ b/docs/docs/integrations/document_loaders/sitemap.ipynb @@ -49,7 +49,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders.sitemap import SitemapLoader" + "from langchain_community.document_loaders.sitemap import SitemapLoader" ] }, { diff --git a/docs/docs/integrations/document_loaders/slack.ipynb b/docs/docs/integrations/document_loaders/slack.ipynb index d0f89ca5ab..71f8599982 100644 --- a/docs/docs/integrations/document_loaders/slack.ipynb +++ b/docs/docs/integrations/document_loaders/slack.ipynb @@ -29,7 +29,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders import SlackDirectoryLoader" + "from langchain_community.document_loaders import SlackDirectoryLoader" ] }, { diff --git a/docs/docs/integrations/document_loaders/snowflake.ipynb b/docs/docs/integrations/document_loaders/snowflake.ipynb index c84086eb81..ce0ac97c4d 100644 --- a/docs/docs/integrations/document_loaders/snowflake.ipynb +++ b/docs/docs/integrations/document_loaders/snowflake.ipynb @@ -25,7 +25,7 @@ "outputs": [], "source": [ "import settings as s\n", - "from langchain.document_loaders import SnowflakeLoader" + "from langchain_community.document_loaders import SnowflakeLoader" ] }, { diff --git a/docs/docs/integrations/document_loaders/source_code.ipynb b/docs/docs/integrations/document_loaders/source_code.ipynb index 6a4908f169..d099d16e6d 100644 --- a/docs/docs/integrations/document_loaders/source_code.ipynb +++ b/docs/docs/integrations/document_loaders/source_code.ipynb @@ -34,9 +34,9 @@ "warnings.filterwarnings(\"ignore\")\n", "from pprint import pprint\n", "\n", - "from langchain.document_loaders.generic import GenericLoader\n", - "from langchain.document_loaders.parsers import LanguageParser\n", - "from langchain.text_splitter import Language" + "from langchain.text_splitter import Language\n", + "from langchain_community.document_loaders.generic import GenericLoader\n", + "from langchain_community.document_loaders.parsers import LanguageParser" ] }, { diff --git a/docs/docs/integrations/document_loaders/spreedly.ipynb b/docs/docs/integrations/document_loaders/spreedly.ipynb index 602d839aed..99c1d66c8b 100644 --- a/docs/docs/integrations/document_loaders/spreedly.ipynb +++ b/docs/docs/integrations/document_loaders/spreedly.ipynb @@ -22,8 +22,8 @@ "source": [ "import os\n", "\n", - "from langchain.document_loaders import SpreedlyLoader\n", - "from langchain.indexes import VectorstoreIndexCreator" + "from langchain.indexes import VectorstoreIndexCreator\n", + "from langchain_community.document_loaders import SpreedlyLoader" ] }, { diff --git a/docs/docs/integrations/document_loaders/stripe.ipynb b/docs/docs/integrations/document_loaders/stripe.ipynb index 3fa5cca186..be56c46e09 100644 --- a/docs/docs/integrations/document_loaders/stripe.ipynb +++ b/docs/docs/integrations/document_loaders/stripe.ipynb @@ -18,8 +18,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders import StripeLoader\n", - "from langchain.indexes import VectorstoreIndexCreator" + "from langchain.indexes import VectorstoreIndexCreator\n", + "from langchain_community.document_loaders import StripeLoader" ] }, { diff --git a/docs/docs/integrations/document_loaders/subtitle.ipynb b/docs/docs/integrations/document_loaders/subtitle.ipynb index 
bde488d25b..91845d2d8b 100644 --- a/docs/docs/integrations/document_loaders/subtitle.ipynb +++ b/docs/docs/integrations/document_loaders/subtitle.ipynb @@ -35,7 +35,7 @@ }, "outputs": [], "source": [ - "from langchain.document_loaders import SRTLoader" + "from langchain_community.document_loaders import SRTLoader" ] }, { diff --git a/docs/docs/integrations/document_loaders/telegram.ipynb b/docs/docs/integrations/document_loaders/telegram.ipynb index 5317a6e523..dff0c666f7 100644 --- a/docs/docs/integrations/document_loaders/telegram.ipynb +++ b/docs/docs/integrations/document_loaders/telegram.ipynb @@ -19,7 +19,10 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders import TelegramChatApiLoader, TelegramChatFileLoader" + "from langchain_community.document_loaders import (\n", + " TelegramChatApiLoader,\n", + " TelegramChatFileLoader,\n", + ")" ] }, { diff --git a/docs/docs/integrations/document_loaders/tencent_cos_directory.ipynb b/docs/docs/integrations/document_loaders/tencent_cos_directory.ipynb index 1640322949..042aeb6831 100644 --- a/docs/docs/integrations/document_loaders/tencent_cos_directory.ipynb +++ b/docs/docs/integrations/document_loaders/tencent_cos_directory.ipynb @@ -38,7 +38,7 @@ }, "outputs": [], "source": [ - "from langchain.document_loaders import TencentCOSDirectoryLoader\n", + "from langchain_community.document_loaders import TencentCOSDirectoryLoader\n", "from qcloud_cos import CosConfig" ] }, diff --git a/docs/docs/integrations/document_loaders/tencent_cos_file.ipynb b/docs/docs/integrations/document_loaders/tencent_cos_file.ipynb index b86f1a5a80..40c3b8e7fb 100644 --- a/docs/docs/integrations/document_loaders/tencent_cos_file.ipynb +++ b/docs/docs/integrations/document_loaders/tencent_cos_file.ipynb @@ -37,7 +37,7 @@ }, "outputs": [], "source": [ - "from langchain.document_loaders import TencentCOSFileLoader\n", + "from langchain_community.document_loaders import TencentCOSFileLoader\n", "from qcloud_cos import CosConfig" ] }, diff --git a/docs/docs/integrations/document_loaders/tensorflow_datasets.ipynb b/docs/docs/integrations/document_loaders/tensorflow_datasets.ipynb index 7c145a68e5..1e333c1fd1 100644 --- a/docs/docs/integrations/document_loaders/tensorflow_datasets.ipynb +++ b/docs/docs/integrations/document_loaders/tensorflow_datasets.ipynb @@ -198,8 +198,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders import TensorflowDatasetLoader\n", "from langchain.schema import Document\n", + "from langchain_community.document_loaders import TensorflowDatasetLoader\n", "\n", "loader = TensorflowDatasetLoader(\n", " dataset_name=\"mlqa/en\",\n", diff --git a/docs/docs/integrations/document_loaders/tomarkdown.ipynb b/docs/docs/integrations/document_loaders/tomarkdown.ipynb index 359c4c88ee..28189b5081 100644 --- a/docs/docs/integrations/document_loaders/tomarkdown.ipynb +++ b/docs/docs/integrations/document_loaders/tomarkdown.ipynb @@ -29,7 +29,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders import ToMarkdownLoader" + "from langchain_community.document_loaders import ToMarkdownLoader" ] }, { diff --git a/docs/docs/integrations/document_loaders/toml.ipynb b/docs/docs/integrations/document_loaders/toml.ipynb index 0a26cdffac..2f4300481b 100644 --- a/docs/docs/integrations/document_loaders/toml.ipynb +++ b/docs/docs/integrations/document_loaders/toml.ipynb @@ -19,7 +19,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders import TomlLoader" + "from 
langchain_community.document_loaders import TomlLoader" ] }, { diff --git a/docs/docs/integrations/document_loaders/trello.ipynb b/docs/docs/integrations/document_loaders/trello.ipynb index 976eda67c3..d90aa8370d 100644 --- a/docs/docs/integrations/document_loaders/trello.ipynb +++ b/docs/docs/integrations/document_loaders/trello.ipynb @@ -87,7 +87,7 @@ } ], "source": [ - "from langchain.document_loaders import TrelloLoader\n", + "from langchain_community.document_loaders import TrelloLoader\n", "\n", "# Get the open cards from \"Awesome Board\"\n", "loader = TrelloLoader.from_credentials(\n", diff --git a/docs/docs/integrations/document_loaders/tsv.ipynb b/docs/docs/integrations/document_loaders/tsv.ipynb index f959ab6b74..9d8e192c43 100644 --- a/docs/docs/integrations/document_loaders/tsv.ipynb +++ b/docs/docs/integrations/document_loaders/tsv.ipynb @@ -24,7 +24,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders.tsv import UnstructuredTSVLoader" + "from langchain_community.document_loaders.tsv import UnstructuredTSVLoader" ] }, { diff --git a/docs/docs/integrations/document_loaders/twitter.ipynb b/docs/docs/integrations/document_loaders/twitter.ipynb index e240211356..03cf4b3595 100644 --- a/docs/docs/integrations/document_loaders/twitter.ipynb +++ b/docs/docs/integrations/document_loaders/twitter.ipynb @@ -20,7 +20,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders import TwitterTweetLoader" + "from langchain_community.document_loaders import TwitterTweetLoader" ] }, { diff --git a/docs/docs/integrations/document_loaders/unstructured_file.ipynb b/docs/docs/integrations/document_loaders/unstructured_file.ipynb index 5882be15e6..5070066675 100644 --- a/docs/docs/integrations/document_loaders/unstructured_file.ipynb +++ b/docs/docs/integrations/document_loaders/unstructured_file.ipynb @@ -56,7 +56,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders import UnstructuredFileLoader" + "from langchain_community.document_loaders import UnstructuredFileLoader" ] }, { @@ -174,7 +174,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders import UnstructuredFileLoader" + "from langchain_community.document_loaders import UnstructuredFileLoader" ] }, { @@ -309,7 +309,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders import UnstructuredFileLoader\n", + "from langchain_community.document_loaders import UnstructuredFileLoader\n", "from unstructured.cleaners.core import clean_extra_whitespace" ] }, @@ -379,7 +379,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders import UnstructuredAPIFileLoader" + "from langchain_community.document_loaders import UnstructuredAPIFileLoader" ] }, { diff --git a/docs/docs/integrations/document_loaders/url.ipynb b/docs/docs/integrations/document_loaders/url.ipynb index f0f74dbe69..9c1a308242 100644 --- a/docs/docs/integrations/document_loaders/url.ipynb +++ b/docs/docs/integrations/document_loaders/url.ipynb @@ -17,7 +17,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders import UnstructuredURLLoader" + "from langchain_community.document_loaders import UnstructuredURLLoader" ] }, { @@ -85,7 +85,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders import SeleniumURLLoader" + "from langchain_community.document_loaders import SeleniumURLLoader" ] }, { @@ -158,7 +158,7 @@ "metadata": {}, "outputs": [], "source": [ - "from 
langchain.document_loaders import PlaywrightURLLoader" + "from langchain_community.document_loaders import PlaywrightURLLoader" ] }, { diff --git a/docs/docs/integrations/document_loaders/weather.ipynb b/docs/docs/integrations/document_loaders/weather.ipynb index 9ed6f0d2b8..1409db2a46 100644 --- a/docs/docs/integrations/document_loaders/weather.ipynb +++ b/docs/docs/integrations/document_loaders/weather.ipynb @@ -19,7 +19,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders import WeatherDataLoader" + "from langchain_community.document_loaders import WeatherDataLoader" ] }, { diff --git a/docs/docs/integrations/document_loaders/web_base.ipynb b/docs/docs/integrations/document_loaders/web_base.ipynb index 89da41ca72..e0ee80cd5f 100644 --- a/docs/docs/integrations/document_loaders/web_base.ipynb +++ b/docs/docs/integrations/document_loaders/web_base.ipynb @@ -17,7 +17,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders import WebBaseLoader" + "from langchain_community.document_loaders import WebBaseLoader" ] }, { @@ -87,7 +87,7 @@ "html_doc = requests.get(\"{INSERT_NEW_URL_HERE}\")\n", "soup = BeautifulSoup(html_doc.text, 'html.parser')\n", "\n", - "# Beautiful soup logic to be exported to langchain.document_loaders.webpage.py\n", + "# Beautiful soup logic to be exported to langchain_community.document_loaders.webpage.py\n", "# Example: transcript = soup.select_one(\"td[class='scrtext']\").text\n", "# BS4 documentation can be found here: https://www.crummy.com/software/BeautifulSoup/bs4/doc/\n", "\n", diff --git a/docs/docs/integrations/document_loaders/whatsapp_chat.ipynb b/docs/docs/integrations/document_loaders/whatsapp_chat.ipynb index 0af681487e..78ca09b187 100644 --- a/docs/docs/integrations/document_loaders/whatsapp_chat.ipynb +++ b/docs/docs/integrations/document_loaders/whatsapp_chat.ipynb @@ -17,7 +17,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders import WhatsAppChatLoader" + "from langchain_community.document_loaders import WhatsAppChatLoader" ] }, { diff --git a/docs/docs/integrations/document_loaders/wikipedia.ipynb b/docs/docs/integrations/document_loaders/wikipedia.ipynb index 6e0583ba26..4eadfae142 100644 --- a/docs/docs/integrations/document_loaders/wikipedia.ipynb +++ b/docs/docs/integrations/document_loaders/wikipedia.ipynb @@ -67,7 +67,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders import WikipediaLoader" + "from langchain_community.document_loaders import WikipediaLoader" ] }, { diff --git a/docs/docs/integrations/document_loaders/xml.ipynb b/docs/docs/integrations/document_loaders/xml.ipynb index 5c95986800..beb8783d3b 100644 --- a/docs/docs/integrations/document_loaders/xml.ipynb +++ b/docs/docs/integrations/document_loaders/xml.ipynb @@ -17,7 +17,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders import UnstructuredXMLLoader" + "from langchain_community.document_loaders import UnstructuredXMLLoader" ] }, { diff --git a/docs/docs/integrations/document_loaders/xorbits.ipynb b/docs/docs/integrations/document_loaders/xorbits.ipynb index cf5f60f028..3fe5145047 100644 --- a/docs/docs/integrations/document_loaders/xorbits.ipynb +++ b/docs/docs/integrations/document_loaders/xorbits.ipynb @@ -141,7 +141,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders import XorbitsLoader" + "from langchain_community.document_loaders import XorbitsLoader" ] }, { diff --git 
a/docs/docs/integrations/document_loaders/youtube_audio.ipynb b/docs/docs/integrations/document_loaders/youtube_audio.ipynb index 6ed1a7b086..41a416bded 100644 --- a/docs/docs/integrations/document_loaders/youtube_audio.ipynb +++ b/docs/docs/integrations/document_loaders/youtube_audio.ipynb @@ -24,9 +24,11 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders.blob_loaders.youtube_audio import YoutubeAudioLoader\n", - "from langchain.document_loaders.generic import GenericLoader\n", - "from langchain.document_loaders.parsers import (\n", + "from langchain_community.document_loaders.blob_loaders.youtube_audio import (\n", + " YoutubeAudioLoader,\n", + ")\n", + "from langchain_community.document_loaders.generic import GenericLoader\n", + "from langchain_community.document_loaders.parsers import (\n", " OpenAIWhisperParser,\n", " OpenAIWhisperParserLocal,\n", ")" @@ -167,9 +169,9 @@ "source": [ "from langchain.chains import RetrievalQA\n", "from langchain.text_splitter import RecursiveCharacterTextSplitter\n", - "from langchain.vectorstores import FAISS\n", "from langchain_community.chat_models import ChatOpenAI\n", - "from langchain_community.embeddings import OpenAIEmbeddings" + "from langchain_community.embeddings import OpenAIEmbeddings\n", + "from langchain_community.vectorstores import FAISS" ] }, { diff --git a/docs/docs/integrations/document_loaders/youtube_transcript.ipynb b/docs/docs/integrations/document_loaders/youtube_transcript.ipynb index 564b823ecc..fe806863ee 100644 --- a/docs/docs/integrations/document_loaders/youtube_transcript.ipynb +++ b/docs/docs/integrations/document_loaders/youtube_transcript.ipynb @@ -20,7 +20,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders import YoutubeLoader" + "from langchain_community.document_loaders import YoutubeLoader" ] }, { @@ -150,7 +150,7 @@ "# Init the GoogleApiClient\n", "from pathlib import Path\n", "\n", - "from langchain.document_loaders import GoogleApiClient, GoogleApiYoutubeLoader\n", + "from langchain_community.document_loaders import GoogleApiClient, GoogleApiYoutubeLoader\n", "\n", "google_api_client = GoogleApiClient(credentials_path=Path(\"your_path_creds.json\"))\n", "\n", diff --git a/docs/docs/integrations/document_transformers/beautiful_soup.ipynb b/docs/docs/integrations/document_transformers/beautiful_soup.ipynb index 4e71be5499..dd05f8018a 100644 --- a/docs/docs/integrations/document_transformers/beautiful_soup.ipynb +++ b/docs/docs/integrations/document_transformers/beautiful_soup.ipynb @@ -34,8 +34,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders import AsyncChromiumLoader\n", - "from langchain.document_transformers import BeautifulSoupTransformer\n", + "from langchain_community.document_loaders import AsyncChromiumLoader\n", + "from langchain_community.document_transformers import BeautifulSoupTransformer\n", "\n", "# Load HTML\n", "loader = AsyncChromiumLoader([\"https://www.wsj.com\"])\n", diff --git a/docs/docs/integrations/document_transformers/docai.ipynb b/docs/docs/integrations/document_transformers/docai.ipynb index 45e6524730..870c25f4a4 100644 --- a/docs/docs/integrations/document_transformers/docai.ipynb +++ b/docs/docs/integrations/document_transformers/docai.ipynb @@ -71,8 +71,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders.blob_loaders import Blob\n", - "from langchain.document_loaders.parsers import DocAIParser" + "from langchain_community.document_loaders.blob_loaders 
import Blob\n", + "from langchain_community.document_loaders.parsers import DocAIParser" ] }, { diff --git a/docs/docs/integrations/document_transformers/doctran_extract_properties.ipynb b/docs/docs/integrations/document_transformers/doctran_extract_properties.ipynb index 55015d43e9..f0b1f2545b 100644 --- a/docs/docs/integrations/document_transformers/doctran_extract_properties.ipynb +++ b/docs/docs/integrations/document_transformers/doctran_extract_properties.ipynb @@ -32,8 +32,8 @@ "source": [ "import json\n", "\n", - "from langchain.document_transformers import DoctranPropertyExtractor\n", - "from langchain.schema import Document" + "from langchain.schema import Document\n", + "from langchain_community.document_transformers import DoctranPropertyExtractor" ] }, { diff --git a/docs/docs/integrations/document_transformers/doctran_interrogate_document.ipynb b/docs/docs/integrations/document_transformers/doctran_interrogate_document.ipynb index 41863407d7..122bf8f7f7 100644 --- a/docs/docs/integrations/document_transformers/doctran_interrogate_document.ipynb +++ b/docs/docs/integrations/document_transformers/doctran_interrogate_document.ipynb @@ -30,8 +30,8 @@ "source": [ "import json\n", "\n", - "from langchain.document_transformers import DoctranQATransformer\n", - "from langchain.schema import Document" + "from langchain.schema import Document\n", + "from langchain_community.document_transformers import DoctranQATransformer" ] }, { diff --git a/docs/docs/integrations/document_transformers/doctran_translate_document.ipynb b/docs/docs/integrations/document_transformers/doctran_translate_document.ipynb index 92c016ec02..fceef84e50 100644 --- a/docs/docs/integrations/document_transformers/doctran_translate_document.ipynb +++ b/docs/docs/integrations/document_transformers/doctran_translate_document.ipynb @@ -28,8 +28,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_transformers import DoctranTextTranslator\n", - "from langchain.schema import Document" + "from langchain.schema import Document\n", + "from langchain_community.document_transformers import DoctranTextTranslator" ] }, { diff --git a/docs/docs/integrations/document_transformers/google_translate.ipynb b/docs/docs/integrations/document_transformers/google_translate.ipynb index cc2b69e5e7..fc7619f462 100644 --- a/docs/docs/integrations/document_transformers/google_translate.ipynb +++ b/docs/docs/integrations/document_transformers/google_translate.ipynb @@ -31,8 +31,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_transformers import GoogleTranslateTransformer\n", - "from langchain.schema import Document" + "from langchain.schema import Document\n", + "from langchain_community.document_transformers import GoogleTranslateTransformer" ] }, { diff --git a/docs/docs/integrations/document_transformers/html2text.ipynb b/docs/docs/integrations/document_transformers/html2text.ipynb index c317158e15..5f600dd252 100644 --- a/docs/docs/integrations/document_transformers/html2text.ipynb +++ b/docs/docs/integrations/document_transformers/html2text.ipynb @@ -37,7 +37,7 @@ } ], "source": [ - "from langchain.document_loaders import AsyncHtmlLoader\n", + "from langchain_community.document_loaders import AsyncHtmlLoader\n", "\n", "urls = [\"https://www.espn.com\", \"https://lilianweng.github.io/posts/2023-06-23-agent/\"]\n", "loader = AsyncHtmlLoader(urls)\n", @@ -51,7 +51,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_transformers import Html2TextTransformer" + "from 
langchain_community.document_transformers import Html2TextTransformer" ] }, { diff --git a/docs/docs/integrations/document_transformers/nuclia_transformer.ipynb b/docs/docs/integrations/document_transformers/nuclia_transformer.ipynb index 3ca7ccef31..feaed6cdab 100644 --- a/docs/docs/integrations/document_transformers/nuclia_transformer.ipynb +++ b/docs/docs/integrations/document_transformers/nuclia_transformer.ipynb @@ -13,7 +13,7 @@ "\n", "To use the Nuclia Understanding API, you need to have a Nuclia account. You can create one for free at [https://nuclia.cloud](https://nuclia.cloud), and then [create a NUA key](https://docs.nuclia.dev/docs/docs/using/understanding/intro).\n", "\n", - "from langchain.document_transformers.nuclia_text_transform import NucliaTextTransformer" + "from langchain_community.document_transformers.nuclia_text_transform import NucliaTextTransformer" ] }, { @@ -73,7 +73,9 @@ "source": [ "import asyncio\n", "\n", - "from langchain.document_transformers.nuclia_text_transform import NucliaTextTransformer\n", + "from langchain_community.document_transformers.nuclia_text_transform import (\n", + " NucliaTextTransformer,\n", + ")\n", "from langchain_core.documents import Document\n", "\n", "\n", diff --git a/docs/docs/integrations/document_transformers/openai_metadata_tagger.ipynb b/docs/docs/integrations/document_transformers/openai_metadata_tagger.ipynb index abc62c8c78..d5efe1d844 100644 --- a/docs/docs/integrations/document_transformers/openai_metadata_tagger.ipynb +++ b/docs/docs/integrations/document_transformers/openai_metadata_tagger.ipynb @@ -21,9 +21,11 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_transformers.openai_functions import create_metadata_tagger\n", "from langchain.schema import Document\n", - "from langchain_community.chat_models import ChatOpenAI" + "from langchain_community.chat_models import ChatOpenAI\n", + "from langchain_community.document_transformers.openai_functions import (\n", + " create_metadata_tagger,\n", + ")" ] }, { diff --git a/docs/docs/integrations/memory/aws_dynamodb.ipynb b/docs/docs/integrations/memory/aws_dynamodb.ipynb index 93931d98db..bd4e49096e 100644 --- a/docs/docs/integrations/memory/aws_dynamodb.ipynb +++ b/docs/docs/integrations/memory/aws_dynamodb.ipynb @@ -88,7 +88,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.memory.chat_message_histories import DynamoDBChatMessageHistory\n", + "from langchain_community.chat_message_histories import DynamoDBChatMessageHistory\n", "\n", "history = DynamoDBChatMessageHistory(table_name=\"SessionTable\", session_id=\"0\")\n", "\n", @@ -138,7 +138,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.memory.chat_message_histories import DynamoDBChatMessageHistory\n", + "from langchain_community.chat_message_histories import DynamoDBChatMessageHistory\n", "\n", "history = DynamoDBChatMessageHistory(\n", " table_name=\"SessionTable\",\n", @@ -194,7 +194,7 @@ } ], "source": [ - "from langchain.memory.chat_message_histories import DynamoDBChatMessageHistory\n", + "from langchain_community.chat_message_histories import DynamoDBChatMessageHistory\n", "\n", "composite_table = dynamodb.create_table(\n", " TableName=\"CompositeTable\",\n", diff --git a/docs/docs/integrations/memory/rockset_chat_message_history.ipynb b/docs/docs/integrations/memory/rockset_chat_message_history.ipynb index 41cfb1d25d..82acc64a9d 100644 --- a/docs/docs/integrations/memory/rockset_chat_message_history.ipynb +++ 
b/docs/docs/integrations/memory/rockset_chat_message_history.ipynb @@ -52,7 +52,7 @@ }, "outputs": [], "source": [ - "from langchain.memory.chat_message_histories import RocksetChatMessageHistory\n", + "from langchain_community.chat_message_histories import RocksetChatMessageHistory\n", "from rockset import Regions, RocksetClient\n", "\n", "history = RocksetChatMessageHistory(\n", diff --git a/docs/docs/integrations/memory/sql_chat_message_history.ipynb b/docs/docs/integrations/memory/sql_chat_message_history.ipynb index fe4663c1a1..de15a53f6d 100644 --- a/docs/docs/integrations/memory/sql_chat_message_history.ipynb +++ b/docs/docs/integrations/memory/sql_chat_message_history.ipynb @@ -56,7 +56,7 @@ }, "outputs": [], "source": [ - "from langchain.memory.chat_message_histories import SQLChatMessageHistory\n", + "from langchain_community.chat_message_histories import SQLChatMessageHistory\n", "\n", "chat_message_history = SQLChatMessageHistory(\n", " session_id=\"test_session\", connection_string=\"sqlite:///sqlite.db\"\n", @@ -130,8 +130,8 @@ "from datetime import datetime\n", "from typing import Any\n", "\n", - "from langchain.memory.chat_message_histories.sql import BaseMessageConverter\n", "from langchain.schema import AIMessage, BaseMessage, HumanMessage, SystemMessage\n", + "from langchain_community.chat_message_histories.sql import BaseMessageConverter\n", "from sqlalchemy import Column, DateTime, Integer, Text\n", "from sqlalchemy.orm import declarative_base\n", "\n", diff --git a/docs/docs/integrations/memory/streamlit_chat_message_history.ipynb b/docs/docs/integrations/memory/streamlit_chat_message_history.ipynb index 078d107ae4..32fb1bc00f 100644 --- a/docs/docs/integrations/memory/streamlit_chat_message_history.ipynb +++ b/docs/docs/integrations/memory/streamlit_chat_message_history.ipynb @@ -65,7 +65,7 @@ "outputs": [], "source": [ "from langchain.memory import ConversationBufferMemory\n", - "from langchain.memory.chat_message_histories import StreamlitChatMessageHistory\n", + "from langchain_community.chat_message_histories import StreamlitChatMessageHistory\n", "\n", "# Optionally, specify your own session_state key for storing messages\n", "msgs = StreamlitChatMessageHistory(key=\"special_app_key\")\n", diff --git a/docs/docs/integrations/memory/upstash_redis_chat_message_history.ipynb b/docs/docs/integrations/memory/upstash_redis_chat_message_history.ipynb index 0e7db59aad..4c6c4705d2 100644 --- a/docs/docs/integrations/memory/upstash_redis_chat_message_history.ipynb +++ b/docs/docs/integrations/memory/upstash_redis_chat_message_history.ipynb @@ -17,7 +17,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.memory.chat_message_histories.upstash_redis import (\n", + "from langchain_community.chat_message_histories.upstash_redis import (\n", " UpstashRedisChatMessageHistory,\n", ")\n", "\n", diff --git a/docs/docs/integrations/memory/xata_chat_message_history.ipynb b/docs/docs/integrations/memory/xata_chat_message_history.ipynb index e058d185ac..a44db96e8e 100644 --- a/docs/docs/integrations/memory/xata_chat_message_history.ipynb +++ b/docs/docs/integrations/memory/xata_chat_message_history.ipynb @@ -154,8 +154,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.vectorstores.xata import XataVectorStore\n", "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", + "from langchain_community.vectorstores.xata import XataVectorStore\n", "\n", "embeddings = OpenAIEmbeddings()\n", "\n", diff --git a/docs/docs/integrations/platforms/aws.mdx 
b/docs/docs/integrations/platforms/aws.mdx index 2f374da8dd..d3b269b8b9 100644 --- a/docs/docs/integrations/platforms/aws.mdx +++ b/docs/docs/integrations/platforms/aws.mdx @@ -118,7 +118,7 @@ See a [usage example for S3DirectoryLoader](/docs/integrations/document_loaders/ See a [usage example for S3FileLoader](/docs/integrations/document_loaders/aws_s3_file). ```python -from langchain.document_loaders import S3DirectoryLoader, S3FileLoader +from langchain_community.document_loaders import S3DirectoryLoader, S3FileLoader ``` ### Amazon Textract @@ -129,7 +129,7 @@ from langchain.document_loaders import S3DirectoryLoader, S3FileLoader See a [usage example](/docs/integrations/document_loaders/amazon_textract). ```python -from langchain.document_loaders import AmazonTextractPDFLoader +from langchain_community.document_loaders import AmazonTextractPDFLoader ``` ## Memory @@ -217,7 +217,7 @@ pip install boto3 requests requests-aws4auth See a [usage example](/docs/integrations/vectorstores/opensearch#using-aos-amazon-opensearch-service). ```python -from langchain.vectorstores import OpenSearchVectorSearch +from langchain_community.vectorstores import OpenSearchVectorSearch ``` ## Tools diff --git a/docs/docs/integrations/platforms/google.mdx b/docs/docs/integrations/platforms/google.mdx index 314128af8e..aeb55736ab 100644 --- a/docs/docs/integrations/platforms/google.mdx +++ b/docs/docs/integrations/platforms/google.mdx @@ -84,7 +84,7 @@ pip install google-cloud-bigquery See a [usage example](/docs/integrations/document_loaders/google_bigquery). ```python -from langchain.document_loaders import BigQueryLoader +from langchain_community.document_loaders import BigQueryLoader ``` ## LLMs @@ -137,12 +137,12 @@ There are two loaders for the `Google Cloud Storage`: the `Directory` and the `F See a [usage example](/docs/integrations/document_loaders/google_cloud_storage_directory). ```python -from langchain.document_loaders import GCSDirectoryLoader +from langchain_community.document_loaders import GCSDirectoryLoader ``` See a [usage example](/docs/integrations/document_loaders/google_cloud_storage_file). ```python -from langchain.document_loaders import GCSFileLoader +from langchain_community.document_loaders import GCSFileLoader ``` ### Google Drive @@ -160,7 +160,7 @@ pip install google-api-python-client google-auth-httplib2 google-auth-oauthlib See a [usage example and authorization instructions](/docs/integrations/document_loaders/google_drive). ```python -from langchain.document_loaders import GoogleDriveLoader +from langchain_community.document_loaders import GoogleDriveLoader ``` ### Speech-to-Text @@ -178,7 +178,7 @@ pip install google-cloud-speech See a [usage example and authorization instructions](/docs/integrations/document_loaders/google_speech_to_text). ```python -from langchain.document_loaders import GoogleSpeechToTextLoader +from langchain_community.document_loaders import GoogleSpeechToTextLoader ``` ## Vector Stores @@ -199,7 +199,7 @@ pip install tensorflow google-cloud-aiplatform tensorflow-hub tensorflow-text See a [usage example](/docs/integrations/vectorstores/matchingengine). ```python -from langchain.vectorstores import MatchingEngine +from langchain_community.vectorstores import MatchingEngine ``` ### Google ScaNN @@ -224,7 +224,7 @@ pip install scann See a [usage example](/docs/integrations/vectorstores/scann). 
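To make the renamed import concrete, a minimal hedged sketch of ScaNN in use; the sample texts and the `HuggingFaceEmbeddings` model are illustrative assumptions, not part of this page:

```python
# Hedged sketch: index two strings with ScaNN and run a similarity search.
# Any Embeddings implementation works; HuggingFaceEmbeddings is just an example.
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import ScaNN

embeddings = HuggingFaceEmbeddings()
db = ScaNN.from_texts(["hello world", "goodbye world"], embeddings)
print(db.similarity_search("a friendly greeting", k=1)[0].page_content)
```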
```python -from langchain.vectorstores import ScaNN +from langchain_community.vectorstores import ScaNN ``` ## Retrievers @@ -440,8 +440,8 @@ pip install google-cloud-documentai-toolbox See a [usage example](/docs/integrations/document_transformers/docai). ```python -from langchain.document_loaders.blob_loaders import Blob -from langchain.document_loaders.parsers import DocAIParser +from langchain_community.document_loaders.blob_loaders import Blob +from langchain_community.document_loaders.parsers import DocAIParser ``` ### Google Translate @@ -463,7 +463,7 @@ pip install google-cloud-translate See a [usage example and authorization instructions](/docs/integrations/document_transformers/google_translate). ```python -from langchain.document_transformers import GoogleTranslateTransformer +from langchain_community.document_transformers import GoogleTranslateTransformer ``` ## Toolkits @@ -502,7 +502,7 @@ pip install google-api-python-client google-auth-oauthlib google-auth-httplib2 See a [usage example and authorization instructions](/docs/integrations/chat_loaders/gmail). ```python -from langchain.chat_loaders.gmail import GMailLoader +from langchain_community.chat_loaders.gmail import GMailLoader ``` ## 3rd Party Integrations @@ -570,8 +570,8 @@ pip install yt_dlp pydub librosa See a [usage example and authorization instructions](/docs/integrations/document_loaders/youtube_audio). ```python -from langchain.document_loaders.blob_loaders.youtube_audio import YoutubeAudioLoader -from langchain.document_loaders.parsers import OpenAIWhisperParser, OpenAIWhisperParserLocal +from langchain_community.document_loaders.blob_loaders.youtube_audio import YoutubeAudioLoader +from langchain_community.document_loaders.parsers import OpenAIWhisperParser, OpenAIWhisperParserLocal ``` ### YouTube transcripts @@ -587,5 +587,5 @@ pip install youtube-transcript-api See a [usage example](/docs/integrations/document_loaders/youtube_transcript). ```python -from langchain.document_loaders import YoutubeLoader +from langchain_community.document_loaders import YoutubeLoader ``` \ No newline at end of file diff --git a/docs/docs/integrations/platforms/huggingface.mdx b/docs/docs/integrations/platforms/huggingface.mdx index 4d215f3f3e..341d8f6cbc 100644 --- a/docs/docs/integrations/platforms/huggingface.mdx +++ b/docs/docs/integrations/platforms/huggingface.mdx @@ -79,7 +79,7 @@ pip install datasets See a [usage example](/docs/integrations/document_loaders/hugging_face_dataset). ```python -from langchain.document_loaders.hugging_face_dataset import HuggingFaceDatasetLoader +from langchain_community.document_loaders.hugging_face_dataset import HuggingFaceDatasetLoader ``` diff --git a/docs/docs/integrations/platforms/microsoft.mdx b/docs/docs/integrations/platforms/microsoft.mdx index 153d19ac06..8fde47d110 100644 --- a/docs/docs/integrations/platforms/microsoft.mdx +++ b/docs/docs/integrations/platforms/microsoft.mdx @@ -72,13 +72,13 @@ pip install azure-storage-blob See a [usage example for the Azure Blob Storage](/docs/integrations/document_loaders/azure_blob_storage_container). ```python -from langchain.document_loaders import AzureBlobStorageContainerLoader +from langchain_community.document_loaders import AzureBlobStorageContainerLoader ``` See a [usage example for the Azure Files](/docs/integrations/document_loaders/azure_blob_storage_file). 
```python -from langchain.document_loaders import AzureBlobStorageFileLoader +from langchain_community.document_loaders import AzureBlobStorageFileLoader ``` @@ -95,7 +95,7 @@ pip install o365 See a [usage example](/docs/integrations/document_loaders/microsoft_onedrive). ```python -from langchain.document_loaders import OneDriveLoader +from langchain_community.document_loaders import OneDriveLoader ``` @@ -106,7 +106,7 @@ from langchain.document_loaders import OneDriveLoader See a [usage example](/docs/integrations/document_loaders/microsoft_word). ```python -from langchain.document_loaders import UnstructuredWordDocumentLoader +from langchain_community.document_loaders import UnstructuredWordDocumentLoader ``` @@ -124,7 +124,7 @@ representation of the Excel file will be available in the document metadata unde See a [usage example](/docs/integrations/document_loaders/excel). ```python -from langchain.document_loaders import UnstructuredExcelLoader +from langchain_community.document_loaders import UnstructuredExcelLoader ``` @@ -137,7 +137,7 @@ from langchain.document_loaders import UnstructuredExcelLoader See a [usage example](/docs/integrations/document_loaders/microsoft_sharepoint). ```python -from langchain.document_loaders.sharepoint import SharePointLoader +from langchain_community.document_loaders.sharepoint import SharePointLoader ``` @@ -148,7 +148,7 @@ from langchain.document_loaders.sharepoint import SharePointLoader See a [usage example](/docs/integrations/document_loaders/microsoft_powerpoint). ```python -from langchain.document_loaders import UnstructuredPowerPointLoader +from langchain_community.document_loaders import UnstructuredPowerPointLoader ``` ### Microsoft OneNote @@ -162,7 +162,7 @@ pip install bs4 msal See a [usage example](/docs/integrations/document_loaders/onenote). ```python -from langchain.document_loaders.onenote import OneNoteLoader +from langchain_community.document_loaders.onenote import OneNoteLoader ``` @@ -195,7 +195,7 @@ With Cosmos DB for MongoDB vCore, developers can enjoy the benefits of native Az See a [usage example](/docs/integrations/vectorstores/azure_cosmos_db). ```python -from langchain.vectorstores import AzureCosmosDBVectorSearch +from langchain_community.vectorstores import AzureCosmosDBVectorSearch ``` ## Retrievers diff --git a/docs/docs/integrations/platforms/openai.mdx b/docs/docs/integrations/platforms/openai.mdx index 4c5f2e70cd..a8fae1e126 100644 --- a/docs/docs/integrations/platforms/openai.mdx +++ b/docs/docs/integrations/platforms/openai.mdx @@ -80,7 +80,7 @@ For a more detailed walkthrough of this, see [this notebook](/docs/modules/data_ See a [usage example](/docs/integrations/document_loaders/chatgpt_loader). 
```python -from langchain.document_loaders.chatgpt import ChatGPTLoader +from langchain_community.document_loaders.chatgpt import ChatGPTLoader ``` ## Retriever diff --git a/docs/docs/integrations/providers/activeloop_deeplake.mdx b/docs/docs/integrations/providers/activeloop_deeplake.mdx index 51ac477cf8..05579943e4 100644 --- a/docs/docs/integrations/providers/activeloop_deeplake.mdx +++ b/docs/docs/integrations/providers/activeloop_deeplake.mdx @@ -29,7 +29,7 @@ There exists a wrapper around Deep Lake, a data lake for Deep Learning applicati To import this vectorstore: ```python -from langchain.vectorstores import DeepLake +from langchain_community.vectorstores import DeepLake ``` diff --git a/docs/docs/integrations/providers/airbyte.mdx b/docs/docs/integrations/providers/airbyte.mdx index 16b1deca8f..44519fbbed 100644 --- a/docs/docs/integrations/providers/airbyte.mdx +++ b/docs/docs/integrations/providers/airbyte.mdx @@ -25,5 +25,5 @@ Have `docker desktop` installed. See a [usage example](/docs/integrations/document_loaders/airbyte_json). ```python -from langchain.document_loaders import AirbyteJSONLoader +from langchain_community.document_loaders import AirbyteJSONLoader ``` diff --git a/docs/docs/integrations/providers/airtable.md b/docs/docs/integrations/providers/airtable.md index 635452adb5..e239c4ee6a 100644 --- a/docs/docs/integrations/providers/airtable.md +++ b/docs/docs/integrations/providers/airtable.md @@ -22,7 +22,7 @@ pip install pyairtable ```python -from langchain.document_loaders import AirtableLoader +from langchain_community.document_loaders import AirtableLoader ``` See an [example](/docs/integrations/document_loaders/airtable). diff --git a/docs/docs/integrations/providers/alibaba_cloud.mdx b/docs/docs/integrations/providers/alibaba_cloud.mdx index f210ce44de..af4748720c 100644 --- a/docs/docs/integrations/providers/alibaba_cloud.mdx +++ b/docs/docs/integrations/providers/alibaba_cloud.mdx @@ -23,7 +23,7 @@ from langchain_community.chat_models import PaiEasChatEndpoint See [installation instructions and a usage example](/docs/integrations/vectorstores/alibabacloud_opensearch). ```python -from langchain.vectorstores import AlibabaCloudOpenSearch, AlibabaCloudOpenSearchSettings +from langchain_community.vectorstores import AlibabaCloudOpenSearch, AlibabaCloudOpenSearchSettings ``` ## Document Loader @@ -31,5 +31,5 @@ from langchain.vectorstores import AlibabaCloudOpenSearch, AlibabaCloudOpenSearc See [installation instructions and a usage example](/docs/integrations/document_loaders/alibaba_cloud_maxcompute). ```python -from langchain.document_loaders import MaxComputeLoader +from langchain_community.document_loaders import MaxComputeLoader ``` diff --git a/docs/docs/integrations/providers/analyticdb.mdx b/docs/docs/integrations/providers/analyticdb.mdx index cde6db5fb4..a06157a8b4 100644 --- a/docs/docs/integrations/providers/analyticdb.mdx +++ b/docs/docs/integrations/providers/analyticdb.mdx @@ -9,7 +9,7 @@ whether for semantic search or example selection. 
To import this vectorstore: ```python -from langchain.vectorstores import AnalyticDB +from langchain_community.vectorstores import AnalyticDB ``` For a more detailed walkthrough of the AnalyticDB wrapper, see [this notebook](/docs/integrations/vectorstores/analyticdb) diff --git a/docs/docs/integrations/providers/annoy.mdx b/docs/docs/integrations/providers/annoy.mdx index 705ad3cf69..4a39b336b9 100644 --- a/docs/docs/integrations/providers/annoy.mdx +++ b/docs/docs/integrations/providers/annoy.mdx @@ -14,5 +14,5 @@ pip install annoy See a [usage example](/docs/integrations/vectorstores/annoy). ```python -from langchain.vectorstores import Annoy +from langchain_community.vectorstores import Annoy ``` diff --git a/docs/docs/integrations/providers/apify.mdx b/docs/docs/integrations/providers/apify.mdx index 6bc7486b37..11b684c0da 100644 --- a/docs/docs/integrations/providers/apify.mdx +++ b/docs/docs/integrations/providers/apify.mdx @@ -40,7 +40,7 @@ For a more detailed walkthrough of this wrapper, see [this notebook](/docs/integ You can also use our `ApifyDatasetLoader` to get data from Apify dataset. ```python -from langchain.document_loaders import ApifyDatasetLoader +from langchain_community.document_loaders import ApifyDatasetLoader ``` For a more detailed walkthrough of this loader, see [this notebook](/docs/integrations/document_loaders/apify_dataset). diff --git a/docs/docs/integrations/providers/arangodb.mdx b/docs/docs/integrations/providers/arangodb.mdx index 2bf68a9a2b..95232ebbcc 100644 --- a/docs/docs/integrations/providers/arangodb.mdx +++ b/docs/docs/integrations/providers/arangodb.mdx @@ -18,6 +18,6 @@ See the notebook example [here](/docs/use_cases/graph/graph_arangodb_qa). ```python from arango import ArangoClient -from langchain.graphs import ArangoGraph +from langchain_community.graphs import ArangoGraph from langchain.chains import ArangoGraphQAChain ``` \ No newline at end of file diff --git a/docs/docs/integrations/providers/arxiv.mdx b/docs/docs/integrations/providers/arxiv.mdx index fb2fa5a9d8..6e1eae1d9a 100644 --- a/docs/docs/integrations/providers/arxiv.mdx +++ b/docs/docs/integrations/providers/arxiv.mdx @@ -24,7 +24,7 @@ pip install pymupdf See a [usage example](/docs/integrations/document_loaders/arxiv). ```python -from langchain.document_loaders import ArxivLoader +from langchain_community.document_loaders import ArxivLoader ``` ## Retriever diff --git a/docs/docs/integrations/providers/astradb.mdx b/docs/docs/integrations/providers/astradb.mdx index fc093aad3f..5059536ec4 100644 --- a/docs/docs/integrations/providers/astradb.mdx +++ b/docs/docs/integrations/providers/astradb.mdx @@ -18,7 +18,7 @@ pip install "astrapy>=0.5.3" ### Vector Store ```python -from langchain.vectorstores import AstraDB +from langchain_community.vectorstores import AstraDB vector_store = AstraDB( embedding=my_embedding, collection_name="my_store", @@ -83,7 +83,7 @@ Hence, a different set of connectors, outlined below, shall be used. ### Vector Store ```python -from langchain.vectorstores import Cassandra +from langchain_community.vectorstores import Cassandra vector_store = Cassandra( embedding=my_embedding, table_name="my_store", diff --git a/docs/docs/integrations/providers/atlas.mdx b/docs/docs/integrations/providers/atlas.mdx index 623c19818b..06545aca11 100644 --- a/docs/docs/integrations/providers/atlas.mdx +++ b/docs/docs/integrations/providers/atlas.mdx @@ -15,5 +15,5 @@ See a [usage example](/docs/integrations/vectorstores/atlas). 
```python -from langchain.vectorstores import AtlasDB +from langchain_community.vectorstores import AtlasDB ``` \ No newline at end of file diff --git a/docs/docs/integrations/providers/awadb.md b/docs/docs/integrations/providers/awadb.md index 01a5914717..4290444752 100644 --- a/docs/docs/integrations/providers/awadb.md +++ b/docs/docs/integrations/providers/awadb.md @@ -13,7 +13,7 @@ pip install awadb ```python -from langchain.vectorstores import AwaDB +from langchain_community.vectorstores import AwaDB ``` See a [usage example](/docs/integrations/vectorstores/awadb). diff --git a/docs/docs/integrations/providers/azlyrics.mdx b/docs/docs/integrations/providers/azlyrics.mdx index 97e54bf1cc..78cbbc329d 100644 --- a/docs/docs/integrations/providers/azlyrics.mdx +++ b/docs/docs/integrations/providers/azlyrics.mdx @@ -12,5 +12,5 @@ There isn't any special setup for it. See a [usage example](/docs/integrations/document_loaders/azlyrics). ```python -from langchain.document_loaders import AZLyricsLoader +from langchain_community.document_loaders import AZLyricsLoader ``` diff --git a/docs/docs/integrations/providers/bageldb.mdx b/docs/docs/integrations/providers/bageldb.mdx index ec05493169..dc9a8ea708 100644 --- a/docs/docs/integrations/providers/bageldb.mdx +++ b/docs/docs/integrations/providers/bageldb.mdx @@ -17,5 +17,5 @@ pip install betabageldb See a [usage example](/docs/integrations/vectorstores/bageldb). ```python -from langchain.vectorstores import Bagel +from langchain_community.vectorstores import Bagel ``` diff --git a/docs/docs/integrations/providers/beautiful_soup.mdx b/docs/docs/integrations/providers/beautiful_soup.mdx index 53469d41bf..289d4059fa 100644 --- a/docs/docs/integrations/providers/beautiful_soup.mdx +++ b/docs/docs/integrations/providers/beautiful_soup.mdx @@ -16,5 +16,5 @@ pip install beautifulsoup4 See a [usage example](/docs/integrations/document_transformers/beautiful_soup). ```python -from langchain.document_loaders import BeautifulSoupTransformer +from langchain_community.document_transformers import BeautifulSoupTransformer ``` diff --git a/docs/docs/integrations/providers/bilibili.mdx b/docs/docs/integrations/providers/bilibili.mdx index 6ff7f9b67c..ec497ec509 100644 --- a/docs/docs/integrations/providers/bilibili.mdx +++ b/docs/docs/integrations/providers/bilibili.mdx @@ -13,5 +13,5 @@ pip install bilibili-api-python See a [usage example](/docs/integrations/document_loaders/bilibili). ```python -from langchain.document_loaders import BiliBiliLoader +from langchain_community.document_loaders import BiliBiliLoader ``` diff --git a/docs/docs/integrations/providers/blackboard.mdx b/docs/docs/integrations/providers/blackboard.mdx index 69a2a176fe..09312bc4df 100644 --- a/docs/docs/integrations/providers/blackboard.mdx +++ b/docs/docs/integrations/providers/blackboard.mdx @@ -17,6 +17,6 @@ There isn't any special setup for it. See a [usage example](/docs/integrations/document_loaders/blackboard). ```python -from langchain.document_loaders import BlackboardLoader +from langchain_community.document_loaders import BlackboardLoader ``` diff --git a/docs/docs/integrations/providers/brave_search.mdx b/docs/docs/integrations/providers/brave_search.mdx index 9291c99174..647004302c 100644 --- a/docs/docs/integrations/providers/brave_search.mdx +++ b/docs/docs/integrations/providers/brave_search.mdx @@ -24,7 +24,7 @@ To get access to the Brave Search API, you need to [create an account and get an See a [usage example](/docs/integrations/document_loaders/brave_search). 
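A minimal hedged sketch of the loader in use (the query and API key are placeholders):

```python
# Hedged sketch: run a Brave Search query and load the results as documents.
from langchain_community.document_loaders import BraveSearchLoader

loader = BraveSearchLoader(
    query="obama middle name",
    api_key="<BRAVE_API_KEY>",       # placeholder
    search_kwargs={"count": 3},      # number of results to fetch
)
docs = loader.load()
```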
```python -from langchain.document_loaders import BraveSearchLoader +from langchain_community.document_loaders import BraveSearchLoader ``` ## Tool diff --git a/docs/docs/integrations/providers/chroma.mdx b/docs/docs/integrations/providers/chroma.mdx index 089f9fa64b..ffa2951213 100644 --- a/docs/docs/integrations/providers/chroma.mdx +++ b/docs/docs/integrations/providers/chroma.mdx @@ -15,7 +15,7 @@ There exists a wrapper around Chroma vector databases, allowing you to use it as whether for semantic search or example selection. ```python -from langchain.vectorstores import Chroma +from langchain_community.vectorstores import Chroma ``` For a more detailed walkthrough of the Chroma wrapper, see [this notebook](/docs/integrations/vectorstores/chroma) diff --git a/docs/docs/integrations/providers/clarifai.mdx b/docs/docs/integrations/providers/clarifai.mdx index 56c702c147..945dcae0f1 100644 --- a/docs/docs/integrations/providers/clarifai.mdx +++ b/docs/docs/integrations/providers/clarifai.mdx @@ -46,7 +46,7 @@ Clarifai's vector DB was launched in 2016 and has been optimized to support live You can also add data directly from LangChain as well, and the auto-indexing will take place for you. You'll notice this is a little different than other vectorstores where you need to provide an embedding model in their constructor and have LangChain coordinate getting the embeddings from text and writing those to the index. Not only is it more convenient, but it's much more scalable to use Clarifai's distributed cloud to do all the indexing in the background. ```python -from langchain.vectorstores import Clarifai +from langchain_community.vectorstores import Clarifai clarifai_vector_db = Clarifai.from_texts(user_id=USER_ID, app_id=APP_ID, texts=texts, pat=CLARIFAI_PAT, number_of_docs=NUMBER_OF_DOCS, metadatas = metadatas) ``` For more details, the docs on the Clarifai vector store provide a [detailed walkthrough](/docs/integrations/vectorstores/clarifai). diff --git a/docs/docs/integrations/providers/clickhouse.mdx b/docs/docs/integrations/providers/clickhouse.mdx index 08acfc6c6b..64e4608c53 100644 --- a/docs/docs/integrations/providers/clickhouse.mdx +++ b/docs/docs/integrations/providers/clickhouse.mdx @@ -20,6 +20,6 @@ pip install clickhouse-connect See a [usage example](/docs/integrations/vectorstores/clickhouse). ```python -from langchain.vectorstores import Clickhouse, ClickhouseSettings +from langchain_community.vectorstores import Clickhouse, ClickhouseSettings ``` diff --git a/docs/docs/integrations/providers/college_confidential.mdx b/docs/docs/integrations/providers/college_confidential.mdx index 6460800f07..4f081945b9 100644 --- a/docs/docs/integrations/providers/college_confidential.mdx +++ b/docs/docs/integrations/providers/college_confidential.mdx @@ -12,5 +12,5 @@ There isn't any special setup for it. See a [usage example](/docs/integrations/document_loaders/college_confidential). ```python -from langchain.document_loaders import CollegeConfidentialLoader +from langchain_community.document_loaders import CollegeConfidentialLoader ``` diff --git a/docs/docs/integrations/providers/confluence.mdx b/docs/docs/integrations/providers/confluence.mdx index da5c323b45..27a7e274a2 100644 --- a/docs/docs/integrations/providers/confluence.mdx +++ b/docs/docs/integrations/providers/confluence.mdx @@ -18,5 +18,5 @@ See [instructions](https://support.atlassian.com/atlassian-account/docs/manage-a See a [usage example](/docs/integrations/document_loaders/confluence). 
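A minimal sketch, assuming an Atlassian Cloud site (the URL, username, API key, and space key are placeholders):

```python
# Hedged sketch: load pages from one Confluence space.
from langchain_community.document_loaders import ConfluenceLoader

loader = ConfluenceLoader(
    url="https://yoursite.atlassian.com/wiki",  # placeholder
    username="me@example.com",                  # placeholder
    api_key="<API_KEY>",                        # placeholder
)
docs = loader.load(space_key="SPACE", limit=50)
```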
```python -from langchain.document_loaders import ConfluenceLoader +from langchain_community.document_loaders import ConfluenceLoader ``` diff --git a/docs/docs/integrations/providers/dashvector.mdx b/docs/docs/integrations/providers/dashvector.mdx index 1d42ad8421..b18fca590b 100644 --- a/docs/docs/integrations/providers/dashvector.mdx +++ b/docs/docs/integrations/providers/dashvector.mdx @@ -18,7 +18,7 @@ which allows it to be readily used for various scenarios, such as semantic searc You may import the vectorstore by: ```python -from langchain.vectorstores import DashVector +from langchain_community.vectorstores import DashVector ``` For a detailed walkthrough of the DashVector wrapper, please refer to [this notebook](/docs/integrations/vectorstores/dashvector) diff --git a/docs/docs/integrations/providers/datadog_logs.mdx b/docs/docs/integrations/providers/datadog_logs.mdx index 26bca92f1a..eb365eed92 100644 --- a/docs/docs/integrations/providers/datadog_logs.mdx +++ b/docs/docs/integrations/providers/datadog_logs.mdx @@ -15,5 +15,5 @@ We must initialize the loader with the Datadog API key and APP key, and we need See a [usage example](/docs/integrations/document_loaders/datadog_logs). ```python -from langchain.document_loaders import DatadogLogsLoader +from langchain_community.document_loaders import DatadogLogsLoader ``` diff --git a/docs/docs/integrations/providers/diffbot.mdx b/docs/docs/integrations/providers/diffbot.mdx index 8a423c2a72..da130e3cc1 100644 --- a/docs/docs/integrations/providers/diffbot.mdx +++ b/docs/docs/integrations/providers/diffbot.mdx @@ -14,5 +14,5 @@ Read [instructions](https://docs.diffbot.com/reference/authentication) how to ge See a [usage example](/docs/integrations/document_loaders/diffbot). ```python -from langchain.document_loaders import DiffbotLoader +from langchain_community.document_loaders import DiffbotLoader ``` diff --git a/docs/docs/integrations/providers/dingo.mdx b/docs/docs/integrations/providers/dingo.mdx index ab5bffa65e..be0c9f83fa 100644 --- a/docs/docs/integrations/providers/dingo.mdx +++ b/docs/docs/integrations/providers/dingo.mdx @@ -13,7 +13,7 @@ whether for semantic search or example selection. To import this vectorstore: ```python -from langchain.vectorstores import Dingo +from langchain_community.vectorstores import Dingo ``` For a more detailed walkthrough of the DingoDB wrapper, see [this notebook](/docs/integrations/vectorstores/dingo) diff --git a/docs/docs/integrations/providers/discord.mdx b/docs/docs/integrations/providers/discord.mdx index 07b5258e88..1aef837405 100644 --- a/docs/docs/integrations/providers/discord.mdx +++ b/docs/docs/integrations/providers/discord.mdx @@ -26,5 +26,5 @@ with Discord. That email will have a download button using which you would be ab See a [usage example](/docs/integrations/document_loaders/discord). ```python -from langchain.document_loaders import DiscordChatLoader +from langchain_community.document_loaders import DiscordChatLoader ``` diff --git a/docs/docs/integrations/providers/docarray.mdx b/docs/docs/integrations/providers/docarray.mdx index 5673c50c32..5895fa30f7 100644 --- a/docs/docs/integrations/providers/docarray.mdx +++ b/docs/docs/integrations/providers/docarray.mdx @@ -20,11 +20,11 @@ LangChain provides an access to the `In-memory` and `HNSW` vector stores from th See a [usage example](/docs/integrations/vectorstores/docarray_hnsw). 
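For orientation, a hedged sketch of the HNSW variant; `work_dir` and `n_dim` here are placeholder assumptions, and `n_dim` must match the width of your embedding model (768 for the default `HuggingFaceEmbeddings` model):

```python
# Hedged sketch: DocArrayHnswSearch persists its index on disk at work_dir.
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import DocArrayHnswSearch

embeddings = HuggingFaceEmbeddings()  # assumption: any Embeddings works
db = DocArrayHnswSearch.from_texts(
    ["hello world"], embeddings, work_dir="./hnswlib_index", n_dim=768
)
docs = db.similarity_search("a greeting")
```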
```python -from langchain.vectorstores DocArrayHnswSearch +from langchain_community.vectorstores import DocArrayHnswSearch ``` See a [usage example](/docs/integrations/vectorstores/docarray_in_memory). ```python -from langchain.vectorstores DocArrayInMemorySearch +from langchain_community.vectorstores import DocArrayInMemorySearch ``` diff --git a/docs/docs/integrations/providers/doctran.mdx b/docs/docs/integrations/providers/doctran.mdx index 98848b8a0a..c85844766e 100644 --- a/docs/docs/integrations/providers/doctran.mdx +++ b/docs/docs/integrations/providers/doctran.mdx @@ -19,19 +19,19 @@ pip install doctran See a [usage example for DoctranQATransformer](/docs/integrations/document_transformers/doctran_interrogate_document). ```python -from langchain.document_loaders import DoctranQATransformer +from langchain_community.document_transformers import DoctranQATransformer ``` ### Property Extractor See a [usage example for DoctranPropertyExtractor](/docs/integrations/document_transformers/doctran_extract_properties). ```python -from langchain.document_loaders import DoctranPropertyExtractor +from langchain_community.document_transformers import DoctranPropertyExtractor ``` ### Document Translator See a [usage example for DoctranTextTranslator](/docs/integrations/document_transformers/doctran_translate_document). ```python -from langchain.document_loaders import DoctranTextTranslator +from langchain_community.document_transformers import DoctranTextTranslator ``` diff --git a/docs/docs/integrations/providers/docugami.mdx b/docs/docs/integrations/providers/docugami.mdx index b0ea126528..a9689ecdd3 100644 --- a/docs/docs/integrations/providers/docugami.mdx +++ b/docs/docs/integrations/providers/docugami.mdx @@ -16,5 +16,5 @@ pip install dgml-utils See a [usage example](/docs/integrations/document_loaders/docugami). ```python -from langchain.document_loaders import DocugamiLoader +from langchain_community.document_loaders import DocugamiLoader ``` diff --git a/docs/docs/integrations/providers/duckdb.mdx b/docs/docs/integrations/providers/duckdb.mdx index 9e36b8cbd0..f965e129b9 100644 --- a/docs/docs/integrations/providers/duckdb.mdx +++ b/docs/docs/integrations/providers/duckdb.mdx @@ -15,5 +15,5 @@ pip install duckdb See a [usage example](/docs/integrations/document_loaders/duckdb). ```python -from langchain.document_loaders import DuckDBLoader +from langchain_community.document_loaders import DuckDBLoader ``` diff --git a/docs/docs/integrations/providers/elasticsearch.mdx b/docs/docs/integrations/providers/elasticsearch.mdx index e2e96779b5..a7125b55b3 100644 --- a/docs/docs/integrations/providers/elasticsearch.mdx +++ b/docs/docs/integrations/providers/elasticsearch.mdx @@ -31,9 +31,9 @@ pip install elasticsearch The vector store is a simple wrapper around Elasticsearch. It provides a simple interface to store and retrieve vectors. ```python -from langchain.vectorstores import ElasticsearchStore +from langchain_community.vectorstores import ElasticsearchStore -from langchain.document_loaders import TextLoader +from langchain_community.document_loaders import TextLoader from langchain.text_splitter import CharacterTextSplitter loader = TextLoader("./state_of_the_union.txt") diff --git a/docs/docs/integrations/providers/epsilla.mdx b/docs/docs/integrations/providers/epsilla.mdx index fb4fa4039c..78da4d6a98 100644 --- a/docs/docs/integrations/providers/epsilla.mdx +++ b/docs/docs/integrations/providers/epsilla.mdx @@ -17,7 +17,7 @@ whether for semantic search or example selection. 
To import this vectorstore: ```python -from langchain.vectorstores import Epsilla +from langchain_community.vectorstores import Epsilla ``` For a more detailed walkthrough of the Epsilla wrapper, see [this notebook](/docs/integrations/vectorstores/epsilla) \ No newline at end of file diff --git a/docs/docs/integrations/providers/evernote.mdx b/docs/docs/integrations/providers/evernote.mdx index a52cf5407f..a58c3fc0cf 100644 --- a/docs/docs/integrations/providers/evernote.mdx +++ b/docs/docs/integrations/providers/evernote.mdx @@ -16,5 +16,5 @@ pip install html2text See a [usage example](/docs/integrations/document_loaders/evernote). ```python -from langchain.document_loaders import EverNoteLoader +from langchain_community.document_loaders import EverNoteLoader ``` diff --git a/docs/docs/integrations/providers/facebook_chat.mdx b/docs/docs/integrations/providers/facebook_chat.mdx index 7d4ebfc1e4..b0c50b9d87 100644 --- a/docs/docs/integrations/providers/facebook_chat.mdx +++ b/docs/docs/integrations/providers/facebook_chat.mdx @@ -17,5 +17,5 @@ pip install pandas See a [usage example](/docs/integrations/document_loaders/facebook_chat). ```python -from langchain.document_loaders import FacebookChatLoader +from langchain_community.document_loaders import FacebookChatLoader ``` diff --git a/docs/docs/integrations/providers/facebook_faiss.mdx b/docs/docs/integrations/providers/facebook_faiss.mdx index d900d64fb3..c85d1907e4 100644 --- a/docs/docs/integrations/providers/facebook_faiss.mdx +++ b/docs/docs/integrations/providers/facebook_faiss.mdx @@ -28,5 +28,5 @@ pip install faiss-cpu # For CPU Installation See a [usage example](/docs/integrations/vectorstores/faiss). ```python -from langchain.vectorstores import FAISS +from langchain_community.vectorstores import FAISS ``` diff --git a/docs/docs/integrations/providers/figma.mdx b/docs/docs/integrations/providers/figma.mdx index f76485807c..6b108aaa21 100644 --- a/docs/docs/integrations/providers/figma.mdx +++ b/docs/docs/integrations/providers/figma.mdx @@ -17,5 +17,5 @@ The `file key` can be pulled from the URL. https://www.figma.com/file/{filekey} See a [usage example](/docs/integrations/document_loaders/figma). ```python -from langchain.document_loaders import FigmaFileLoader +from langchain_community.document_loaders import FigmaFileLoader ``` diff --git a/docs/docs/integrations/providers/git.mdx b/docs/docs/integrations/providers/git.mdx index fb4304ebc0..bc20c1710c 100644 --- a/docs/docs/integrations/providers/git.mdx +++ b/docs/docs/integrations/providers/git.mdx @@ -15,5 +15,5 @@ pip install GitPython See a [usage example](/docs/integrations/document_loaders/git). ```python -from langchain.document_loaders import GitLoader +from langchain_community.document_loaders import GitLoader ``` diff --git a/docs/docs/integrations/providers/gitbook.mdx b/docs/docs/integrations/providers/gitbook.mdx index fa0283ef50..4c8a855923 100644 --- a/docs/docs/integrations/providers/gitbook.mdx +++ b/docs/docs/integrations/providers/gitbook.mdx @@ -11,5 +11,5 @@ There isn't any special setup for it. See a [usage example](/docs/integrations/document_loaders/gitbook). 
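A minimal sketch that crawls a whole GitBook site (the URL is an illustrative placeholder):

```python
# Hedged sketch: load every reachable page of a GitBook site as documents.
from langchain_community.document_loaders import GitbookLoader

loader = GitbookLoader("https://docs.gitbook.com", load_all_paths=True)
docs = loader.load()
```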
```python -from langchain.document_loaders import GitbookLoader +from langchain_community.document_loaders import GitbookLoader ``` diff --git a/docs/docs/integrations/providers/grobid.mdx b/docs/docs/integrations/providers/grobid.mdx index 4fd52abe23..9740854ed1 100644 --- a/docs/docs/integrations/providers/grobid.mdx +++ b/docs/docs/integrations/providers/grobid.mdx @@ -21,8 +21,8 @@ you're ready to go. You can now use the GrobidParser to produce documents ```python -from langchain.document_loaders.parsers import GrobidParser -from langchain.document_loaders.generic import GenericLoader +from langchain_community.document_loaders.parsers import GrobidParser +from langchain_community.document_loaders.generic import GenericLoader #Produce chunks from article paragraphs loader = GenericLoader.from_filesystem( diff --git a/docs/docs/integrations/providers/gutenberg.mdx b/docs/docs/integrations/providers/gutenberg.mdx index e4421e4d86..36eb816383 100644 --- a/docs/docs/integrations/providers/gutenberg.mdx +++ b/docs/docs/integrations/providers/gutenberg.mdx @@ -11,5 +11,5 @@ There isn't any special setup for it. See a [usage example](/docs/integrations/document_loaders/gutenberg). ```python -from langchain.document_loaders import GutenbergLoader +from langchain_community.document_loaders import GutenbergLoader ``` diff --git a/docs/docs/integrations/providers/hacker_news.mdx b/docs/docs/integrations/providers/hacker_news.mdx index 3c8a74b461..fc232a3db0 100644 --- a/docs/docs/integrations/providers/hacker_news.mdx +++ b/docs/docs/integrations/providers/hacker_news.mdx @@ -14,5 +14,5 @@ There isn't any special setup for it. See a [usage example](/docs/integrations/document_loaders/hacker_news). ```python -from langchain.document_loaders import HNLoader +from langchain_community.document_loaders import HNLoader ``` diff --git a/docs/docs/integrations/providers/hologres.mdx b/docs/docs/integrations/providers/hologres.mdx index dea4c567ab..8dbb3d80fa 100644 --- a/docs/docs/integrations/providers/hologres.mdx +++ b/docs/docs/integrations/providers/hologres.mdx @@ -19,5 +19,5 @@ pip install hologres-vector See a [usage example](/docs/integrations/vectorstores/hologres). ```python -from langchain.vectorstores import Hologres +from langchain_community.vectorstores import Hologres ``` diff --git a/docs/docs/integrations/providers/html2text.mdx b/docs/docs/integrations/providers/html2text.mdx index ac7f66ba77..c8cf35210f 100644 --- a/docs/docs/integrations/providers/html2text.mdx +++ b/docs/docs/integrations/providers/html2text.mdx @@ -15,5 +15,5 @@ pip install html2text See a [usage example](/docs/integrations/document_transformers/html2text). ```python -from langchain.document_loaders import Html2TextTransformer +from langchain_community.document_transformers import Html2TextTransformer ``` diff --git a/docs/docs/integrations/providers/ifixit.mdx b/docs/docs/integrations/providers/ifixit.mdx index a4fee5bc01..fdcb4ba802 100644 --- a/docs/docs/integrations/providers/ifixit.mdx +++ b/docs/docs/integrations/providers/ifixit.mdx @@ -12,5 +12,5 @@ There isn't any special setup for it. See a [usage example](/docs/integrations/document_loaders/ifixit). 
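A minimal sketch using an illustrative guide URL:

```python
# Hedged sketch: load a single iFixit page (teardowns, guides, and
# device pages are all supported URL forms).
from langchain_community.document_loaders import IFixitLoader

loader = IFixitLoader("https://www.ifixit.com/Teardown/Banana+Teardown/811")
docs = loader.load()
```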
```python -from langchain.document_loaders import IFixitLoader +from langchain_community.document_loaders import IFixitLoader ``` diff --git a/docs/docs/integrations/providers/imsdb.mdx b/docs/docs/integrations/providers/imsdb.mdx index 1e13821ef1..8b30a2dea9 100644 --- a/docs/docs/integrations/providers/imsdb.mdx +++ b/docs/docs/integrations/providers/imsdb.mdx @@ -12,5 +12,5 @@ See a [usage example](/docs/integrations/document_loaders/imsdb). ```python -from langchain.document_loaders import IMSDbLoader +from langchain_community.document_loaders import IMSDbLoader ``` diff --git a/docs/docs/integrations/providers/lancedb.mdx b/docs/docs/integrations/providers/lancedb.mdx index 1275e690bc..44440de047 100644 --- a/docs/docs/integrations/providers/lancedb.mdx +++ b/docs/docs/integrations/providers/lancedb.mdx @@ -17,7 +17,7 @@ whether for semantic search or example selection. To import this vectorstore: ```python -from langchain.vectorstores import LanceDB +from langchain_community.vectorstores import LanceDB ``` For a more detailed walkthrough of the LanceDB wrapper, see [this notebook](/docs/integrations/vectorstores/lancedb) diff --git a/docs/docs/integrations/providers/marqo.md b/docs/docs/integrations/providers/marqo.md index 3a6e24e35c..106db08599 100644 --- a/docs/docs/integrations/providers/marqo.md +++ b/docs/docs/integrations/providers/marqo.md @@ -25,7 +25,7 @@ The Marqo vectorstore can also work with existing multimodel indexes where your To import this vectorstore: ```python -from langchain.vectorstores import Marqo +from langchain_community.vectorstores import Marqo ``` For a more detailed walkthrough of the Marqo wrapper and some of its unique features, see [this notebook](/docs/integrations/vectorstores/marqo) diff --git a/docs/docs/integrations/providers/mediawikidump.mdx b/docs/docs/integrations/providers/mediawikidump.mdx index 03e02a3cc6..52f5fde1e7 100644 --- a/docs/docs/integrations/providers/mediawikidump.mdx +++ b/docs/docs/integrations/providers/mediawikidump.mdx @@ -27,5 +27,5 @@ See a [usage example](/docs/integrations/document_loaders/mediawikidump). ```python -from langchain.document_loaders import MWDumpLoader +from langchain_community.document_loaders import MWDumpLoader ``` diff --git a/docs/docs/integrations/providers/meilisearch.mdx b/docs/docs/integrations/providers/meilisearch.mdx index ffe899d984..4290e6ad1f 100644 --- a/docs/docs/integrations/providers/meilisearch.mdx +++ b/docs/docs/integrations/providers/meilisearch.mdx @@ -25,6 +25,6 @@ pip install meilisearchv See a [usage example](/docs/integrations/vectorstores/meilisearch). ```python -from langchain.vectorstores import Meilisearch +from langchain_community.vectorstores import Meilisearch ``` diff --git a/docs/docs/integrations/providers/milvus.mdx b/docs/docs/integrations/providers/milvus.mdx index 9f963233f5..ea11c08fd1 100644 --- a/docs/docs/integrations/providers/milvus.mdx +++ b/docs/docs/integrations/providers/milvus.mdx @@ -19,7 +19,7 @@ whether for semantic search or example selection. 
To import this vectorstore: ```python -from langchain.vectorstores import Milvus +from langchain_community.vectorstores import Milvus ``` For a more detailed walkthrough of the `Milvus` wrapper, see [this notebook](/docs/integrations/vectorstores/milvus) diff --git a/docs/docs/integrations/providers/modern_treasury.mdx b/docs/docs/integrations/providers/modern_treasury.mdx index b6eb2d399c..908f17644e 100644 --- a/docs/docs/integrations/providers/modern_treasury.mdx +++ b/docs/docs/integrations/providers/modern_treasury.mdx @@ -15,5 +15,5 @@ See a [usage example](/docs/integrations/document_loaders/modern_treasury). ```python -from langchain.document_loaders import ModernTreasuryLoader +from langchain_community.document_loaders import ModernTreasuryLoader ``` diff --git a/docs/docs/integrations/providers/mongodb_atlas.mdx b/docs/docs/integrations/providers/mongodb_atlas.mdx index f883439356..9c5b792c43 100644 --- a/docs/docs/integrations/providers/mongodb_atlas.mdx +++ b/docs/docs/integrations/providers/mongodb_atlas.mdx @@ -19,6 +19,6 @@ pip install pymongo See a [usage example](/docs/integrations/vectorstores/mongodb_atlas). ```python -from langchain.vectorstores import MongoDBAtlasVectorSearch +from langchain_community.vectorstores import MongoDBAtlasVectorSearch ``` diff --git a/docs/docs/integrations/providers/myscale.mdx b/docs/docs/integrations/providers/myscale.mdx index 367b6d3628..1795b42a06 100644 --- a/docs/docs/integrations/providers/myscale.mdx +++ b/docs/docs/integrations/providers/myscale.mdx @@ -34,7 +34,7 @@ There are two ways to set up parameters for myscale index. ```python - from langchain.vectorstores import MyScale, MyScaleSettings + from langchain_community.vectorstores import MyScale, MyScaleSettings config = MyScaleSettings(host="", port=8443, ...) index = MyScale(embedding_function, config) index.add_documents(...) @@ -60,7 +60,7 @@ whether for semantic search or similar example retrieval. To import this vectorstore: ```python -from langchain.vectorstores import MyScale +from langchain_community.vectorstores import MyScale ``` For a more detailed walkthrough of the MyScale wrapper, see [this notebook](/docs/integrations/vectorstores/myscale) diff --git a/docs/docs/integrations/providers/neo4j.mdx b/docs/docs/integrations/providers/neo4j.mdx index 232de5c53f..37bb3c4ffc 100644 --- a/docs/docs/integrations/providers/neo4j.mdx +++ b/docs/docs/integrations/providers/neo4j.mdx @@ -26,7 +26,7 @@ whether for semantic search or example selection. To import this vectorstore: ```python -from langchain.vectorstores import Neo4jVector +from langchain_community.vectorstores import Neo4jVector ``` For a more detailed walkthrough of the Neo4j vector index wrapper, see [documentation](/docs/integrations/vectorstores/neo4jvector) @@ -37,7 +37,7 @@ There exists a wrapper around Neo4j graph database that allows you to generate C and use them to retrieve relevant information from the database. ```python -from langchain.graphs import Neo4jGraph +from langchain_community.graphs import Neo4jGraph from langchain.chains import GraphCypherQAChain ``` @@ -51,7 +51,7 @@ By coupling Diffbot's NLP API with Neo4j, a graph database, you can create power These graph structures are fully queryable and can be integrated into various applications. 
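For orientation, a hedged sketch of that pipeline using the imports renamed below; the Neo4j credentials, Diffbot API key, and sample sentence are all placeholders:

```python
# Hedged sketch: extract a graph from raw text with Diffbot, then store it in Neo4j.
from langchain_community.graphs import Neo4jGraph
from langchain_core.documents import Document
from langchain_experimental.graph_transformers.diffbot import DiffbotGraphTransformer

diffbot_nlp = DiffbotGraphTransformer(diffbot_api_key="<DIFFBOT_API_KEY>")
graph_documents = diffbot_nlp.convert_to_graph_documents(
    [Document(page_content="Warren Buffett is the chairman of Berkshire Hathaway.")]
)

graph = Neo4jGraph(
    url="bolt://localhost:7687",  # placeholder connection details
    username="neo4j",
    password="<PASSWORD>",
)
graph.add_graph_documents(graph_documents)
```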
```python -from langchain.graphs import Neo4jGraph +from langchain_community.graphs import Neo4jGraph from langchain_experimental.graph_transformers.diffbot import DiffbotGraphTransformer ``` diff --git a/docs/docs/integrations/providers/notion.mdx b/docs/docs/integrations/providers/notion.mdx index 6e16b1233e..7f51368680 100644 --- a/docs/docs/integrations/providers/notion.mdx +++ b/docs/docs/integrations/providers/notion.mdx @@ -16,12 +16,12 @@ See a [usage example for the NotionDirectoryLoader](/docs/integrations/document_ ```python -from langchain.document_loaders import NotionDirectoryLoader +from langchain_community.document_loaders import NotionDirectoryLoader ``` See a [usage example for the NotionDBLoader](/docs/integrations/document_loaders/notiondb). ```python -from langchain.document_loaders import NotionDBLoader +from langchain_community.document_loaders import NotionDBLoader ``` diff --git a/docs/docs/integrations/providers/nuclia.mdx b/docs/docs/integrations/providers/nuclia.mdx index b7a587a86c..f60dd58a2c 100644 --- a/docs/docs/integrations/providers/nuclia.mdx +++ b/docs/docs/integrations/providers/nuclia.mdx @@ -33,5 +33,5 @@ nua = NucliaUnderstandingAPI(enable_ml=True) See a [usage example](/docs/integrations/document_transformers/nuclia_transformer). ```python -from langchain.document_transformers.nuclia_text_transform import NucliaTextTransformer +from langchain_community.document_transformers.nuclia_text_transform import NucliaTextTransformer ``` diff --git a/docs/docs/integrations/providers/obsidian.mdx b/docs/docs/integrations/providers/obsidian.mdx index e7ab67f3e9..ce1169df90 100644 --- a/docs/docs/integrations/providers/obsidian.mdx +++ b/docs/docs/integrations/providers/obsidian.mdx @@ -14,6 +14,6 @@ See a [usage example](/docs/integrations/document_loaders/obsidian). ```python -from langchain.document_loaders import ObsidianLoader +from langchain_community.document_loaders import ObsidianLoader ``` diff --git a/docs/docs/integrations/providers/opensearch.mdx b/docs/docs/integrations/providers/opensearch.mdx index 6e428635dc..be55c26d7b 100644 --- a/docs/docs/integrations/providers/opensearch.mdx +++ b/docs/docs/integrations/providers/opensearch.mdx @@ -15,7 +15,7 @@ or using painless scripting and script scoring functions for bruteforce vector s To import this vectorstore: ```python -from langchain.vectorstores import OpenSearchVectorSearch +from langchain_community.vectorstores import OpenSearchVectorSearch ``` For a more detailed walkthrough of the OpenSearch wrapper, see [this notebook](/docs/integrations/vectorstores/opensearch) diff --git a/docs/docs/integrations/providers/pg_embedding.mdx b/docs/docs/integrations/providers/pg_embedding.mdx index b9de4b6c0a..7ce5a026b3 100644 --- a/docs/docs/integrations/providers/pg_embedding.mdx +++ b/docs/docs/integrations/providers/pg_embedding.mdx @@ -19,6 +19,6 @@ pip install tiktoken See a [usage example](/docs/integrations/vectorstores/pgembedding). ```python -from langchain.vectorstores import PGEmbedding +from langchain_community.vectorstores import PGEmbedding ``` diff --git a/docs/docs/integrations/providers/pgvector.mdx b/docs/docs/integrations/providers/pgvector.mdx index e7cefb5b79..c98aaea19a 100644 --- a/docs/docs/integrations/providers/pgvector.mdx +++ b/docs/docs/integrations/providers/pgvector.mdx @@ -21,7 +21,7 @@ whether for semantic search or example selection. 
To import this vectorstore: ```python -from langchain.vectorstores.pgvector import PGVector +from langchain_community.vectorstores.pgvector import PGVector ``` ### Usage diff --git a/docs/docs/integrations/providers/pinecone.mdx b/docs/docs/integrations/providers/pinecone.mdx index 61a9552760..905df1baa5 100644 --- a/docs/docs/integrations/providers/pinecone.mdx +++ b/docs/docs/integrations/providers/pinecone.mdx @@ -18,7 +18,7 @@ There exists a wrapper around Pinecone indexes, allowing you to use it as a vect whether for semantic search or example selection. ```python -from langchain.vectorstores import Pinecone +from langchain_community.vectorstores import Pinecone ``` For a more detailed walkthrough of the Pinecone vectorstore, see [this notebook](/docs/integrations/vectorstores/pinecone) diff --git a/docs/docs/integrations/providers/pubmed.md b/docs/docs/integrations/providers/pubmed.md index b6b32420d2..aa0a810cb6 100644 --- a/docs/docs/integrations/providers/pubmed.md +++ b/docs/docs/integrations/providers/pubmed.md @@ -26,5 +26,5 @@ from langchain.retrievers import PubMedRetriever See a [usage example](/docs/integrations/document_loaders/pubmed). ```python -from langchain.document_loaders import PubMedLoader +from langchain_community.document_loaders import PubMedLoader ``` diff --git a/docs/docs/integrations/providers/qdrant.mdx b/docs/docs/integrations/providers/qdrant.mdx index ace4c34f9d..fdaaef5abf 100644 --- a/docs/docs/integrations/providers/qdrant.mdx +++ b/docs/docs/integrations/providers/qdrant.mdx @@ -21,7 +21,7 @@ whether for semantic search or example selection. To import this vectorstore: ```python -from langchain.vectorstores import Qdrant +from langchain_community.vectorstores import Qdrant ``` For a more detailed walkthrough of the Qdrant wrapper, see [this notebook](/docs/integrations/vectorstores/qdrant) diff --git a/docs/docs/integrations/providers/reddit.mdx b/docs/docs/integrations/providers/reddit.mdx index 36f96171a7..a7327cfed0 100644 --- a/docs/docs/integrations/providers/reddit.mdx +++ b/docs/docs/integrations/providers/reddit.mdx @@ -18,5 +18,5 @@ See a [usage example](/docs/integrations/document_loaders/reddit). ```python -from langchain.document_loaders import RedditPostsLoader +from langchain_community.document_loaders import RedditPostsLoader ``` diff --git a/docs/docs/integrations/providers/redis.mdx b/docs/docs/integrations/providers/redis.mdx index bc1277d6d2..18f706795f 100644 --- a/docs/docs/integrations/providers/redis.mdx +++ b/docs/docs/integrations/providers/redis.mdx @@ -118,7 +118,7 @@ The vectorstore wrapper turns Redis into a low-latency [vector database](https:/ To import this vectorstore: ```python -from langchain.vectorstores import Redis +from langchain_community.vectorstores import Redis ``` For a more detailed walkthrough of the Redis vectorstore wrapper, see [this notebook](/docs/integrations/vectorstores/redis). diff --git a/docs/docs/integrations/providers/roam.mdx b/docs/docs/integrations/providers/roam.mdx index 03fd1d790c..322ade8d29 100644 --- a/docs/docs/integrations/providers/roam.mdx +++ b/docs/docs/integrations/providers/roam.mdx @@ -13,5 +13,5 @@ There isn't any special setup for it. See a [usage example](/docs/integrations/document_loaders/roam). 
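A minimal sketch, assuming an unpacked Roam export directory (the path is a placeholder):

```python
# Hedged sketch: load every page from a local Roam Research export.
from langchain_community.document_loaders import RoamLoader

loader = RoamLoader("Roam_DB")
docs = loader.load()
```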
```python -from langchain.document_loaders import RoamLoader +from langchain_community.document_loaders import RoamLoader ``` diff --git a/docs/docs/integrations/providers/rockset.mdx b/docs/docs/integrations/providers/rockset.mdx index b13b4fb944..735c218178 100644 --- a/docs/docs/integrations/providers/rockset.mdx +++ b/docs/docs/integrations/providers/rockset.mdx @@ -15,19 +15,19 @@ pip install rockset See a [usage example](/docs/integrations/vectorstores/rockset). ```python -from langchain.vectorstores import Rockset +from langchain_community.vectorstores import Rockset ``` ## Document Loader See a [usage example](/docs/integrations/document_loaders/rockset). ```python -from langchain.document_loaders import RocksetLoader +from langchain_community.document_loaders import RocksetLoader ``` ## Chat Message History See a [usage example](/docs/integrations/memory/rockset_chat_message_history). ```python -from langchain.memory.chat_message_histories import RocksetChatMessageHistory +from langchain_community.chat_message_histories import RocksetChatMessageHistory ``` \ No newline at end of file diff --git a/docs/docs/integrations/providers/semadb.mdx b/docs/docs/integrations/providers/semadb.mdx index 700f44fab4..905ef96613 100644 --- a/docs/docs/integrations/providers/semadb.mdx +++ b/docs/docs/integrations/providers/semadb.mdx @@ -13,7 +13,7 @@ None required, get started directly with SemaDB Cloud at [RapidAPI](https://rapi There is a basic wrapper around `SemaDB` collections allowing you to use it as a vectorstore. ```python -from langchain.vectorstores import SemaDB +from langchain_community.vectorstores import SemaDB ``` You can follow a tutorial on how to use the wrapper in [this notebook](/docs/integrations/vectorstores/semadb). \ No newline at end of file diff --git a/docs/docs/integrations/providers/singlestoredb.mdx b/docs/docs/integrations/providers/singlestoredb.mdx index d22f8b89c8..f2d41d67d7 100644 --- a/docs/docs/integrations/providers/singlestoredb.mdx +++ b/docs/docs/integrations/providers/singlestoredb.mdx @@ -16,5 +16,5 @@ pip install singlestoredb See a [usage example](/docs/integrations/vectorstores/singlestoredb). ```python -from langchain.vectorstores import SingleStoreDB +from langchain_community.vectorstores import SingleStoreDB ``` diff --git a/docs/docs/integrations/providers/sklearn.mdx b/docs/docs/integrations/providers/sklearn.mdx index 5dedf25391..ebb93942af 100644 --- a/docs/docs/integrations/providers/sklearn.mdx +++ b/docs/docs/integrations/providers/sklearn.mdx @@ -16,7 +16,7 @@ scikit-learn package, allowing you to use it as a vectorstore. To import this vectorstore: ```python -from langchain.vectorstores import SKLearnVectorStore +from langchain_community.vectorstores import SKLearnVectorStore ``` For a more detailed walkthrough of the SKLearnVectorStore wrapper, see [this notebook](/docs/integrations/vectorstores/sklearn). diff --git a/docs/docs/integrations/providers/slack.mdx b/docs/docs/integrations/providers/slack.mdx index 778d643160..10bf01b015 100644 --- a/docs/docs/integrations/providers/slack.mdx +++ b/docs/docs/integrations/providers/slack.mdx @@ -13,5 +13,5 @@ There isn't any special setup for it. See a [usage example](/docs/integrations/document_loaders/slack). 
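A minimal sketch (the zip path and workspace URL are placeholders):

```python
# Hedged sketch: load messages from an exported Slack workspace archive.
# The workspace URL lets the loader attach clickable links to each message.
from langchain_community.document_loaders import SlackDirectoryLoader

loader = SlackDirectoryLoader(
    "slack_export.zip",
    workspace_url="https://example.slack.com",
)
docs = loader.load()
```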
```python -from langchain.document_loaders import SlackDirectoryLoader +from langchain_community.document_loaders import SlackDirectoryLoader ``` diff --git a/docs/docs/integrations/providers/spreedly.mdx b/docs/docs/integrations/providers/spreedly.mdx index e7996b6224..16930aa06e 100644 --- a/docs/docs/integrations/providers/spreedly.mdx +++ b/docs/docs/integrations/providers/spreedly.mdx @@ -11,5 +11,5 @@ See [setup instructions](/docs/integrations/document_loaders/spreedly). See a [usage example](/docs/integrations/document_loaders/spreedly). ```python -from langchain.document_loaders import SpreedlyLoader +from langchain_community.document_loaders import SpreedlyLoader ``` diff --git a/docs/docs/integrations/providers/starrocks.mdx b/docs/docs/integrations/providers/starrocks.mdx index c6a1b65b0b..bc5c9983c9 100644 --- a/docs/docs/integrations/providers/starrocks.mdx +++ b/docs/docs/integrations/providers/starrocks.mdx @@ -17,5 +17,5 @@ pip install pymysql See a [usage example](/docs/integrations/vectorstores/starrocks). ```python -from langchain.vectorstores import StarRocks +from langchain_community.vectorstores import StarRocks ``` diff --git a/docs/docs/integrations/providers/stripe.mdx b/docs/docs/integrations/providers/stripe.mdx index 05cc6d4639..a7e80d97a7 100644 --- a/docs/docs/integrations/providers/stripe.mdx +++ b/docs/docs/integrations/providers/stripe.mdx @@ -12,5 +12,5 @@ See [setup instructions](/docs/integrations/document_loaders/stripe). See a [usage example](/docs/integrations/document_loaders/stripe). ```python -from langchain.document_loaders import StripeLoader +from langchain_community.document_loaders import StripeLoader ``` diff --git a/docs/docs/integrations/providers/supabase.mdx b/docs/docs/integrations/providers/supabase.mdx index 5c7d89eabb..7a574800d0 100644 --- a/docs/docs/integrations/providers/supabase.mdx +++ b/docs/docs/integrations/providers/supabase.mdx @@ -21,6 +21,6 @@ pip install supabase See a [usage example](/docs/integrations/vectorstores/supabase). ```python -from langchain.vectorstores import SupabaseVectorStore +from langchain_community.vectorstores import SupabaseVectorStore ``` diff --git a/docs/docs/integrations/providers/tair.mdx b/docs/docs/integrations/providers/tair.mdx index 8a0e6ad24c..a6c70f9c40 100644 --- a/docs/docs/integrations/providers/tair.mdx +++ b/docs/docs/integrations/providers/tair.mdx @@ -16,7 +16,7 @@ whether for semantic search or example selection. To import this vectorstore: ```python -from langchain.vectorstores import Tair +from langchain_community.vectorstores import Tair ``` For a more detailed walkthrough of the Tair wrapper, see [this notebook](/docs/integrations/vectorstores/tair) diff --git a/docs/docs/integrations/providers/telegram.mdx b/docs/docs/integrations/providers/telegram.mdx index 25ebd990b5..4a562ca529 100644 --- a/docs/docs/integrations/providers/telegram.mdx +++ b/docs/docs/integrations/providers/telegram.mdx @@ -12,6 +12,6 @@ See [setup instructions](/docs/integrations/document_loaders/telegram). See a [usage example](/docs/integrations/document_loaders/telegram). 
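A minimal sketch, assuming a chat exported from Telegram Desktop as JSON (the path is a placeholder):

```python
# Hedged sketch: load an exported Telegram chat file as documents.
from langchain_community.document_loaders import TelegramChatFileLoader

loader = TelegramChatFileLoader("example_data/telegram.json")
docs = loader.load()
```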
```python -from langchain.document_loaders import TelegramChatFileLoader -from langchain.document_loaders import TelegramChatApiLoader +from langchain_community.document_loaders import TelegramChatFileLoader +from langchain_community.document_loaders import TelegramChatApiLoader ``` diff --git a/docs/docs/integrations/providers/tencent.mdx b/docs/docs/integrations/providers/tencent.mdx index 731ee68ab8..bd0190e61d 100644 --- a/docs/docs/integrations/providers/tencent.mdx +++ b/docs/docs/integrations/providers/tencent.mdx @@ -42,7 +42,7 @@ pip install tcvectordb For more information, see [this notebook](/docs/integrations/vectorstores/tencentvectordb) ```python -from langchain.vectorstores import TencentVectorDB +from langchain_community.vectorstores import TencentVectorDB ``` ## Document Loaders @@ -68,7 +68,7 @@ pip install cos-python-sdk-v5 For more information, see [this notebook](/docs/integrations/document_loaders/tencent_cos_directory) ```python -from langchain.document_loaders import TencentCOSDirectoryLoader +from langchain_community.document_loaders import TencentCOSDirectoryLoader from qcloud_cos import CosConfig ``` @@ -77,6 +77,6 @@ from qcloud_cos import CosConfig For more information, see [this notebook](/docs/integrations/document_loaders/tencent_cos_file) ```python -from langchain.document_loaders import TencentCOSFileLoader +from langchain_community.document_loaders import TencentCOSFileLoader from qcloud_cos import CosConfig ``` \ No newline at end of file diff --git a/docs/docs/integrations/providers/tensorflow_datasets.mdx b/docs/docs/integrations/providers/tensorflow_datasets.mdx index 6b77756344..b3cc150977 100644 --- a/docs/docs/integrations/providers/tensorflow_datasets.mdx +++ b/docs/docs/integrations/providers/tensorflow_datasets.mdx @@ -27,5 +27,5 @@ pip install tensorflow-dataset See a [usage example](/docs/integrations/document_loaders/tensorflow_datasets). ```python -from langchain.document_loaders import TensorflowDatasetLoader +from langchain_community.document_loaders import TensorflowDatasetLoader ``` diff --git a/docs/docs/integrations/providers/tigris.mdx b/docs/docs/integrations/providers/tigris.mdx index 4485e8379a..0604db17c4 100644 --- a/docs/docs/integrations/providers/tigris.mdx +++ b/docs/docs/integrations/providers/tigris.mdx @@ -15,5 +15,5 @@ pip install tigrisdb openapi-schema-pydantic openai tiktoken See a [usage example](/docs/integrations/vectorstores/tigris). ```python -from langchain.vectorstores import Tigris +from langchain_community.vectorstores import Tigris ``` diff --git a/docs/docs/integrations/providers/tomarkdown.mdx b/docs/docs/integrations/providers/tomarkdown.mdx index e311d3ad5c..08787f9439 100644 --- a/docs/docs/integrations/providers/tomarkdown.mdx +++ b/docs/docs/integrations/providers/tomarkdown.mdx @@ -12,5 +12,5 @@ We need the `API key`. See [instructions how to get it](https://2markdown.com/lo See a [usage example](/docs/integrations/document_loaders/tomarkdown). ```python -from langchain.document_loaders import ToMarkdownLoader +from langchain_community.document_loaders import ToMarkdownLoader ``` diff --git a/docs/docs/integrations/providers/trello.mdx b/docs/docs/integrations/providers/trello.mdx index 0aecb76cfc..0b897ae660 100644 --- a/docs/docs/integrations/providers/trello.mdx +++ b/docs/docs/integrations/providers/trello.mdx @@ -18,5 +18,5 @@ See [setup instructions](/docs/integrations/document_loaders/trello). See a [usage example](/docs/integrations/document_loaders/trello). 
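A minimal sketch of the loader imported below, assuming the Trello API key and token are exported as environment variables (the board name and variable names are placeholders, and the `py-trello` dependency from the setup instructions is installed):

```python
import os

from langchain_community.document_loaders import TrelloLoader

# from_credentials builds the underlying Trello client from an API key/token pair.
loader = TrelloLoader.from_credentials(
    "My Board",
    api_key=os.environ["TRELLO_API_KEY"],
    token=os.environ["TRELLO_TOKEN"],
)
docs = loader.load()  # one Document per card on the board
```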
```python -from langchain.document_loaders import TrelloLoader +from langchain_community.document_loaders import TrelloLoader ``` diff --git a/docs/docs/integrations/providers/twitter.mdx b/docs/docs/integrations/providers/twitter.mdx index 365b996b24..7455283bf7 100644 --- a/docs/docs/integrations/providers/twitter.mdx +++ b/docs/docs/integrations/providers/twitter.mdx @@ -17,5 +17,5 @@ We must initialize the loader with the `Twitter API` token, and we need to set u See a [usage example](/docs/integrations/document_loaders/twitter). ```python -from langchain.document_loaders import TwitterTweetLoader +from langchain_community.document_loaders import TwitterTweetLoader ``` diff --git a/docs/docs/integrations/providers/typesense.mdx b/docs/docs/integrations/providers/typesense.mdx index 472d2a40fd..97f1601dd6 100644 --- a/docs/docs/integrations/providers/typesense.mdx +++ b/docs/docs/integrations/providers/typesense.mdx @@ -18,5 +18,5 @@ pip install typesense openapi-schema-pydantic openai tiktoken See a [usage example](/docs/integrations/vectorstores/typesense). ```python -from langchain.vectorstores import Typesense +from langchain_community.vectorstores import Typesense ``` diff --git a/docs/docs/integrations/providers/unstructured.mdx b/docs/docs/integrations/providers/unstructured.mdx index b0bccdbc94..1c0ad91b09 100644 --- a/docs/docs/integrations/providers/unstructured.mdx +++ b/docs/docs/integrations/providers/unstructured.mdx @@ -41,10 +41,10 @@ Check out the instructions The primary `unstructured` wrappers within `langchain` are data loaders. The following shows how to use the most basic unstructured data loader. There are other file-specific -data loaders available in the `langchain.document_loaders` module. +data loaders available in the `langchain_community.document_loaders` module. ```python -from langchain.document_loaders import UnstructuredFileLoader +from langchain_community.document_loaders import UnstructuredFileLoader loader = UnstructuredFileLoader("state_of_the_union.txt") loader.load() diff --git a/docs/docs/integrations/providers/usearch.mdx b/docs/docs/integrations/providers/usearch.mdx index 68044246c4..cdbc99ecc9 100644 --- a/docs/docs/integrations/providers/usearch.mdx +++ b/docs/docs/integrations/providers/usearch.mdx @@ -20,6 +20,6 @@ pip install usearch See a [usage example](/docs/integrations/vectorstores/usearch). ```python -from langchain.vectorstores import USearch +from langchain_community.vectorstores import USearch ``` diff --git a/docs/docs/integrations/providers/vearch.md b/docs/docs/integrations/providers/vearch.md index 84bff2e8b4..96350e12c2 100644 --- a/docs/docs/integrations/providers/vearch.md +++ b/docs/docs/integrations/providers/vearch.md @@ -11,5 +11,5 @@ Vearch Python SDK enables vearch to use locally. Vearch python sdk can be instal Vearch can also be used as a vectorstore.
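A minimal sketch under the shared `VectorStore` interface (the source file and embeddings choice are placeholders, and Vearch-specific connection arguments, local path versus server URL, are omitted):

```python
from langchain_community.document_loaders import TextLoader
from langchain_community.embeddings import OpenAIEmbeddings
from langchain_community.vectorstores import Vearch

documents = TextLoader("state_of_the_union.txt").load()
# from_documents is part of the base VectorStore interface that every wrapper
# implements; deployment-specific kwargs would be passed alongside it.
db = Vearch.from_documents(documents, OpenAIEmbeddings())
results = db.similarity_search("What did the president say?")
```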
Most details are in [this notebook](/docs/integrations/vectorstores/vearch) ```python -from langchain.vectorstores import Vearch +from langchain_community.vectorstores import Vearch ``` diff --git a/docs/docs/integrations/providers/vectara/index.mdx b/docs/docs/integrations/providers/vectara/index.mdx index 5e931753ee..d19ad12ebe 100644 --- a/docs/docs/integrations/providers/vectara/index.mdx +++ b/docs/docs/integrations/providers/vectara/index.mdx @@ -26,7 +26,7 @@ There exists a wrapper around the Vectara platform, allowing you to use it as a To import this vectorstore: ```python -from langchain.vectorstores import Vectara +from langchain_community.vectorstores import Vectara ``` To create an instance of the Vectara vectorstore: diff --git a/docs/docs/integrations/providers/vectara/vectara_chat.ipynb b/docs/docs/integrations/providers/vectara/vectara_chat.ipynb index 17a4a99d65..ff1c640beb 100644 --- a/docs/docs/integrations/providers/vectara/vectara_chat.ipynb +++ b/docs/docs/integrations/providers/vectara/vectara_chat.ipynb @@ -61,8 +61,8 @@ "import os\n", "\n", "from langchain.chains import ConversationalRetrievalChain\n", - "from langchain.vectorstores import Vectara\n", - "from langchain_community.llms import OpenAI" + "from langchain_community.llms import OpenAI\n", + "from langchain_community.vectorstores import Vectara" ] }, { @@ -82,7 +82,7 @@ }, "outputs": [], "source": [ - "from langchain.document_loaders import TextLoader\n", + "from langchain_community.document_loaders import TextLoader\n", "\n", "loader = TextLoader(\"state_of_the_union.txt\")\n", "documents = loader.load()" diff --git a/docs/docs/integrations/providers/vectara/vectara_summary.ipynb b/docs/docs/integrations/providers/vectara/vectara_summary.ipynb index 2830e8de43..d519102d41 100644 --- a/docs/docs/integrations/providers/vectara/vectara_summary.ipynb +++ b/docs/docs/integrations/providers/vectara/vectara_summary.ipynb @@ -75,8 +75,8 @@ "outputs": [], "source": [ "from langchain.prompts import ChatPromptTemplate\n", - "from langchain.vectorstores import Vectara\n", "from langchain_community.embeddings import FakeEmbeddings\n", + "from langchain_community.vectorstores import Vectara\n", "from langchain_core.output_parsers import StrOutputParser\n", "from langchain_core.runnables import RunnableLambda, RunnablePassthrough" ] diff --git a/docs/docs/integrations/providers/weather.mdx b/docs/docs/integrations/providers/weather.mdx index 5d557190b9..199af6ccb9 100644 --- a/docs/docs/integrations/providers/weather.mdx +++ b/docs/docs/integrations/providers/weather.mdx @@ -17,5 +17,5 @@ We must set up the `OpenWeatherMap API token`. See a [usage example](/docs/integrations/document_loaders/weather). ```python -from langchain.document_loaders import WeatherDataLoader +from langchain_community.document_loaders import WeatherDataLoader ``` diff --git a/docs/docs/integrations/providers/weaviate.mdx b/docs/docs/integrations/providers/weaviate.mdx index 9f5bf64cd6..a6c98d7ce8 100644 --- a/docs/docs/integrations/providers/weaviate.mdx +++ b/docs/docs/integrations/providers/weaviate.mdx @@ -32,7 +32,7 @@ whether for semantic search or example selection.
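A quick sketch of the `Weaviate` wrapper imported just below (the URL and sample text are placeholders, and `from_texts` is assumed to accept a `weaviate_url` keyword, as in the notebook linked below):

```python
from langchain_community.embeddings import OpenAIEmbeddings
from langchain_community.vectorstores import Weaviate

# weaviate_url points at a running Weaviate instance.
db = Weaviate.from_texts(
    ["harrison worked at kensho"],
    OpenAIEmbeddings(),
    weaviate_url="http://localhost:8080",
)
docs = db.similarity_search("Where did harrison work?")
```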
To import this vectorstore: ```python -from langchain.vectorstores import Weaviate +from langchain_community.vectorstores import Weaviate ``` For a more detailed walkthrough of the Weaviate wrapper, see [this notebook](/docs/integrations/vectorstores/weaviate) diff --git a/docs/docs/integrations/providers/whatsapp.mdx b/docs/docs/integrations/providers/whatsapp.mdx index 524945adfa..dbe45e1b86 100644 --- a/docs/docs/integrations/providers/whatsapp.mdx +++ b/docs/docs/integrations/providers/whatsapp.mdx @@ -14,5 +14,5 @@ There isn't any special setup for it. See a [usage example](/docs/integrations/document_loaders/whatsapp_chat). ```python -from langchain.document_loaders import WhatsAppChatLoader +from langchain_community.document_loaders import WhatsAppChatLoader ``` diff --git a/docs/docs/integrations/providers/wikipedia.mdx b/docs/docs/integrations/providers/wikipedia.mdx index b976dbc999..cf1b08a50a 100644 --- a/docs/docs/integrations/providers/wikipedia.mdx +++ b/docs/docs/integrations/providers/wikipedia.mdx @@ -16,7 +16,7 @@ pip install wikipedia See a [usage example](/docs/integrations/document_loaders/wikipedia). ```python -from langchain.document_loaders import WikipediaLoader +from langchain_community.document_loaders import WikipediaLoader ``` ## Retriever diff --git a/docs/docs/integrations/providers/xata.mdx b/docs/docs/integrations/providers/xata.mdx index 880f302f9c..9d4f80bef5 100644 --- a/docs/docs/integrations/providers/xata.mdx +++ b/docs/docs/integrations/providers/xata.mdx @@ -23,6 +23,6 @@ pip install xata==1.0.0a7 See a [usage example](/docs/integrations/vectorstores/xata). ```python -from langchain.vectorstores import XataVectorStore +from langchain_community.vectorstores import XataVectorStore ``` diff --git a/docs/docs/integrations/providers/youtube.mdx b/docs/docs/integrations/providers/youtube.mdx index c0e004df88..8f3d69b819 100644 --- a/docs/docs/integrations/providers/youtube.mdx +++ b/docs/docs/integrations/providers/youtube.mdx @@ -17,6 +17,6 @@ See a [usage example](/docs/integrations/document_loaders/youtube_transcript). See a [usage example](/docs/integrations/document_loaders/youtube_transcript). ```python -from langchain.document_loaders import YoutubeLoader -from langchain.document_loaders import GoogleApiYoutubeLoader +from langchain_community.document_loaders import YoutubeLoader +from langchain_community.document_loaders import GoogleApiYoutubeLoader ``` diff --git a/docs/docs/integrations/providers/zep.mdx b/docs/docs/integrations/providers/zep.mdx index 914180e080..265d5d41de 100644 --- a/docs/docs/integrations/providers/zep.mdx +++ b/docs/docs/integrations/providers/zep.mdx @@ -66,7 +66,7 @@ Zep supports both similarity search and [Maximum Marginal Relevance (MMR) rerank MMR search is useful for ensuring that the retrieved documents are diverse and not too similar to each other. ```python -from langchain.vectorstores.zep import ZepVectorStore +from langchain_community.vectorstores.zep import ZepVectorStore ``` See a [usage example](/docs/integrations/vectorstores/zep). \ No newline at end of file diff --git a/docs/docs/integrations/providers/zilliz.mdx b/docs/docs/integrations/providers/zilliz.mdx index b791adeb6c..6170afd351 100644 --- a/docs/docs/integrations/providers/zilliz.mdx +++ b/docs/docs/integrations/providers/zilliz.mdx @@ -16,7 +16,7 @@ A wrapper around Zilliz indexes allows you to use it as a vectorstore, whether for semantic search or example selection. 
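For instance, a minimal sketch of pointing the `Milvus` wrapper imported below at a Zilliz Cloud cluster (the URI and token are placeholders for your cluster's credentials):

```python
from langchain_community.embeddings import OpenAIEmbeddings
from langchain_community.vectorstores import Milvus

# connection_args carries the Zilliz Cloud endpoint and API token.
vector_db = Milvus.from_texts(
    ["harrison worked at kensho"],
    OpenAIEmbeddings(),
    connection_args={
        "uri": "https://<cluster-id>.api.<region>.zillizcloud.com",
        "token": "<api-key>",
    },
)
```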
```python -from langchain.vectorstores import Milvus +from langchain_community.vectorstores import Milvus ``` For a more detailed walkthrough of the Milvus wrapper, see [this notebook](/docs/integrations/vectorstores/zilliz) diff --git a/docs/docs/integrations/retrievers/chatgpt-plugin.ipynb b/docs/docs/integrations/retrievers/chatgpt-plugin.ipynb index a271942c25..7861d05889 100644 --- a/docs/docs/integrations/retrievers/chatgpt-plugin.ipynb +++ b/docs/docs/integrations/retrievers/chatgpt-plugin.ipynb @@ -31,7 +31,7 @@ "# Load documents using LangChain's DocumentLoaders\n", "# This is from https://langchain.readthedocs.io/en/latest/modules/document_loaders/examples/csv.html\n", "\n", - "from langchain.document_loaders.csv_loader import CSVLoader\n", + "from langchain_community.document_loaders.csv_loader import CSVLoader\n", "\n", "loader = CSVLoader(\n", " file_path=\"../../document_loaders/examples/example_data/mlb_teams_2012.csv\"\n", diff --git a/docs/docs/integrations/retrievers/cohere-reranker.ipynb b/docs/docs/integrations/retrievers/cohere-reranker.ipynb index c5cce3feed..dc23c3e782 100644 --- a/docs/docs/integrations/retrievers/cohere-reranker.ipynb +++ b/docs/docs/integrations/retrievers/cohere-reranker.ipynb @@ -325,10 +325,10 @@ } ], "source": [ - "from langchain.document_loaders import TextLoader\n", "from langchain.text_splitter import RecursiveCharacterTextSplitter\n", - "from langchain.vectorstores import FAISS\n", + "from langchain_community.document_loaders import TextLoader\n", "from langchain_community.embeddings import OpenAIEmbeddings\n", + "from langchain_community.vectorstores import FAISS\n", "\n", "documents = TextLoader(\"../../modules/state_of_the_union.txt\").load()\n", "text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=100)\n", diff --git a/docs/docs/integrations/retrievers/fleet_context.ipynb b/docs/docs/integrations/retrievers/fleet_context.ipynb index ca8f6e6c4b..e7622c9318 100644 --- a/docs/docs/integrations/retrievers/fleet_context.ipynb +++ b/docs/docs/integrations/retrievers/fleet_context.ipynb @@ -35,8 +35,8 @@ "import pandas as pd\n", "from langchain.retrievers import MultiVectorRetriever\n", "from langchain.schema import Document\n", - "from langchain.vectorstores import FAISS\n", "from langchain_community.embeddings import OpenAIEmbeddings\n", + "from langchain_community.vectorstores import FAISS\n", "from langchain_core.stores import BaseStore\n", "from langchain_core.vectorstores import VectorStore\n", "\n", @@ -191,9 +191,9 @@ { "data": { "text/plain": [
``` from langchain.document_loaders import TextLoaderloader = TextLoader(\\'../../../state_of_the_union.txt\\') ``` ``` from langchain.text_splitter import CharacterTextSplitterfrom langchain.vectorstores import FAISSfrom langchain_community.embeddings import OpenAIEmbeddingsdocuments = loader.load()text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)texts = text_splitter.split_documents(documents)embeddings = OpenAIEmbeddings()db = FAISS.from_documents(texts, embeddings) ``` ``` Exiting: Cleaning up .chroma directory ``` ``` retriever = db.as_retriever() ``` ``` docs = retriever.get_relevant_documents(\"what did he say about ketanji brown jackson\") ``` ## Maximum marginal relevance retrieval[\\u200b](#maximum-marginal-relevance-retrieval) By default, the vector store retriever uses similarity search.If the underlying vector store supports maximum marginal relevance search, you can specify that as the search type. ``` retriever = db.as_retriever(search_type=\"mmr\") ``` ``` docs = retriever.get_relevant_documents(\"what did he say about ketanji brown jackson\") ``` ## Similarity score threshold retrieval[\\u200b](#similarity-score-threshold-retrieval) You can also a retrieval method that sets a similarity score threshold and only returns documents with a score above that threshold. ``` retriever = db.as_retriever(search_type=\"similarity_score_threshold\", search_kwargs={\"score_threshold\": .5}) ``` ``` docs = retriever.get_relevant_documents(\"what did he say about ketanji brown jackson\") ``` ## Specifying top k[\\u200b](#specifying-top-k) You can also specify search kwargs like `k` to use when doing retrieval.``` retriever = db.as_retriever(search_kwargs={\"k\": 1}) ``` ``` docs = retriever.get_relevant_documents(\"what did he say about ketanji brown jackson\") ``` ``` len(docs) ``` ``` 1 ```', metadata={'title': 'Vector store-backed retriever | 🦜️🔗 Langchain', 'type': None, 'url': 'https://python.langchain.com/docs/modules/data_connection/retrievers/vectorstore', 'id': 'c153ebd9-2611-4a43-9db6-daa1f5f214f6'}),\n", - " Document(page_content='MultiVector Retriever | 🦜️🔗 Langchain\\n# MultiVector Retriever It can often be beneficial to store multiple vectors per document. There are multiple use cases where this is beneficial. LangChain has a base `MultiVectorRetriever` which makes querying this type of setup easy. A lot of the complexity lies in how to create the multiple vectors per document. This notebook covers some of the common ways to create those vectors and use the `MultiVectorRetriever`. The methods to create multiple vectors per document include: - Smaller chunks: split a document into smaller chunks, and embed those (this is ParentDocumentRetriever). - Summary: create a summary for each document, embed that along with (or instead of) the document. - Hypothetical questions: create hypothetical questions that each document would be appropriate to answer, embed those along with (or instead of) the document. Note that this also enables another method of adding embeddings - manually.Note that this also enables another method of adding embeddings - manually. This is great because you can explicitly add questions or queries that should lead to a document being recovered, giving you more control. 
``` from langchain.retrievers.multi_vector import MultiVectorRetriever ``` ``` from langchain.vectorstores import Chromafrom langchain_community.embeddings import OpenAIEmbeddingsfrom langchain.text_splitter import RecursiveCharacterTextSplitterfrom langchain.storage import InMemoryStorefrom langchain.document_loaders import TextLoader ``` ``` loaders = [ TextLoader(\\'../../paul_graham_essay.txt\\'), TextLoader(\\'../../state_of_the_union.txt\\'),]docs = []for l in loaders: docs.extend(l.load())text_splitter = RecursiveCharacterTextSplitter(chunk_size=10000)docs = text_splitter.split_documents(docs) ``` ## Smaller chunks[\\u200b](#smaller-chunks) Often times it can be useful to retrieve larger chunks of information, but embed smaller chunks.This allows for embeddings to capture the semantic meaning as closely as possible, but for as much context as possible to be passed downstream. Note that this is what the `ParentDocumentRetriever` does. Here we show what is going on under the hood.``` # The vectorstore to use to index the child chunksvectorstore = Chroma( collection_name=\"full_documents\", embedding_function=OpenAIEmbeddings())# The storage layer for the parent documentsstore = InMemoryStore()id_key = \"doc_id\"# The retriever (empty to start)retriever = MultiVectorRetriever( vectorstore=vectorstore, docstore=store, id_key=id_key,)import uuiddoc_ids = [str(uuid.uuid4()) for _ in docs] ``` ``` # The splitter to use to create smaller chunkschild_text_splitter = RecursiveCharacterTextSplitter(chunk_size=400) ``` ``` sub_docs = []for i, doc in enumerate(docs): _id = doc_ids[i] _sub_docs = child_text_splitter.split_documents([doc]) for _doc in _sub_docs: _doc.metadata[id_key] = _id sub_docs.extend(_sub_docs) ``` ``` retriever.vectorstore.add_documents(sub_docs)retriever.docstore.mset(list(zip(doc_ids, docs))) ``` ``` # Vectorstore alone retrieves the small chunksretriever.vectorstore.similarity_search(\"justice breyer\")[0] ``` ``` Document(page_content=\\'Tonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court.Justice Breyer, thank you for your service. \\\\n\\\\nOne of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \\', metadata={\\'doc_id\\': \\'10e9cbc0-4ba5-4d79-a09b-c033d1ba7b01\\', \\'source\\': \\'../../state_of_the_union.txt\\'}) ``` ``` # Retriever returns larger chunkslen(retriever.get_relevant_documents(\"justice breyer\")[0].page_content) ``` ``` 9874 ``` ## Summary[\\u200b](#summary) Oftentimes a summary may be able to distill more accurately what a chunk is about, leading to better retrieval. 
Here we show how to create summaries, and then embed those.``` from langchain_community.chat_models import ChatOpenAIfrom langchain.prompts import ChatPromptTemplatefrom langchain_core.output_parsers import StrOutputParserimport uuidfrom langchain_core.documents import Document ``` ``` chain = ( {\"doc\": lambda x: x.page_content} | ChatPromptTemplate.from_template(\"Summarize the following document:\\\\n\\\\n{doc}\") | ChatOpenAI(max_retries=0) | StrOutputParser()) ``` ``` summaries = chain.batch(docs, {\"max_concurrency\": 5}) ``` ``` # The vectorstore to use to index the child chunksvectorstore = Chroma( collection_name=\"summaries\", embedding_function=OpenAIEmbeddings())# The storage layer for the parent documentsstore = InMemoryStore()id_key = \"doc_id\"# The retriever (empty to start)retriever = MultiVectorRetriever( vectorstore=vectorstore, docstore=store, id_key=id_key,)doc_ids = [str(uuid.uuid4()) for _ in docs] ``` ``` summary_docs = [Document(page_content=s,metadata={id_key: doc_ids[i]}) for i, s in enumerate(summaries)] ``` ``` retriever.vectorstore.add_documents(summary_docs)retriever.docstore.mset(list(zip(doc_ids, docs))) ``` ``` # # We can also add the original chunks to the vectorstore if we so want# for i, doc in enumerate(docs):# doc.metadata[id_key] = doc_ids[i]# retriever.vectorstore.add_documents(docs) ``` ``` sub_docs = vectorstore.similarity_search(\"justice breyer\") ``` ``` sub_docs[0] ``` ``` Document(page_content=\"The document is a transcript of a speech given by the President of the United States.The President discusses several important issues and initiatives, including the nomination of a Supreme Court Justice, border security and immigration reform, protecting women\\'s rights, advancing LGBTQ+ equality, bipartisan legislation, addressing the opioid epidemic and mental health, supporting veterans, investigating the health effects of burn pits on military personnel, ending cancer, and the strength and resilience of the American people. \", metadata={\\'doc_id\\': \\'79fa2e9f-28d9-4372-8af3-2caf4f1de312\\'}) ``` ``` retrieved_docs = retriever.get_relevant_documents(\"justice breyer\") ``` ``` len(retrieved_docs[0].page_content) ``` ``` 9194 ``` ## Hypothetical Queries[\\u200b](#hypothetical-queries) An LLM can also be used to generate a list of hypothetical questions that could be asked of a particular document.These questions can then be embedded ``` functions = [ { \"name\": \"hypothetical_questions\", \"description\": \"Generate hypothetical questions\", \"parameters\": { \"type\": \"object\", \"properties\": { \"questions\": { \"type\": \"array\", \"items\": { \"type\": \"string\" }, }, }, \"required\": [\"questions\"] } } ] ``` ``` from langchain.output_parsers.openai_functions import JsonKeyOutputFunctionsParserchain = ( {\"doc\": lambda x: x.page_content} # Only asking for 3 hypothetical questions, but this could be adjusted | ChatPromptTemplate.from_template(\"Generate a list of 3 hypothetical questions that the below document could be used to answer:\\\\n\\\\n{doc}\") | ChatOpenAI(max_retries=0, model=\"gpt-4\").bind(functions=functions, function_call={\"name\": \"hypothetical_questions\"}) | JsonKeyOutputFunctionsParser(key_name=\"questions\")) ``` ``` chain.invoke(docs[0]) ``` ``` [\"What was the author\\'s initial impression of philosophy as a field of study, and how did it change when they got to college?\", \\'Why did the author decide to switch their focus to Artificial Intelligence (AI)? 
\\', \"What led to the author\\'s disillusionment with the field of AI as it was practiced at the time?\"]``` ``` hypothetical_questions = chain.batch(docs, {\"max_concurrency\": 5}) ``` ``` # The vectorstore to use to index the child chunksvectorstore = Chroma( collection_name=\"hypo-questions\", embedding_function=OpenAIEmbeddings())# The storage layer for the parent documentsstore = InMemoryStore()id_key = \"doc_id\"# The retriever (empty to start)retriever = MultiVectorRetriever( vectorstore=vectorstore, docstore=store, id_key=id_key,)doc_ids = [str(uuid.uuid4()) for _ in docs] ``` ``` question_docs = []for i, question_list in enumerate(hypothetical_questions): question_docs.extend([Document(page_content=s,metadata={id_key: doc_ids[i]}) for s in question_list]) ``` ``` retriever.vectorstore.add_documents(question_docs)retriever.docstore.mset(list(zip(doc_ids, docs))) ``` ``` sub_docs = vectorstore.similarity_search(\"justice breyer\") ``` ``` sub_docs ``` ``` [Document(page_content=\"What is the President\\'s stance on immigration reform?\", metadata={\\'doc_id\\': \\'505d73e3-8350-46ec-a58e-3af032f04ab3\\'}), Document(page_content=\"What is the President\\'s stance on immigration reform? \", metadata={\\'doc_id\\': \\'1c9618f0-7660-4b4f-a37c-509cbbbf6dba\\'}), Document(page_content=\"What is the President\\'s stance on immigration reform? \", metadata={\\'doc_id\\': \\'82c08209-b904-46a8-9532-edd2380950b7\\'}), Document(page_content=\\'What measures is the President proposing to protect the rights of LGBTQ+ Americans? \\', metadata={\\'doc_id\\': \\'82c08209-b904-46a8-9532-edd2380950b7\\'})] ``` ``` retrieved_docs = retriever.get_relevant_documents(\"justice breyer\") ``` ``` len(retrieved_docs[0].page_content) ``` ``` 9194 ```', metadata={'title': 'MultiVector Retriever | 🦜️🔗 Langchain', 'type': None, 'url': 'https://python.langchain.com/docs/modules/data_connection/retrievers/multi_vector', 'id': 'beec5531-16a7-453c-80ab-c5628e0236ce'}),\n", - " Document(page_content='MultiQueryRetriever | 🦜️🔗 Langchain\\n# MultiQueryRetriever Distance-based vector database retrieval embeds (represents) queries in high-dimensional space and finds similar embedded documents based on \"distance\". But, retrieval may produce different results with subtle changes in query wording or if the embeddings do not capture the semantics of the data well. Prompt engineering / tuning is sometimes done to manually address these problems, but can be tedious. The `MultiQueryRetriever` automates the process of prompt tuning by using an LLM to generate multiple queries from different perspectives for a given user input query. For each query, it retrieves a set of relevant documents and takes the unique union across all queries to get a larger set of potentially relevant documents. By generating multiple perspectives on the same question, the `MultiQueryRetriever` might be able to overcome some of the limitations of the distance-based retrieval and get a richer set of results.By generating multiple perspectives on the same question, the `MultiQueryRetriever` might be able to overcome some of the limitations of the distance-based retrieval and get a richer set of results. 
``` # Build a sample vectorDBfrom langchain.vectorstores import Chromafrom langchain.document_loaders import WebBaseLoaderfrom langchain_community.embeddings.openai import OpenAIEmbeddingsfrom langchain.text_splitter import RecursiveCharacterTextSplitter# Load blog postloader = WebBaseLoader(\"https://lilianweng.github.io/posts/2023-06-23-agent/\")data = loader.load()# Splittext_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0)splits = text_splitter.split_documents(data)# VectorDBembedding = OpenAIEmbeddings()vectordb = Chroma.from_documents(documents=splits, embedding=embedding) ``` #### Simple usage[\\u200b](#simple-usage) Specify the LLM to use for query generation, and the retriever will do the rest.``` from langchain_community.chat_models import ChatOpenAIfrom langchain.retrievers.multi_query import MultiQueryRetrieverquestion = \"What are the approaches to Task Decomposition? \"llm = ChatOpenAI(temperature=0)retriever_from_llm = MultiQueryRetriever.from_llm( retriever=vectordb.as_retriever(), llm=llm) ``` ``` # Set logging for the queriesimport logginglogging.basicConfig()logging.getLogger(\"langchain.retrievers.multi_query\").setLevel(logging.INFO) ``` ``` unique_docs = retriever_from_llm.get_relevant_documents(query=question)len(unique_docs) ``` ``` INFO:langchain.retrievers.multi_query:Generated queries: [\\'1. How can Task Decomposition be approached? \\', \\'2. What are the different methods for Task Decomposition? \\', \\'3. What are the various approaches to decomposing tasks?\\'] 5 ``` #### Supplying your own prompt[\\u200b](#supplying-your-own-prompt) You can also supply a prompt along with an output parser to split the results into a list of queries.5 ``` #### Supplying your own prompt[\\u200b](#supplying-your-own-prompt) You can also supply a prompt along with an output parser to split the results into a list of queries. ``` from typing import Listfrom langchain.chains import LLMChainfrom pydantic import BaseModel, Fieldfrom langchain.prompts import PromptTemplatefrom langchain.output_parsers import PydanticOutputParser# Output parser will split the LLM result into a list of queriesclass LineList(BaseModel): # \"lines\" is the key (attribute name) of the parsed output lines: List[str] = Field(description=\"Lines of text\")class LineListOutputParser(PydanticOutputParser): def __init__(self) -> None: super().__init__(pydantic_object=LineList) def parse(self, text: str) -> LineList: lines = text.strip().split(\"\\\\n\") return LineList(lines=lines)output_parser = LineListOutputParser()QUERY_PROMPT = PromptTemplate( input_variables=[\"question\"], template=\"\"\"You are an AI language model assistant.Your task is to generate five different versions of the given user question to retrieve relevant documents from a vector database. By generating multiple perspectives on the user question, your goal is to help the user overcome some of the limitations of the distance-based similarity search. Provide these alternative questions separated by newlines. 
Original question: {question}\"\"\",)llm = ChatOpenAI(temperature=0)# Chainllm_chain = LLMChain(llm=llm, prompt=QUERY_PROMPT, output_parser=output_parser)# Other inputsquestion = \"What are the approaches to Task Decomposition?\" ``` ``` # Runretriever = MultiQueryRetriever( retriever=vectordb.as_retriever(), llm_chain=llm_chain, parser_key=\"lines\") # \"lines\" is the key (attribute name) of the parsed output# Resultsunique_docs = retriever.get_relevant_documents( query=\"What does the course say about regression? \")len(unique_docs) ``` ``` INFO:langchain.retrievers.multi_query:Generated queries: [\"1.\")len(unique_docs) ``` ``` INFO:langchain.retrievers.multi_query:Generated queries: [\"1. What is the course\\'s perspective on regression? \", \\'2. Can you provide information on regression as discussed in the course? \\', \\'3. How does the course cover the topic of regression? \\', \"4. What are the course\\'s teachings on regression? \", \\'5. In relation to the course, what is mentioned about regression?\\'] 11 ```', metadata={'title': 'MultiQueryRetriever | 🦜️🔗 Langchain', 'type': None, 'url': 'https://python.langchain.com/docs/modules/data_connection/retrievers/MultiQueryRetriever', 'id': 'f7c20633-6a60-4ca3-96b1-13fee66e321d'}),\n", + "[Document(page_content='Vector store-backed retriever | 🦜️🔗 Langchain\\n# Vector store-backed retriever A vector store retriever is a retriever that uses a vector store to retrieve documents. It is a lightweight wrapper around the vector store class to make it conform to the retriever interface. It uses the search methods implemented by a vector store, like similarity search and MMR, to query the texts in the vector store. Once you construct a vector store, it\\'s very easy to construct a retriever. Let\\'s walk through an example.Once you construct a vector store, it\\'s very easy to construct a retriever. Let\\'s walk through an example. ``` from langchain_community.document_loaders import TextLoaderloader = TextLoader(\\'../../../state_of_the_union.txt\\') ``` ``` from langchain.text_splitter import CharacterTextSplitterfrom langchain_community.vectorstores import FAISSfrom langchain_community.embeddings import OpenAIEmbeddingsdocuments = loader.load()text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)texts = text_splitter.split_documents(documents)embeddings = OpenAIEmbeddings()db = FAISS.from_documents(texts, embeddings) ``` ``` Exiting: Cleaning up .chroma directory ``` ``` retriever = db.as_retriever() ``` ``` docs = retriever.get_relevant_documents(\"what did he say about ketanji brown jackson\") ``` ## Maximum marginal relevance retrieval[\\u200b](#maximum-marginal-relevance-retrieval) By default, the vector store retriever uses similarity search.If the underlying vector store supports maximum marginal relevance search, you can specify that as the search type. ``` retriever = db.as_retriever(search_type=\"mmr\") ``` ``` docs = retriever.get_relevant_documents(\"what did he say about ketanji brown jackson\") ``` ## Similarity score threshold retrieval[\\u200b](#similarity-score-threshold-retrieval) You can also a retrieval method that sets a similarity score threshold and only returns documents with a score above that threshold. 
``` retriever = db.as_retriever(search_type=\"similarity_score_threshold\", search_kwargs={\"score_threshold\": .5}) ``` ``` docs = retriever.get_relevant_documents(\"what did he say about ketanji brown jackson\") ``` ## Specifying top k[\\u200b](#specifying-top-k) You can also specify search kwargs like `k` to use when doing retrieval.``` retriever = db.as_retriever(search_kwargs={\"k\": 1}) ``` ``` docs = retriever.get_relevant_documents(\"what did he say about ketanji brown jackson\") ``` ``` len(docs) ``` ``` 1 ```', metadata={'title': 'Vector store-backed retriever | 🦜️🔗 Langchain', 'type': None, 'url': 'https://python.langchain.com/docs/modules/data_connection/retrievers/vectorstore', 'id': 'c153ebd9-2611-4a43-9db6-daa1f5f214f6'}),\n", + " Document(page_content='MultiVector Retriever | 🦜️🔗 Langchain\\n# MultiVector Retriever It can often be beneficial to store multiple vectors per document. There are multiple use cases where this is beneficial. LangChain has a base `MultiVectorRetriever` which makes querying this type of setup easy. A lot of the complexity lies in how to create the multiple vectors per document. This notebook covers some of the common ways to create those vectors and use the `MultiVectorRetriever`. The methods to create multiple vectors per document include: - Smaller chunks: split a document into smaller chunks, and embed those (this is ParentDocumentRetriever). - Summary: create a summary for each document, embed that along with (or instead of) the document. - Hypothetical questions: create hypothetical questions that each document would be appropriate to answer, embed those along with (or instead of) the document. Note that this also enables another method of adding embeddings - manually.Note that this also enables another method of adding embeddings - manually. This is great because you can explicitly add questions or queries that should lead to a document being recovered, giving you more control. ``` from langchain.retrievers.multi_vector import MultiVectorRetriever ``` ``` from langchain_community.vectorstores import Chromafrom langchain_community.embeddings import OpenAIEmbeddingsfrom langchain.text_splitter import RecursiveCharacterTextSplitterfrom langchain.storage import InMemoryStorefrom langchain_community.document_loaders import TextLoader ``` ``` loaders = [ TextLoader(\\'../../paul_graham_essay.txt\\'), TextLoader(\\'../../state_of_the_union.txt\\'),]docs = []for l in loaders: docs.extend(l.load())text_splitter = RecursiveCharacterTextSplitter(chunk_size=10000)docs = text_splitter.split_documents(docs) ``` ## Smaller chunks[\\u200b](#smaller-chunks) Often times it can be useful to retrieve larger chunks of information, but embed smaller chunks.This allows for embeddings to capture the semantic meaning as closely as possible, but for as much context as possible to be passed downstream. Note that this is what the `ParentDocumentRetriever` does. 
Here we show what is going on under the hood.``` # The vectorstore to use to index the child chunksvectorstore = Chroma( collection_name=\"full_documents\", embedding_function=OpenAIEmbeddings())# The storage layer for the parent documentsstore = InMemoryStore()id_key = \"doc_id\"# The retriever (empty to start)retriever = MultiVectorRetriever( vectorstore=vectorstore, docstore=store, id_key=id_key,)import uuiddoc_ids = [str(uuid.uuid4()) for _ in docs] ``` ``` # The splitter to use to create smaller chunkschild_text_splitter = RecursiveCharacterTextSplitter(chunk_size=400) ``` ``` sub_docs = []for i, doc in enumerate(docs): _id = doc_ids[i] _sub_docs = child_text_splitter.split_documents([doc]) for _doc in _sub_docs: _doc.metadata[id_key] = _id sub_docs.extend(_sub_docs) ``` ``` retriever.vectorstore.add_documents(sub_docs)retriever.docstore.mset(list(zip(doc_ids, docs))) ``` ``` # Vectorstore alone retrieves the small chunksretriever.vectorstore.similarity_search(\"justice breyer\")[0] ``` ``` Document(page_content=\\'Tonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court.Justice Breyer, thank you for your service. \\\\n\\\\nOne of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \\', metadata={\\'doc_id\\': \\'10e9cbc0-4ba5-4d79-a09b-c033d1ba7b01\\', \\'source\\': \\'../../state_of_the_union.txt\\'}) ``` ``` # Retriever returns larger chunkslen(retriever.get_relevant_documents(\"justice breyer\")[0].page_content) ``` ``` 9874 ``` ## Summary[\\u200b](#summary) Oftentimes a summary may be able to distill more accurately what a chunk is about, leading to better retrieval. 
Here we show how to create summaries, and then embed those.``` from langchain_community.chat_models import ChatOpenAIfrom langchain.prompts import ChatPromptTemplatefrom langchain_core.output_parsers import StrOutputParserimport uuidfrom langchain_core.documents import Document ``` ``` chain = ( {\"doc\": lambda x: x.page_content} | ChatPromptTemplate.from_template(\"Summarize the following document:\\\\n\\\\n{doc}\") | ChatOpenAI(max_retries=0) | StrOutputParser()) ``` ``` summaries = chain.batch(docs, {\"max_concurrency\": 5}) ``` ``` # The vectorstore to use to index the child chunksvectorstore = Chroma( collection_name=\"summaries\", embedding_function=OpenAIEmbeddings())# The storage layer for the parent documentsstore = InMemoryStore()id_key = \"doc_id\"# The retriever (empty to start)retriever = MultiVectorRetriever( vectorstore=vectorstore, docstore=store, id_key=id_key,)doc_ids = [str(uuid.uuid4()) for _ in docs] ``` ``` summary_docs = [Document(page_content=s,metadata={id_key: doc_ids[i]}) for i, s in enumerate(summaries)] ``` ``` retriever.vectorstore.add_documents(summary_docs)retriever.docstore.mset(list(zip(doc_ids, docs))) ``` ``` # # We can also add the original chunks to the vectorstore if we so want# for i, doc in enumerate(docs):# doc.metadata[id_key] = doc_ids[i]# retriever.vectorstore.add_documents(docs) ``` ``` sub_docs = vectorstore.similarity_search(\"justice breyer\") ``` ``` sub_docs[0] ``` ``` Document(page_content=\"The document is a transcript of a speech given by the President of the United States.The President discusses several important issues and initiatives, including the nomination of a Supreme Court Justice, border security and immigration reform, protecting women\\'s rights, advancing LGBTQ+ equality, bipartisan legislation, addressing the opioid epidemic and mental health, supporting veterans, investigating the health effects of burn pits on military personnel, ending cancer, and the strength and resilience of the American people. \", metadata={\\'doc_id\\': \\'79fa2e9f-28d9-4372-8af3-2caf4f1de312\\'}) ``` ``` retrieved_docs = retriever.get_relevant_documents(\"justice breyer\") ``` ``` len(retrieved_docs[0].page_content) ``` ``` 9194 ``` ## Hypothetical Queries[\\u200b](#hypothetical-queries) An LLM can also be used to generate a list of hypothetical questions that could be asked of a particular document.These questions can then be embedded ``` functions = [ { \"name\": \"hypothetical_questions\", \"description\": \"Generate hypothetical questions\", \"parameters\": { \"type\": \"object\", \"properties\": { \"questions\": { \"type\": \"array\", \"items\": { \"type\": \"string\" }, }, }, \"required\": [\"questions\"] } } ] ``` ``` from langchain.output_parsers.openai_functions import JsonKeyOutputFunctionsParserchain = ( {\"doc\": lambda x: x.page_content} # Only asking for 3 hypothetical questions, but this could be adjusted | ChatPromptTemplate.from_template(\"Generate a list of 3 hypothetical questions that the below document could be used to answer:\\\\n\\\\n{doc}\") | ChatOpenAI(max_retries=0, model=\"gpt-4\").bind(functions=functions, function_call={\"name\": \"hypothetical_questions\"}) | JsonKeyOutputFunctionsParser(key_name=\"questions\")) ``` ``` chain.invoke(docs[0]) ``` ``` [\"What was the author\\'s initial impression of philosophy as a field of study, and how did it change when they got to college?\", \\'Why did the author decide to switch their focus to Artificial Intelligence (AI)? 
\\', \"What led to the author\\'s disillusionment with the field of AI as it was practiced at the time?\"]``` ``` hypothetical_questions = chain.batch(docs, {\"max_concurrency\": 5}) ``` ``` # The vectorstore to use to index the child chunksvectorstore = Chroma( collection_name=\"hypo-questions\", embedding_function=OpenAIEmbeddings())# The storage layer for the parent documentsstore = InMemoryStore()id_key = \"doc_id\"# The retriever (empty to start)retriever = MultiVectorRetriever( vectorstore=vectorstore, docstore=store, id_key=id_key,)doc_ids = [str(uuid.uuid4()) for _ in docs] ``` ``` question_docs = []for i, question_list in enumerate(hypothetical_questions): question_docs.extend([Document(page_content=s,metadata={id_key: doc_ids[i]}) for s in question_list]) ``` ``` retriever.vectorstore.add_documents(question_docs)retriever.docstore.mset(list(zip(doc_ids, docs))) ``` ``` sub_docs = vectorstore.similarity_search(\"justice breyer\") ``` ``` sub_docs ``` ``` [Document(page_content=\"What is the President\\'s stance on immigration reform?\", metadata={\\'doc_id\\': \\'505d73e3-8350-46ec-a58e-3af032f04ab3\\'}), Document(page_content=\"What is the President\\'s stance on immigration reform? \", metadata={\\'doc_id\\': \\'1c9618f0-7660-4b4f-a37c-509cbbbf6dba\\'}), Document(page_content=\"What is the President\\'s stance on immigration reform? \", metadata={\\'doc_id\\': \\'82c08209-b904-46a8-9532-edd2380950b7\\'}), Document(page_content=\\'What measures is the President proposing to protect the rights of LGBTQ+ Americans? \\', metadata={\\'doc_id\\': \\'82c08209-b904-46a8-9532-edd2380950b7\\'})] ``` ``` retrieved_docs = retriever.get_relevant_documents(\"justice breyer\") ``` ``` len(retrieved_docs[0].page_content) ``` ``` 9194 ```', metadata={'title': 'MultiVector Retriever | 🦜️🔗 Langchain', 'type': None, 'url': 'https://python.langchain.com/docs/modules/data_connection/retrievers/multi_vector', 'id': 'beec5531-16a7-453c-80ab-c5628e0236ce'}),\n", + " Document(page_content='MultiQueryRetriever | 🦜️🔗 Langchain\\n# MultiQueryRetriever Distance-based vector database retrieval embeds (represents) queries in high-dimensional space and finds similar embedded documents based on \"distance\". But, retrieval may produce different results with subtle changes in query wording or if the embeddings do not capture the semantics of the data well. Prompt engineering / tuning is sometimes done to manually address these problems, but can be tedious. The `MultiQueryRetriever` automates the process of prompt tuning by using an LLM to generate multiple queries from different perspectives for a given user input query. For each query, it retrieves a set of relevant documents and takes the unique union across all queries to get a larger set of potentially relevant documents. By generating multiple perspectives on the same question, the `MultiQueryRetriever` might be able to overcome some of the limitations of the distance-based retrieval and get a richer set of results.By generating multiple perspectives on the same question, the `MultiQueryRetriever` might be able to overcome some of the limitations of the distance-based retrieval and get a richer set of results. 
``` # Build a sample vectorDBfrom langchain_community.vectorstores import Chromafrom langchain_community.document_loaders import WebBaseLoaderfrom langchain_community.embeddings.openai import OpenAIEmbeddingsfrom langchain.text_splitter import RecursiveCharacterTextSplitter# Load blog postloader = WebBaseLoader(\"https://lilianweng.github.io/posts/2023-06-23-agent/\")data = loader.load()# Splittext_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0)splits = text_splitter.split_documents(data)# VectorDBembedding = OpenAIEmbeddings()vectordb = Chroma.from_documents(documents=splits, embedding=embedding) ``` #### Simple usage[\\u200b](#simple-usage) Specify the LLM to use for query generation, and the retriever will do the rest.``` from langchain_community.chat_models import ChatOpenAIfrom langchain.retrievers.multi_query import MultiQueryRetrieverquestion = \"What are the approaches to Task Decomposition? \"llm = ChatOpenAI(temperature=0)retriever_from_llm = MultiQueryRetriever.from_llm( retriever=vectordb.as_retriever(), llm=llm) ``` ``` # Set logging for the queriesimport logginglogging.basicConfig()logging.getLogger(\"langchain.retrievers.multi_query\").setLevel(logging.INFO) ``` ``` unique_docs = retriever_from_llm.get_relevant_documents(query=question)len(unique_docs) ``` ``` INFO:langchain.retrievers.multi_query:Generated queries: [\\'1. How can Task Decomposition be approached? \\', \\'2. What are the different methods for Task Decomposition? \\', \\'3. What are the various approaches to decomposing tasks?\\'] 5 ``` #### Supplying your own prompt[\\u200b](#supplying-your-own-prompt) You can also supply a prompt along with an output parser to split the results into a list of queries.5 ``` #### Supplying your own prompt[\\u200b](#supplying-your-own-prompt) You can also supply a prompt along with an output parser to split the results into a list of queries. ``` from typing import Listfrom langchain.chains import LLMChainfrom pydantic import BaseModel, Fieldfrom langchain.prompts import PromptTemplatefrom langchain.output_parsers import PydanticOutputParser# Output parser will split the LLM result into a list of queriesclass LineList(BaseModel): # \"lines\" is the key (attribute name) of the parsed output lines: List[str] = Field(description=\"Lines of text\")class LineListOutputParser(PydanticOutputParser): def __init__(self) -> None: super().__init__(pydantic_object=LineList) def parse(self, text: str) -> LineList: lines = text.strip().split(\"\\\\n\") return LineList(lines=lines)output_parser = LineListOutputParser()QUERY_PROMPT = PromptTemplate( input_variables=[\"question\"], template=\"\"\"You are an AI language model assistant.Your task is to generate five different versions of the given user question to retrieve relevant documents from a vector database. By generating multiple perspectives on the user question, your goal is to help the user overcome some of the limitations of the distance-based similarity search. Provide these alternative questions separated by newlines. 
Original question: {question}\"\"\",)llm = ChatOpenAI(temperature=0)# Chainllm_chain = LLMChain(llm=llm, prompt=QUERY_PROMPT, output_parser=output_parser)# Other inputsquestion = \"What are the approaches to Task Decomposition?\" ``` ``` # Runretriever = MultiQueryRetriever( retriever=vectordb.as_retriever(), llm_chain=llm_chain, parser_key=\"lines\") # \"lines\" is the key (attribute name) of the parsed output# Resultsunique_docs = retriever.get_relevant_documents( query=\"What does the course say about regression? \")len(unique_docs) ``` ``` INFO:langchain.retrievers.multi_query:Generated queries: [\"1.\")len(unique_docs) ``` ``` INFO:langchain.retrievers.multi_query:Generated queries: [\"1. What is the course\\'s perspective on regression? \", \\'2. Can you provide information on regression as discussed in the course? \\', \\'3. How does the course cover the topic of regression? \\', \"4. What are the course\\'s teachings on regression? \", \\'5. In relation to the course, what is mentioned about regression?\\'] 11 ```', metadata={'title': 'MultiQueryRetriever | 🦜️🔗 Langchain', 'type': None, 'url': 'https://python.langchain.com/docs/modules/data_connection/retrievers/MultiQueryRetriever', 'id': 'f7c20633-6a60-4ca3-96b1-13fee66e321d'}),\n", " Document(page_content='langchain.retrievers.multi_vector.MultiVectorRetriever — 🦜🔗 LangChain 0.0.322\\n# `langchain.retrievers.multi_vector`.MultiVectorRetriever[¶](#langchain-retrievers-multi-vector-multivectorretriever) *class *langchain.retrievers.multi_vector.MultiVectorRetriever[[source]](../_modules/langchain/retrievers/multi_vector.html#MultiVectorRetriever)[¶](#langchain.retrievers.multi_vector.MultiVectorRetriever) # Examples using MultiVectorRetriever[¶](#langchain-retrievers-multi-vector-multivectorretriever) - [MultiVector Retriever](https://python.langchain.com/docs/modules/data_connection/retrievers/multi_vector)', metadata={'title': 'langchain.retrievers.multi_vector.MultiVectorRetriever — 🦜🔗 LangChain 0.0.322', 'type': None, 'url': 'https://api.python.langchain.com/en/latest/retrievers/langchain.retrievers.multi_vector.MultiVectorRetriever.html#langchain-retrievers-multi-vector-multivectorretriever', 'id': '1820c44d-7783-4846-a11c-106b18da015d'})]" ] }, @@ -276,7 +276,7 @@ "\n", "```python\n", "from langchain_community.embeddings import OpenAIEmbeddings\n", - "from langchain.vectorstores import FAISS\n", + "from langchain_community.vectorstores import FAISS\n", "\n", "# Assuming you have already loaded and split your documents\n", "# into `texts` and initialized your `embeddings` object\n", diff --git a/docs/docs/integrations/retrievers/jaguar.ipynb b/docs/docs/integrations/retrievers/jaguar.ipynb index 62a83758ce..b7b1d8a00f 100644 --- a/docs/docs/integrations/retrievers/jaguar.ipynb +++ b/docs/docs/integrations/retrievers/jaguar.ipynb @@ -52,8 +52,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders import TextLoader\n", "from langchain.text_splitter import CharacterTextSplitter\n", + "from langchain_community.document_loaders import TextLoader\n", "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", "from langchain_community.vectorstores.jaguar import Jaguar\n", "\n", diff --git a/docs/docs/integrations/retrievers/merger_retriever.ipynb b/docs/docs/integrations/retrievers/merger_retriever.ipynb index 77824115a8..8ae9ccbf19 100644 --- a/docs/docs/integrations/retrievers/merger_retriever.ipynb +++ b/docs/docs/integrations/retrievers/merger_retriever.ipynb @@ -23,15 +23,15 @@ "import 
os\n", "\n", "import chromadb\n", - "from langchain.document_transformers import (\n", - " EmbeddingsClusteringFilter,\n", - " EmbeddingsRedundantFilter,\n", - ")\n", "from langchain.retrievers import ContextualCompressionRetriever\n", "from langchain.retrievers.document_compressors import DocumentCompressorPipeline\n", "from langchain.retrievers.merger_retriever import MergerRetriever\n", - "from langchain.vectorstores import Chroma\n", + "from langchain_community.document_transformers import (\n", + " EmbeddingsClusteringFilter,\n", + " EmbeddingsRedundantFilter,\n", + ")\n", "from langchain_community.embeddings import HuggingFaceEmbeddings, OpenAIEmbeddings\n", + "from langchain_community.vectorstores import Chroma\n", "\n", "# Get 3 diff embeddings.\n", "all_mini = HuggingFaceEmbeddings(model_name=\"all-MiniLM-L6-v2\")\n", @@ -158,7 +158,7 @@ "outputs": [], "source": [ "# You can use an additional document transformer to reorder documents after removing redundancy.\n", - "from langchain.document_transformers import LongContextReorder\n", + "from langchain_community.document_transformers import LongContextReorder\n", "\n", "filter = EmbeddingsRedundantFilter(embeddings=filter_embeddings)\n", "reordering = LongContextReorder()\n", diff --git a/docs/docs/integrations/retrievers/re_phrase.ipynb b/docs/docs/integrations/retrievers/re_phrase.ipynb index 8199ba4185..7c43568bb6 100644 --- a/docs/docs/integrations/retrievers/re_phrase.ipynb +++ b/docs/docs/integrations/retrievers/re_phrase.ipynb @@ -27,12 +27,12 @@ "source": [ "import logging\n", "\n", - "from langchain.document_loaders import WebBaseLoader\n", "from langchain.retrievers import RePhraseQueryRetriever\n", "from langchain.text_splitter import RecursiveCharacterTextSplitter\n", - "from langchain.vectorstores import Chroma\n", "from langchain_community.chat_models import ChatOpenAI\n", - "from langchain_community.embeddings import OpenAIEmbeddings" + "from langchain_community.document_loaders import WebBaseLoader\n", + "from langchain_community.embeddings import OpenAIEmbeddings\n", + "from langchain_community.vectorstores import Chroma" ] }, { diff --git a/docs/docs/integrations/retrievers/self_query/activeloop_deeplake_self_query.ipynb b/docs/docs/integrations/retrievers/self_query/activeloop_deeplake_self_query.ipynb index 44c41a6324..e222082a59 100644 --- a/docs/docs/integrations/retrievers/self_query/activeloop_deeplake_self_query.ipynb +++ b/docs/docs/integrations/retrievers/self_query/activeloop_deeplake_self_query.ipynb @@ -84,8 +84,8 @@ "outputs": [], "source": [ "from langchain.schema import Document\n", - "from langchain.vectorstores import DeepLake\n", "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", + "from langchain_community.vectorstores import DeepLake\n", "\n", "embeddings = OpenAIEmbeddings()" ] diff --git a/docs/docs/integrations/retrievers/self_query/chroma_self_query.ipynb b/docs/docs/integrations/retrievers/self_query/chroma_self_query.ipynb index d6f3e17af4..59728052e9 100644 --- a/docs/docs/integrations/retrievers/self_query/chroma_self_query.ipynb +++ b/docs/docs/integrations/retrievers/self_query/chroma_self_query.ipynb @@ -88,8 +88,8 @@ "outputs": [], "source": [ "from langchain.schema import Document\n", - "from langchain.vectorstores import Chroma\n", "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", + "from langchain_community.vectorstores import Chroma\n", "\n", "embeddings = OpenAIEmbeddings()" ] diff --git 
a/docs/docs/integrations/retrievers/self_query/dashvector.ipynb b/docs/docs/integrations/retrievers/self_query/dashvector.ipynb index 07e2e18bb1..b3c074a3ac 100644 --- a/docs/docs/integrations/retrievers/self_query/dashvector.ipynb +++ b/docs/docs/integrations/retrievers/self_query/dashvector.ipynb @@ -93,8 +93,8 @@ "outputs": [], "source": [ "from langchain.schema import Document\n", - "from langchain.vectorstores import DashVector\n", "from langchain_community.embeddings import DashScopeEmbeddings\n", + "from langchain_community.vectorstores import DashVector\n", "\n", "embeddings = DashScopeEmbeddings()\n", "\n", diff --git a/docs/docs/integrations/retrievers/self_query/elasticsearch_self_query.ipynb b/docs/docs/integrations/retrievers/self_query/elasticsearch_self_query.ipynb index cb7d6e4242..9cb67704f4 100644 --- a/docs/docs/integrations/retrievers/self_query/elasticsearch_self_query.ipynb +++ b/docs/docs/integrations/retrievers/self_query/elasticsearch_self_query.ipynb @@ -61,8 +61,8 @@ "import os\n", "\n", "from langchain.schema import Document\n", - "from langchain.vectorstores import ElasticsearchStore\n", "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", + "from langchain_community.vectorstores import ElasticsearchStore\n", "\n", "os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"OpenAI API Key:\")\n", "\n", diff --git a/docs/docs/integrations/retrievers/self_query/milvus_self_query.ipynb b/docs/docs/integrations/retrievers/self_query/milvus_self_query.ipynb index 5257022076..31fe42b7ce 100644 --- a/docs/docs/integrations/retrievers/self_query/milvus_self_query.ipynb +++ b/docs/docs/integrations/retrievers/self_query/milvus_self_query.ipynb @@ -68,8 +68,8 @@ "outputs": [], "source": [ "from langchain.schema import Document\n", - "from langchain.vectorstores import Milvus\n", "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", + "from langchain_community.vectorstores import Milvus\n", "\n", "embeddings = OpenAIEmbeddings()" ] diff --git a/docs/docs/integrations/retrievers/self_query/mongodb_atlas.ipynb b/docs/docs/integrations/retrievers/self_query/mongodb_atlas.ipynb index 50d769216c..3119494393 100644 --- a/docs/docs/integrations/retrievers/self_query/mongodb_atlas.ipynb +++ b/docs/docs/integrations/retrievers/self_query/mongodb_atlas.ipynb @@ -58,8 +58,8 @@ "outputs": [], "source": [ "from langchain.schema import Document\n", - "from langchain.vectorstores import MongoDBAtlasVectorSearch\n", "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", + "from langchain_community.vectorstores import MongoDBAtlasVectorSearch\n", "from pymongo import MongoClient\n", "\n", "CONNECTION_STRING = \"Use your MongoDB Atlas connection string\"\n", diff --git a/docs/docs/integrations/retrievers/self_query/myscale_self_query.ipynb b/docs/docs/integrations/retrievers/self_query/myscale_self_query.ipynb index ae09425718..43ee52cb2c 100644 --- a/docs/docs/integrations/retrievers/self_query/myscale_self_query.ipynb +++ b/docs/docs/integrations/retrievers/self_query/myscale_self_query.ipynb @@ -79,8 +79,8 @@ "outputs": [], "source": [ "from langchain.schema import Document\n", - "from langchain.vectorstores import MyScale\n", "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", + "from langchain_community.vectorstores import MyScale\n", "\n", "embeddings = OpenAIEmbeddings()" ] diff --git a/docs/docs/integrations/retrievers/self_query/opensearch_self_query.ipynb 
b/docs/docs/integrations/retrievers/self_query/opensearch_self_query.ipynb index 0d9e01853a..9a8f0416cd 100644 --- a/docs/docs/integrations/retrievers/self_query/opensearch_self_query.ipynb +++ b/docs/docs/integrations/retrievers/self_query/opensearch_self_query.ipynb @@ -60,8 +60,8 @@ "import os\n", "\n", "from langchain.schema import Document\n", - "from langchain.vectorstores import OpenSearchVectorSearch\n", "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", + "from langchain_community.vectorstores import OpenSearchVectorSearch\n", "\n", "os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"OpenAI API Key:\")\n", "\n", diff --git a/docs/docs/integrations/retrievers/self_query/pinecone.ipynb b/docs/docs/integrations/retrievers/self_query/pinecone.ipynb index 845e2feede..725df33737 100644 --- a/docs/docs/integrations/retrievers/self_query/pinecone.ipynb +++ b/docs/docs/integrations/retrievers/self_query/pinecone.ipynb @@ -78,8 +78,8 @@ "outputs": [], "source": [ "from langchain.schema import Document\n", - "from langchain.vectorstores import Pinecone\n", "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", + "from langchain_community.vectorstores import Pinecone\n", "\n", "embeddings = OpenAIEmbeddings()\n", "# create new index\n", diff --git a/docs/docs/integrations/retrievers/self_query/qdrant_self_query.ipynb b/docs/docs/integrations/retrievers/self_query/qdrant_self_query.ipynb index 08b68335d1..5e70a1db75 100644 --- a/docs/docs/integrations/retrievers/self_query/qdrant_self_query.ipynb +++ b/docs/docs/integrations/retrievers/self_query/qdrant_self_query.ipynb @@ -71,8 +71,8 @@ "outputs": [], "source": [ "from langchain.schema import Document\n", - "from langchain.vectorstores import Qdrant\n", "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", + "from langchain_community.vectorstores import Qdrant\n", "\n", "embeddings = OpenAIEmbeddings()" ] diff --git a/docs/docs/integrations/retrievers/self_query/redis_self_query.ipynb b/docs/docs/integrations/retrievers/self_query/redis_self_query.ipynb index bb9534c852..26299da969 100644 --- a/docs/docs/integrations/retrievers/self_query/redis_self_query.ipynb +++ b/docs/docs/integrations/retrievers/self_query/redis_self_query.ipynb @@ -68,8 +68,8 @@ "outputs": [], "source": [ "from langchain.schema import Document\n", - "from langchain.vectorstores import Redis\n", "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", + "from langchain_community.vectorstores import Redis\n", "\n", "embeddings = OpenAIEmbeddings()" ] diff --git a/docs/docs/integrations/retrievers/self_query/supabase_self_query.ipynb b/docs/docs/integrations/retrievers/self_query/supabase_self_query.ipynb index 26c4e316f1..82405ae695 100644 --- a/docs/docs/integrations/retrievers/self_query/supabase_self_query.ipynb +++ b/docs/docs/integrations/retrievers/self_query/supabase_self_query.ipynb @@ -218,8 +218,8 @@ "import os\n", "\n", "from langchain.schema import Document\n", - "from langchain.vectorstores import SupabaseVectorStore\n", "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", + "from langchain_community.vectorstores import SupabaseVectorStore\n", "from supabase.client import Client, create_client\n", "\n", "supabase_url = os.environ.get(\"SUPABASE_URL\")\n", diff --git a/docs/docs/integrations/retrievers/self_query/timescalevector_self_query.ipynb b/docs/docs/integrations/retrievers/self_query/timescalevector_self_query.ipynb index fd6882d7a1..64d939b6a0 100644 --- 
a/docs/docs/integrations/retrievers/self_query/timescalevector_self_query.ipynb +++ b/docs/docs/integrations/retrievers/self_query/timescalevector_self_query.ipynb @@ -144,8 +144,8 @@ "outputs": [], "source": [ "from langchain.schema import Document\n", - "from langchain.vectorstores.timescalevector import TimescaleVector\n", "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", + "from langchain_community.vectorstores.timescalevector import TimescaleVector\n", "\n", "embeddings = OpenAIEmbeddings()" ] diff --git a/docs/docs/integrations/retrievers/self_query/vectara_self_query.ipynb b/docs/docs/integrations/retrievers/self_query/vectara_self_query.ipynb index fc0885a7d8..ef0c1f0b30 100644 --- a/docs/docs/integrations/retrievers/self_query/vectara_self_query.ipynb +++ b/docs/docs/integrations/retrievers/self_query/vectara_self_query.ipynb @@ -88,13 +88,13 @@ "source": [ "from langchain.chains import ConversationalRetrievalChain\n", "from langchain.chains.query_constructor.base import AttributeInfo\n", - "from langchain.document_loaders import TextLoader\n", "from langchain.retrievers.self_query.base import SelfQueryRetriever\n", "from langchain.schema import Document\n", "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain.vectorstores import Vectara\n", + "from langchain_community.document_loaders import TextLoader\n", "from langchain_community.embeddings import FakeEmbeddings\n", - "from langchain_community.llms import OpenAI" + "from langchain_community.llms import OpenAI\n", + "from langchain_community.vectorstores import Vectara" ] }, { diff --git a/docs/docs/integrations/retrievers/self_query/weaviate_self_query.ipynb b/docs/docs/integrations/retrievers/self_query/weaviate_self_query.ipynb index 347d4a3d24..d0fcb4cdf1 100644 --- a/docs/docs/integrations/retrievers/self_query/weaviate_self_query.ipynb +++ b/docs/docs/integrations/retrievers/self_query/weaviate_self_query.ipynb @@ -46,8 +46,8 @@ "outputs": [], "source": [ "from langchain.schema import Document\n", - "from langchain.vectorstores import Weaviate\n", "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", + "from langchain_community.vectorstores import Weaviate\n", "\n", "embeddings = OpenAIEmbeddings()" ] diff --git a/docs/docs/integrations/retrievers/singlestoredb.ipynb b/docs/docs/integrations/retrievers/singlestoredb.ipynb index 13843727ec..a2949b8a4f 100644 --- a/docs/docs/integrations/retrievers/singlestoredb.ipynb +++ b/docs/docs/integrations/retrievers/singlestoredb.ipynb @@ -50,10 +50,10 @@ "# We want to use OpenAIEmbeddings so we have to get the OpenAI API Key.\n", "os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"OpenAI API Key:\")\n", "\n", - "from langchain.document_loaders import TextLoader\n", "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain.vectorstores import SingleStoreDB\n", + "from langchain_community.document_loaders import TextLoader\n", "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", + "from langchain_community.vectorstores import SingleStoreDB\n", "\n", "loader = TextLoader(\"../../modules/state_of_the_union.txt\")\n", "documents = loader.load()\n", diff --git a/docs/docs/integrations/text_embedding/nvidia_ai_endpoints.ipynb b/docs/docs/integrations/text_embedding/nvidia_ai_endpoints.ipynb index 330c73a5d8..b6ecc70de1 100644 --- a/docs/docs/integrations/text_embedding/nvidia_ai_endpoints.ipynb +++ b/docs/docs/integrations/text_embedding/nvidia_ai_endpoints.ipynb @@ -452,7 +452,7 
@@ "\n", "from operator import itemgetter\n", "\n", - "from langchain.vectorstores import FAISS\n", + "from langchain_community.vectorstores import FAISS\n", "from langchain_core.output_parsers import StrOutputParser\n", "from langchain_core.prompts import ChatPromptTemplate\n", "from langchain_core.runnables import RunnablePassthrough\n", diff --git a/docs/docs/integrations/toolkits/airbyte_structured_qa.ipynb b/docs/docs/integrations/toolkits/airbyte_structured_qa.ipynb index b36cd42358..737f24d53d 100644 --- a/docs/docs/integrations/toolkits/airbyte_structured_qa.ipynb +++ b/docs/docs/integrations/toolkits/airbyte_structured_qa.ipynb @@ -29,8 +29,8 @@ "\n", "import pandas as pd\n", "from langchain.agents import AgentType, create_pandas_dataframe_agent\n", - "from langchain.document_loaders.airbyte import AirbyteStripeLoader\n", "from langchain_community.chat_models.openai import ChatOpenAI\n", + "from langchain_community.document_loaders.airbyte import AirbyteStripeLoader\n", "\n", "stream_name = \"customers\"\n", "config = {\n", diff --git a/docs/docs/integrations/toolkits/document_comparison_toolkit.ipynb b/docs/docs/integrations/toolkits/document_comparison_toolkit.ipynb index fadfa3c7ea..51966e24bc 100644 --- a/docs/docs/integrations/toolkits/document_comparison_toolkit.ipynb +++ b/docs/docs/integrations/toolkits/document_comparison_toolkit.ipynb @@ -21,11 +21,11 @@ "source": [ "from langchain.agents import Tool\n", "from langchain.chains import RetrievalQA\n", - "from langchain.document_loaders import PyPDFLoader\n", "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain.vectorstores import FAISS\n", "from langchain_community.chat_models import ChatOpenAI\n", + "from langchain_community.document_loaders import PyPDFLoader\n", "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", + "from langchain_community.vectorstores import FAISS\n", "from pydantic import BaseModel, Field" ] }, diff --git a/docs/docs/integrations/tools/apify.ipynb b/docs/docs/integrations/tools/apify.ipynb index 20656982e1..3c647c0a25 100644 --- a/docs/docs/integrations/tools/apify.ipynb +++ b/docs/docs/integrations/tools/apify.ipynb @@ -40,9 +40,9 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders.base import Document\n", "from langchain.indexes import VectorstoreIndexCreator\n", - "from langchain.utilities import ApifyWrapper" + "from langchain.utilities import ApifyWrapper\n", + "from langchain_community.document_loaders.base import Document" ] }, { diff --git a/docs/docs/integrations/vectorstores/activeloop_deeplake.ipynb b/docs/docs/integrations/vectorstores/activeloop_deeplake.ipynb index 933ba4912d..6253598c63 100644 --- a/docs/docs/integrations/vectorstores/activeloop_deeplake.ipynb +++ b/docs/docs/integrations/vectorstores/activeloop_deeplake.ipynb @@ -52,8 +52,8 @@ "outputs": [], "source": [ "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain.vectorstores import DeepLake\n", - "from langchain_community.embeddings.openai import OpenAIEmbeddings" + "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", + "from langchain_community.vectorstores import DeepLake" ] }, { @@ -76,7 +76,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders import TextLoader\n", + "from langchain_community.document_loaders import TextLoader\n", "\n", "loader = TextLoader(\"../../modules/state_of_the_union.txt\")\n", "documents = loader.load()\n", diff --git 
a/docs/docs/integrations/vectorstores/alibabacloud_opensearch.ipynb b/docs/docs/integrations/vectorstores/alibabacloud_opensearch.ipynb index 1d059b08b8..2666d5b971 100644 --- a/docs/docs/integrations/vectorstores/alibabacloud_opensearch.ipynb +++ b/docs/docs/integrations/vectorstores/alibabacloud_opensearch.ipynb @@ -135,11 +135,11 @@ "outputs": [], "source": [ "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain.vectorstores import (\n", + "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", + "from langchain_community.vectorstores import (\n", " AlibabaCloudOpenSearch,\n", " AlibabaCloudOpenSearchSettings,\n", - ")\n", - "from langchain_community.embeddings.openai import OpenAIEmbeddings" + ")" ] }, { @@ -163,7 +163,7 @@ }, "outputs": [], "source": [ - "from langchain.document_loaders import TextLoader\n", + "from langchain_community.document_loaders import TextLoader\n", "\n", "loader = TextLoader(\"../../../state_of_the_union.txt\")\n", "documents = loader.load()\n", diff --git a/docs/docs/integrations/vectorstores/analyticdb.ipynb b/docs/docs/integrations/vectorstores/analyticdb.ipynb index b763e3c6b2..331fc23cff 100644 --- a/docs/docs/integrations/vectorstores/analyticdb.ipynb +++ b/docs/docs/integrations/vectorstores/analyticdb.ipynb @@ -24,8 +24,8 @@ "outputs": [], "source": [ "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain.vectorstores import AnalyticDB\n", - "from langchain_community.embeddings.openai import OpenAIEmbeddings" + "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", + "from langchain_community.vectorstores import AnalyticDB" ] }, { @@ -41,7 +41,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders import TextLoader\n", + "from langchain_community.document_loaders import TextLoader\n", "\n", "loader = TextLoader(\"../../modules/state_of_the_union.txt\")\n", "documents = loader.load()\n", diff --git a/docs/docs/integrations/vectorstores/annoy.ipynb b/docs/docs/integrations/vectorstores/annoy.ipynb index fe8c20e4b8..819d2614e9 100644 --- a/docs/docs/integrations/vectorstores/annoy.ipynb +++ b/docs/docs/integrations/vectorstores/annoy.ipynb @@ -52,8 +52,8 @@ }, "outputs": [], "source": [ - "from langchain.vectorstores import Annoy\n", "from langchain_community.embeddings import HuggingFaceEmbeddings\n", + "from langchain_community.vectorstores import Annoy\n", "\n", "embeddings_func = HuggingFaceEmbeddings()" ] @@ -148,8 +148,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders import TextLoader\n", "from langchain.text_splitter import CharacterTextSplitter\n", + "from langchain_community.document_loaders import TextLoader\n", "\n", "loader = TextLoader(\"../../modules/state_of_the_union.txtn.txtn.txt\")\n", "documents = loader.load()\n", diff --git a/docs/docs/integrations/vectorstores/astradb.ipynb b/docs/docs/integrations/vectorstores/astradb.ipynb index dd89dd1d93..ace4c990db 100644 --- a/docs/docs/integrations/vectorstores/astradb.ipynb +++ b/docs/docs/integrations/vectorstores/astradb.ipynb @@ -60,11 +60,11 @@ "from datasets import (\n", " load_dataset,\n", ")\n", - "from langchain.document_loaders import PyPDFLoader\n", "from langchain.prompts import ChatPromptTemplate\n", "from langchain.schema import Document\n", "from langchain.text_splitter import RecursiveCharacterTextSplitter\n", "from langchain_community.chat_models import ChatOpenAI\n", + "from langchain_community.document_loaders import 
PyPDFLoader\n", "from langchain_community.embeddings import OpenAIEmbeddings\n", "from langchain_core.output_parsers import StrOutputParser\n", "from langchain_core.runnables import RunnablePassthrough" @@ -121,7 +121,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.vectorstores import AstraDB" + "from langchain_community.vectorstores import AstraDB" ] }, { @@ -539,7 +539,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.vectorstores import Cassandra" + "from langchain_community.vectorstores import Cassandra" ] }, { diff --git a/docs/docs/integrations/vectorstores/atlas.ipynb b/docs/docs/integrations/vectorstores/atlas.ipynb index a4c39a3c9e..f5dc614918 100644 --- a/docs/docs/integrations/vectorstores/atlas.ipynb +++ b/docs/docs/integrations/vectorstores/atlas.ipynb @@ -71,9 +71,9 @@ "source": [ "import time\n", "\n", - "from langchain.document_loaders import TextLoader\n", "from langchain.text_splitter import SpacyTextSplitter\n", - "from langchain.vectorstores import AtlasDB" + "from langchain_community.document_loaders import TextLoader\n", + "from langchain_community.vectorstores import AtlasDB" ] }, { diff --git a/docs/docs/integrations/vectorstores/awadb.ipynb b/docs/docs/integrations/vectorstores/awadb.ipynb index 6a30308438..469163d1d4 100644 --- a/docs/docs/integrations/vectorstores/awadb.ipynb +++ b/docs/docs/integrations/vectorstores/awadb.ipynb @@ -28,9 +28,9 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders import TextLoader\n", "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain.vectorstores import AwaDB" + "from langchain_community.document_loaders import TextLoader\n", + "from langchain_community.vectorstores import AwaDB" ] }, { diff --git a/docs/docs/integrations/vectorstores/azure_cosmos_db.ipynb b/docs/docs/integrations/vectorstores/azure_cosmos_db.ipynb index a2fd13f3b3..7fed5acdb9 100644 --- a/docs/docs/integrations/vectorstores/azure_cosmos_db.ipynb +++ b/docs/docs/integrations/vectorstores/azure_cosmos_db.ipynb @@ -130,13 +130,13 @@ }, "outputs": [], "source": [ - "from langchain.document_loaders import TextLoader\n", "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain.vectorstores.azure_cosmos_db_vector_search import (\n", + "from langchain_community.document_loaders import TextLoader\n", + "from langchain_community.embeddings import OpenAIEmbeddings\n", + "from langchain_community.vectorstores.azure_cosmos_db_vector_search import (\n", " AzureCosmosDBVectorSearch,\n", " CosmosDBSimilarityType,\n", ")\n", - "from langchain_community.embeddings import OpenAIEmbeddings\n", "\n", "SOURCE_FILE_NAME = \"../../modules/state_of_the_union.txt\"\n", "\n", diff --git a/docs/docs/integrations/vectorstores/azuresearch.ipynb b/docs/docs/integrations/vectorstores/azuresearch.ipynb index 2b9389a3ae..a4d2d29955 100644 --- a/docs/docs/integrations/vectorstores/azuresearch.ipynb +++ b/docs/docs/integrations/vectorstores/azuresearch.ipynb @@ -45,8 +45,8 @@ "source": [ "import os\n", "\n", - "from langchain.vectorstores.azuresearch import AzureSearch\n", - "from langchain_community.embeddings import OpenAIEmbeddings" + "from langchain_community.embeddings import OpenAIEmbeddings\n", + "from langchain_community.vectorstores.azuresearch import AzureSearch" ] }, { @@ -130,8 +130,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders import TextLoader\n", "from langchain.text_splitter import CharacterTextSplitter\n", + "from 
langchain_community.document_loaders import TextLoader\n", "\n", "loader = TextLoader(\"../../modules/state_of_the_union.txt\", encoding=\"utf-8\")\n", "\n", diff --git a/docs/docs/integrations/vectorstores/bageldb.ipynb b/docs/docs/integrations/vectorstores/bageldb.ipynb index 6f846d5442..5e19ad33a8 100644 --- a/docs/docs/integrations/vectorstores/bageldb.ipynb +++ b/docs/docs/integrations/vectorstores/bageldb.ipynb @@ -32,7 +32,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.vectorstores import Bagel\n", + "from langchain_community.vectorstores import Bagel\n", "\n", "texts = [\"hello bagel\", \"hello langchain\", \"I love salad\", \"my car\", \"a dog\"]\n", "# create cluster and add texts\n", @@ -108,8 +108,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders import TextLoader\n", "from langchain.text_splitter import CharacterTextSplitter\n", + "from langchain_community.document_loaders import TextLoader\n", "\n", "loader = TextLoader(\"../../modules/state_of_the_union.txt\")\n", "documents = loader.load()\n", diff --git a/docs/docs/integrations/vectorstores/baiducloud_vector_search.ipynb b/docs/docs/integrations/vectorstores/baiducloud_vector_search.ipynb index 92edb132d9..b146089741 100644 --- a/docs/docs/integrations/vectorstores/baiducloud_vector_search.ipynb +++ b/docs/docs/integrations/vectorstores/baiducloud_vector_search.ipynb @@ -77,8 +77,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders import TextLoader\n", "from langchain.text_splitter import CharacterTextSplitter\n", + "from langchain_community.document_loaders import TextLoader\n", "\n", "loader = TextLoader(\"../../../state_of_the_union.txt\")\n", "documents = loader.load()\n", @@ -105,7 +105,7 @@ "outputs": [], "source": [ "# Create a bes instance and index docs.\n", - "from langchain.vectorstores import BESVectorStore\n", + "from langchain_community.vectorstores import BESVectorStore\n", "\n", "bes = BESVectorStore.from_documents(\n", " documents=docs,\n", diff --git a/docs/docs/integrations/vectorstores/chroma.ipynb b/docs/docs/integrations/vectorstores/chroma.ipynb index 852946f1c1..5fd390ed37 100644 --- a/docs/docs/integrations/vectorstores/chroma.ipynb +++ b/docs/docs/integrations/vectorstores/chroma.ipynb @@ -73,12 +73,12 @@ ], "source": [ "# import\n", - "from langchain.document_loaders import TextLoader\n", "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain.vectorstores import Chroma\n", + "from langchain_community.document_loaders import TextLoader\n", "from langchain_community.embeddings.sentence_transformer import (\n", " SentenceTransformerEmbeddings,\n", ")\n", + "from langchain_community.vectorstores import Chroma\n", "\n", "# load the document and split it into chunks\n", "loader = TextLoader(\"../../modules/state_of_the_union.txt\")\n", diff --git a/docs/docs/integrations/vectorstores/clarifai.ipynb b/docs/docs/integrations/vectorstores/clarifai.ipynb index f6f7b7ec9a..17ac1b1075 100644 --- a/docs/docs/integrations/vectorstores/clarifai.ipynb +++ b/docs/docs/integrations/vectorstores/clarifai.ipynb @@ -79,9 +79,9 @@ "outputs": [], "source": [ "# Import the required modules\n", - "from langchain.document_loaders import TextLoader\n", "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain.vectorstores import Clarifai" + "from langchain_community.document_loaders import TextLoader\n", + "from langchain_community.vectorstores import Clarifai" ] }, { diff --git 
a/docs/docs/integrations/vectorstores/clickhouse.ipynb b/docs/docs/integrations/vectorstores/clickhouse.ipynb index c74ec7d7a3..3b639da02a 100644 --- a/docs/docs/integrations/vectorstores/clickhouse.ipynb +++ b/docs/docs/integrations/vectorstores/clickhouse.ipynb @@ -102,8 +102,8 @@ "outputs": [], "source": [ "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain.vectorstores import Clickhouse, ClickhouseSettings\n", - "from langchain_community.embeddings.openai import OpenAIEmbeddings" + "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", + "from langchain_community.vectorstores import Clickhouse, ClickhouseSettings" ] }, { @@ -119,7 +119,7 @@ }, "outputs": [], "source": [ - "from langchain.document_loaders import TextLoader\n", + "from langchain_community.document_loaders import TextLoader\n", "\n", "loader = TextLoader(\"../../modules/state_of_the_union.txt\")\n", "documents = loader.load()\n", @@ -303,8 +303,8 @@ } ], "source": [ - "from langchain.document_loaders import TextLoader\n", - "from langchain.vectorstores import Clickhouse, ClickhouseSettings\n", + "from langchain_community.document_loaders import TextLoader\n", + "from langchain_community.vectorstores import Clickhouse, ClickhouseSettings\n", "\n", "loader = TextLoader(\"../../modules/state_of_the_union.txt\")\n", "documents = loader.load()\n", diff --git a/docs/docs/integrations/vectorstores/dashvector.ipynb b/docs/docs/integrations/vectorstores/dashvector.ipynb index 9f29f6f7c7..50c1b05831 100644 --- a/docs/docs/integrations/vectorstores/dashvector.ipynb +++ b/docs/docs/integrations/vectorstores/dashvector.ipynb @@ -102,8 +102,8 @@ "outputs": [], "source": [ "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain.vectorstores import DashVector\n", - "from langchain_community.embeddings.dashscope import DashScopeEmbeddings" + "from langchain_community.embeddings.dashscope import DashScopeEmbeddings\n", + "from langchain_community.vectorstores import DashVector" ] }, { @@ -121,7 +121,7 @@ }, "outputs": [], "source": [ - "from langchain.document_loaders import TextLoader\n", + "from langchain_community.document_loaders import TextLoader\n", "\n", "loader = TextLoader(\"../../modules/state_of_the_union.txt\")\n", "documents = loader.load()\n", diff --git a/docs/docs/integrations/vectorstores/databricks_vector_search.ipynb b/docs/docs/integrations/vectorstores/databricks_vector_search.ipynb index a4c0942b60..adef3bda51 100644 --- a/docs/docs/integrations/vectorstores/databricks_vector_search.ipynb +++ b/docs/docs/integrations/vectorstores/databricks_vector_search.ipynb @@ -59,8 +59,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders import TextLoader\n", "from langchain.text_splitter import CharacterTextSplitter\n", + "from langchain_community.document_loaders import TextLoader\n", "from langchain_community.embeddings import OpenAIEmbeddings\n", "\n", "loader = TextLoader(\"../../modules/state_of_the_union.txt\")\n", @@ -147,7 +147,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.vectorstores import DatabricksVectorSearch\n", + "from langchain_community.vectorstores import DatabricksVectorSearch\n", "\n", "dvs = DatabricksVectorSearch(\n", " index, text_column=\"text\", embedding=embeddings, columns=[\"source\"]\n", diff --git a/docs/docs/integrations/vectorstores/dingo.ipynb b/docs/docs/integrations/vectorstores/dingo.ipynb index 4b03290be3..bd23461639 100644 --- 
a/docs/docs/integrations/vectorstores/dingo.ipynb +++ b/docs/docs/integrations/vectorstores/dingo.ipynb @@ -68,10 +68,10 @@ }, "outputs": [], "source": [ - "from langchain.document_loaders import TextLoader\n", "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain.vectorstores import Dingo\n", - "from langchain_community.embeddings.openai import OpenAIEmbeddings" + "from langchain_community.document_loaders import TextLoader\n", + "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", + "from langchain_community.vectorstores import Dingo" ] }, { @@ -83,7 +83,7 @@ }, "outputs": [], "source": [ - "from langchain.document_loaders import TextLoader\n", + "from langchain_community.document_loaders import TextLoader\n", "\n", "loader = TextLoader(\"../../modules/state_of_the_union.txt\")\n", "documents = loader.load()\n", @@ -130,10 +130,10 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders import TextLoader\n", "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain.vectorstores import Dingo\n", - "from langchain_community.embeddings.openai import OpenAIEmbeddings" + "from langchain_community.document_loaders import TextLoader\n", + "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", + "from langchain_community.vectorstores import Dingo" ] }, { diff --git a/docs/docs/integrations/vectorstores/docarray_hnsw.ipynb b/docs/docs/integrations/vectorstores/docarray_hnsw.ipynb index 9848d95af5..0b3213bd98 100644 --- a/docs/docs/integrations/vectorstores/docarray_hnsw.ipynb +++ b/docs/docs/integrations/vectorstores/docarray_hnsw.ipynb @@ -73,10 +73,10 @@ }, "outputs": [], "source": [ - "from langchain.document_loaders import TextLoader\n", "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain.vectorstores import DocArrayHnswSearch\n", - "from langchain_community.embeddings.openai import OpenAIEmbeddings" + "from langchain_community.document_loaders import TextLoader\n", + "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", + "from langchain_community.vectorstores import DocArrayHnswSearch" ] }, { diff --git a/docs/docs/integrations/vectorstores/docarray_in_memory.ipynb b/docs/docs/integrations/vectorstores/docarray_in_memory.ipynb index f1f3482eac..93ce327b9a 100644 --- a/docs/docs/integrations/vectorstores/docarray_in_memory.ipynb +++ b/docs/docs/integrations/vectorstores/docarray_in_memory.ipynb @@ -70,10 +70,10 @@ }, "outputs": [], "source": [ - "from langchain.document_loaders import TextLoader\n", "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain.vectorstores import DocArrayInMemorySearch\n", - "from langchain_community.embeddings.openai import OpenAIEmbeddings" + "from langchain_community.document_loaders import TextLoader\n", + "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", + "from langchain_community.vectorstores import DocArrayInMemorySearch" ] }, { diff --git a/docs/docs/integrations/vectorstores/elasticsearch.ipynb b/docs/docs/integrations/vectorstores/elasticsearch.ipynb index e54f358779..72726384f8 100644 --- a/docs/docs/integrations/vectorstores/elasticsearch.ipynb +++ b/docs/docs/integrations/vectorstores/elasticsearch.ipynb @@ -64,7 +64,7 @@ "\n", "Example:\n", "```python\n", - " from langchain.vectorstores.elasticsearch import ElasticsearchStore\n", + " from langchain_community.vectorstores.elasticsearch import ElasticsearchStore\n", " from 
langchain_community.embeddings.openai import OpenAIEmbeddings\n", "\n", " embedding = OpenAIEmbeddings()\n", @@ -79,7 +79,7 @@ "\n", "Example:\n", "```python\n", - " from langchain.vectorstores import ElasticsearchStore\n", + " from langchain_community.vectorstores import ElasticsearchStore\n", " from langchain_community.embeddings import OpenAIEmbeddings\n", "\n", " embedding = OpenAIEmbeddings()\n", @@ -115,7 +115,7 @@ "\n", "Example:\n", "```python\n", - " from langchain.vectorstores.elasticsearch import ElasticsearchStore\n", + " from langchain_community.vectorstores.elasticsearch import ElasticsearchStore\n", " from langchain_community.embeddings import OpenAIEmbeddings\n", "\n", " embedding = OpenAIEmbeddings()\n", @@ -180,8 +180,8 @@ }, "outputs": [], "source": [ - "from langchain.vectorstores import ElasticsearchStore\n", - "from langchain_community.embeddings.openai import OpenAIEmbeddings" + "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", + "from langchain_community.vectorstores import ElasticsearchStore" ] }, { @@ -194,8 +194,8 @@ }, "outputs": [], "source": [ - "from langchain.document_loaders import TextLoader\n", "from langchain.text_splitter import CharacterTextSplitter\n", + "from langchain_community.document_loaders import TextLoader\n", "\n", "loader = TextLoader(\"../../modules/state_of_the_union.txt\")\n", "documents = loader.load()\n", @@ -866,7 +866,7 @@ "\n", "```python\n", "\n", - "from langchain.vectorstores.elastic_vector_search import ElasticKNNSearch\n", + "from langchain_community.vectorstores.elastic_vector_search import ElasticKNNSearch\n", "\n", "db = ElasticKNNSearch(\n", " elasticsearch_url=\"http://localhost:9200\",\n", @@ -880,7 +880,7 @@ "\n", "```python\n", "\n", - "from langchain.vectorstores.elasticsearch import ElasticsearchStore\n", + "from langchain_community.vectorstores.elasticsearch import ElasticsearchStore\n", "\n", "db = ElasticsearchStore(\n", " es_url=\"http://localhost:9200\",\n", @@ -900,7 +900,7 @@ "\n", "```python\n", "\n", - "from langchain.vectorstores.elastic_vector_search import ElasticVectorSearch\n", + "from langchain_community.vectorstores.elastic_vector_search import ElasticVectorSearch\n", "\n", "db = ElasticVectorSearch(\n", " elasticsearch_url=\"http://localhost:9200\",\n", @@ -914,7 +914,7 @@ "\n", "```python\n", "\n", - "from langchain.vectorstores.elasticsearch import ElasticsearchStore\n", + "from langchain_community.vectorstores.elasticsearch import ElasticsearchStore\n", "\n", "db = ElasticsearchStore(\n", " es_url=\"http://localhost:9200\",\n", diff --git a/docs/docs/integrations/vectorstores/epsilla.ipynb b/docs/docs/integrations/vectorstores/epsilla.ipynb index dcd21e8ce5..07aa440664 100644 --- a/docs/docs/integrations/vectorstores/epsilla.ipynb +++ b/docs/docs/integrations/vectorstores/epsilla.ipynb @@ -57,8 +57,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.vectorstores import Epsilla\n", - "from langchain_community.embeddings import OpenAIEmbeddings" + "from langchain_community.embeddings import OpenAIEmbeddings\n", + "from langchain_community.vectorstores import Epsilla" ] }, { @@ -67,8 +67,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders import TextLoader\n", "from langchain.text_splitter import CharacterTextSplitter\n", + "from langchain_community.document_loaders import TextLoader\n", "\n", "loader = TextLoader(\"../../modules/state_of_the_union.txt\")\n", "documents = loader.load()\n", diff --git 
a/docs/docs/integrations/vectorstores/faiss.ipynb b/docs/docs/integrations/vectorstores/faiss.ipynb index 2867dd2907..c1d8b2e2f4 100644 --- a/docs/docs/integrations/vectorstores/faiss.ipynb +++ b/docs/docs/integrations/vectorstores/faiss.ipynb @@ -53,10 +53,10 @@ "# Uncomment the following line if you need to initialize FAISS with no AVX2 optimization\n", "# os.environ['FAISS_NO_AVX2'] = '1'\n", "\n", - "from langchain.document_loaders import TextLoader\n", "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain.vectorstores import FAISS\n", + "from langchain_community.document_loaders import TextLoader\n", "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", + "from langchain_community.vectorstores import FAISS\n", "\n", "loader = TextLoader(\"../../../extras/modules/state_of_the_union.txt\")\n", "documents = loader.load()\n", diff --git a/docs/docs/integrations/vectorstores/faiss_async.ipynb b/docs/docs/integrations/vectorstores/faiss_async.ipynb index 876b141202..dde2b6a3a4 100644 --- a/docs/docs/integrations/vectorstores/faiss_async.ipynb +++ b/docs/docs/integrations/vectorstores/faiss_async.ipynb @@ -56,10 +56,10 @@ "# Uncomment the following line if you need to initialize FAISS with no AVX2 optimization\n", "# os.environ['FAISS_NO_AVX2'] = '1'\n", "\n", - "from langchain.document_loaders import TextLoader\n", "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain.vectorstores import FAISS\n", + "from langchain_community.document_loaders import TextLoader\n", "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", + "from langchain_community.vectorstores import FAISS\n", "\n", "loader = TextLoader(\"../../../extras/modules/state_of_the_union.txt\")\n", "documents = loader.load()\n", diff --git a/docs/docs/integrations/vectorstores/google_vertex_ai_vector_search.ipynb b/docs/docs/integrations/vectorstores/google_vertex_ai_vector_search.ipynb index 90161cec54..11d0245932 100644 --- a/docs/docs/integrations/vectorstores/google_vertex_ai_vector_search.ipynb +++ b/docs/docs/integrations/vectorstores/google_vertex_ai_vector_search.ipynb @@ -29,7 +29,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.vectorstores import MatchingEngine" + "from langchain_community.vectorstores import MatchingEngine" ] }, { diff --git a/docs/docs/integrations/vectorstores/hippo.ipynb b/docs/docs/integrations/vectorstores/hippo.ipynb index 3a69192723..ac119ddc6c 100644 --- a/docs/docs/integrations/vectorstores/hippo.ipynb +++ b/docs/docs/integrations/vectorstores/hippo.ipynb @@ -97,11 +97,11 @@ "source": [ "import os\n", "\n", - "from langchain.document_loaders import TextLoader\n", "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain.vectorstores.hippo import Hippo\n", "from langchain_community.chat_models import ChatOpenAI\n", - "from langchain_community.embeddings import OpenAIEmbeddings" + "from langchain_community.document_loaders import TextLoader\n", + "from langchain_community.embeddings import OpenAIEmbeddings\n", + "from langchain_community.vectorstores.hippo import Hippo" ] }, { diff --git a/docs/docs/integrations/vectorstores/hologres.ipynb b/docs/docs/integrations/vectorstores/hologres.ipynb index c1fde04045..1de91e14ce 100644 --- a/docs/docs/integrations/vectorstores/hologres.ipynb +++ b/docs/docs/integrations/vectorstores/hologres.ipynb @@ -34,8 +34,8 @@ "outputs": [], "source": [ "from langchain.text_splitter import CharacterTextSplitter\n", - "from 
langchain.vectorstores import Hologres\n", - "from langchain_community.embeddings.openai import OpenAIEmbeddings" + "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", + "from langchain_community.vectorstores import Hologres" ] }, { @@ -51,7 +51,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders import TextLoader\n", + "from langchain_community.document_loaders import TextLoader\n", "\n", "loader = TextLoader(\"../../modules/state_of_the_union.txt\")\n", "documents = loader.load()\n", diff --git a/docs/docs/integrations/vectorstores/jaguar.ipynb b/docs/docs/integrations/vectorstores/jaguar.ipynb index b151359862..83be4ace3f 100644 --- a/docs/docs/integrations/vectorstores/jaguar.ipynb +++ b/docs/docs/integrations/vectorstores/jaguar.ipynb @@ -53,10 +53,10 @@ "outputs": [], "source": [ "from langchain.chains import RetrievalQAWithSourcesChain\n", - "from langchain.document_loaders import TextLoader\n", "from langchain.prompts import ChatPromptTemplate\n", "from langchain.text_splitter import CharacterTextSplitter\n", "from langchain_community.chat_models import ChatOpenAI\n", + "from langchain_community.document_loaders import TextLoader\n", "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", "from langchain_community.llms import OpenAI\n", "from langchain_community.vectorstores.jaguar import Jaguar\n", diff --git a/docs/docs/integrations/vectorstores/lancedb.ipynb b/docs/docs/integrations/vectorstores/lancedb.ipynb index ccf8091bd6..c7fa077ed9 100644 --- a/docs/docs/integrations/vectorstores/lancedb.ipynb +++ b/docs/docs/integrations/vectorstores/lancedb.ipynb @@ -64,8 +64,8 @@ }, "outputs": [], "source": [ - "from langchain.vectorstores import LanceDB\n", - "from langchain_community.embeddings import OpenAIEmbeddings" + "from langchain_community.embeddings import OpenAIEmbeddings\n", + "from langchain_community.vectorstores import LanceDB" ] }, { @@ -75,8 +75,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders import TextLoader\n", "from langchain.text_splitter import CharacterTextSplitter\n", + "from langchain_community.document_loaders import TextLoader\n", "\n", "loader = TextLoader(\"../../modules/state_of_the_union.txt\")\n", "documents = loader.load()\n", diff --git a/docs/docs/integrations/vectorstores/llm_rails.ipynb b/docs/docs/integrations/vectorstores/llm_rails.ipynb index e9a66c2c60..8e22959353 100644 --- a/docs/docs/integrations/vectorstores/llm_rails.ipynb +++ b/docs/docs/integrations/vectorstores/llm_rails.ipynb @@ -75,7 +75,7 @@ "source": [ "import os\n", "\n", - "from langchain.vectorstores import LLMRails\n", + "from langchain_community.vectorstores import LLMRails\n", "\n", "os.environ[\"LLM_RAILS_DATASTORE_ID\"] = \"Your datastore id \"\n", "os.environ[\"LLM_RAILS_API_KEY\"] = \"Your API Key\"\n", @@ -263,7 +263,7 @@ { "data": { "text/plain": [ - "LLMRailsRetriever(tags=None, metadata=None, vectorstore=<langchain.vectorstores.llm_rails.LLMRails object at 0x...>, search_type='similarity', search_kwargs={'k': 5})" + "LLMRailsRetriever(tags=None, metadata=None, vectorstore=<langchain_community.vectorstores.llm_rails.LLMRails object at 0x...>, search_type='similarity', search_kwargs={'k': 5})" ] }, "execution_count": 10, diff --git a/docs/docs/integrations/vectorstores/marqo.ipynb b/docs/docs/integrations/vectorstores/marqo.ipynb index 64f841913f..9b9da57c7b 100644 --- a/docs/docs/integrations/vectorstores/marqo.ipynb +++ b/docs/docs/integrations/vectorstores/marqo.ipynb @@ -38,9 +38,9 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders import TextLoader\n", "from 
langchain.text_splitter import CharacterTextSplitter\n", - "from langchain.vectorstores import Marqo" + "from langchain_community.document_loaders import TextLoader\n", + "from langchain_community.vectorstores import Marqo" ] }, { @@ -50,7 +50,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders import TextLoader\n", + "from langchain_community.document_loaders import TextLoader\n", "\n", "loader = TextLoader(\"../../modules/state_of_the_union.txt\")\n", "documents = loader.load()\n", diff --git a/docs/docs/integrations/vectorstores/meilisearch.ipynb b/docs/docs/integrations/vectorstores/meilisearch.ipynb index edcaf4b9d0..d1c1130dbe 100644 --- a/docs/docs/integrations/vectorstores/meilisearch.ipynb +++ b/docs/docs/integrations/vectorstores/meilisearch.ipynb @@ -127,8 +127,8 @@ "outputs": [], "source": [ "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain.vectorstores import Meilisearch\n", "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", + "from langchain_community.vectorstores import Meilisearch\n", "\n", "embeddings = OpenAIEmbeddings()" ] @@ -177,7 +177,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders import TextLoader\n", + "from langchain_community.document_loaders import TextLoader\n", "\n", "# Load text\n", "loader = TextLoader(\"../../modules/state_of_the_union.txt\")\n", @@ -217,7 +217,7 @@ "outputs": [], "source": [ "import meilisearch\n", - "from langchain.vectorstores import Meilisearch\n", + "from langchain_community.vectorstores import Meilisearch\n", "\n", "client = meilisearch.Client(url=\"http://127.0.0.1:7700\", api_key=\"***\")\n", "vector_store = Meilisearch(\n", diff --git a/docs/docs/integrations/vectorstores/milvus.ipynb b/docs/docs/integrations/vectorstores/milvus.ipynb index 1ee9d6d8a2..adc5dd2d86 100644 --- a/docs/docs/integrations/vectorstores/milvus.ipynb +++ b/docs/docs/integrations/vectorstores/milvus.ipynb @@ -66,10 +66,10 @@ }, "outputs": [], "source": [ - "from langchain.document_loaders import TextLoader\n", "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain.vectorstores import Milvus\n", - "from langchain_community.embeddings.openai import OpenAIEmbeddings" + "from langchain_community.document_loaders import TextLoader\n", + "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", + "from langchain_community.vectorstores import Milvus" ] }, { @@ -81,7 +81,7 @@ }, "outputs": [], "source": [ - "from langchain.document_loaders import TextLoader\n", + "from langchain_community.document_loaders import TextLoader\n", "\n", "loader = TextLoader(\"../../modules/state_of_the_union.txt\")\n", "documents = loader.load()\n", diff --git a/docs/docs/integrations/vectorstores/momento_vector_index.ipynb b/docs/docs/integrations/vectorstores/momento_vector_index.ipynb index 5e5e0b0b8f..e003a2179e 100644 --- a/docs/docs/integrations/vectorstores/momento_vector_index.ipynb +++ b/docs/docs/integrations/vectorstores/momento_vector_index.ipynb @@ -143,10 +143,10 @@ }, "outputs": [], "source": [ - "from langchain.document_loaders import TextLoader\n", "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain.vectorstores import MomentoVectorIndex\n", - "from langchain_community.embeddings.openai import OpenAIEmbeddings" + "from langchain_community.document_loaders import TextLoader\n", + "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", + "from 
langchain_community.vectorstores import MomentoVectorIndex" ] }, { diff --git a/docs/docs/integrations/vectorstores/mongodb_atlas.ipynb b/docs/docs/integrations/vectorstores/mongodb_atlas.ipynb index 23296ac25e..150b7f5100 100644 --- a/docs/docs/integrations/vectorstores/mongodb_atlas.ipynb +++ b/docs/docs/integrations/vectorstores/mongodb_atlas.ipynb @@ -157,7 +157,7 @@ }, "outputs": [], "source": [ - "from langchain.document_loaders import PyPDFLoader\n", + "from langchain_community.document_loaders import PyPDFLoader\n", "\n", "# Load the PDF\n", "loader = PyPDFLoader(\"https://arxiv.org/pdf/2303.08774.pdf\")\n", @@ -194,8 +194,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.vectorstores import MongoDBAtlasVectorSearch\n", "from langchain_community.embeddings import OpenAIEmbeddings\n", + "from langchain_community.vectorstores import MongoDBAtlasVectorSearch\n", "\n", "# insert the documents in MongoDB Atlas with their embedding\n", "vector_search = MongoDBAtlasVectorSearch.from_documents(\n", @@ -243,8 +243,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.vectorstores import MongoDBAtlasVectorSearch\n", "from langchain_community.embeddings import OpenAIEmbeddings\n", + "from langchain_community.vectorstores import MongoDBAtlasVectorSearch\n", "\n", "vector_search = MongoDBAtlasVectorSearch.from_connection_string(\n", " MONGODB_ATLAS_CLUSTER_URI,\n", diff --git a/docs/docs/integrations/vectorstores/myscale.ipynb b/docs/docs/integrations/vectorstores/myscale.ipynb index 4d027a195e..78abda403b 100644 --- a/docs/docs/integrations/vectorstores/myscale.ipynb +++ b/docs/docs/integrations/vectorstores/myscale.ipynb @@ -82,7 +82,7 @@ "\n", "\n", " ```python\n", - " from langchain.vectorstores import MyScale, MyScaleSettings\n", + " from langchain_community.vectorstores import MyScale, MyScaleSettings\n", " config = MyScaleSetting(host=\"\", port=8443, ...)\n", " index = MyScale(embedding_function, config)\n", " index.add_documents(...)\n", @@ -98,10 +98,10 @@ }, "outputs": [], "source": [ - "from langchain.document_loaders import TextLoader\n", "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain.vectorstores import MyScale\n", - "from langchain_community.embeddings.openai import OpenAIEmbeddings" + "from langchain_community.document_loaders import TextLoader\n", + "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", + "from langchain_community.vectorstores import MyScale" ] }, { @@ -113,7 +113,7 @@ }, "outputs": [], "source": [ - "from langchain.document_loaders import TextLoader\n", + "from langchain_community.document_loaders import TextLoader\n", "\n", "loader = TextLoader(\"../../modules/state_of_the_union.txt\")\n", "documents = loader.load()\n", @@ -219,8 +219,8 @@ } ], "source": [ - "from langchain.document_loaders import TextLoader\n", - "from langchain.vectorstores import MyScale\n", + "from langchain_community.document_loaders import TextLoader\n", + "from langchain_community.vectorstores import MyScale\n", "\n", "loader = TextLoader(\"../../modules/state_of_the_union.txt\")\n", "documents = loader.load()\n", diff --git a/docs/docs/integrations/vectorstores/neo4jvector.ipynb b/docs/docs/integrations/vectorstores/neo4jvector.ipynb index ffb9229764..755bb9bb59 100644 --- a/docs/docs/integrations/vectorstores/neo4jvector.ipynb +++ b/docs/docs/integrations/vectorstores/neo4jvector.ipynb @@ -73,10 +73,10 @@ "outputs": [], "source": [ "from langchain.docstore.document import Document\n", - "from 
langchain.document_loaders import TextLoader\n", "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain.vectorstores import Neo4jVector\n", - "from langchain_community.embeddings.openai import OpenAIEmbeddings" + "from langchain_community.document_loaders import TextLoader\n", + "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", + "from langchain_community.vectorstores import Neo4jVector" ] }, { diff --git a/docs/docs/integrations/vectorstores/nucliadb.ipynb b/docs/docs/integrations/vectorstores/nucliadb.ipynb index 3ba5ef0b61..6f26b6602e 100644 --- a/docs/docs/integrations/vectorstores/nucliadb.ipynb +++ b/docs/docs/integrations/vectorstores/nucliadb.ipynb @@ -33,7 +33,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.vectorstores.nucliadb import NucliaDB\n", + "from langchain_community.vectorstores.nucliadb import NucliaDB\n", "\n", "API_KEY = \"YOUR_API_KEY\"\n", "\n", @@ -55,7 +55,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.vectorstores.nucliadb import NucliaDB\n", + "from langchain_community.vectorstores.nucliadb import NucliaDB\n", "\n", "ndb = NucliaDB(knowledge_box=\"YOUR_KB_ID\", local=True, backend=\"http://my-local-server\")" ] diff --git a/docs/docs/integrations/vectorstores/opensearch.ipynb b/docs/docs/integrations/vectorstores/opensearch.ipynb index f8d015086b..f8bd48ddb9 100644 --- a/docs/docs/integrations/vectorstores/opensearch.ipynb +++ b/docs/docs/integrations/vectorstores/opensearch.ipynb @@ -68,10 +68,10 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders import TextLoader\n", "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain.vectorstores import OpenSearchVectorSearch\n", - "from langchain_community.embeddings.openai import OpenAIEmbeddings" + "from langchain_community.document_loaders import TextLoader\n", + "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", + "from langchain_community.vectorstores import OpenSearchVectorSearch" ] }, { @@ -81,7 +81,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders import TextLoader\n", + "from langchain_community.document_loaders import TextLoader\n", "\n", "loader = TextLoader(\"../../modules/state_of_the_union.txt\")\n", "documents = loader.load()\n", diff --git a/docs/docs/integrations/vectorstores/pgembedding.ipynb b/docs/docs/integrations/vectorstores/pgembedding.ipynb index f9c996b54a..56a6268552 100644 --- a/docs/docs/integrations/vectorstores/pgembedding.ipynb +++ b/docs/docs/integrations/vectorstores/pgembedding.ipynb @@ -82,10 +82,10 @@ "outputs": [], "source": [ "from langchain.docstore.document import Document\n", - "from langchain.document_loaders import TextLoader\n", "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain.vectorstores import PGEmbedding\n", - "from langchain_community.embeddings.openai import OpenAIEmbeddings" + "from langchain_community.document_loaders import TextLoader\n", + "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", + "from langchain_community.vectorstores import PGEmbedding" ] }, { @@ -262,7 +262,7 @@ { "data": { "text/plain": [ - "VectorStoreRetriever(vectorstore=<langchain.vectorstores.pgembedding.PGEmbedding object at 0x...>, search_type='similarity', search_kwargs={})" + "VectorStoreRetriever(vectorstore=<langchain_community.vectorstores.pgembedding.PGEmbedding object at 0x...>, search_type='similarity', search_kwargs={})" ] }, "execution_count": 16, diff --git a/docs/docs/integrations/vectorstores/pgvecto_rs.ipynb b/docs/docs/integrations/vectorstores/pgvecto_rs.ipynb index 
4565fca524..7210b276e6 100644 --- a/docs/docs/integrations/vectorstores/pgvecto_rs.ipynb +++ b/docs/docs/integrations/vectorstores/pgvecto_rs.ipynb @@ -30,10 +30,10 @@ "from typing import List\n", "\n", "from langchain.docstore.document import Document\n", - "from langchain.document_loaders import TextLoader\n", "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain.vectorstores.pgvecto_rs import PGVecto_rs\n", - "from langchain_community.embeddings.openai import OpenAIEmbeddings" + "from langchain_community.document_loaders import TextLoader\n", + "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", + "from langchain_community.vectorstores.pgvecto_rs import PGVecto_rs" ] }, { diff --git a/docs/docs/integrations/vectorstores/pgvector.ipynb b/docs/docs/integrations/vectorstores/pgvector.ipynb index 1457bc2ee2..b02ae38d52 100644 --- a/docs/docs/integrations/vectorstores/pgvector.ipynb +++ b/docs/docs/integrations/vectorstores/pgvector.ipynb @@ -101,10 +101,10 @@ "outputs": [], "source": [ "from langchain.docstore.document import Document\n", - "from langchain.document_loaders import TextLoader\n", "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain.vectorstores.pgvector import PGVector\n", - "from langchain_community.embeddings.openai import OpenAIEmbeddings" + "from langchain_community.document_loaders import TextLoader\n", + "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", + "from langchain_community.vectorstores.pgvector import PGVector" ] }, { @@ -587,7 +587,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "tags=None metadata=None vectorstore=<langchain.vectorstores.pgvector.PGVector object at 0x...> search_type='similarity' search_kwargs={}\n" + "tags=None metadata=None vectorstore=<langchain_community.vectorstores.pgvector.PGVector object at 0x...> search_type='similarity' search_kwargs={}\n" ] } ], diff --git a/docs/docs/integrations/vectorstores/pinecone.ipynb b/docs/docs/integrations/vectorstores/pinecone.ipynb index d3c48312f0..4de6d019cd 100644 --- a/docs/docs/integrations/vectorstores/pinecone.ipynb +++ b/docs/docs/integrations/vectorstores/pinecone.ipynb @@ -79,10 +79,10 @@ }, "outputs": [], "source": [ - "from langchain.document_loaders import TextLoader\n", "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain.vectorstores import Pinecone\n", - "from langchain_community.embeddings.openai import OpenAIEmbeddings" + "from langchain_community.document_loaders import TextLoader\n", + "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", + "from langchain_community.vectorstores import Pinecone" ] }, { @@ -92,7 +92,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders import TextLoader\n", + "from langchain_community.document_loaders import TextLoader\n", "\n", "loader = TextLoader(\"../../modules/state_of_the_union.txt\")\n", "documents = loader.load()\n", diff --git a/docs/docs/integrations/vectorstores/qdrant.ipynb b/docs/docs/integrations/vectorstores/qdrant.ipynb index 384f52313c..14d4b7bdd6 100644 --- a/docs/docs/integrations/vectorstores/qdrant.ipynb +++ b/docs/docs/integrations/vectorstores/qdrant.ipynb @@ -78,10 +78,10 @@ }, "outputs": [], "source": [ - "from langchain.document_loaders import TextLoader\n", "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain.vectorstores import Qdrant\n", - "from langchain_community.embeddings.openai import 
OpenAIEmbeddings\n", + "from langchain_community.vectorstores import Qdrant" ] }, { @@ -532,7 +532,7 @@ { "data": { "text/plain": [ - "VectorStoreRetriever(vectorstore=, search_type='similarity', search_kwargs={})" + "VectorStoreRetriever(vectorstore=, search_type='similarity', search_kwargs={})" ] }, "execution_count": 15, @@ -568,7 +568,7 @@ { "data": { "text/plain": [ - "VectorStoreRetriever(vectorstore=, search_type='mmr', search_kwargs={})" + "VectorStoreRetriever(vectorstore=, search_type='mmr', search_kwargs={})" ] }, "execution_count": 16, @@ -690,7 +690,7 @@ { "data": { "text/plain": [ - "" + "" ] }, "execution_count": 19, diff --git a/docs/docs/integrations/vectorstores/redis.ipynb b/docs/docs/integrations/vectorstores/redis.ipynb index 41f51d0b71..ed5354519e 100644 --- a/docs/docs/integrations/vectorstores/redis.ipynb +++ b/docs/docs/integrations/vectorstores/redis.ipynb @@ -249,7 +249,7 @@ }, "outputs": [], "source": [ - "from langchain.vectorstores.redis import Redis\n", + "from langchain_community.vectorstores.redis import Redis\n", "\n", "rds = Redis.from_texts(\n", " texts,\n", @@ -825,7 +825,7 @@ "\n", "```python\n", "\n", - "from langchain.vectorstores.redis import RedisText, RedisNum, RedisTag\n", + "from langchain_community.vectorstores.redis import RedisText, RedisNum, RedisTag\n", "\n", "# exact matching\n", "has_high_credit = RedisTag(\"credit_score\") == \"high\"\n", @@ -850,7 +850,7 @@ "\n", "```python\n", "\n", - "from langchain.vectorstores.redis import RedisFilter\n", + "from langchain_community.vectorstores.redis import RedisFilter\n", "\n", "# same examples as above\n", "has_high_credit = RedisFilter.tag(\"credit_score\") == \"high\"\n", @@ -876,7 +876,7 @@ } ], "source": [ - "from langchain.vectorstores.redis import RedisText\n", + "from langchain_community.vectorstores.redis import RedisText\n", "\n", "is_engineer = RedisText(\"job\") == \"engineer\"\n", "results = rds.similarity_search(\"foo\", k=3, filter=is_engineer)\n", @@ -926,7 +926,7 @@ } ], "source": [ - "from langchain.vectorstores.redis import RedisNum\n", + "from langchain_community.vectorstores.redis import RedisNum\n", "\n", "is_over_18 = RedisNum(\"age\") > 18\n", "is_under_99 = RedisNum(\"age\") < 99\n", diff --git a/docs/docs/integrations/vectorstores/rockset.ipynb b/docs/docs/integrations/vectorstores/rockset.ipynb index cd70260f61..be6f06ba79 100644 --- a/docs/docs/integrations/vectorstores/rockset.ipynb +++ b/docs/docs/integrations/vectorstores/rockset.ipynb @@ -108,10 +108,10 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders import TextLoader\n", "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain.vectorstores import Rockset\n", + "from langchain_community.document_loaders import TextLoader\n", "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", + "from langchain_community.vectorstores import Rockset\n", "\n", "loader = TextLoader(\"../../modules/state_of_the_union.txt\")\n", "documents = loader.load()\n", diff --git a/docs/docs/integrations/vectorstores/scann.ipynb b/docs/docs/integrations/vectorstores/scann.ipynb index 4b904508e9..b2d853cf0a 100644 --- a/docs/docs/integrations/vectorstores/scann.ipynb +++ b/docs/docs/integrations/vectorstores/scann.ipynb @@ -59,10 +59,10 @@ } ], "source": [ - "from langchain.document_loaders import TextLoader\n", "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain.vectorstores import ScaNN\n", + "from langchain_community.document_loaders import 
TextLoader\n", "from langchain_community.embeddings import HuggingFaceEmbeddings\n", + "from langchain_community.vectorstores import ScaNN\n", "\n", "loader = TextLoader(\"state_of_the_union.txt\")\n", "documents = loader.load()\n", diff --git a/docs/docs/integrations/vectorstores/semadb.ipynb b/docs/docs/integrations/vectorstores/semadb.ipynb index 6f1804c725..b99220ea43 100644 --- a/docs/docs/integrations/vectorstores/semadb.ipynb +++ b/docs/docs/integrations/vectorstores/semadb.ipynb @@ -61,8 +61,8 @@ } ], "source": [ - "from langchain.document_loaders import TextLoader\n", "from langchain.text_splitter import CharacterTextSplitter\n", + "from langchain_community.document_loaders import TextLoader\n", "\n", "loader = TextLoader(\"../../modules/state_of_the_union.txt\")\n", "documents = loader.load()\n", @@ -109,8 +109,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.vectorstores import SemaDB\n", - "from langchain.vectorstores.utils import DistanceStrategy" + "from langchain_community.vectorstores import SemaDB\n", + "from langchain_community.vectorstores.utils import DistanceStrategy" ] }, { diff --git a/docs/docs/integrations/vectorstores/singlestoredb.ipynb b/docs/docs/integrations/vectorstores/singlestoredb.ipynb index ff5b20fbf9..70e5e5ef0b 100644 --- a/docs/docs/integrations/vectorstores/singlestoredb.ipynb +++ b/docs/docs/integrations/vectorstores/singlestoredb.ipynb @@ -46,10 +46,10 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders import TextLoader\n", "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain.vectorstores import SingleStoreDB\n", - "from langchain_community.embeddings.openai import OpenAIEmbeddings" + "from langchain_community.document_loaders import TextLoader\n", + "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", + "from langchain_community.vectorstores import SingleStoreDB" ] }, { diff --git a/docs/docs/integrations/vectorstores/sklearn.ipynb b/docs/docs/integrations/vectorstores/sklearn.ipynb index a6f08e2e6a..516ecae361 100644 --- a/docs/docs/integrations/vectorstores/sklearn.ipynb +++ b/docs/docs/integrations/vectorstores/sklearn.ipynb @@ -60,10 +60,10 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders import TextLoader\n", "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain.vectorstores import SKLearnVectorStore\n", + "from langchain_community.document_loaders import TextLoader\n", "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", + "from langchain_community.vectorstores import SKLearnVectorStore\n", "\n", "loader = TextLoader(\"../../modules/state_of_the_union.txt\")\n", "documents = loader.load()\n", diff --git a/docs/docs/integrations/vectorstores/sqlitevss.ipynb b/docs/docs/integrations/vectorstores/sqlitevss.ipynb index 5c80d66484..bb8f62d6ca 100644 --- a/docs/docs/integrations/vectorstores/sqlitevss.ipynb +++ b/docs/docs/integrations/vectorstores/sqlitevss.ipynb @@ -69,12 +69,12 @@ } ], "source": [ - "from langchain.document_loaders import TextLoader\n", "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain.vectorstores import SQLiteVSS\n", + "from langchain_community.document_loaders import TextLoader\n", "from langchain_community.embeddings.sentence_transformer import (\n", " SentenceTransformerEmbeddings,\n", ")\n", + "from langchain_community.vectorstores import SQLiteVSS\n", "\n", "# load the document and split it into chunks\n", "loader = 
TextLoader(\"../../modules/state_of_the_union.txt\")\n", @@ -146,12 +146,12 @@ } ], "source": [ - "from langchain.document_loaders import TextLoader\n", "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain.vectorstores import SQLiteVSS\n", + "from langchain_community.document_loaders import TextLoader\n", "from langchain_community.embeddings.sentence_transformer import (\n", " SentenceTransformerEmbeddings,\n", ")\n", + "from langchain_community.vectorstores import SQLiteVSS\n", "\n", "# load the document and split it into chunks\n", "loader = TextLoader(\"../../modules/state_of_the_union.txt\")\n", diff --git a/docs/docs/integrations/vectorstores/starrocks.ipynb b/docs/docs/integrations/vectorstores/starrocks.ipynb index 690f177d2f..88d0566653 100644 --- a/docs/docs/integrations/vectorstores/starrocks.ipynb +++ b/docs/docs/integrations/vectorstores/starrocks.ipynb @@ -58,12 +58,15 @@ ], "source": [ "from langchain.chains import RetrievalQA\n", - "from langchain.document_loaders import DirectoryLoader, UnstructuredMarkdownLoader\n", "from langchain.text_splitter import TokenTextSplitter\n", - "from langchain.vectorstores import StarRocks\n", - "from langchain.vectorstores.starrocks import StarRocksSettings\n", + "from langchain_community.document_loaders import (\n", + " DirectoryLoader,\n", + " UnstructuredMarkdownLoader,\n", + ")\n", "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", "from langchain_community.llms import OpenAI\n", + "from langchain_community.vectorstores import StarRocks\n", + "from langchain_community.vectorstores.starrocks import StarRocksSettings\n", "\n", "update_vectordb = False" ] diff --git a/docs/docs/integrations/vectorstores/supabase.ipynb b/docs/docs/integrations/vectorstores/supabase.ipynb index 5331106107..7b84f595a0 100644 --- a/docs/docs/integrations/vectorstores/supabase.ipynb +++ b/docs/docs/integrations/vectorstores/supabase.ipynb @@ -155,8 +155,8 @@ "source": [ "import os\n", "\n", - "from langchain.vectorstores import SupabaseVectorStore\n", "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", + "from langchain_community.vectorstores import SupabaseVectorStore\n", "from supabase.client import Client, create_client\n", "\n", "supabase_url = os.environ.get(\"SUPABASE_URL\")\n", @@ -183,8 +183,8 @@ }, "outputs": [], "source": [ - "from langchain.document_loaders import TextLoader\n", "from langchain.text_splitter import CharacterTextSplitter\n", + "from langchain_community.document_loaders import TextLoader\n", "\n", "loader = TextLoader(\"../../modules/state_of_the_union.txt\")\n", "documents = loader.load()\n", diff --git a/docs/docs/integrations/vectorstores/tair.ipynb b/docs/docs/integrations/vectorstores/tair.ipynb index d9345634dc..a7ec2a53d0 100644 --- a/docs/docs/integrations/vectorstores/tair.ipynb +++ b/docs/docs/integrations/vectorstores/tair.ipynb @@ -21,8 +21,8 @@ "outputs": [], "source": [ "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain.vectorstores import Tair\n", - "from langchain_community.embeddings.fake import FakeEmbeddings" + "from langchain_community.embeddings.fake import FakeEmbeddings\n", + "from langchain_community.vectorstores import Tair" ] }, { @@ -31,7 +31,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders import TextLoader\n", + "from langchain_community.document_loaders import TextLoader\n", "\n", "loader = TextLoader(\"../../modules/state_of_the_union.txt\")\n", "documents = 
loader.load()\n", diff --git a/docs/docs/integrations/vectorstores/tencentvectordb.ipynb b/docs/docs/integrations/vectorstores/tencentvectordb.ipynb index bd48d7e3c0..98047438ec 100644 --- a/docs/docs/integrations/vectorstores/tencentvectordb.ipynb +++ b/docs/docs/integrations/vectorstores/tencentvectordb.ipynb @@ -33,11 +33,11 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders import TextLoader\n", "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain.vectorstores import TencentVectorDB\n", - "from langchain.vectorstores.tencentvectordb import ConnectionParams\n", - "from langchain_community.embeddings.fake import FakeEmbeddings" + "from langchain_community.document_loaders import TextLoader\n", + "from langchain_community.embeddings.fake import FakeEmbeddings\n", + "from langchain_community.vectorstores import TencentVectorDB\n", + "from langchain_community.vectorstores.tencentvectordb import ConnectionParams" ] }, { diff --git a/docs/docs/integrations/vectorstores/tigris.ipynb b/docs/docs/integrations/vectorstores/tigris.ipynb index c5c1b90d79..0ac07a4a28 100644 --- a/docs/docs/integrations/vectorstores/tigris.ipynb +++ b/docs/docs/integrations/vectorstores/tigris.ipynb @@ -85,10 +85,10 @@ }, "outputs": [], "source": [ - "from langchain.document_loaders import TextLoader\n", "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain.vectorstores import Tigris\n", - "from langchain_community.embeddings.openai import OpenAIEmbeddings" + "from langchain_community.document_loaders import TextLoader\n", + "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", + "from langchain_community.vectorstores import Tigris" ] }, { diff --git a/docs/docs/integrations/vectorstores/tiledb.ipynb b/docs/docs/integrations/vectorstores/tiledb.ipynb index 8853ec4970..070e2f8e77 100644 --- a/docs/docs/integrations/vectorstores/tiledb.ipynb +++ b/docs/docs/integrations/vectorstores/tiledb.ipynb @@ -43,10 +43,10 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders import TextLoader\n", "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain.vectorstores import TileDB\n", + "from langchain_community.document_loaders import TextLoader\n", "from langchain_community.embeddings import HuggingFaceEmbeddings\n", + "from langchain_community.vectorstores import TileDB\n", "\n", "raw_documents = TextLoader(\"../../modules/state_of_the_union.txt\").load()\n", "text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)\n", diff --git a/docs/docs/integrations/vectorstores/timescalevector.ipynb b/docs/docs/integrations/vectorstores/timescalevector.ipynb index 177c2325a4..be4a9693cd 100644 --- a/docs/docs/integrations/vectorstores/timescalevector.ipynb +++ b/docs/docs/integrations/vectorstores/timescalevector.ipynb @@ -123,11 +123,11 @@ "from datetime import datetime, timedelta\n", "\n", "from langchain.docstore.document import Document\n", - "from langchain.document_loaders import TextLoader\n", - "from langchain.document_loaders.json_loader import JSONLoader\n", "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain.vectorstores.timescalevector import TimescaleVector\n", - "from langchain_community.embeddings.openai import OpenAIEmbeddings" + "from langchain_community.document_loaders import TextLoader\n", + "from langchain_community.document_loaders.json_loader import JSONLoader\n", + "from langchain_community.embeddings.openai import 
OpenAIEmbeddings\n", + "from langchain_community.vectorstores.timescalevector import TimescaleVector" ] }, { @@ -329,7 +329,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "tags=['TimescaleVector', 'OpenAIEmbeddings'] metadata=None vectorstore= search_type='similarity' search_kwargs={}\n" + "tags=['TimescaleVector', 'OpenAIEmbeddings'] metadata=None vectorstore= search_type='similarity' search_kwargs={}\n" ] } ], diff --git a/docs/docs/integrations/vectorstores/typesense.ipynb b/docs/docs/integrations/vectorstores/typesense.ipynb index 6750e4f6fe..d7ecd59d1f 100644 --- a/docs/docs/integrations/vectorstores/typesense.ipynb +++ b/docs/docs/integrations/vectorstores/typesense.ipynb @@ -84,10 +84,10 @@ }, "outputs": [], "source": [ - "from langchain.document_loaders import TextLoader\n", "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain.vectorstores import Typesense\n", - "from langchain_community.embeddings.openai import OpenAIEmbeddings" + "from langchain_community.document_loaders import TextLoader\n", + "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", + "from langchain_community.vectorstores import Typesense" ] }, { diff --git a/docs/docs/integrations/vectorstores/usearch.ipynb b/docs/docs/integrations/vectorstores/usearch.ipynb index 3752242d2e..daf9661595 100644 --- a/docs/docs/integrations/vectorstores/usearch.ipynb +++ b/docs/docs/integrations/vectorstores/usearch.ipynb @@ -55,10 +55,10 @@ }, "outputs": [], "source": [ - "from langchain.document_loaders import TextLoader\n", "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain.vectorstores import USearch\n", - "from langchain_community.embeddings.openai import OpenAIEmbeddings" + "from langchain_community.document_loaders import TextLoader\n", + "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", + "from langchain_community.vectorstores import USearch" ] }, { @@ -70,7 +70,7 @@ }, "outputs": [], "source": [ - "from langchain.document_loaders import TextLoader\n", + "from langchain_community.document_loaders import TextLoader\n", "\n", "loader = TextLoader(\"../../../extras/modules/state_of_the_union.txt\")\n", "documents = loader.load()\n", diff --git a/docs/docs/integrations/vectorstores/vald.ipynb b/docs/docs/integrations/vectorstores/vald.ipynb index e7ec96d1b4..5f9554a3fb 100644 --- a/docs/docs/integrations/vectorstores/vald.ipynb +++ b/docs/docs/integrations/vectorstores/vald.ipynb @@ -42,10 +42,10 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders import TextLoader\n", "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain.vectorstores import Vald\n", + "from langchain_community.document_loaders import TextLoader\n", "from langchain_community.embeddings import HuggingFaceEmbeddings\n", + "from langchain_community.vectorstores import Vald\n", "\n", "raw_documents = TextLoader(\"state_of_the_union.txt\").load()\n", "text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)\n", @@ -189,10 +189,10 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders import TextLoader\n", "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain.vectorstores import Vald\n", + "from langchain_community.document_loaders import TextLoader\n", "from langchain_community.embeddings import HuggingFaceEmbeddings\n", + "from langchain_community.vectorstores import Vald\n", "\n", "raw_documents = 
TextLoader(\"state_of_the_union.txt\").load()\n", "text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)\n", diff --git a/docs/docs/integrations/vectorstores/vearch.ipynb b/docs/docs/integrations/vectorstores/vearch.ipynb index 0b655464bc..2045a279e6 100644 --- a/docs/docs/integrations/vectorstores/vearch.ipynb +++ b/docs/docs/integrations/vectorstores/vearch.ipynb @@ -54,10 +54,10 @@ } ], "source": [ - "from langchain.document_loaders import TextLoader\n", "from langchain.text_splitter import RecursiveCharacterTextSplitter\n", - "from langchain.vectorstores.vearch import Vearch\n", + "from langchain_community.document_loaders import TextLoader\n", "from langchain_community.embeddings.huggingface import HuggingFaceEmbeddings\n", + "from langchain_community.vectorstores.vearch import Vearch\n", "from transformers import AutoModel, AutoTokenizer\n", "\n", "# repalce to your local model path\n", diff --git a/docs/docs/integrations/vectorstores/vectara.ipynb b/docs/docs/integrations/vectorstores/vectara.ipynb index 943d5f1049..dbf8ea9bdf 100644 --- a/docs/docs/integrations/vectorstores/vectara.ipynb +++ b/docs/docs/integrations/vectorstores/vectara.ipynb @@ -84,10 +84,10 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders import TextLoader\n", "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain.vectorstores import Vectara\n", - "from langchain_community.embeddings.fake import FakeEmbeddings" + "from langchain_community.document_loaders import TextLoader\n", + "from langchain_community.embeddings.fake import FakeEmbeddings\n", + "from langchain_community.vectorstores import Vectara" ] }, { @@ -484,7 +484,7 @@ { "data": { "text/plain": [ - "VectorStoreRetriever(tags=['Vectara'], vectorstore=)" + "VectorStoreRetriever(tags=['Vectara'], vectorstore=)" ] }, "execution_count": 14, diff --git a/docs/docs/integrations/vectorstores/vespa.ipynb b/docs/docs/integrations/vectorstores/vespa.ipynb index d64d72a899..b8372936f7 100644 --- a/docs/docs/integrations/vectorstores/vespa.ipynb +++ b/docs/docs/integrations/vectorstores/vespa.ipynb @@ -150,8 +150,8 @@ }, "outputs": [], "source": [ - "from langchain.document_loaders import TextLoader\n", "from langchain.text_splitter import CharacterTextSplitter\n", + "from langchain_community.document_loaders import TextLoader\n", "\n", "loader = TextLoader(\"../../modules/state_of_the_union.txt\")\n", "documents = loader.load()\n", @@ -202,7 +202,7 @@ " input_field=\"query_embedding\",\n", ")\n", "\n", - "from langchain.vectorstores import VespaStore\n", + "from langchain_community.vectorstores import VespaStore\n", "\n", "db = VespaStore.from_documents(docs, embedding_function, app=vespa_app, **vespa_config)" ] diff --git a/docs/docs/integrations/vectorstores/weaviate.ipynb b/docs/docs/integrations/vectorstores/weaviate.ipynb index 827887adbb..b486ee777e 100644 --- a/docs/docs/integrations/vectorstores/weaviate.ipynb +++ b/docs/docs/integrations/vectorstores/weaviate.ipynb @@ -119,10 +119,10 @@ }, "outputs": [], "source": [ - "from langchain.document_loaders import TextLoader\n", "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain.vectorstores import Weaviate\n", - "from langchain_community.embeddings.openai import OpenAIEmbeddings" + "from langchain_community.document_loaders import TextLoader\n", + "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", + "from langchain_community.vectorstores import Weaviate" ] }, { @@ -132,7 +132,7 @@ 
"metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders import TextLoader\n", + "from langchain_community.document_loaders import TextLoader\n", "\n", "loader = TextLoader(\"../../modules/state_of_the_union.txt\")\n", "documents = loader.load()\n", @@ -214,7 +214,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "\n" + "\n" ] } ], diff --git a/docs/docs/integrations/vectorstores/xata.ipynb b/docs/docs/integrations/vectorstores/xata.ipynb index ac31e51522..eb20b76458 100644 --- a/docs/docs/integrations/vectorstores/xata.ipynb +++ b/docs/docs/integrations/vectorstores/xata.ipynb @@ -107,10 +107,10 @@ }, "outputs": [], "source": [ - "from langchain.document_loaders import TextLoader\n", "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain.vectorstores.xata import XataVectorStore\n", - "from langchain_community.embeddings.openai import OpenAIEmbeddings" + "from langchain_community.document_loaders import TextLoader\n", + "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", + "from langchain_community.vectorstores.xata import XataVectorStore" ] }, { diff --git a/docs/docs/integrations/vectorstores/yellowbrick.ipynb b/docs/docs/integrations/vectorstores/yellowbrick.ipynb index 59ee6b6b1c..7518af2d3e 100644 --- a/docs/docs/integrations/vectorstores/yellowbrick.ipynb +++ b/docs/docs/integrations/vectorstores/yellowbrick.ipynb @@ -100,9 +100,9 @@ "from langchain.chains import LLMChain, RetrievalQAWithSourcesChain\n", "from langchain.docstore.document import Document\n", "from langchain.text_splitter import RecursiveCharacterTextSplitter\n", - "from langchain.vectorstores import Yellowbrick\n", "from langchain_community.chat_models import ChatOpenAI\n", "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", + "from langchain_community.vectorstores import Yellowbrick\n", "\n", "# Establish connection parameters to Yellowbrick. 
If you've signed up for Sandbox, fill in the information from your welcome mail here:\n", "yellowbrick_connection_string = (\n", diff --git a/docs/docs/integrations/vectorstores/zep.ipynb b/docs/docs/integrations/vectorstores/zep.ipynb index 2f08e0ed2e..36d52c8abf 100644 --- a/docs/docs/integrations/vectorstores/zep.ipynb +++ b/docs/docs/integrations/vectorstores/zep.ipynb @@ -77,10 +77,10 @@ "source": [ "from uuid import uuid4\n", "\n", - "from langchain.document_loaders import WebBaseLoader\n", "from langchain.text_splitter import RecursiveCharacterTextSplitter\n", - "from langchain.vectorstores import ZepVectorStore\n", - "from langchain.vectorstores.zep import CollectionConfig\n", + "from langchain_community.document_loaders import WebBaseLoader\n", + "from langchain_community.vectorstores import ZepVectorStore\n", + "from langchain_community.vectorstores.zep import CollectionConfig\n", "\n", "ZEP_API_URL = \"http://localhost:8000\" # this is the API url of your Zep instance\n", "ZEP_API_KEY = \"\" # optional API Key for your Zep instance\n", diff --git a/docs/docs/integrations/vectorstores/zilliz.ipynb b/docs/docs/integrations/vectorstores/zilliz.ipynb index 6f8be8bd29..fa97283669 100644 --- a/docs/docs/integrations/vectorstores/zilliz.ipynb +++ b/docs/docs/integrations/vectorstores/zilliz.ipynb @@ -76,10 +76,10 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders import TextLoader\n", "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain.vectorstores import Milvus\n", - "from langchain_community.embeddings.openai import OpenAIEmbeddings" + "from langchain_community.document_loaders import TextLoader\n", + "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", + "from langchain_community.vectorstores import Milvus" ] }, { @@ -89,7 +89,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders import TextLoader\n", + "from langchain_community.document_loaders import TextLoader\n", "\n", "loader = TextLoader(\"../../modules/state_of_the_union.txt\")\n", "documents = loader.load()\n", diff --git a/docs/docs/modules/agents/how_to/agent_structured.ipynb b/docs/docs/modules/agents/how_to/agent_structured.ipynb index ac54d1888f..7e6a6acf22 100644 --- a/docs/docs/modules/agents/how_to/agent_structured.ipynb +++ b/docs/docs/modules/agents/how_to/agent_structured.ipynb @@ -53,10 +53,10 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders import TextLoader\n", "from langchain.text_splitter import RecursiveCharacterTextSplitter\n", - "from langchain.vectorstores import Chroma\n", - "from langchain_community.embeddings.openai import OpenAIEmbeddings" + "from langchain_community.document_loaders import TextLoader\n", + "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", + "from langchain_community.vectorstores import Chroma" ] }, { diff --git a/docs/docs/modules/agents/quick_start.ipynb b/docs/docs/modules/agents/quick_start.ipynb index a38b5acf57..ba8d5d4071 100644 --- a/docs/docs/modules/agents/quick_start.ipynb +++ b/docs/docs/modules/agents/quick_start.ipynb @@ -539,7 +539,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.memory.chat_message_histories import ChatMessageHistory\n", + "from langchain_community.chat_message_histories import ChatMessageHistory\n", "from langchain_core.runnables.history import RunnableWithMessageHistory" ] }, diff --git a/docs/docs/modules/chains/foundational/router.ipynb 
b/docs/docs/modules/chains/foundational/router.ipynb index 5de1993a9a..91dd9878c6 100644 --- a/docs/docs/modules/chains/foundational/router.ipynb +++ b/docs/docs/modules/chains/foundational/router.ipynb @@ -444,8 +444,8 @@ "outputs": [], "source": [ "from langchain.chains.router.embedding_router import EmbeddingRouterChain\n", - "from langchain.vectorstores import Chroma\n", - "from langchain_community.embeddings import CohereEmbeddings" + "from langchain_community.embeddings import CohereEmbeddings\n", + "from langchain_community.vectorstores import Chroma" ] }, { diff --git a/docs/docs/modules/data_connection/document_loaders/csv.mdx b/docs/docs/modules/data_connection/document_loaders/csv.mdx index 471e9504c9..46d336baf8 100644 --- a/docs/docs/modules/data_connection/document_loaders/csv.mdx +++ b/docs/docs/modules/data_connection/document_loaders/csv.mdx @@ -5,7 +5,7 @@ Load CSV data with a single row per document. ```python -from langchain.document_loaders.csv_loader import CSVLoader +from langchain_community.document_loaders.csv_loader import CSVLoader loader = CSVLoader(file_path='./example_data/mlb_teams_2012.csv') diff --git a/docs/docs/modules/data_connection/document_loaders/file_directory.mdx b/docs/docs/modules/data_connection/document_loaders/file_directory.mdx index 600a58a569..8b7fac44ed 100644 --- a/docs/docs/modules/data_connection/document_loaders/file_directory.mdx +++ b/docs/docs/modules/data_connection/document_loaders/file_directory.mdx @@ -5,7 +5,7 @@ This covers how to load all documents in a directory. Under the hood, by default this uses the [UnstructuredLoader](/docs/integrations/document_loaders/unstructured_file). ```python -from langchain.document_loaders import DirectoryLoader +from langchain_community.document_loaders import DirectoryLoader ``` We can use the `glob` parameter to control which files to load. Note that here it doesn't load the `.rst` file or the `.html` files. @@ -69,7 +69,7 @@ By default this uses the `UnstructuredLoader` class. However, you can change up ```python -from langchain.document_loaders import TextLoader +from langchain_community.document_loaders import TextLoader ``` @@ -99,7 +99,7 @@ If you need to load Python source code files, use the `PythonLoader`. ```python -from langchain.document_loaders import PythonLoader +from langchain_community.document_loaders import PythonLoader ``` diff --git a/docs/docs/modules/data_connection/document_loaders/html.mdx b/docs/docs/modules/data_connection/document_loaders/html.mdx index e6ac3c983a..041b51151a 100644 --- a/docs/docs/modules/data_connection/document_loaders/html.mdx +++ b/docs/docs/modules/data_connection/document_loaders/html.mdx @@ -5,7 +5,7 @@ This covers how to load `HTML` documents into a document format that we can use downstream. 
```python -from langchain.document_loaders import UnstructuredHTMLLoader +from langchain_community.document_loaders import UnstructuredHTMLLoader ``` @@ -37,7 +37,7 @@ We can also use `BeautifulSoup4` to load HTML documents using the `BSHTMLLoader` ```python -from langchain.document_loaders import BSHTMLLoader +from langchain_community.document_loaders import BSHTMLLoader ``` diff --git a/docs/docs/modules/data_connection/document_loaders/index.mdx b/docs/docs/modules/data_connection/document_loaders/index.mdx index f12629a9b8..0c9d7489c8 100644 --- a/docs/docs/modules/data_connection/document_loaders/index.mdx +++ b/docs/docs/modules/data_connection/document_loaders/index.mdx @@ -19,7 +19,7 @@ implement a "lazy load" as well for lazily loading data into memory. The simplest loader reads in a file as text and places it all into one document. ```python -from langchain.document_loaders import TextLoader +from langchain_community.document_loaders import TextLoader loader = TextLoader("./index.md") loader.load() diff --git a/docs/docs/modules/data_connection/document_loaders/json.mdx b/docs/docs/modules/data_connection/document_loaders/json.mdx index 44b7a458ce..93e85612a7 100644 --- a/docs/docs/modules/data_connection/document_loaders/json.mdx +++ b/docs/docs/modules/data_connection/document_loaders/json.mdx @@ -14,7 +14,7 @@ Check this [manual](https://stedolan.github.io/jq/manual/#Basicfilters) for a de ```python -from langchain.document_loaders import JSONLoader +from langchain_community.document_loaders import JSONLoader ``` diff --git a/docs/docs/modules/data_connection/document_loaders/markdown.mdx b/docs/docs/modules/data_connection/document_loaders/markdown.mdx index 6da25d8af7..b21492e79d 100644 --- a/docs/docs/modules/data_connection/document_loaders/markdown.mdx +++ b/docs/docs/modules/data_connection/document_loaders/markdown.mdx @@ -10,7 +10,7 @@ This covers how to load `Markdown` documents into a document format that we can ```python -from langchain.document_loaders import UnstructuredMarkdownLoader +from langchain_community.document_loaders import UnstructuredMarkdownLoader ``` diff --git a/docs/docs/modules/data_connection/document_loaders/pdf.mdx b/docs/docs/modules/data_connection/document_loaders/pdf.mdx index 33554cad7a..ab7672b031 100644 --- a/docs/docs/modules/data_connection/document_loaders/pdf.mdx +++ b/docs/docs/modules/data_connection/document_loaders/pdf.mdx @@ -19,7 +19,7 @@ pip install pypdf ```python -from langchain.document_loaders import PyPDFLoader +from langchain_community.document_loaders import PyPDFLoader loader = PyPDFLoader("example_data/layout-parser-paper.pdf") pages = loader.load_and_split() @@ -60,7 +60,7 @@ os.environ['OPENAI_API_KEY'] = getpass.getpass('OpenAI API Key:') ```python -from langchain.vectorstores import FAISS +from langchain_community.vectorstores import FAISS from langchain_community.embeddings.openai import OpenAIEmbeddings faiss_index = FAISS.from_documents(pages, OpenAIEmbeddings()) @@ -114,7 +114,7 @@ Inspired by Daniel Gross's [https://gist.github.com/danielgross/3ab4104e14faccc1 ```python -from langchain.document_loaders import MathpixPDFLoader +from langchain_community.document_loaders import MathpixPDFLoader ``` @@ -131,7 +131,7 @@ data = loader.load() ```python -from langchain.document_loaders import UnstructuredPDFLoader +from langchain_community.document_loaders import UnstructuredPDFLoader ``` @@ -180,7 +180,7 @@ Note: all other PDF loaders can also be used to fetch remote PDFs, but `OnlinePD ```python -from 
langchain.document_loaders import OnlinePDFLoader +from langchain_community.document_loaders import OnlinePDFLoader ``` @@ -210,7 +210,7 @@ print(data) ```python -from langchain.document_loaders import PyPDFium2Loader +from langchain_community.document_loaders import PyPDFium2Loader ``` @@ -227,7 +227,7 @@ data = loader.load() ```python -from langchain.document_loaders import PDFMinerLoader +from langchain_community.document_loaders import PDFMinerLoader ``` @@ -246,7 +246,7 @@ This can be helpful for chunking texts semantically into sections as the output ```python -from langchain.document_loaders import PDFMinerPDFasHTMLLoader +from langchain_community.document_loaders import PDFMinerPDFasHTMLLoader ``` @@ -345,7 +345,7 @@ This is the fastest of the PDF parsing options, and contains detailed metadata a ```python -from langchain.document_loaders import PyMuPDFLoader +from langchain_community.document_loaders import PyMuPDFLoader ``` @@ -379,7 +379,7 @@ Load PDFs from directory ```python -from langchain.document_loaders import PyPDFDirectoryLoader +from langchain_community.document_loaders import PyPDFDirectoryLoader ``` @@ -398,7 +398,7 @@ Like PyMuPDF, the output Documents contain detailed metadata about the PDF and i ```python -from langchain.document_loaders import PDFPlumberLoader +from langchain_community.document_loaders import PDFPlumberLoader ``` @@ -433,7 +433,7 @@ For the call to be successful an AWS account is required, similar to the [AWS CL Besides the AWS configuration, it is very similar to the other PDF loaders, while also supporting JPEG, PNG and TIFF and non-native PDF formats. ```python -from langchain.document_loaders import AmazonTextractPDFLoader +from langchain_community.document_loaders import AmazonTextractPDFLoader loader = AmazonTextractPDFLoader("example_data/alejandro_rosalez_sample-small.jpeg") documents = loader.load() ``` \ No newline at end of file diff --git a/docs/docs/modules/data_connection/indexing.ipynb b/docs/docs/modules/data_connection/indexing.ipynb index 23264d1a84..0e850f9f46 100644 --- a/docs/docs/modules/data_connection/indexing.ipynb +++ b/docs/docs/modules/data_connection/indexing.ipynb @@ -60,7 +60,7 @@ " * document addition by id (`add_documents` method with `ids` argument)\n", " * delete by id (`delete` method with `ids` argument)\n", "\n", - "Compatible Vectorstores: `AnalyticDB`, `AstraDB`, `AwaDB`, `Bagel`, `Cassandra`, `Chroma`, `DashVector`, `DatabricksVectorSearch`, `DeepLake`, `Dingo`, `ElasticVectorSearch`, `ElasticsearchStore`, `FAISS`, `MyScale`, `PGVector`, `Pinecone`, `Qdrant`, `Redis`, `ScaNN`, `SupabaseVectorStore`, `TimescaleVector`, `Vald`, `Vearch`, `VespaStore`, `Weaviate`, `ZepVectorStore`.\n", + "Compatible Vectorstores: `AnalyticDB`, `AstraDB`, `AwaDB`, `Bagel`, `Cassandra`, `Chroma`, `DashVector`, `DatabricksVectorSearch`, `DeepLake`, `Dingo`, `ElasticVectorSearch`, `ElasticsearchStore`, `FAISS`, `MyScale`, `PGVector`, `Pinecone`, `Qdrant`, `Redis`, `ScaNN`, `SupabaseVectorStore`, `SurrealDBStore`, `TimescaleVector`, `Vald`, `Vearch`, `VespaStore`, `Weaviate`, `ZepVectorStore`.\n", " \n", "## Caution\n", "\n", @@ -92,8 +92,8 @@ "source": [ "from langchain.indexes import SQLRecordManager, index\n", "from langchain.schema import Document\n", - "from langchain.vectorstores import ElasticsearchStore\n", - "from langchain_community.embeddings import OpenAIEmbeddings" + "from langchain_community.embeddings import OpenAIEmbeddings\n", + "from langchain_community.vectorstores import ElasticsearchStore" ] }, { @@ -784,7 
+784,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders.base import BaseLoader\n", + "from langchain_community.document_loaders.base import BaseLoader\n", "\n", "\n", "class MyCustomLoader(BaseLoader):\n", diff --git a/docs/docs/modules/data_connection/retrievers/MultiQueryRetriever.ipynb b/docs/docs/modules/data_connection/retrievers/MultiQueryRetriever.ipynb index 16aa498569..9eb0c369c9 100644 --- a/docs/docs/modules/data_connection/retrievers/MultiQueryRetriever.ipynb +++ b/docs/docs/modules/data_connection/retrievers/MultiQueryRetriever.ipynb @@ -20,10 +20,10 @@ "outputs": [], "source": [ "# Build a sample vectorDB\n", - "from langchain.document_loaders import WebBaseLoader\n", "from langchain.text_splitter import RecursiveCharacterTextSplitter\n", - "from langchain.vectorstores import Chroma\n", + "from langchain_community.document_loaders import WebBaseLoader\n", "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", + "from langchain_community.vectorstores import Chroma\n", "\n", "# Load blog post\n", "loader = WebBaseLoader(\"https://lilianweng.github.io/posts/2023-06-23-agent/\")\n", diff --git a/docs/docs/modules/data_connection/retrievers/contextual_compression.ipynb b/docs/docs/modules/data_connection/retrievers/contextual_compression.ipynb index fc1b68431c..ede56e2b65 100644 --- a/docs/docs/modules/data_connection/retrievers/contextual_compression.ipynb +++ b/docs/docs/modules/data_connection/retrievers/contextual_compression.ipynb @@ -118,10 +118,10 @@ } ], "source": [ - "from langchain.document_loaders import TextLoader\n", "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain.vectorstores import FAISS\n", + "from langchain_community.document_loaders import TextLoader\n", "from langchain_community.embeddings import OpenAIEmbeddings\n", + "from langchain_community.vectorstores import FAISS\n", "\n", "documents = TextLoader(\"../../state_of_the_union.txt\").load()\n", "text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)\n", @@ -345,9 +345,9 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_transformers import EmbeddingsRedundantFilter\n", "from langchain.retrievers.document_compressors import DocumentCompressorPipeline\n", "from langchain.text_splitter import CharacterTextSplitter\n", + "from langchain_community.document_transformers import EmbeddingsRedundantFilter\n", "\n", "splitter = CharacterTextSplitter(chunk_size=300, chunk_overlap=0, separator=\". 
\")\n", "redundant_filter = EmbeddingsRedundantFilter(embeddings=embeddings)\n", diff --git a/docs/docs/modules/data_connection/retrievers/ensemble.ipynb b/docs/docs/modules/data_connection/retrievers/ensemble.ipynb index 3edc714fcc..0e27e91d25 100644 --- a/docs/docs/modules/data_connection/retrievers/ensemble.ipynb +++ b/docs/docs/modules/data_connection/retrievers/ensemble.ipynb @@ -29,8 +29,8 @@ "outputs": [], "source": [ "from langchain.retrievers import BM25Retriever, EnsembleRetriever\n", - "from langchain.vectorstores import FAISS\n", - "from langchain_community.embeddings import OpenAIEmbeddings" + "from langchain_community.embeddings import OpenAIEmbeddings\n", + "from langchain_community.vectorstores import FAISS" ] }, { diff --git a/docs/docs/modules/data_connection/retrievers/long_context_reorder.ipynb b/docs/docs/modules/data_connection/retrievers/long_context_reorder.ipynb index 6b2d35f4b9..52d161ee1c 100644 --- a/docs/docs/modules/data_connection/retrievers/long_context_reorder.ipynb +++ b/docs/docs/modules/data_connection/retrievers/long_context_reorder.ipynb @@ -52,13 +52,13 @@ ], "source": [ "from langchain.chains import LLMChain, StuffDocumentsChain\n", - "from langchain.document_transformers import (\n", + "from langchain.prompts import PromptTemplate\n", + "from langchain_community.document_transformers import (\n", " LongContextReorder,\n", ")\n", - "from langchain.prompts import PromptTemplate\n", - "from langchain.vectorstores import Chroma\n", "from langchain_community.embeddings import HuggingFaceEmbeddings\n", "from langchain_community.llms import OpenAI\n", + "from langchain_community.vectorstores import Chroma\n", "\n", "# Get embeddings.\n", "embeddings = HuggingFaceEmbeddings(model_name=\"all-MiniLM-L6-v2\")\n", diff --git a/docs/docs/modules/data_connection/retrievers/multi_vector.ipynb b/docs/docs/modules/data_connection/retrievers/multi_vector.ipynb index 7bbe277807..8dad4fb50b 100644 --- a/docs/docs/modules/data_connection/retrievers/multi_vector.ipynb +++ b/docs/docs/modules/data_connection/retrievers/multi_vector.ipynb @@ -36,11 +36,11 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders import TextLoader\n", "from langchain.storage import InMemoryByteStore\n", "from langchain.text_splitter import RecursiveCharacterTextSplitter\n", - "from langchain.vectorstores import Chroma\n", - "from langchain_community.embeddings import OpenAIEmbeddings" + "from langchain_community.document_loaders import TextLoader\n", + "from langchain_community.embeddings import OpenAIEmbeddings\n", + "from langchain_community.vectorstores import Chroma" ] }, { diff --git a/docs/docs/modules/data_connection/retrievers/parent_document_retriever.ipynb b/docs/docs/modules/data_connection/retrievers/parent_document_retriever.ipynb index 5363ff5a59..753e9d41c2 100644 --- a/docs/docs/modules/data_connection/retrievers/parent_document_retriever.ipynb +++ b/docs/docs/modules/data_connection/retrievers/parent_document_retriever.ipynb @@ -42,11 +42,11 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders import TextLoader\n", "from langchain.storage import InMemoryStore\n", "from langchain.text_splitter import RecursiveCharacterTextSplitter\n", - "from langchain.vectorstores import Chroma\n", - "from langchain_community.embeddings import OpenAIEmbeddings" + "from langchain_community.document_loaders import TextLoader\n", + "from langchain_community.embeddings import OpenAIEmbeddings\n", + "from langchain_community.vectorstores import 
Chroma" ] }, { diff --git a/docs/docs/modules/data_connection/retrievers/self_query.ipynb b/docs/docs/modules/data_connection/retrievers/self_query.ipynb index 9cfd8e0320..2d6ecff948 100644 --- a/docs/docs/modules/data_connection/retrievers/self_query.ipynb +++ b/docs/docs/modules/data_connection/retrievers/self_query.ipynb @@ -41,8 +41,8 @@ "outputs": [], "source": [ "from langchain.schema import Document\n", - "from langchain.vectorstores import Chroma\n", "from langchain_community.embeddings import OpenAIEmbeddings\n", + "from langchain_community.vectorstores import Chroma\n", "\n", "docs = [\n", " Document(\n", diff --git a/docs/docs/modules/data_connection/retrievers/time_weighted_vectorstore.ipynb b/docs/docs/modules/data_connection/retrievers/time_weighted_vectorstore.ipynb index 33da6ac0ca..61b5997f7b 100644 --- a/docs/docs/modules/data_connection/retrievers/time_weighted_vectorstore.ipynb +++ b/docs/docs/modules/data_connection/retrievers/time_weighted_vectorstore.ipynb @@ -31,8 +31,8 @@ "from langchain.docstore import InMemoryDocstore\n", "from langchain.retrievers import TimeWeightedVectorStoreRetriever\n", "from langchain.schema import Document\n", - "from langchain.vectorstores import FAISS\n", - "from langchain_community.embeddings import OpenAIEmbeddings" + "from langchain_community.embeddings import OpenAIEmbeddings\n", + "from langchain_community.vectorstores import FAISS" ] }, { diff --git a/docs/docs/modules/data_connection/retrievers/vectorstore.ipynb b/docs/docs/modules/data_connection/retrievers/vectorstore.ipynb index d8f6b8da67..ea736209bc 100644 --- a/docs/docs/modules/data_connection/retrievers/vectorstore.ipynb +++ b/docs/docs/modules/data_connection/retrievers/vectorstore.ipynb @@ -30,7 +30,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders import TextLoader\n", + "from langchain_community.document_loaders import TextLoader\n", "\n", "loader = TextLoader(\"../../state_of_the_union.txt\")" ] @@ -43,8 +43,8 @@ "outputs": [], "source": [ "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain.vectorstores import FAISS\n", "from langchain_community.embeddings import OpenAIEmbeddings\n", + "from langchain_community.vectorstores import FAISS\n", "\n", "documents = loader.load()\n", "text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)\n", diff --git a/docs/docs/modules/data_connection/text_embedding/caching_embeddings.ipynb b/docs/docs/modules/data_connection/text_embedding/caching_embeddings.ipynb index e99b8d0dda..e812a9acbd 100644 --- a/docs/docs/modules/data_connection/text_embedding/caching_embeddings.ipynb +++ b/docs/docs/modules/data_connection/text_embedding/caching_embeddings.ipynb @@ -70,11 +70,11 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders import TextLoader\n", "from langchain.storage import LocalFileStore\n", "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain.vectorstores import FAISS\n", + "from langchain_community.document_loaders import TextLoader\n", "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", + "from langchain_community.vectorstores import FAISS\n", "\n", "underlying_embeddings = OpenAIEmbeddings()\n", "\n", diff --git a/docs/docs/modules/data_connection/vectorstores/index.mdx b/docs/docs/modules/data_connection/vectorstores/index.mdx index b756302785..d5eabdf69f 100644 --- a/docs/docs/modules/data_connection/vectorstores/index.mdx +++ 
b/docs/docs/modules/data_connection/vectorstores/index.mdx @@ -43,10 +43,10 @@ os.environ['OPENAI_API_KEY'] = getpass.getpass('OpenAI API Key:') ``` ```python -from langchain.document_loaders import TextLoader +from langchain_community.document_loaders import TextLoader from langchain_community.embeddings.openai import OpenAIEmbeddings from langchain.text_splitter import CharacterTextSplitter -from langchain.vectorstores import Chroma +from langchain_community.vectorstores import Chroma # Load the document, split it into chunks, embed each chunk and load it into the vector store. raw_documents = TextLoader('../../../state_of_the_union.txt').load() @@ -75,10 +75,10 @@ os.environ['OPENAI_API_KEY'] = getpass.getpass('OpenAI API Key:') ``` ```python -from langchain.document_loaders import TextLoader +from langchain_community.document_loaders import TextLoader from langchain_community.embeddings.openai import OpenAIEmbeddings from langchain.text_splitter import CharacterTextSplitter -from langchain.vectorstores import FAISS +from langchain_community.vectorstores import FAISS # Load the document, split it into chunks, embed each chunk and load it into the vector store. raw_documents = TextLoader('../../../state_of_the_union.txt').load() @@ -107,10 +107,10 @@ os.environ['OPENAI_API_KEY'] = getpass.getpass('OpenAI API Key:') ``` ```python -from langchain.document_loaders import TextLoader +from langchain_community.document_loaders import TextLoader from langchain_community.embeddings.openai import OpenAIEmbeddings from langchain.text_splitter import CharacterTextSplitter -from langchain.vectorstores import LanceDB +from langchain_community.vectorstores import LanceDB import lancedb @@ -200,7 +200,7 @@ pip install qdrant-client ``` ```python -from langchain.vectorstores import Qdrant +from langchain_community.vectorstores import Qdrant ``` ### Create a vector store asynchronously diff --git a/docs/docs/modules/memory/adding_memory_chain_multiple_inputs.ipynb b/docs/docs/modules/memory/adding_memory_chain_multiple_inputs.ipynb index e169361217..d873f0efcf 100644 --- a/docs/docs/modules/memory/adding_memory_chain_multiple_inputs.ipynb +++ b/docs/docs/modules/memory/adding_memory_chain_multiple_inputs.ipynb @@ -18,8 +18,8 @@ "outputs": [], "source": [ "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain.vectorstores import Chroma\n", - "from langchain_community.embeddings.openai import OpenAIEmbeddings" + "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", + "from langchain_community.vectorstores import Chroma" ] }, { diff --git a/docs/docs/modules/memory/agent_with_memory_in_db.ipynb b/docs/docs/modules/memory/agent_with_memory_in_db.ipynb index 529b2d7e03..3ce8f1104a 100644 --- a/docs/docs/modules/memory/agent_with_memory_in_db.ipynb +++ b/docs/docs/modules/memory/agent_with_memory_in_db.ipynb @@ -36,8 +36,8 @@ "from langchain.agents import AgentExecutor, Tool, ZeroShotAgent\n", "from langchain.chains import LLMChain\n", "from langchain.memory import ConversationBufferMemory\n", - "from langchain.memory.chat_message_histories import RedisChatMessageHistory\n", "from langchain.utilities import GoogleSearchAPIWrapper\n", + "from langchain_community.chat_message_histories import RedisChatMessageHistory\n", "from langchain_community.llms import OpenAI" ] }, diff --git a/docs/docs/modules/memory/types/vectorstore_retriever_memory.mdx b/docs/docs/modules/memory/types/vectorstore_retriever_memory.mdx index a197afb1af..0c509eb1fb 100644 --- 
a/docs/docs/modules/memory/types/vectorstore_retriever_memory.mdx +++ b/docs/docs/modules/memory/types/vectorstore_retriever_memory.mdx @@ -24,7 +24,7 @@ Depending on the store you choose, this step may look different. Consult the rel import faiss from langchain.docstore import InMemoryDocstore -from langchain.vectorstores import FAISS +from langchain_community.vectorstores import FAISS embedding_size = 1536 # Dimensions of the OpenAIEmbeddings diff --git a/docs/docs/modules/model_io/prompts/example_selector_types/mmr.ipynb b/docs/docs/modules/model_io/prompts/example_selector_types/mmr.ipynb index b2c6d188c7..7d0323f68f 100644 --- a/docs/docs/modules/model_io/prompts/example_selector_types/mmr.ipynb +++ b/docs/docs/modules/model_io/prompts/example_selector_types/mmr.ipynb @@ -22,8 +22,8 @@ " MaxMarginalRelevanceExampleSelector,\n", " SemanticSimilarityExampleSelector,\n", ")\n", - "from langchain.vectorstores import FAISS\n", "from langchain_community.embeddings import OpenAIEmbeddings\n", + "from langchain_community.vectorstores import FAISS\n", "\n", "example_prompt = PromptTemplate(\n", " input_variables=[\"input\", \"output\"],\n", diff --git a/docs/docs/modules/model_io/prompts/example_selector_types/similarity.ipynb b/docs/docs/modules/model_io/prompts/example_selector_types/similarity.ipynb index 20b31b1188..e2d5806508 100644 --- a/docs/docs/modules/model_io/prompts/example_selector_types/similarity.ipynb +++ b/docs/docs/modules/model_io/prompts/example_selector_types/similarity.ipynb @@ -19,8 +19,8 @@ "source": [ "from langchain.prompts import FewShotPromptTemplate, PromptTemplate\n", "from langchain.prompts.example_selector import SemanticSimilarityExampleSelector\n", - "from langchain.vectorstores import Chroma\n", "from langchain_community.embeddings import OpenAIEmbeddings\n", + "from langchain_community.vectorstores import Chroma\n", "\n", "example_prompt = PromptTemplate(\n", " input_variables=[\"input\", \"output\"],\n", diff --git a/docs/docs/modules/model_io/prompts/few_shot_examples.ipynb b/docs/docs/modules/model_io/prompts/few_shot_examples.ipynb index d542fa648a..b0642a0ce0 100644 --- a/docs/docs/modules/model_io/prompts/few_shot_examples.ipynb +++ b/docs/docs/modules/model_io/prompts/few_shot_examples.ipynb @@ -244,8 +244,8 @@ ], "source": [ "from langchain.prompts.example_selector import SemanticSimilarityExampleSelector\n", - "from langchain.vectorstores import Chroma\n", "from langchain_community.embeddings import OpenAIEmbeddings\n", + "from langchain_community.vectorstores import Chroma\n", "\n", "example_selector = SemanticSimilarityExampleSelector.from_examples(\n", " # This is the list of examples available to select from.\n", diff --git a/docs/docs/modules/model_io/prompts/few_shot_examples_chat.ipynb b/docs/docs/modules/model_io/prompts/few_shot_examples_chat.ipynb index 0fae2134e0..7ce7cdffd5 100644 --- a/docs/docs/modules/model_io/prompts/few_shot_examples_chat.ipynb +++ b/docs/docs/modules/model_io/prompts/few_shot_examples_chat.ipynb @@ -192,8 +192,8 @@ "outputs": [], "source": [ "from langchain.prompts import SemanticSimilarityExampleSelector\n", - "from langchain.vectorstores import Chroma\n", - "from langchain_community.embeddings import OpenAIEmbeddings" + "from langchain_community.embeddings import OpenAIEmbeddings\n", + "from langchain_community.vectorstores import Chroma" ] }, { diff --git a/docs/docs/use_cases/chatbots.ipynb b/docs/docs/use_cases/chatbots.ipynb index 590e5b5352..f26456a28f 100644 --- a/docs/docs/use_cases/chatbots.ipynb +++ 
b/docs/docs/use_cases/chatbots.ipynb @@ -589,7 +589,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders import WebBaseLoader\n", + "from langchain_community.document_loaders import WebBaseLoader\n", "\n", "loader = WebBaseLoader(\"https://lilianweng.github.io/posts/2023-06-23-agent/\")\n", "data = loader.load()" @@ -615,8 +615,8 @@ "text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0)\n", "all_splits = text_splitter.split_documents(data)\n", "\n", - "from langchain.vectorstores import Chroma\n", "from langchain_community.embeddings import OpenAIEmbeddings\n", + "from langchain_community.vectorstores import Chroma\n", "\n", "vectorstore = Chroma.from_documents(documents=all_splits, embedding=OpenAIEmbeddings())" ] diff --git a/docs/docs/use_cases/code_understanding.ipynb b/docs/docs/use_cases/code_understanding.ipynb index 15cd6bebf5..937ebe03e0 100644 --- a/docs/docs/use_cases/code_understanding.ipynb +++ b/docs/docs/use_cases/code_understanding.ipynb @@ -66,7 +66,7 @@ "### Loading\n", "\n", "\n", - "We will upload all python project files using the `langchain.document_loaders.TextLoader`.\n", + "We will upload all python project files using the `langchain_community.document_loaders.TextLoader`.\n", "\n", "The following script iterates over the files in the LangChain repository and loads every `.py` file (a.k.a. **documents**):" ] @@ -78,9 +78,9 @@ "outputs": [], "source": [ "# from git import Repo\n", - "from langchain.document_loaders.generic import GenericLoader\n", - "from langchain.document_loaders.parsers import LanguageParser\n", - "from langchain.text_splitter import Language" + "from langchain.text_splitter import Language\n", + "from langchain_community.document_loaders.generic import GenericLoader\n", + "from langchain_community.document_loaders.parsers import LanguageParser" ] }, { @@ -200,8 +200,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.vectorstores import Chroma\n", "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", + "from langchain_community.vectorstores import Chroma\n", "\n", "db = Chroma.from_documents(texts, OpenAIEmbeddings(disallowed_special=()))\n", "retriever = db.as_retriever(\n", diff --git a/docs/docs/use_cases/graph/diffbot_graphtransformer.ipynb b/docs/docs/use_cases/graph/diffbot_graphtransformer.ipynb index f2d507bd62..eae3b9f7d1 100644 --- a/docs/docs/use_cases/graph/diffbot_graphtransformer.ipynb +++ b/docs/docs/use_cases/graph/diffbot_graphtransformer.ipynb @@ -89,7 +89,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders import WikipediaLoader\n", + "from langchain_community.document_loaders import WikipediaLoader\n", "\n", "query = \"Warren Buffett\"\n", "raw_documents = WikipediaLoader(query=query).load()\n", @@ -123,7 +123,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.graphs import Neo4jGraph\n", + "from langchain_community.graphs import Neo4jGraph\n", "\n", "url = \"bolt://localhost:7687\"\n", "username = \"neo4j\"\n", diff --git a/docs/docs/use_cases/graph/graph_arangodb_qa.ipynb b/docs/docs/use_cases/graph/graph_arangodb_qa.ipynb index a74c6e0173..3a10578b22 100644 --- a/docs/docs/use_cases/graph/graph_arangodb_qa.ipynb +++ b/docs/docs/use_cases/graph/graph_arangodb_qa.ipynb @@ -101,7 +101,7 @@ "outputs": [], "source": [ "# Instantiate the ArangoDB-LangChain Graph\n", - "from langchain.graphs import ArangoGraph\n", + "from langchain_community.graphs import ArangoGraph\n", "\n", "graph = 
ArangoGraph(db)" ] diff --git a/docs/docs/use_cases/graph/graph_cypher_qa.ipynb b/docs/docs/use_cases/graph/graph_cypher_qa.ipynb index c126a82711..8c86bbfbf1 100644 --- a/docs/docs/use_cases/graph/graph_cypher_qa.ipynb +++ b/docs/docs/use_cases/graph/graph_cypher_qa.ipynb @@ -39,8 +39,8 @@ "outputs": [], "source": [ "from langchain.chains import GraphCypherQAChain\n", - "from langchain.graphs import Neo4jGraph\n", - "from langchain_community.chat_models import ChatOpenAI" + "from langchain_community.chat_models import ChatOpenAI\n", + "from langchain_community.graphs import Neo4jGraph" ] }, { diff --git a/docs/docs/use_cases/graph/graph_falkordb_qa.ipynb b/docs/docs/use_cases/graph/graph_falkordb_qa.ipynb index 211e2a927a..57361ac98a 100644 --- a/docs/docs/use_cases/graph/graph_falkordb_qa.ipynb +++ b/docs/docs/use_cases/graph/graph_falkordb_qa.ipynb @@ -29,8 +29,8 @@ "outputs": [], "source": [ "from langchain.chains import FalkorDBQAChain\n", - "from langchain.graphs import FalkorDBGraph\n", - "from langchain_community.chat_models import ChatOpenAI" + "from langchain_community.chat_models import ChatOpenAI\n", + "from langchain_community.graphs import FalkorDBGraph" ] }, { diff --git a/docs/docs/use_cases/graph/graph_hugegraph_qa.ipynb b/docs/docs/use_cases/graph/graph_hugegraph_qa.ipynb index 926bcc4a3a..bcc5f12424 100644 --- a/docs/docs/use_cases/graph/graph_hugegraph_qa.ipynb +++ b/docs/docs/use_cases/graph/graph_hugegraph_qa.ipynb @@ -156,8 +156,8 @@ "outputs": [], "source": [ "from langchain.chains import HugeGraphQAChain\n", - "from langchain.graphs import HugeGraph\n", - "from langchain_community.chat_models import ChatOpenAI" + "from langchain_community.chat_models import ChatOpenAI\n", + "from langchain_community.graphs import HugeGraph" ] }, { diff --git a/docs/docs/use_cases/graph/graph_kuzu_qa.ipynb b/docs/docs/use_cases/graph/graph_kuzu_qa.ipynb index 19f49db5b1..6c50c8354d 100644 --- a/docs/docs/use_cases/graph/graph_kuzu_qa.ipynb +++ b/docs/docs/use_cases/graph/graph_kuzu_qa.ipynb @@ -131,8 +131,8 @@ "outputs": [], "source": [ "from langchain.chains import KuzuQAChain\n", - "from langchain.graphs import KuzuGraph\n", - "from langchain_community.chat_models import ChatOpenAI" + "from langchain_community.chat_models import ChatOpenAI\n", + "from langchain_community.graphs import KuzuGraph" ] }, { diff --git a/docs/docs/use_cases/graph/graph_memgraph_qa.ipynb b/docs/docs/use_cases/graph/graph_memgraph_qa.ipynb index cc82b20646..7a5612d4b3 100644 --- a/docs/docs/use_cases/graph/graph_memgraph_qa.ipynb +++ b/docs/docs/use_cases/graph/graph_memgraph_qa.ipynb @@ -68,9 +68,9 @@ "\n", "from gqlalchemy import Memgraph\n", "from langchain.chains import GraphCypherQAChain\n", - "from langchain.graphs import MemgraphGraph\n", "from langchain.prompts import PromptTemplate\n", - "from langchain_community.chat_models import ChatOpenAI" + "from langchain_community.chat_models import ChatOpenAI\n", + "from langchain_community.graphs import MemgraphGraph" ] }, { diff --git a/docs/docs/use_cases/graph/graph_nebula_qa.ipynb b/docs/docs/use_cases/graph/graph_nebula_qa.ipynb index 1e9b872d0c..d1b1d5b91a 100644 --- a/docs/docs/use_cases/graph/graph_nebula_qa.ipynb +++ b/docs/docs/use_cases/graph/graph_nebula_qa.ipynb @@ -122,8 +122,8 @@ "outputs": [], "source": [ "from langchain.chains import NebulaGraphQAChain\n", - "from langchain.graphs import NebulaGraph\n", - "from langchain_community.chat_models import ChatOpenAI" + "from langchain_community.chat_models import ChatOpenAI\n", + "from 
langchain_community.graphs import NebulaGraph" ] }, { diff --git a/docs/docs/use_cases/graph/graph_sparql_qa.ipynb b/docs/docs/use_cases/graph/graph_sparql_qa.ipynb index 34e315cd60..714181967c 100644 --- a/docs/docs/use_cases/graph/graph_sparql_qa.ipynb +++ b/docs/docs/use_cases/graph/graph_sparql_qa.ipynb @@ -31,8 +31,8 @@ "outputs": [], "source": [ "from langchain.chains import GraphSparqlQAChain\n", - "from langchain.graphs import RdfGraph\n", - "from langchain_community.chat_models import ChatOpenAI" + "from langchain_community.chat_models import ChatOpenAI\n", + "from langchain_community.graphs import RdfGraph" ] }, { diff --git a/docs/docs/use_cases/graph/neptune_cypher_qa.ipynb b/docs/docs/use_cases/graph/neptune_cypher_qa.ipynb index 492cea4d21..7e026b371a 100644 --- a/docs/docs/use_cases/graph/neptune_cypher_qa.ipynb +++ b/docs/docs/use_cases/graph/neptune_cypher_qa.ipynb @@ -14,7 +14,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.graphs import NeptuneGraph\n", + "from langchain_community.graphs import NeptuneGraph\n", "\n", "host = \"\"\n", "port = 8182\n", diff --git a/docs/docs/use_cases/qa_structured/sql.ipynb b/docs/docs/use_cases/qa_structured/sql.ipynb index 24c554ed66..b11f929333 100644 --- a/docs/docs/use_cases/qa_structured/sql.ipynb +++ b/docs/docs/use_cases/qa_structured/sql.ipynb @@ -783,8 +783,8 @@ "outputs": [], "source": [ "from langchain.schema import Document\n", - "from langchain.vectorstores import FAISS\n", "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", + "from langchain_community.vectorstores import FAISS\n", "\n", "embeddings = OpenAIEmbeddings()\n", "\n", @@ -970,8 +970,8 @@ "outputs": [], "source": [ "from langchain.agents.agent_toolkits import create_retriever_tool\n", - "from langchain.vectorstores import FAISS\n", "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", + "from langchain_community.vectorstores import FAISS\n", "\n", "texts = artists + albums\n", "\n", diff --git a/docs/docs/use_cases/question_answering/per_user.ipynb b/docs/docs/use_cases/question_answering/per_user.ipynb index 6c63e9b529..cb2b9c32a1 100644 --- a/docs/docs/use_cases/question_answering/per_user.ipynb +++ b/docs/docs/use_cases/question_answering/per_user.ipynb @@ -55,8 +55,8 @@ ], "source": [ "import pinecone\n", - "from langchain.vectorstores import Pinecone\n", - "from langchain_community.embeddings.openai import OpenAIEmbeddings" + "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", + "from langchain_community.vectorstores import Pinecone" ] }, { diff --git a/docs/docs/use_cases/summarization.ipynb b/docs/docs/use_cases/summarization.ipynb index d11d9ffef2..f0e1799317 100644 --- a/docs/docs/use_cases/summarization.ipynb +++ b/docs/docs/use_cases/summarization.ipynb @@ -206,8 +206,8 @@ ], "source": [ "from langchain.chains.summarize import load_summarize_chain\n", - "from langchain.document_loaders import WebBaseLoader\n", "from langchain_community.chat_models import ChatOpenAI\n", + "from langchain_community.document_loaders import WebBaseLoader\n", "\n", "loader = WebBaseLoader(\"https://lilianweng.github.io/posts/2023-06-23-agent/\")\n", "docs = loader.load()\n", diff --git a/docs/docs/use_cases/web_scraping.ipynb b/docs/docs/use_cases/web_scraping.ipynb index d23ba0dea8..e2116a62ac 100644 --- a/docs/docs/use_cases/web_scraping.ipynb +++ b/docs/docs/use_cases/web_scraping.ipynb @@ -71,8 +71,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders import 
AsyncChromiumLoader\n", - "from langchain.document_transformers import BeautifulSoupTransformer\n", + "from langchain_community.document_loaders import AsyncChromiumLoader\n", + "from langchain_community.document_transformers import BeautifulSoupTransformer\n", "\n", "# Load HTML\n", "loader = AsyncChromiumLoader([\"https://www.wsj.com\"])\n", @@ -162,7 +162,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders import AsyncHtmlLoader\n", + "from langchain_community.document_loaders import AsyncHtmlLoader\n", "\n", "urls = [\"https://www.espn.com\", \"https://lilianweng.github.io/posts/2023-06-23-agent/\"]\n", "loader = AsyncHtmlLoader(urls)\n", @@ -204,7 +204,7 @@ } ], "source": [ - "from langchain.document_loaders import AsyncHtmlLoader\n", + "from langchain_community.document_loaders import AsyncHtmlLoader\n", "\n", "urls = [\"https://www.espn.com\", \"https://lilianweng.github.io/posts/2023-06-23-agent/\"]\n", "loader = AsyncHtmlLoader(urls)\n", @@ -229,7 +229,7 @@ } ], "source": [ - "from langchain.document_transformers import Html2TextTransformer\n", + "from langchain_community.document_transformers import Html2TextTransformer\n", "\n", "html2text = Html2TextTransformer()\n", "docs_transformed = html2text.transform_documents(docs)\n", @@ -481,9 +481,9 @@ "source": [ "from langchain.retrievers.web_research import WebResearchRetriever\n", "from langchain.utilities import GoogleSearchAPIWrapper\n", - "from langchain.vectorstores import Chroma\n", "from langchain_community.chat_models.openai import ChatOpenAI\n", - "from langchain_community.embeddings import OpenAIEmbeddings" + "from langchain_community.embeddings import OpenAIEmbeddings\n", + "from langchain_community.vectorstores import Chroma" ] }, { diff --git a/libs/community/langchain_community/document_loaders/parsers/pdf.py b/libs/community/langchain_community/document_loaders/parsers/pdf.py index 5ef03f3f69..44349f5760 100644 --- a/libs/community/langchain_community/document_loaders/parsers/pdf.py +++ b/libs/community/langchain_community/document_loaders/parsers/pdf.py @@ -546,10 +546,10 @@ class DocumentIntelligenceParser(BaseBlobParser): def __init__(self, client: Any, model: str): warnings.warn( - "langchain.document_loaders.parsers.pdf.DocumentIntelligenceParser" - "and langchain.document_loaders.pdf.DocumentIntelligenceLoader" + "langchain_community.document_loaders.parsers.pdf.DocumentIntelligenceParser" + " and langchain_community.document_loaders.pdf.DocumentIntelligenceLoader" " are deprecated. Please upgrade to " - "langchain.document_loaders.DocumentIntelligenceLoader " + "langchain_community.document_loaders.DocumentIntelligenceLoader " "for any file parsing purpose using Azure Document Intelligence " "service." ) diff --git a/libs/community/langchain_community/vectorstores/chroma.py b/libs/community/langchain_community/vectorstores/chroma.py index e40366c214..025212527c 100644 --- a/libs/community/langchain_community/vectorstores/chroma.py +++ b/libs/community/langchain_community/vectorstores/chroma.py @@ -225,7 +225,7 @@ class Chroma(VectorStore): if "Expected metadata value to be" in str(e): msg = ( "Try filtering complex metadata using " - "langchain.vectorstores.utils.filter_complex_metadata." + "langchain_community.vectorstores.utils.filter_complex_metadata." 
) raise ValueError(e.args[0] + "\n\n" + msg) else: @@ -304,7 +304,7 @@ class Chroma(VectorStore): if "Expected metadata value to be" in str(e): msg = ( "Try filtering complex metadata from the document using " - "langchain.vectorstores.utils.filter_complex_metadata." + "langchain_community.vectorstores.utils.filter_complex_metadata." ) raise ValueError(e.args[0] + "\n\n" + msg) else: diff --git a/libs/community/langchain_community/vectorstores/redis/base.py b/libs/community/langchain_community/vectorstores/redis/base.py index 6b7732bb4e..bcad15f369 100644 --- a/libs/community/langchain_community/vectorstores/redis/base.py +++ b/libs/community/langchain_community/vectorstores/redis/base.py @@ -196,7 +196,7 @@ class Redis(VectorStore): - All strings are indexed as text fields - All numbers are indexed as numeric fields - All lists of strings are indexed as tag fields (joined by - langchain.vectorstores.redis.constants.REDIS_TAG_SEPARATOR) + langchain_community.vectorstores.redis.constants.REDIS_TAG_SEPARATOR) - All None values are not indexed but still stored in Redis these are not retrievable through the interface here, but the raw Redis client can be used to retrieve them. diff --git a/libs/community/langchain_community/vectorstores/surrealdb.py b/libs/community/langchain_community/vectorstores/surrealdb.py index febc7ea55c..951ec04463 100644 --- a/libs/community/langchain_community/vectorstores/surrealdb.py +++ b/libs/community/langchain_community/vectorstores/surrealdb.py @@ -31,7 +31,7 @@ class SurrealDBStore(VectorStore): Example: .. code-block:: python - from langchain.vectorstores.surrealdb import SurrealDBStore + from langchain_community.vectorstores.surrealdb import SurrealDBStore from langchain_community.embeddings import HuggingFaceEmbeddings embedding_function = HuggingFaceEmbeddings() diff --git a/libs/community/langchain_community/vectorstores/vectara.py b/libs/community/langchain_community/vectorstores/vectara.py index 2e352302e7..4a9334b3fb 100644 --- a/libs/community/langchain_community/vectorstores/vectara.py +++ b/libs/community/langchain_community/vectorstores/vectara.py @@ -83,7 +83,7 @@ class Vectara(VectorStore): Example: .. code-block:: python - from langchain.vectorstores import Vectara + from langchain_community.vectorstores import Vectara vectorstore = Vectara( vectara_customer_id=vectara_customer_id, @@ -512,7 +512,7 @@ class Vectara(VectorStore): Example: .. code-block:: python - from langchain.vectorstores import Vectara + from langchain_community.vectorstores import Vectara vectara = Vectara.from_texts( texts, vectara_customer_id=customer_id, @@ -544,7 +544,7 @@ class Vectara(VectorStore): Example: .. 
code-block:: python - from langchain.vectorstores import Vectara + from langchain_community.vectorstores import Vectara vectara = Vectara.from_files( files_list, vectara_customer_id=customer_id, diff --git a/libs/core/langchain_core/runnables/history.py b/libs/core/langchain_core/runnables/history.py index ed65c8d1c0..98c7f23b73 100644 --- a/libs/core/langchain_core/runnables/history.py +++ b/libs/core/langchain_core/runnables/history.py @@ -47,7 +47,7 @@ class RunnableWithMessageHistory(RunnableBindingBase): from typing import Optional from langchain_community.chat_models import ChatAnthropic - from langchain.memory.chat_message_histories import RedisChatMessageHistory + from langchain_community.chat_message_histories import RedisChatMessageHistory from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder from langchain_core.runnables.history import RunnableWithMessageHistory diff --git a/libs/experimental/langchain_experimental/cpal/models.py b/libs/experimental/langchain_experimental/cpal/models.py index 62eab9cb64..ca9b222876 100644 --- a/libs/experimental/langchain_experimental/cpal/models.py +++ b/libs/experimental/langchain_experimental/cpal/models.py @@ -3,7 +3,7 @@ from __future__ import annotations # allows pydantic model to reference itself import re from typing import Any, List, Optional, Union -from langchain.graphs.networkx_graph import NetworkxEntityGraph +from langchain_community.graphs.networkx_graph import NetworkxEntityGraph from langchain_experimental.cpal.constants import Constant from langchain_experimental.pydantic_v1 import ( diff --git a/libs/experimental/langchain_experimental/graph_transformers/diffbot.py b/libs/experimental/langchain_experimental/graph_transformers/diffbot.py index 000c70de4b..302a4d1fb1 100644 --- a/libs/experimental/langchain_experimental/graph_transformers/diffbot.py +++ b/libs/experimental/langchain_experimental/graph_transformers/diffbot.py @@ -1,9 +1,9 @@ from typing import Any, Dict, List, Optional, Sequence, Tuple, Union import requests -from langchain.graphs.graph_document import GraphDocument, Node, Relationship from langchain.schema import Document from langchain.utils import get_from_env +from langchain_community.graphs.graph_document import GraphDocument, Node, Relationship def format_property_key(s: str) -> str: diff --git a/libs/langchain/langchain/__init__.py b/libs/langchain/langchain/__init__.py index d7eac58892..6843ba3cb5 100644 --- a/libs/langchain/langchain/__init__.py +++ b/libs/langchain/langchain/__init__.py @@ -313,15 +313,17 @@ def __getattr__(name: str) -> Any: return SQLDatabase elif name == "FAISS": - from langchain.vectorstores import FAISS + from langchain_community.vectorstores import FAISS - _warn_on_import(name, replacement="langchain.vectorstores.FAISS") + _warn_on_import(name, replacement="langchain_community.vectorstores.FAISS") return FAISS elif name == "ElasticVectorSearch": - from langchain.vectorstores import ElasticVectorSearch + from langchain_community.vectorstores import ElasticVectorSearch - _warn_on_import(name, replacement="langchain.vectorstores.ElasticVectorSearch") + _warn_on_import( + name, replacement="langchain_community.vectorstores.ElasticVectorSearch" + ) return ElasticVectorSearch # For backwards compatibility diff --git a/libs/langchain/langchain/chains/graph_qa/arangodb.py b/libs/langchain/langchain/chains/graph_qa/arangodb.py index ae992d550f..ac797f7195 100644 --- a/libs/langchain/langchain/chains/graph_qa/arangodb.py +++ 
b/libs/langchain/langchain/chains/graph_qa/arangodb.py @@ -4,6 +4,7 @@ from __future__ import annotations import re from typing import Any, Dict, List, Optional +from langchain_community.graphs.arangodb_graph import ArangoGraph from langchain_core.prompts import BasePromptTemplate from langchain_core.pydantic_v1 import Field @@ -16,7 +17,6 @@ from langchain.chains.graph_qa.prompts import ( AQL_QA_PROMPT, ) from langchain.chains.llm import LLMChain -from langchain.graphs.arangodb_graph import ArangoGraph class ArangoGraphQAChain(Chain): diff --git a/libs/langchain/langchain/chains/graph_qa/base.py b/libs/langchain/langchain/chains/graph_qa/base.py index 76de72128c..2465bfae6a 100644 --- a/libs/langchain/langchain/chains/graph_qa/base.py +++ b/libs/langchain/langchain/chains/graph_qa/base.py @@ -3,6 +3,7 @@ from __future__ import annotations from typing import Any, Dict, List, Optional +from langchain_community.graphs.networkx_graph import NetworkxEntityGraph, get_entities from langchain_core.language_models import BaseLanguageModel from langchain_core.prompts import BasePromptTemplate from langchain_core.pydantic_v1 import Field @@ -11,7 +12,6 @@ from langchain.callbacks.manager import CallbackManagerForChainRun from langchain.chains.base import Chain from langchain.chains.graph_qa.prompts import ENTITY_EXTRACTION_PROMPT, GRAPH_QA_PROMPT from langchain.chains.llm import LLMChain -from langchain.graphs.networkx_graph import NetworkxEntityGraph, get_entities class GraphQAChain(Chain): diff --git a/libs/langchain/langchain/chains/graph_qa/cypher.py b/libs/langchain/langchain/chains/graph_qa/cypher.py index 1e91d22b07..c15837cce6 100644 --- a/libs/langchain/langchain/chains/graph_qa/cypher.py +++ b/libs/langchain/langchain/chains/graph_qa/cypher.py @@ -4,6 +4,7 @@ from __future__ import annotations import re from typing import Any, Dict, List, Optional +from langchain_community.graphs.graph_store import GraphStore from langchain_core.language_models import BaseLanguageModel from langchain_core.prompts import BasePromptTemplate from langchain_core.pydantic_v1 import Field @@ -13,7 +14,6 @@ from langchain.chains.base import Chain from langchain.chains.graph_qa.cypher_utils import CypherQueryCorrector, Schema from langchain.chains.graph_qa.prompts import CYPHER_GENERATION_PROMPT, CYPHER_QA_PROMPT from langchain.chains.llm import LLMChain -from langchain.graphs.graph_store import GraphStore INTERMEDIATE_STEPS_KEY = "intermediate_steps" diff --git a/libs/langchain/langchain/chains/graph_qa/falkordb.py b/libs/langchain/langchain/chains/graph_qa/falkordb.py index 125160fda0..6c9f7110df 100644 --- a/libs/langchain/langchain/chains/graph_qa/falkordb.py +++ b/libs/langchain/langchain/chains/graph_qa/falkordb.py @@ -4,6 +4,7 @@ from __future__ import annotations import re from typing import Any, Dict, List, Optional +from langchain_community.graphs import FalkorDBGraph from langchain_core.prompts import BasePromptTemplate from langchain_core.pydantic_v1 import Field @@ -12,7 +13,6 @@ from langchain.callbacks.manager import CallbackManagerForChainRun from langchain.chains.base import Chain from langchain.chains.graph_qa.prompts import CYPHER_GENERATION_PROMPT, CYPHER_QA_PROMPT from langchain.chains.llm import LLMChain -from langchain.graphs import FalkorDBGraph INTERMEDIATE_STEPS_KEY = "intermediate_steps" diff --git a/libs/langchain/langchain/chains/graph_qa/hugegraph.py b/libs/langchain/langchain/chains/graph_qa/hugegraph.py index 3e10adb7dc..0ca54111cb 100644 --- 
a/libs/langchain/langchain/chains/graph_qa/hugegraph.py +++ b/libs/langchain/langchain/chains/graph_qa/hugegraph.py @@ -3,6 +3,7 @@ from __future__ import annotations from typing import Any, Dict, List, Optional +from langchain_community.graphs.hugegraph import HugeGraph from langchain_core.language_models import BaseLanguageModel from langchain_core.prompts import BasePromptTemplate from langchain_core.pydantic_v1 import Field @@ -14,7 +15,6 @@ from langchain.chains.graph_qa.prompts import ( GREMLIN_GENERATION_PROMPT, ) from langchain.chains.llm import LLMChain -from langchain.graphs.hugegraph import HugeGraph class HugeGraphQAChain(Chain): diff --git a/libs/langchain/langchain/chains/graph_qa/kuzu.py b/libs/langchain/langchain/chains/graph_qa/kuzu.py index f45fb786e1..3735e82283 100644 --- a/libs/langchain/langchain/chains/graph_qa/kuzu.py +++ b/libs/langchain/langchain/chains/graph_qa/kuzu.py @@ -3,6 +3,7 @@ from __future__ import annotations from typing import Any, Dict, List, Optional +from langchain_community.graphs.kuzu_graph import KuzuGraph from langchain_core.language_models import BaseLanguageModel from langchain_core.prompts import BasePromptTemplate from langchain_core.pydantic_v1 import Field @@ -11,7 +12,6 @@ from langchain.callbacks.manager import CallbackManagerForChainRun from langchain.chains.base import Chain from langchain.chains.graph_qa.prompts import CYPHER_QA_PROMPT, KUZU_GENERATION_PROMPT from langchain.chains.llm import LLMChain -from langchain.graphs.kuzu_graph import KuzuGraph class KuzuQAChain(Chain): diff --git a/libs/langchain/langchain/chains/graph_qa/nebulagraph.py b/libs/langchain/langchain/chains/graph_qa/nebulagraph.py index d9c0c09996..d722c3a851 100644 --- a/libs/langchain/langchain/chains/graph_qa/nebulagraph.py +++ b/libs/langchain/langchain/chains/graph_qa/nebulagraph.py @@ -3,6 +3,7 @@ from __future__ import annotations from typing import Any, Dict, List, Optional +from langchain_community.graphs.nebula_graph import NebulaGraph from langchain_core.language_models import BaseLanguageModel from langchain_core.prompts import BasePromptTemplate from langchain_core.pydantic_v1 import Field @@ -11,7 +12,6 @@ from langchain.callbacks.manager import CallbackManagerForChainRun from langchain.chains.base import Chain from langchain.chains.graph_qa.prompts import CYPHER_QA_PROMPT, NGQL_GENERATION_PROMPT from langchain.chains.llm import LLMChain -from langchain.graphs.nebula_graph import NebulaGraph class NebulaGraphQAChain(Chain): diff --git a/libs/langchain/langchain/chains/graph_qa/neptune_cypher.py b/libs/langchain/langchain/chains/graph_qa/neptune_cypher.py index 0627ced580..74985029d4 100644 --- a/libs/langchain/langchain/chains/graph_qa/neptune_cypher.py +++ b/libs/langchain/langchain/chains/graph_qa/neptune_cypher.py @@ -3,6 +3,7 @@ from __future__ import annotations import re from typing import Any, Dict, List, Optional +from langchain_community.graphs import NeptuneGraph from langchain_core.prompts.base import BasePromptTemplate from langchain_core.pydantic_v1 import Field @@ -16,7 +17,6 @@ from langchain.chains.graph_qa.prompts import ( ) from langchain.chains.llm import LLMChain from langchain.chains.prompt_selector import ConditionalPromptSelector -from langchain.graphs import NeptuneGraph INTERMEDIATE_STEPS_KEY = "intermediate_steps" diff --git a/libs/langchain/langchain/chains/graph_qa/sparql.py b/libs/langchain/langchain/chains/graph_qa/sparql.py index 9465aebc22..1d8150b4ba 100644 --- a/libs/langchain/langchain/chains/graph_qa/sparql.py +++ 
b/libs/langchain/langchain/chains/graph_qa/sparql.py @@ -5,6 +5,7 @@ from __future__ import annotations from typing import Any, Dict, List, Optional +from langchain_community.graphs.rdf_graph import RdfGraph from langchain_core.language_models import BaseLanguageModel from langchain_core.prompts.base import BasePromptTemplate from langchain_core.pydantic_v1 import Field @@ -18,7 +19,6 @@ from langchain.chains.graph_qa.prompts import ( SPARQL_QA_PROMPT, ) from langchain.chains.llm import LLMChain -from langchain.graphs.rdf_graph import RdfGraph class GraphSparqlQAChain(Chain): diff --git a/libs/langchain/langchain/chains/retrieval_qa/base.py b/libs/langchain/langchain/chains/retrieval_qa/base.py index b73d0bc648..94fd932078 100644 --- a/libs/langchain/langchain/chains/retrieval_qa/base.py +++ b/libs/langchain/langchain/chains/retrieval_qa/base.py @@ -202,7 +202,7 @@ class RetrievalQA(BaseRetrievalQA): from langchain_community.llms import OpenAI from langchain.chains import RetrievalQA - from langchain.vectorstores import FAISS + from langchain_community.vectorstores import FAISS from langchain_core.vectorstores import VectorStoreRetriever retriever = VectorStoreRetriever(vectorstore=FAISS(...)) retrievalQA = RetrievalQA.from_llm(llm=OpenAI(), retriever=retriever) diff --git a/libs/langchain/langchain/document_loaders/__init__.py b/libs/langchain/langchain/document_loaders/__init__.py index ba3867ffcb..48143a30b6 100644 --- a/libs/langchain/langchain/document_loaders/__init__.py +++ b/libs/langchain/langchain/document_loaders/__init__.py @@ -15,8 +15,8 @@ Document, TextSplitter """ -from langchain.document_loaders.acreom import AcreomLoader -from langchain.document_loaders.airbyte import ( +from langchain_community.document_loaders.acreom import AcreomLoader +from langchain_community.document_loaders.airbyte import ( AirbyteCDKLoader, AirbyteGongLoader, AirbyteHubspotLoader, @@ -26,108 +26,119 @@ from langchain.document_loaders.airbyte import ( AirbyteTypeformLoader, AirbyteZendeskSupportLoader, ) -from langchain.document_loaders.airbyte_json import AirbyteJSONLoader -from langchain.document_loaders.airtable import AirtableLoader -from langchain.document_loaders.apify_dataset import ApifyDatasetLoader -from langchain.document_loaders.arcgis_loader import ArcGISLoader -from langchain.document_loaders.arxiv import ArxivLoader -from langchain.document_loaders.assemblyai import AssemblyAIAudioTranscriptLoader -from langchain.document_loaders.async_html import AsyncHtmlLoader -from langchain.document_loaders.azlyrics import AZLyricsLoader -from langchain.document_loaders.azure_ai_data import ( +from langchain_community.document_loaders.airbyte_json import AirbyteJSONLoader +from langchain_community.document_loaders.airtable import AirtableLoader +from langchain_community.document_loaders.apify_dataset import ApifyDatasetLoader +from langchain_community.document_loaders.arcgis_loader import ArcGISLoader +from langchain_community.document_loaders.arxiv import ArxivLoader +from langchain_community.document_loaders.assemblyai import ( + AssemblyAIAudioTranscriptLoader, +) +from langchain_community.document_loaders.async_html import AsyncHtmlLoader +from langchain_community.document_loaders.azlyrics import AZLyricsLoader +from langchain_community.document_loaders.azure_ai_data import ( AzureAIDataLoader, ) -from langchain.document_loaders.azure_blob_storage_container import ( +from langchain_community.document_loaders.azure_blob_storage_container import ( AzureBlobStorageContainerLoader, ) -from 
langchain.document_loaders.azure_blob_storage_file import ( +from langchain_community.document_loaders.azure_blob_storage_file import ( AzureBlobStorageFileLoader, ) -from langchain.document_loaders.bibtex import BibtexLoader -from langchain.document_loaders.bigquery import BigQueryLoader -from langchain.document_loaders.bilibili import BiliBiliLoader -from langchain.document_loaders.blackboard import BlackboardLoader -from langchain.document_loaders.blob_loaders import ( +from langchain_community.document_loaders.bibtex import BibtexLoader +from langchain_community.document_loaders.bigquery import BigQueryLoader +from langchain_community.document_loaders.bilibili import BiliBiliLoader +from langchain_community.document_loaders.blackboard import BlackboardLoader +from langchain_community.document_loaders.blob_loaders import ( Blob, BlobLoader, FileSystemBlobLoader, YoutubeAudioLoader, ) -from langchain.document_loaders.blockchain import BlockchainDocumentLoader -from langchain.document_loaders.brave_search import BraveSearchLoader -from langchain.document_loaders.browserless import BrowserlessLoader -from langchain.document_loaders.chatgpt import ChatGPTLoader -from langchain.document_loaders.chromium import AsyncChromiumLoader -from langchain.document_loaders.college_confidential import CollegeConfidentialLoader -from langchain.document_loaders.concurrent import ConcurrentLoader -from langchain.document_loaders.confluence import ConfluenceLoader -from langchain.document_loaders.conllu import CoNLLULoader -from langchain.document_loaders.couchbase import CouchbaseLoader -from langchain.document_loaders.csv_loader import CSVLoader, UnstructuredCSVLoader -from langchain.document_loaders.cube_semantic import CubeSemanticLoader -from langchain.document_loaders.datadog_logs import DatadogLogsLoader -from langchain.document_loaders.dataframe import DataFrameLoader -from langchain.document_loaders.diffbot import DiffbotLoader -from langchain.document_loaders.directory import DirectoryLoader -from langchain.document_loaders.discord import DiscordChatLoader -from langchain.document_loaders.docugami import DocugamiLoader -from langchain.document_loaders.docusaurus import DocusaurusLoader -from langchain.document_loaders.dropbox import DropboxLoader -from langchain.document_loaders.duckdb_loader import DuckDBLoader -from langchain.document_loaders.email import ( +from langchain_community.document_loaders.blockchain import BlockchainDocumentLoader +from langchain_community.document_loaders.brave_search import BraveSearchLoader +from langchain_community.document_loaders.browserless import BrowserlessLoader +from langchain_community.document_loaders.chatgpt import ChatGPTLoader +from langchain_community.document_loaders.chromium import AsyncChromiumLoader +from langchain_community.document_loaders.college_confidential import ( + CollegeConfidentialLoader, +) +from langchain_community.document_loaders.concurrent import ConcurrentLoader +from langchain_community.document_loaders.confluence import ConfluenceLoader +from langchain_community.document_loaders.conllu import CoNLLULoader +from langchain_community.document_loaders.couchbase import CouchbaseLoader +from langchain_community.document_loaders.csv_loader import ( + CSVLoader, + UnstructuredCSVLoader, +) +from langchain_community.document_loaders.cube_semantic import CubeSemanticLoader +from langchain_community.document_loaders.datadog_logs import DatadogLogsLoader +from langchain_community.document_loaders.dataframe import DataFrameLoader +from 
langchain_community.document_loaders.diffbot import DiffbotLoader +from langchain_community.document_loaders.directory import DirectoryLoader +from langchain_community.document_loaders.discord import DiscordChatLoader +from langchain_community.document_loaders.docugami import DocugamiLoader +from langchain_community.document_loaders.docusaurus import DocusaurusLoader +from langchain_community.document_loaders.dropbox import DropboxLoader +from langchain_community.document_loaders.duckdb_loader import DuckDBLoader +from langchain_community.document_loaders.email import ( OutlookMessageLoader, UnstructuredEmailLoader, ) -from langchain.document_loaders.epub import UnstructuredEPubLoader -from langchain.document_loaders.etherscan import EtherscanLoader -from langchain.document_loaders.evernote import EverNoteLoader -from langchain.document_loaders.excel import UnstructuredExcelLoader -from langchain.document_loaders.facebook_chat import FacebookChatLoader -from langchain.document_loaders.fauna import FaunaLoader -from langchain.document_loaders.figma import FigmaFileLoader -from langchain.document_loaders.gcs_directory import GCSDirectoryLoader -from langchain.document_loaders.gcs_file import GCSFileLoader -from langchain.document_loaders.geodataframe import GeoDataFrameLoader -from langchain.document_loaders.git import GitLoader -from langchain.document_loaders.gitbook import GitbookLoader -from langchain.document_loaders.github import GitHubIssuesLoader -from langchain.document_loaders.google_speech_to_text import GoogleSpeechToTextLoader -from langchain.document_loaders.googledrive import GoogleDriveLoader -from langchain.document_loaders.gutenberg import GutenbergLoader -from langchain.document_loaders.hn import HNLoader -from langchain.document_loaders.html import UnstructuredHTMLLoader -from langchain.document_loaders.html_bs import BSHTMLLoader -from langchain.document_loaders.hugging_face_dataset import HuggingFaceDatasetLoader -from langchain.document_loaders.ifixit import IFixitLoader -from langchain.document_loaders.image import UnstructuredImageLoader -from langchain.document_loaders.image_captions import ImageCaptionLoader -from langchain.document_loaders.imsdb import IMSDbLoader -from langchain.document_loaders.iugu import IuguLoader -from langchain.document_loaders.joplin import JoplinLoader -from langchain.document_loaders.json_loader import JSONLoader -from langchain.document_loaders.lakefs import LakeFSLoader -from langchain.document_loaders.larksuite import LarkSuiteDocLoader -from langchain.document_loaders.markdown import UnstructuredMarkdownLoader -from langchain.document_loaders.mastodon import MastodonTootsLoader -from langchain.document_loaders.max_compute import MaxComputeLoader -from langchain.document_loaders.mediawikidump import MWDumpLoader -from langchain.document_loaders.merge import MergedDataLoader -from langchain.document_loaders.mhtml import MHTMLLoader -from langchain.document_loaders.modern_treasury import ModernTreasuryLoader -from langchain.document_loaders.mongodb import MongodbLoader -from langchain.document_loaders.news import NewsURLLoader -from langchain.document_loaders.notebook import NotebookLoader -from langchain.document_loaders.notion import NotionDirectoryLoader -from langchain.document_loaders.notiondb import NotionDBLoader -from langchain.document_loaders.obs_directory import OBSDirectoryLoader -from langchain.document_loaders.obs_file import OBSFileLoader -from langchain.document_loaders.obsidian import ObsidianLoader -from 
langchain.document_loaders.odt import UnstructuredODTLoader -from langchain.document_loaders.onedrive import OneDriveLoader -from langchain.document_loaders.onedrive_file import OneDriveFileLoader -from langchain.document_loaders.open_city_data import OpenCityDataLoader -from langchain.document_loaders.org_mode import UnstructuredOrgModeLoader -from langchain.document_loaders.pdf import ( +from langchain_community.document_loaders.epub import UnstructuredEPubLoader +from langchain_community.document_loaders.etherscan import EtherscanLoader +from langchain_community.document_loaders.evernote import EverNoteLoader +from langchain_community.document_loaders.excel import UnstructuredExcelLoader +from langchain_community.document_loaders.facebook_chat import FacebookChatLoader +from langchain_community.document_loaders.fauna import FaunaLoader +from langchain_community.document_loaders.figma import FigmaFileLoader +from langchain_community.document_loaders.gcs_directory import GCSDirectoryLoader +from langchain_community.document_loaders.gcs_file import GCSFileLoader +from langchain_community.document_loaders.geodataframe import GeoDataFrameLoader +from langchain_community.document_loaders.git import GitLoader +from langchain_community.document_loaders.gitbook import GitbookLoader +from langchain_community.document_loaders.github import GitHubIssuesLoader +from langchain_community.document_loaders.google_speech_to_text import ( + GoogleSpeechToTextLoader, +) +from langchain_community.document_loaders.googledrive import GoogleDriveLoader +from langchain_community.document_loaders.gutenberg import GutenbergLoader +from langchain_community.document_loaders.hn import HNLoader +from langchain_community.document_loaders.html import UnstructuredHTMLLoader +from langchain_community.document_loaders.html_bs import BSHTMLLoader +from langchain_community.document_loaders.hugging_face_dataset import ( + HuggingFaceDatasetLoader, +) +from langchain_community.document_loaders.ifixit import IFixitLoader +from langchain_community.document_loaders.image import UnstructuredImageLoader +from langchain_community.document_loaders.image_captions import ImageCaptionLoader +from langchain_community.document_loaders.imsdb import IMSDbLoader +from langchain_community.document_loaders.iugu import IuguLoader +from langchain_community.document_loaders.joplin import JoplinLoader +from langchain_community.document_loaders.json_loader import JSONLoader +from langchain_community.document_loaders.lakefs import LakeFSLoader +from langchain_community.document_loaders.larksuite import LarkSuiteDocLoader +from langchain_community.document_loaders.markdown import UnstructuredMarkdownLoader +from langchain_community.document_loaders.mastodon import MastodonTootsLoader +from langchain_community.document_loaders.max_compute import MaxComputeLoader +from langchain_community.document_loaders.mediawikidump import MWDumpLoader +from langchain_community.document_loaders.merge import MergedDataLoader +from langchain_community.document_loaders.mhtml import MHTMLLoader +from langchain_community.document_loaders.modern_treasury import ModernTreasuryLoader +from langchain_community.document_loaders.mongodb import MongodbLoader +from langchain_community.document_loaders.news import NewsURLLoader +from langchain_community.document_loaders.notebook import NotebookLoader +from langchain_community.document_loaders.notion import NotionDirectoryLoader +from langchain_community.document_loaders.notiondb import NotionDBLoader +from 
langchain_community.document_loaders.obs_directory import OBSDirectoryLoader +from langchain_community.document_loaders.obs_file import OBSFileLoader +from langchain_community.document_loaders.obsidian import ObsidianLoader +from langchain_community.document_loaders.odt import UnstructuredODTLoader +from langchain_community.document_loaders.onedrive import OneDriveLoader +from langchain_community.document_loaders.onedrive_file import OneDriveFileLoader +from langchain_community.document_loaders.open_city_data import OpenCityDataLoader +from langchain_community.document_loaders.org_mode import UnstructuredOrgModeLoader +from langchain_community.document_loaders.pdf import ( AmazonTextractPDFLoader, MathpixPDFLoader, OnlinePDFLoader, @@ -140,62 +151,68 @@ from langchain.document_loaders.pdf import ( PyPDFLoader, UnstructuredPDFLoader, ) -from langchain.document_loaders.polars_dataframe import PolarsDataFrameLoader -from langchain.document_loaders.powerpoint import UnstructuredPowerPointLoader -from langchain.document_loaders.psychic import PsychicLoader -from langchain.document_loaders.pubmed import PubMedLoader -from langchain.document_loaders.pyspark_dataframe import PySparkDataFrameLoader -from langchain.document_loaders.python import PythonLoader -from langchain.document_loaders.readthedocs import ReadTheDocsLoader -from langchain.document_loaders.recursive_url_loader import RecursiveUrlLoader -from langchain.document_loaders.reddit import RedditPostsLoader -from langchain.document_loaders.roam import RoamLoader -from langchain.document_loaders.rocksetdb import RocksetLoader -from langchain.document_loaders.rss import RSSFeedLoader -from langchain.document_loaders.rst import UnstructuredRSTLoader -from langchain.document_loaders.rtf import UnstructuredRTFLoader -from langchain.document_loaders.s3_directory import S3DirectoryLoader -from langchain.document_loaders.s3_file import S3FileLoader -from langchain.document_loaders.sharepoint import SharePointLoader -from langchain.document_loaders.sitemap import SitemapLoader -from langchain.document_loaders.slack_directory import SlackDirectoryLoader -from langchain.document_loaders.snowflake_loader import SnowflakeLoader -from langchain.document_loaders.spreedly import SpreedlyLoader -from langchain.document_loaders.srt import SRTLoader -from langchain.document_loaders.stripe import StripeLoader -from langchain.document_loaders.telegram import ( +from langchain_community.document_loaders.polars_dataframe import PolarsDataFrameLoader +from langchain_community.document_loaders.powerpoint import UnstructuredPowerPointLoader +from langchain_community.document_loaders.psychic import PsychicLoader +from langchain_community.document_loaders.pubmed import PubMedLoader +from langchain_community.document_loaders.pyspark_dataframe import ( + PySparkDataFrameLoader, +) +from langchain_community.document_loaders.python import PythonLoader +from langchain_community.document_loaders.readthedocs import ReadTheDocsLoader +from langchain_community.document_loaders.recursive_url_loader import RecursiveUrlLoader +from langchain_community.document_loaders.reddit import RedditPostsLoader +from langchain_community.document_loaders.roam import RoamLoader +from langchain_community.document_loaders.rocksetdb import RocksetLoader +from langchain_community.document_loaders.rss import RSSFeedLoader +from langchain_community.document_loaders.rst import UnstructuredRSTLoader +from langchain_community.document_loaders.rtf import UnstructuredRTFLoader +from 
langchain_community.document_loaders.s3_directory import S3DirectoryLoader +from langchain_community.document_loaders.s3_file import S3FileLoader +from langchain_community.document_loaders.sharepoint import SharePointLoader +from langchain_community.document_loaders.sitemap import SitemapLoader +from langchain_community.document_loaders.slack_directory import SlackDirectoryLoader +from langchain_community.document_loaders.snowflake_loader import SnowflakeLoader +from langchain_community.document_loaders.spreedly import SpreedlyLoader +from langchain_community.document_loaders.srt import SRTLoader +from langchain_community.document_loaders.stripe import StripeLoader +from langchain_community.document_loaders.telegram import ( TelegramChatApiLoader, TelegramChatFileLoader, ) -from langchain.document_loaders.tencent_cos_directory import TencentCOSDirectoryLoader -from langchain.document_loaders.tencent_cos_file import TencentCOSFileLoader -from langchain.document_loaders.tensorflow_datasets import TensorflowDatasetLoader -from langchain.document_loaders.text import TextLoader -from langchain.document_loaders.tomarkdown import ToMarkdownLoader -from langchain.document_loaders.toml import TomlLoader -from langchain.document_loaders.trello import TrelloLoader -from langchain.document_loaders.tsv import UnstructuredTSVLoader -from langchain.document_loaders.twitter import TwitterTweetLoader -from langchain.document_loaders.unstructured import ( +from langchain_community.document_loaders.tencent_cos_directory import ( + TencentCOSDirectoryLoader, +) +from langchain_community.document_loaders.tencent_cos_file import TencentCOSFileLoader +from langchain_community.document_loaders.tensorflow_datasets import ( + TensorflowDatasetLoader, +) +from langchain_community.document_loaders.text import TextLoader +from langchain_community.document_loaders.tomarkdown import ToMarkdownLoader +from langchain_community.document_loaders.toml import TomlLoader +from langchain_community.document_loaders.trello import TrelloLoader +from langchain_community.document_loaders.tsv import UnstructuredTSVLoader +from langchain_community.document_loaders.twitter import TwitterTweetLoader +from langchain_community.document_loaders.unstructured import ( UnstructuredAPIFileIOLoader, UnstructuredAPIFileLoader, UnstructuredFileIOLoader, UnstructuredFileLoader, ) -from langchain.document_loaders.url import UnstructuredURLLoader -from langchain.document_loaders.url_playwright import PlaywrightURLLoader -from langchain.document_loaders.url_selenium import SeleniumURLLoader -from langchain.document_loaders.weather import WeatherDataLoader -from langchain.document_loaders.web_base import WebBaseLoader -from langchain.document_loaders.whatsapp_chat import WhatsAppChatLoader -from langchain.document_loaders.wikipedia import WikipediaLoader -from langchain.document_loaders.word_document import ( +from langchain_community.document_loaders.url import UnstructuredURLLoader +from langchain_community.document_loaders.url_playwright import PlaywrightURLLoader +from langchain_community.document_loaders.url_selenium import SeleniumURLLoader +from langchain_community.document_loaders.weather import WeatherDataLoader +from langchain_community.document_loaders.web_base import WebBaseLoader +from langchain_community.document_loaders.whatsapp_chat import WhatsAppChatLoader +from langchain_community.document_loaders.wikipedia import WikipediaLoader +from langchain_community.document_loaders.word_document import ( Docx2txtLoader, 
UnstructuredWordDocumentLoader, ) -from langchain.document_loaders.xml import UnstructuredXMLLoader -from langchain.document_loaders.xorbits import XorbitsLoader -from langchain.document_loaders.youtube import ( +from langchain_community.document_loaders.xml import UnstructuredXMLLoader +from langchain_community.document_loaders.xorbits import XorbitsLoader +from langchain_community.document_loaders.youtube import ( GoogleApiClient, GoogleApiYoutubeLoader, YoutubeLoader, diff --git a/libs/langchain/langchain/document_loaders/blob_loaders/__init__.py b/libs/langchain/langchain/document_loaders/blob_loaders/__init__.py index a96bc407c4..174c71de02 100644 --- a/libs/langchain/langchain/document_loaders/blob_loaders/__init__.py +++ b/libs/langchain/langchain/document_loaders/blob_loaders/__init__.py @@ -1,5 +1,9 @@ -from langchain.document_loaders.blob_loaders.file_system import FileSystemBlobLoader -from langchain.document_loaders.blob_loaders.schema import Blob, BlobLoader -from langchain.document_loaders.blob_loaders.youtube_audio import YoutubeAudioLoader +from langchain_community.document_loaders.blob_loaders.file_system import ( + FileSystemBlobLoader, +) +from langchain_community.document_loaders.blob_loaders.schema import Blob, BlobLoader +from langchain_community.document_loaders.blob_loaders.youtube_audio import ( + YoutubeAudioLoader, +) __all__ = ["BlobLoader", "Blob", "FileSystemBlobLoader", "YoutubeAudioLoader"] diff --git a/libs/langchain/langchain/document_loaders/parsers/__init__.py b/libs/langchain/langchain/document_loaders/parsers/__init__.py index e2233e5cc6..c7bd6d73df 100644 --- a/libs/langchain/langchain/document_loaders/parsers/__init__.py +++ b/libs/langchain/langchain/document_loaders/parsers/__init__.py @@ -1,9 +1,9 @@ -from langchain.document_loaders.parsers.audio import OpenAIWhisperParser -from langchain.document_loaders.parsers.docai import DocAIParser -from langchain.document_loaders.parsers.grobid import GrobidParser -from langchain.document_loaders.parsers.html import BS4HTMLParser -from langchain.document_loaders.parsers.language import LanguageParser -from langchain.document_loaders.parsers.pdf import ( +from langchain_community.document_loaders.parsers.audio import OpenAIWhisperParser +from langchain_community.document_loaders.parsers.docai import DocAIParser +from langchain_community.document_loaders.parsers.grobid import GrobidParser +from langchain_community.document_loaders.parsers.html import BS4HTMLParser +from langchain_community.document_loaders.parsers.language import LanguageParser +from langchain_community.document_loaders.parsers.pdf import ( PDFMinerParser, PDFPlumberParser, PyMuPDFParser, diff --git a/libs/langchain/langchain/document_loaders/parsers/html/__init__.py b/libs/langchain/langchain/document_loaders/parsers/html/__init__.py index bceacaed23..f59e804b30 100644 --- a/libs/langchain/langchain/document_loaders/parsers/html/__init__.py +++ b/libs/langchain/langchain/document_loaders/parsers/html/__init__.py @@ -1,3 +1,3 @@ -from langchain.document_loaders.parsers.html.bs4 import BS4HTMLParser +from langchain_community.document_loaders.parsers.html.bs4 import BS4HTMLParser __all__ = ["BS4HTMLParser"] diff --git a/libs/langchain/langchain/document_loaders/parsers/language/__init__.py b/libs/langchain/langchain/document_loaders/parsers/language/__init__.py index 4c93ed849a..e56cc143cf 100644 --- a/libs/langchain/langchain/document_loaders/parsers/language/__init__.py +++ 
b/libs/langchain/langchain/document_loaders/parsers/language/__init__.py @@ -1,3 +1,5 @@ -from langchain.document_loaders.parsers.language.language_parser import LanguageParser +from langchain_community.document_loaders.parsers.language.language_parser import ( + LanguageParser, +) __all__ = ["LanguageParser"] diff --git a/libs/langchain/langchain/document_transformers/__init__.py b/libs/langchain/langchain/document_transformers/__init__.py index abaed3894c..fce2184af3 100644 --- a/libs/langchain/langchain/document_transformers/__init__.py +++ b/libs/langchain/langchain/document_transformers/__init__.py @@ -15,24 +15,36 @@ Document """ # noqa: E501 -from langchain.document_transformers.beautiful_soup_transformer import ( +from langchain_community.document_transformers.beautiful_soup_transformer import ( BeautifulSoupTransformer, ) -from langchain.document_transformers.doctran_text_extract import ( +from langchain_community.document_transformers.doctran_text_extract import ( DoctranPropertyExtractor, ) -from langchain.document_transformers.doctran_text_qa import DoctranQATransformer -from langchain.document_transformers.doctran_text_translate import DoctranTextTranslator -from langchain.document_transformers.embeddings_redundant_filter import ( +from langchain_community.document_transformers.doctran_text_qa import ( + DoctranQATransformer, +) +from langchain_community.document_transformers.doctran_text_translate import ( + DoctranTextTranslator, +) +from langchain_community.document_transformers.embeddings_redundant_filter import ( EmbeddingsClusteringFilter, EmbeddingsRedundantFilter, get_stateful_documents, ) -from langchain.document_transformers.google_translate import GoogleTranslateTransformer -from langchain.document_transformers.html2text import Html2TextTransformer -from langchain.document_transformers.long_context_reorder import LongContextReorder -from langchain.document_transformers.nuclia_text_transform import NucliaTextTransformer -from langchain.document_transformers.openai_functions import OpenAIMetadataTagger +from langchain_community.document_transformers.google_translate import ( + GoogleTranslateTransformer, +) +from langchain_community.document_transformers.html2text import Html2TextTransformer +from langchain_community.document_transformers.long_context_reorder import ( + LongContextReorder, +) +from langchain_community.document_transformers.nuclia_text_transform import ( + NucliaTextTransformer, +) +from langchain_community.document_transformers.openai_functions import ( + OpenAIMetadataTagger, +) __all__ = [ "BeautifulSoupTransformer", diff --git a/libs/langchain/langchain/graphs/__init__.py b/libs/langchain/langchain/graphs/__init__.py index b2a43168c9..7de3bdbc7b 100644 --- a/libs/langchain/langchain/graphs/__init__.py +++ b/libs/langchain/langchain/graphs/__init__.py @@ -1,15 +1,15 @@ """**Graphs** provide a natural language interface to graph databases.""" -from langchain.graphs.arangodb_graph import ArangoGraph -from langchain.graphs.falkordb_graph import FalkorDBGraph -from langchain.graphs.hugegraph import HugeGraph -from langchain.graphs.kuzu_graph import KuzuGraph -from langchain.graphs.memgraph_graph import MemgraphGraph -from langchain.graphs.nebula_graph import NebulaGraph -from langchain.graphs.neo4j_graph import Neo4jGraph -from langchain.graphs.neptune_graph import NeptuneGraph -from langchain.graphs.networkx_graph import NetworkxEntityGraph -from langchain.graphs.rdf_graph import RdfGraph +from langchain_community.graphs.arangodb_graph import 
ArangoGraph +from langchain_community.graphs.falkordb_graph import FalkorDBGraph +from langchain_community.graphs.hugegraph import HugeGraph +from langchain_community.graphs.kuzu_graph import KuzuGraph +from langchain_community.graphs.memgraph_graph import MemgraphGraph +from langchain_community.graphs.nebula_graph import NebulaGraph +from langchain_community.graphs.neo4j_graph import Neo4jGraph +from langchain_community.graphs.neptune_graph import NeptuneGraph +from langchain_community.graphs.networkx_graph import NetworkxEntityGraph +from langchain_community.graphs.rdf_graph import RdfGraph __all__ = [ "MemgraphGraph", diff --git a/libs/langchain/langchain/indexes/_api.py b/libs/langchain/langchain/indexes/_api.py index 03a84499cf..2f91c2ae45 100644 --- a/libs/langchain/langchain/indexes/_api.py +++ b/libs/langchain/langchain/indexes/_api.py @@ -24,11 +24,11 @@ from typing import ( cast, ) +from langchain_community.document_loaders.base import BaseLoader from langchain_core.documents import Document from langchain_core.pydantic_v1 import root_validator from langchain_core.vectorstores import VectorStore -from langchain.document_loaders.base import BaseLoader from langchain.indexes.base import NAMESPACE_UUID, RecordManager T = TypeVar("T") diff --git a/libs/langchain/langchain/indexes/graph.py b/libs/langchain/langchain/indexes/graph.py index a1ded5b7bd..dc8e2ab38a 100644 --- a/libs/langchain/langchain/indexes/graph.py +++ b/libs/langchain/langchain/indexes/graph.py @@ -1,12 +1,12 @@ """Graph Index Creator.""" from typing import Optional, Type +from langchain_community.graphs.networkx_graph import NetworkxEntityGraph, parse_triples from langchain_core.language_models import BaseLanguageModel from langchain_core.prompts import BasePromptTemplate from langchain_core.pydantic_v1 import BaseModel from langchain.chains.llm import LLMChain -from langchain.graphs.networkx_graph import NetworkxEntityGraph, parse_triples from langchain.indexes.prompts.knowledge_triplet_extraction import ( KNOWLEDGE_TRIPLE_EXTRACTION_PROMPT, ) diff --git a/libs/langchain/langchain/indexes/prompts/knowledge_triplet_extraction.py b/libs/langchain/langchain/indexes/prompts/knowledge_triplet_extraction.py index 70b6d4e275..0176dd428d 100644 --- a/libs/langchain/langchain/indexes/prompts/knowledge_triplet_extraction.py +++ b/libs/langchain/langchain/indexes/prompts/knowledge_triplet_extraction.py @@ -1,6 +1,6 @@ # flake8: noqa -from langchain.graphs.networkx_graph import KG_TRIPLE_DELIMITER +from langchain_community.graphs.networkx_graph import KG_TRIPLE_DELIMITER from langchain_core.prompts.prompt import PromptTemplate _DEFAULT_KNOWLEDGE_TRIPLE_EXTRACTION_TEMPLATE = ( diff --git a/libs/langchain/langchain/indexes/vectorstore.py b/libs/langchain/langchain/indexes/vectorstore.py index c3e611601f..25a70a65d2 100644 --- a/libs/langchain/langchain/indexes/vectorstore.py +++ b/libs/langchain/langchain/indexes/vectorstore.py @@ -1,7 +1,9 @@ from typing import Any, Dict, List, Optional, Type +from langchain_community.document_loaders.base import BaseLoader from langchain_community.embeddings.openai import OpenAIEmbeddings from langchain_community.llms.openai import OpenAI +from langchain_community.vectorstores.chroma import Chroma from langchain_core.documents import Document from langchain_core.embeddings import Embeddings from langchain_core.language_models import BaseLanguageModel @@ -10,9 +12,7 @@ from langchain_core.vectorstores import VectorStore from langchain.chains.qa_with_sources.retrieval import 
RetrievalQAWithSourcesChain from langchain.chains.retrieval_qa.base import RetrievalQA -from langchain.document_loaders.base import BaseLoader from langchain.text_splitter import RecursiveCharacterTextSplitter, TextSplitter -from langchain.vectorstores.chroma import Chroma def _get_default_text_splitter() -> TextSplitter: diff --git a/libs/langchain/langchain/memory/__init__.py b/libs/langchain/langchain/memory/__init__.py index c10bbf492c..dda324bc32 100644 --- a/libs/langchain/langchain/memory/__init__.py +++ b/libs/langchain/langchain/memory/__init__.py @@ -26,12 +26,7 @@ AIMessage, BaseMessage, HumanMessage """ # noqa: E501 -from langchain.memory.buffer import ( - ConversationBufferMemory, - ConversationStringBufferMemory, -) -from langchain.memory.buffer_window import ConversationBufferWindowMemory -from langchain.memory.chat_message_histories import ( +from langchain_community.chat_message_histories import ( AstraDBChatMessageHistory, CassandraChatMessageHistory, ChatMessageHistory, @@ -50,6 +45,12 @@ from langchain.memory.chat_message_histories import ( XataChatMessageHistory, ZepChatMessageHistory, ) + +from langchain.memory.buffer import ( + ConversationBufferMemory, + ConversationStringBufferMemory, +) +from langchain.memory.buffer_window import ConversationBufferWindowMemory from langchain.memory.combined import CombinedMemory from langchain.memory.entity import ( ConversationEntityMemory, diff --git a/libs/langchain/langchain/memory/chat_memory.py b/libs/langchain/langchain/memory/chat_memory.py index d6afb4079e..7808264209 100644 --- a/libs/langchain/langchain/memory/chat_memory.py +++ b/libs/langchain/langchain/memory/chat_memory.py @@ -1,11 +1,11 @@ from abc import ABC from typing import Any, Dict, Optional, Tuple +from langchain_community.chat_message_histories.in_memory import ChatMessageHistory from langchain_core.chat_history import BaseChatMessageHistory from langchain_core.memory import BaseMemory from langchain_core.pydantic_v1 import Field -from langchain.memory.chat_message_histories.in_memory import ChatMessageHistory from langchain.memory.utils import get_prompt_input_key diff --git a/libs/langchain/langchain/memory/chat_message_histories/__init__.py b/libs/langchain/langchain/memory/chat_message_histories/__init__.py index 83fc7fa519..a45ecb7ead 100644 --- a/libs/langchain/langchain/memory/chat_message_histories/__init__.py +++ b/libs/langchain/langchain/memory/chat_message_histories/__init__.py @@ -1,37 +1,45 @@ -from langchain.memory.chat_message_histories.astradb import ( +from langchain_community.chat_message_histories.astradb import ( AstraDBChatMessageHistory, ) -from langchain.memory.chat_message_histories.cassandra import ( +from langchain_community.chat_message_histories.cassandra import ( CassandraChatMessageHistory, ) -from langchain.memory.chat_message_histories.cosmos_db import CosmosDBChatMessageHistory -from langchain.memory.chat_message_histories.dynamodb import DynamoDBChatMessageHistory -from langchain.memory.chat_message_histories.elasticsearch import ( +from langchain_community.chat_message_histories.cosmos_db import ( + CosmosDBChatMessageHistory, +) +from langchain_community.chat_message_histories.dynamodb import ( + DynamoDBChatMessageHistory, +) +from langchain_community.chat_message_histories.elasticsearch import ( ElasticsearchChatMessageHistory, ) -from langchain.memory.chat_message_histories.file import FileChatMessageHistory -from langchain.memory.chat_message_histories.firestore import ( +from 
langchain_community.chat_message_histories.file import FileChatMessageHistory +from langchain_community.chat_message_histories.firestore import ( FirestoreChatMessageHistory, ) -from langchain.memory.chat_message_histories.in_memory import ChatMessageHistory -from langchain.memory.chat_message_histories.momento import MomentoChatMessageHistory -from langchain.memory.chat_message_histories.mongodb import MongoDBChatMessageHistory -from langchain.memory.chat_message_histories.neo4j import Neo4jChatMessageHistory -from langchain.memory.chat_message_histories.postgres import PostgresChatMessageHistory -from langchain.memory.chat_message_histories.redis import RedisChatMessageHistory -from langchain.memory.chat_message_histories.rocksetdb import RocksetChatMessageHistory -from langchain.memory.chat_message_histories.singlestoredb import ( +from langchain_community.chat_message_histories.in_memory import ChatMessageHistory +from langchain_community.chat_message_histories.momento import MomentoChatMessageHistory +from langchain_community.chat_message_histories.mongodb import MongoDBChatMessageHistory +from langchain_community.chat_message_histories.neo4j import Neo4jChatMessageHistory +from langchain_community.chat_message_histories.postgres import ( + PostgresChatMessageHistory, +) +from langchain_community.chat_message_histories.redis import RedisChatMessageHistory +from langchain_community.chat_message_histories.rocksetdb import ( + RocksetChatMessageHistory, +) +from langchain_community.chat_message_histories.singlestoredb import ( SingleStoreDBChatMessageHistory, ) -from langchain.memory.chat_message_histories.sql import SQLChatMessageHistory -from langchain.memory.chat_message_histories.streamlit import ( +from langchain_community.chat_message_histories.sql import SQLChatMessageHistory +from langchain_community.chat_message_histories.streamlit import ( StreamlitChatMessageHistory, ) -from langchain.memory.chat_message_histories.upstash_redis import ( +from langchain_community.chat_message_histories.upstash_redis import ( UpstashRedisChatMessageHistory, ) -from langchain.memory.chat_message_histories.xata import XataChatMessageHistory -from langchain.memory.chat_message_histories.zep import ZepChatMessageHistory +from langchain_community.chat_message_histories.xata import XataChatMessageHistory +from langchain_community.chat_message_histories.zep import ZepChatMessageHistory __all__ = [ "AstraDBChatMessageHistory", diff --git a/libs/langchain/langchain/memory/kg.py b/libs/langchain/langchain/memory/kg.py index 39b83c67ed..4e6a29dbb7 100644 --- a/libs/langchain/langchain/memory/kg.py +++ b/libs/langchain/langchain/memory/kg.py @@ -1,13 +1,17 @@ from typing import Any, Dict, List, Type, Union +from langchain_community.graphs import NetworkxEntityGraph +from langchain_community.graphs.networkx_graph import ( + KnowledgeTriple, + get_entities, + parse_triples, +) from langchain_core.language_models import BaseLanguageModel from langchain_core.messages import BaseMessage, SystemMessage, get_buffer_string from langchain_core.prompts import BasePromptTemplate from langchain_core.pydantic_v1 import Field from langchain.chains.llm import LLMChain -from langchain.graphs import NetworkxEntityGraph -from langchain.graphs.networkx_graph import KnowledgeTriple, get_entities, parse_triples from langchain.memory.chat_memory import BaseChatMemory from langchain.memory.prompt import ( ENTITY_EXTRACTION_PROMPT, diff --git a/libs/langchain/langchain/memory/zep_memory.py 
b/libs/langchain/langchain/memory/zep_memory.py index 0c276c69bf..ef3153ed62 100644 --- a/libs/langchain/langchain/memory/zep_memory.py +++ b/libs/langchain/langchain/memory/zep_memory.py @@ -2,8 +2,9 @@ from __future__ import annotations from typing import Any, Dict, Optional +from langchain_community.chat_message_histories import ZepChatMessageHistory + from langchain.memory import ConversationBufferMemory -from langchain.memory.chat_message_histories import ZepChatMessageHistory class ZepMemory(ConversationBufferMemory): diff --git a/libs/langchain/langchain/retrievers/document_compressors/embeddings_filter.py b/libs/langchain/langchain/retrievers/document_compressors/embeddings_filter.py index cc3e9d5ad3..eb8066a352 100644 --- a/libs/langchain/langchain/retrievers/document_compressors/embeddings_filter.py +++ b/libs/langchain/langchain/retrievers/document_compressors/embeddings_filter.py @@ -1,15 +1,15 @@ from typing import Callable, Dict, Optional, Sequence import numpy as np +from langchain_community.document_transformers.embeddings_redundant_filter import ( + _get_embeddings_from_stateful_docs, + get_stateful_documents, +) from langchain_core.documents import Document from langchain_core.embeddings import Embeddings from langchain_core.pydantic_v1 import root_validator from langchain.callbacks.manager import Callbacks -from langchain.document_transformers.embeddings_redundant_filter import ( - _get_embeddings_from_stateful_docs, - get_stateful_documents, -) from langchain.retrievers.document_compressors.base import ( BaseDocumentCompressor, ) diff --git a/libs/langchain/langchain/retrievers/parent_document_retriever.py b/libs/langchain/langchain/retrievers/parent_document_retriever.py index 86a5f98b2b..86e4cef186 100644 --- a/libs/langchain/langchain/retrievers/parent_document_retriever.py +++ b/libs/langchain/langchain/retrievers/parent_document_retriever.py @@ -32,7 +32,7 @@ class ParentDocumentRetriever(MultiVectorRetriever): .. 
diff --git a/libs/langchain/langchain/retrievers/parent_document_retriever.py b/libs/langchain/langchain/retrievers/parent_document_retriever.py
index 86a5f98b2b..86e4cef186 100644
--- a/libs/langchain/langchain/retrievers/parent_document_retriever.py
+++ b/libs/langchain/langchain/retrievers/parent_document_retriever.py
@@ -32,7 +32,7 @@ class ParentDocumentRetriever(MultiVectorRetriever):
         .. code-block:: python

             # Imports
-            from langchain.vectorstores import Chroma
+            from langchain_community.vectorstores import Chroma
             from langchain_community.embeddings import OpenAIEmbeddings
             from langchain.text_splitter import RecursiveCharacterTextSplitter
             from langchain.storage import InMemoryStore
diff --git a/libs/langchain/langchain/retrievers/self_query/base.py b/libs/langchain/langchain/retrievers/self_query/base.py
index dc1bf174b8..5c5e4a18e0 100644
--- a/libs/langchain/langchain/retrievers/self_query/base.py
+++ b/libs/langchain/langchain/retrievers/self_query/base.py
@@ -2,6 +2,23 @@ import logging

 from typing import Any, Dict, List, Optional, Sequence, Tuple, Type, Union

+from langchain_community.vectorstores import (
+    Chroma,
+    DashVector,
+    DeepLake,
+    ElasticsearchStore,
+    Milvus,
+    MongoDBAtlasVectorSearch,
+    MyScale,
+    OpenSearchVectorSearch,
+    Pinecone,
+    Qdrant,
+    Redis,
+    SupabaseVectorStore,
+    TimescaleVector,
+    Vectara,
+    Weaviate,
+)
 from langchain_core.documents import Document
 from langchain_core.language_models import BaseLanguageModel
 from langchain_core.pydantic_v1 import BaseModel, Field, root_validator
@@ -31,23 +48,6 @@ from langchain.retrievers.self_query.supabase import SupabaseVectorTranslator
 from langchain.retrievers.self_query.timescalevector import TimescaleVectorTranslator
 from langchain.retrievers.self_query.vectara import VectaraTranslator
 from langchain.retrievers.self_query.weaviate import WeaviateTranslator
-from langchain.vectorstores import (
-    Chroma,
-    DashVector,
-    DeepLake,
-    ElasticsearchStore,
-    Milvus,
-    MongoDBAtlasVectorSearch,
-    MyScale,
-    OpenSearchVectorSearch,
-    Pinecone,
-    Qdrant,
-    Redis,
-    SupabaseVectorStore,
-    TimescaleVector,
-    Vectara,
-    Weaviate,
-)

 logger = logging.getLogger(__name__)
diff --git a/libs/langchain/langchain/retrievers/self_query/redis.py b/libs/langchain/langchain/retrievers/self_query/redis.py
index a13eb73407..cefe576182 100644
--- a/libs/langchain/langchain/retrievers/self_query/redis.py
+++ b/libs/langchain/langchain/retrievers/self_query/redis.py
@@ -2,6 +2,17 @@ from __future__ import annotations

 from typing import Any, Tuple

+from langchain_community.vectorstores.redis import Redis
+from langchain_community.vectorstores.redis.filters import (
+    RedisFilterExpression,
+    RedisFilterField,
+    RedisFilterOperator,
+    RedisNum,
+    RedisTag,
+    RedisText,
+)
+from langchain_community.vectorstores.redis.schema import RedisModel
+
 from langchain.chains.query_constructor.ir import (
     Comparator,
     Comparison,
@@ -10,16 +21,6 @@ from langchain.chains.query_constructor.ir import (
     StructuredQuery,
     Visitor,
 )
-from langchain.vectorstores.redis import Redis
-from langchain.vectorstores.redis.filters import (
-    RedisFilterExpression,
-    RedisFilterField,
-    RedisFilterOperator,
-    RedisNum,
-    RedisTag,
-    RedisText,
-)
-from langchain.vectorstores.redis.schema import RedisModel

 _COMPARATOR_TO_BUILTIN_METHOD = {
     Comparator.EQ: "__eq__",
diff --git a/libs/langchain/langchain/retrievers/web_research.py b/libs/langchain/langchain/retrievers/web_research.py
index 9af87e91b6..02d31d43a3 100644
--- a/libs/langchain/langchain/retrievers/web_research.py
+++ b/libs/langchain/langchain/retrievers/web_research.py
@@ -2,6 +2,8 @@ import logging
 import re
 from typing import List, Optional

+from langchain_community.document_loaders import AsyncHtmlLoader
+from langchain_community.document_transformers import Html2TextTransformer
 from langchain_community.llms import LlamaCpp
 from langchain_core.documents import Document
 from langchain_core.language_models import BaseLLM
@@ -16,8 +18,6 @@ from langchain.callbacks.manager import (
 )
 from langchain.chains import LLMChain
 from langchain.chains.prompt_selector import ConditionalPromptSelector
-from langchain.document_loaders import AsyncHtmlLoader
-from langchain.document_transformers import Html2TextTransformer
 from langchain.output_parsers.pydantic import PydanticOutputParser
 from langchain.text_splitter import RecursiveCharacterTextSplitter, TextSplitter
 from langchain.utilities import GoogleSearchAPIWrapper
diff --git a/libs/langchain/langchain/vectorstores/__init__.py b/libs/langchain/langchain/vectorstores/__init__.py
index 5c9b08b5d7..df64f984cc 100644
--- a/libs/langchain/langchain/vectorstores/__init__.py
+++ b/libs/langchain/langchain/vectorstores/__init__.py
@@ -25,13 +25,15 @@ from langchain_core.vectorstores import VectorStore


 def _import_alibaba_cloud_open_search() -> Any:
-    from langchain.vectorstores.alibabacloud_opensearch import AlibabaCloudOpenSearch
+    from langchain_community.vectorstores.alibabacloud_opensearch import (
+        AlibabaCloudOpenSearch,
+    )

     return AlibabaCloudOpenSearch


 def _import_alibaba_cloud_open_search_settings() -> Any:
-    from langchain.vectorstores.alibabacloud_opensearch import (
+    from langchain_community.vectorstores.alibabacloud_opensearch import (
         AlibabaCloudOpenSearchSettings,
     )

@@ -39,397 +41,405 @@ def _import_alibaba_cloud_open_search_settings() -> Any:

 def _import_azure_cosmos_db() -> Any:
-    from langchain.vectorstores.azure_cosmos_db import AzureCosmosDBVectorSearch
+    from langchain_community.vectorstores.azure_cosmos_db import (
+        AzureCosmosDBVectorSearch,
+    )

     return AzureCosmosDBVectorSearch


 def _import_elastic_knn_search() -> Any:
-    from langchain.vectorstores.elastic_vector_search import ElasticKnnSearch
+    from langchain_community.vectorstores.elastic_vector_search import ElasticKnnSearch

     return ElasticKnnSearch


 def _import_elastic_vector_search() -> Any:
-    from langchain.vectorstores.elastic_vector_search import ElasticVectorSearch
+    from langchain_community.vectorstores.elastic_vector_search import (
+        ElasticVectorSearch,
+    )

     return ElasticVectorSearch


 def _import_analyticdb() -> Any:
-    from langchain.vectorstores.analyticdb import AnalyticDB
+    from langchain_community.vectorstores.analyticdb import AnalyticDB

     return AnalyticDB


 def _import_annoy() -> Any:
-    from langchain.vectorstores.annoy import Annoy
+    from langchain_community.vectorstores.annoy import Annoy

     return Annoy


 def _import_atlas() -> Any:
-    from langchain.vectorstores.atlas import AtlasDB
+    from langchain_community.vectorstores.atlas import AtlasDB

     return AtlasDB


 def _import_awadb() -> Any:
-    from langchain.vectorstores.awadb import AwaDB
+    from langchain_community.vectorstores.awadb import AwaDB

     return AwaDB


 def _import_azuresearch() -> Any:
-    from langchain.vectorstores.azuresearch import AzureSearch
+    from langchain_community.vectorstores.azuresearch import AzureSearch

     return AzureSearch


 def _import_bageldb() -> Any:
-    from langchain.vectorstores.bageldb import Bagel
+    from langchain_community.vectorstores.bageldb import Bagel

     return Bagel


 def _import_baiducloud_vector_search() -> Any:
-    from langchain.vectorstores.baiducloud_vector_search import BESVectorStore
+    from langchain_community.vectorstores.baiducloud_vector_search import BESVectorStore

     return BESVectorStore


 def _import_cassandra() -> Any:
-    from langchain.vectorstores.cassandra import Cassandra
+    from langchain_community.vectorstores.cassandra import Cassandra

     return Cassandra


 def _import_astradb() -> Any:
-    from langchain.vectorstores.astradb import AstraDB
+    from langchain_community.vectorstores.astradb import AstraDB

     return AstraDB


 def _import_chroma() -> Any:
-    from langchain.vectorstores.chroma import Chroma
+    from langchain_community.vectorstores.chroma import Chroma

     return Chroma


 def _import_clarifai() -> Any:
-    from langchain.vectorstores.clarifai import Clarifai
+    from langchain_community.vectorstores.clarifai import Clarifai

     return Clarifai


 def _import_clickhouse() -> Any:
-    from langchain.vectorstores.clickhouse import Clickhouse
+    from langchain_community.vectorstores.clickhouse import Clickhouse

     return Clickhouse


 def _import_clickhouse_settings() -> Any:
-    from langchain.vectorstores.clickhouse import ClickhouseSettings
+    from langchain_community.vectorstores.clickhouse import ClickhouseSettings

     return ClickhouseSettings


 def _import_dashvector() -> Any:
-    from langchain.vectorstores.dashvector import DashVector
+    from langchain_community.vectorstores.dashvector import DashVector

     return DashVector


 def _import_databricks_vector_search() -> Any:
-    from langchain.vectorstores.databricks_vector_search import DatabricksVectorSearch
+    from langchain_community.vectorstores.databricks_vector_search import (
+        DatabricksVectorSearch,
+    )

     return DatabricksVectorSearch


 def _import_deeplake() -> Any:
-    from langchain.vectorstores.deeplake import DeepLake
+    from langchain_community.vectorstores.deeplake import DeepLake

     return DeepLake


 def _import_dingo() -> Any:
-    from langchain.vectorstores.dingo import Dingo
+    from langchain_community.vectorstores.dingo import Dingo

     return Dingo


 def _import_docarray_hnsw() -> Any:
-    from langchain.vectorstores.docarray import DocArrayHnswSearch
+    from langchain_community.vectorstores.docarray import DocArrayHnswSearch

     return DocArrayHnswSearch


 def _import_docarray_inmemory() -> Any:
-    from langchain.vectorstores.docarray import DocArrayInMemorySearch
+    from langchain_community.vectorstores.docarray import DocArrayInMemorySearch

     return DocArrayInMemorySearch


 def _import_elasticsearch() -> Any:
-    from langchain.vectorstores.elasticsearch import ElasticsearchStore
+    from langchain_community.vectorstores.elasticsearch import ElasticsearchStore

     return ElasticsearchStore


 def _import_epsilla() -> Any:
-    from langchain.vectorstores.epsilla import Epsilla
+    from langchain_community.vectorstores.epsilla import Epsilla

     return Epsilla


 def _import_faiss() -> Any:
-    from langchain.vectorstores.faiss import FAISS
+    from langchain_community.vectorstores.faiss import FAISS

     return FAISS


 def _import_hologres() -> Any:
-    from langchain.vectorstores.hologres import Hologres
+    from langchain_community.vectorstores.hologres import Hologres

     return Hologres


 def _import_lancedb() -> Any:
-    from langchain.vectorstores.lancedb import LanceDB
+    from langchain_community.vectorstores.lancedb import LanceDB

     return LanceDB


 def _import_llm_rails() -> Any:
-    from langchain.vectorstores.llm_rails import LLMRails
+    from langchain_community.vectorstores.llm_rails import LLMRails

     return LLMRails


 def _import_marqo() -> Any:
-    from langchain.vectorstores.marqo import Marqo
+    from langchain_community.vectorstores.marqo import Marqo

     return Marqo


 def _import_matching_engine() -> Any:
-    from langchain.vectorstores.matching_engine import MatchingEngine
+    from langchain_community.vectorstores.matching_engine import MatchingEngine

     return MatchingEngine


 def _import_meilisearch() -> Any:
-    from langchain.vectorstores.meilisearch import Meilisearch
+    from langchain_community.vectorstores.meilisearch import Meilisearch

     return Meilisearch


 def _import_milvus() -> Any:
-    from langchain.vectorstores.milvus import Milvus
+    from langchain_community.vectorstores.milvus import Milvus

     return Milvus


 def _import_momento_vector_index() -> Any:
-    from langchain.vectorstores.momento_vector_index import MomentoVectorIndex
+    from langchain_community.vectorstores.momento_vector_index import MomentoVectorIndex

     return MomentoVectorIndex


 def _import_mongodb_atlas() -> Any:
-    from langchain.vectorstores.mongodb_atlas import MongoDBAtlasVectorSearch
+    from langchain_community.vectorstores.mongodb_atlas import MongoDBAtlasVectorSearch

     return MongoDBAtlasVectorSearch


 def _import_myscale() -> Any:
-    from langchain.vectorstores.myscale import MyScale
+    from langchain_community.vectorstores.myscale import MyScale

     return MyScale


 def _import_myscale_settings() -> Any:
-    from langchain.vectorstores.myscale import MyScaleSettings
+    from langchain_community.vectorstores.myscale import MyScaleSettings

     return MyScaleSettings


 def _import_neo4j_vector() -> Any:
-    from langchain.vectorstores.neo4j_vector import Neo4jVector
+    from langchain_community.vectorstores.neo4j_vector import Neo4jVector

     return Neo4jVector


 def _import_opensearch_vector_search() -> Any:
-    from langchain.vectorstores.opensearch_vector_search import OpenSearchVectorSearch
+    from langchain_community.vectorstores.opensearch_vector_search import (
+        OpenSearchVectorSearch,
+    )

     return OpenSearchVectorSearch


 def _import_pgembedding() -> Any:
-    from langchain.vectorstores.pgembedding import PGEmbedding
+    from langchain_community.vectorstores.pgembedding import PGEmbedding

     return PGEmbedding


 def _import_pgvector() -> Any:
-    from langchain.vectorstores.pgvector import PGVector
+    from langchain_community.vectorstores.pgvector import PGVector

     return PGVector


 def _import_pinecone() -> Any:
-    from langchain.vectorstores.pinecone import Pinecone
+    from langchain_community.vectorstores.pinecone import Pinecone

     return Pinecone


 def _import_qdrant() -> Any:
-    from langchain.vectorstores.qdrant import Qdrant
+    from langchain_community.vectorstores.qdrant import Qdrant

     return Qdrant


 def _import_redis() -> Any:
-    from langchain.vectorstores.redis import Redis
+    from langchain_community.vectorstores.redis import Redis

     return Redis


 def _import_rocksetdb() -> Any:
-    from langchain.vectorstores.rocksetdb import Rockset
+    from langchain_community.vectorstores.rocksetdb import Rockset

     return Rockset


 def _import_vespa() -> Any:
-    from langchain.vectorstores.vespa import VespaStore
+    from langchain_community.vectorstores.vespa import VespaStore

     return VespaStore


 def _import_scann() -> Any:
-    from langchain.vectorstores.scann import ScaNN
+    from langchain_community.vectorstores.scann import ScaNN

     return ScaNN


 def _import_semadb() -> Any:
-    from langchain.vectorstores.semadb import SemaDB
+    from langchain_community.vectorstores.semadb import SemaDB

     return SemaDB


 def _import_singlestoredb() -> Any:
-    from langchain.vectorstores.singlestoredb import SingleStoreDB
+    from langchain_community.vectorstores.singlestoredb import SingleStoreDB

     return SingleStoreDB


 def _import_sklearn() -> Any:
-    from langchain.vectorstores.sklearn import SKLearnVectorStore
+    from langchain_community.vectorstores.sklearn import SKLearnVectorStore

     return SKLearnVectorStore


 def _import_sqlitevss() -> Any:
-    from langchain.vectorstores.sqlitevss import SQLiteVSS
+    from langchain_community.vectorstores.sqlitevss import SQLiteVSS

     return SQLiteVSS


 def _import_starrocks() -> Any:
-    from langchain.vectorstores.starrocks import StarRocks
+    from langchain_community.vectorstores.starrocks import StarRocks

     return StarRocks


 def _import_supabase() -> Any:
-    from langchain.vectorstores.supabase import SupabaseVectorStore
+    from langchain_community.vectorstores.supabase import SupabaseVectorStore

     return SupabaseVectorStore


 def _import_tair() -> Any:
-    from langchain.vectorstores.tair import Tair
+    from langchain_community.vectorstores.tair import Tair

     return Tair


 def _import_tencentvectordb() -> Any:
-    from langchain.vectorstores.tencentvectordb import TencentVectorDB
+    from langchain_community.vectorstores.tencentvectordb import TencentVectorDB

     return TencentVectorDB


 def _import_tiledb() -> Any:
-    from langchain.vectorstores.tiledb import TileDB
+    from langchain_community.vectorstores.tiledb import TileDB

     return TileDB


 def _import_tigris() -> Any:
-    from langchain.vectorstores.tigris import Tigris
+    from langchain_community.vectorstores.tigris import Tigris

     return Tigris


 def _import_timescalevector() -> Any:
-    from langchain.vectorstores.timescalevector import TimescaleVector
+    from langchain_community.vectorstores.timescalevector import TimescaleVector

     return TimescaleVector


 def _import_typesense() -> Any:
-    from langchain.vectorstores.typesense import Typesense
+    from langchain_community.vectorstores.typesense import Typesense

     return Typesense


 def _import_usearch() -> Any:
-    from langchain.vectorstores.usearch import USearch
+    from langchain_community.vectorstores.usearch import USearch

     return USearch


 def _import_vald() -> Any:
-    from langchain.vectorstores.vald import Vald
+    from langchain_community.vectorstores.vald import Vald

     return Vald


 def _import_vearch() -> Any:
-    from langchain.vectorstores.vearch import Vearch
+    from langchain_community.vectorstores.vearch import Vearch

     return Vearch


 def _import_vectara() -> Any:
-    from langchain.vectorstores.vectara import Vectara
+    from langchain_community.vectorstores.vectara import Vectara

     return Vectara


 def _import_weaviate() -> Any:
-    from langchain.vectorstores.weaviate import Weaviate
+    from langchain_community.vectorstores.weaviate import Weaviate

     return Weaviate


 def _import_yellowbrick() -> Any:
-    from langchain.vectorstores.yellowbrick import Yellowbrick
+    from langchain_community.vectorstores.yellowbrick import Yellowbrick

     return Yellowbrick


 def _import_zep() -> Any:
-    from langchain.vectorstores.zep import ZepVectorStore
+    from langchain_community.vectorstores.zep import ZepVectorStore

     return ZepVectorStore


 def _import_zilliz() -> Any:
-    from langchain.vectorstores.zilliz import Zilliz
+    from langchain_community.vectorstores.zilliz import Zilliz

     return Zilliz
diff --git a/libs/langchain/langchain/vectorstores/docarray/__init__.py b/libs/langchain/langchain/vectorstores/docarray/__init__.py
index be3d5bde65..b5877fec88 100644
--- a/libs/langchain/langchain/vectorstores/docarray/__init__.py
+++ b/libs/langchain/langchain/vectorstores/docarray/__init__.py
@@ -1,5 +1,5 @@
-from langchain.vectorstores.docarray.hnsw import DocArrayHnswSearch
-from langchain.vectorstores.docarray.in_memory import DocArrayInMemorySearch
+from langchain_community.vectorstores.docarray.hnsw import DocArrayHnswSearch
+from langchain_community.vectorstores.docarray.in_memory import DocArrayInMemorySearch

 __all__ = [
     "DocArrayHnswSearch",
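Note: the `_import_*` helpers above keep `langchain.vectorstores` working as a thin shim over `langchain_community` by deferring the heavy imports until a name is actually requested. A minimal sketch of that lazy-dispatch idea, using the PEP 562 module-level `__getattr__` hook (the hook itself is an illustrative reconstruction, not shown in this hunk):

    from typing import Any


    def _import_chroma() -> Any:
        # Deferred so that importing the shim module stays cheap.
        from langchain_community.vectorstores.chroma import Chroma

        return Chroma


    def __getattr__(name: str) -> Any:
        # Called only when `name` is not found as a regular module attribute.
        if name == "Chroma":
            return _import_chroma()
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")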
diff --git a/libs/langchain/tests/integration_tests/chains/test_graph_database.py b/libs/langchain/tests/integration_tests/chains/test_graph_database.py
index 247a47ed99..0543e8dc25 100644
--- a/libs/langchain/tests/integration_tests/chains/test_graph_database.py
+++ b/libs/langchain/tests/integration_tests/chains/test_graph_database.py
@@ -1,11 +1,11 @@
 """Test Graph Database Chain."""
 import os

+from langchain_community.graphs import Neo4jGraph
 from langchain_community.llms.openai import OpenAI

 from langchain.chains.graph_qa.cypher import GraphCypherQAChain
 from langchain.chains.loading import load_chain
-from langchain.graphs import Neo4jGraph


 def test_connect_neo4j() -> None:
diff --git a/libs/langchain/tests/integration_tests/chains/test_graph_database_arangodb.py b/libs/langchain/tests/integration_tests/chains/test_graph_database_arangodb.py
index 22fe43f4e0..9de49ff9a5 100644
--- a/libs/langchain/tests/integration_tests/chains/test_graph_database_arangodb.py
+++ b/libs/langchain/tests/integration_tests/chains/test_graph_database_arangodb.py
@@ -1,11 +1,11 @@
 """Test Graph Database Chain."""
 from typing import Any

+from langchain_community.graphs import ArangoGraph
+from langchain_community.graphs.arangodb_graph import get_arangodb_client
 from langchain_community.llms.openai import OpenAI

 from langchain.chains.graph_qa.arangodb import ArangoGraphQAChain
-from langchain.graphs import ArangoGraph
-from langchain.graphs.arangodb_graph import get_arangodb_client


 def populate_arangodb_database(db: Any) -> None:
diff --git a/libs/langchain/tests/integration_tests/chains/test_graph_database_sparql.py b/libs/langchain/tests/integration_tests/chains/test_graph_database_sparql.py
index f655719420..5a0fbde3cf 100644
--- a/libs/langchain/tests/integration_tests/chains/test_graph_database_sparql.py
+++ b/libs/langchain/tests/integration_tests/chains/test_graph_database_sparql.py
@@ -1,10 +1,10 @@
 """Test RDF/ SPARQL Graph Database Chain."""
 import os

+from langchain_community.graphs import RdfGraph
 from langchain_community.llms.openai import OpenAI

 from langchain.chains.graph_qa.sparql import GraphSparqlQAChain
-from langchain.graphs import RdfGraph


 def test_connect_file_rdf() -> None:
diff --git a/libs/langchain/tests/integration_tests/chains/test_retrieval_qa.py b/libs/langchain/tests/integration_tests/chains/test_retrieval_qa.py
index 23322f523a..dec373ceb1 100644
--- a/libs/langchain/tests/integration_tests/chains/test_retrieval_qa.py
+++ b/libs/langchain/tests/integration_tests/chains/test_retrieval_qa.py
@@ -1,14 +1,14 @@
 """Test RetrievalQA functionality."""
 from pathlib import Path

+from langchain_community.document_loaders import TextLoader
 from langchain_community.embeddings.openai import OpenAIEmbeddings
 from langchain_community.llms import OpenAI
+from langchain_community.vectorstores import FAISS

 from langchain.chains import RetrievalQA
 from langchain.chains.loading import load_chain
-from langchain.document_loaders import TextLoader
 from langchain.text_splitter import CharacterTextSplitter
-from langchain.vectorstores import FAISS


 def test_retrieval_qa_saving_loading(tmp_path: Path) -> None:
diff --git a/libs/langchain/tests/integration_tests/chains/test_retrieval_qa_with_sources.py b/libs/langchain/tests/integration_tests/chains/test_retrieval_qa_with_sources.py
index d9f7f4ac4a..9c3b65ca1b 100644
--- a/libs/langchain/tests/integration_tests/chains/test_retrieval_qa_with_sources.py
+++ b/libs/langchain/tests/integration_tests/chains/test_retrieval_qa_with_sources.py
@@ -1,12 +1,12 @@
 """Test RetrievalQA functionality."""
+from langchain_community.document_loaders import DirectoryLoader
 from langchain_community.embeddings.openai import OpenAIEmbeddings
 from langchain_community.llms import OpenAI
+from langchain_community.vectorstores import FAISS

 from langchain.chains import RetrievalQAWithSourcesChain
 from langchain.chains.loading import load_chain
-from langchain.document_loaders import DirectoryLoader
 from langchain.text_splitter import CharacterTextSplitter
-from langchain.vectorstores import FAISS


 def test_retrieval_qa_with_sources_chain_saving_loading(tmp_path: str) -> None:
diff --git a/libs/langchain/tests/integration_tests/memory/test_astradb.py b/libs/langchain/tests/integration_tests/memory/test_astradb.py
index c1753807b5..a8ed9e7574 100644
--- a/libs/langchain/tests/integration_tests/memory/test_astradb.py
+++ b/libs/langchain/tests/integration_tests/memory/test_astradb.py
@@ -2,12 +2,12 @@ import os
 from typing import Iterable

 import pytest
+from langchain_community.chat_message_histories.astradb import (
+    AstraDBChatMessageHistory,
+)
 from langchain_core.messages import AIMessage, HumanMessage

 from langchain.memory import ConversationBufferMemory
-from langchain.memory.chat_message_histories.astradb import (
-    AstraDBChatMessageHistory,
-)


 def _has_env_vars() -> bool:
diff --git a/libs/langchain/tests/integration_tests/memory/test_cassandra.py b/libs/langchain/tests/integration_tests/memory/test_cassandra.py
index 131eb96113..668cc87a44 100644
--- a/libs/langchain/tests/integration_tests/memory/test_cassandra.py
+++ b/libs/langchain/tests/integration_tests/memory/test_cassandra.py
@@ -2,12 +2,12 @@ import os
 import time
 from typing import Optional

+from langchain_community.chat_message_histories.cassandra import (
+    CassandraChatMessageHistory,
+)
 from langchain_core.messages import AIMessage, HumanMessage

 from langchain.memory import ConversationBufferMemory
-from langchain.memory.chat_message_histories.cassandra import (
-    CassandraChatMessageHistory,
-)


 def _chat_message_history(
diff --git a/libs/langchain/tests/integration_tests/memory/test_cosmos_db.py b/libs/langchain/tests/integration_tests/memory/test_cosmos_db.py
index ed119353af..c2c085144c 100644
--- a/libs/langchain/tests/integration_tests/memory/test_cosmos_db.py
+++ b/libs/langchain/tests/integration_tests/memory/test_cosmos_db.py
@@ -1,10 +1,10 @@
 import json
 import os

+from langchain_community.chat_message_histories import CosmosDBChatMessageHistory
 from langchain_core.messages import message_to_dict

 from langchain.memory import ConversationBufferMemory
-from langchain.memory.chat_message_histories import CosmosDBChatMessageHistory

 # Replace these with your Azure Cosmos DB endpoint and key
 endpoint = os.environ.get("COSMOS_DB_ENDPOINT", "")
diff --git a/libs/langchain/tests/integration_tests/memory/test_elasticsearch.py b/libs/langchain/tests/integration_tests/memory/test_elasticsearch.py
index 8374dd7beb..75c486e616 100644
--- a/libs/langchain/tests/integration_tests/memory/test_elasticsearch.py
+++ b/libs/langchain/tests/integration_tests/memory/test_elasticsearch.py
@@ -4,10 +4,10 @@ import uuid
 from typing import Generator, Union

 import pytest
+from langchain_community.chat_message_histories import ElasticsearchChatMessageHistory
 from langchain_core.messages import message_to_dict

 from langchain.memory import ConversationBufferMemory
-from langchain.memory.chat_message_histories import ElasticsearchChatMessageHistory

 """
 cd tests/integration_tests/memory/docker-compose
diff --git a/libs/langchain/tests/integration_tests/memory/test_firestore.py b/libs/langchain/tests/integration_tests/memory/test_firestore.py
index e7c6b398b0..5d9fabf4ae 100644
--- a/libs/langchain/tests/integration_tests/memory/test_firestore.py
+++ b/libs/langchain/tests/integration_tests/memory/test_firestore.py
@@ -1,9 +1,9 @@
 import json

+from langchain_community.chat_message_histories import FirestoreChatMessageHistory
 from langchain_core.messages import message_to_dict

 from langchain.memory import ConversationBufferMemory
-from langchain.memory.chat_message_histories import FirestoreChatMessageHistory


 def test_memory_with_message_store() -> None:
diff --git a/libs/langchain/tests/integration_tests/memory/test_momento.py b/libs/langchain/tests/integration_tests/memory/test_momento.py
index ea28c55c0b..b5d0e7f43c 100644
--- a/libs/langchain/tests/integration_tests/memory/test_momento.py
+++ b/libs/langchain/tests/integration_tests/memory/test_momento.py
@@ -10,10 +10,10 @@ from datetime import timedelta
 from typing import Iterator

 import pytest
+from langchain_community.chat_message_histories import MomentoChatMessageHistory
 from langchain_core.messages import message_to_dict

 from langchain.memory import ConversationBufferMemory
-from langchain.memory.chat_message_histories import MomentoChatMessageHistory


 def random_string() -> str:
diff --git a/libs/langchain/tests/integration_tests/memory/test_mongodb.py b/libs/langchain/tests/integration_tests/memory/test_mongodb.py
index e2a2d8421b..f99242ebee 100644
--- a/libs/langchain/tests/integration_tests/memory/test_mongodb.py
+++ b/libs/langchain/tests/integration_tests/memory/test_mongodb.py
@@ -1,10 +1,10 @@
 import json
 import os

+from langchain_community.chat_message_histories import MongoDBChatMessageHistory
 from langchain_core.messages import message_to_dict

 from langchain.memory import ConversationBufferMemory
-from langchain.memory.chat_message_histories import MongoDBChatMessageHistory

 # Replace these with your mongodb connection string
 connection_string = os.environ.get("MONGODB_CONNECTION_STRING", "")
diff --git a/libs/langchain/tests/integration_tests/memory/test_neo4j.py b/libs/langchain/tests/integration_tests/memory/test_neo4j.py
index aa2fe30ade..06a4956944 100644
--- a/libs/langchain/tests/integration_tests/memory/test_neo4j.py
+++ b/libs/langchain/tests/integration_tests/memory/test_neo4j.py
@@ -1,9 +1,9 @@
 import json

+from langchain_community.chat_message_histories import Neo4jChatMessageHistory
 from langchain_core.messages import message_to_dict

 from langchain.memory import ConversationBufferMemory
-from langchain.memory.chat_message_histories import Neo4jChatMessageHistory


 def test_memory_with_message_store() -> None:
diff --git a/libs/langchain/tests/integration_tests/memory/test_redis.py b/libs/langchain/tests/integration_tests/memory/test_redis.py
index 38ef78175c..cb00d04fa0 100644
--- a/libs/langchain/tests/integration_tests/memory/test_redis.py
+++ b/libs/langchain/tests/integration_tests/memory/test_redis.py
@@ -1,9 +1,9 @@
 import json

+from langchain_community.chat_message_histories import RedisChatMessageHistory
 from langchain_core.messages import message_to_dict

 from langchain.memory import ConversationBufferMemory
-from langchain.memory.chat_message_histories import RedisChatMessageHistory


 def test_memory_with_message_store() -> None:
diff --git a/libs/langchain/tests/integration_tests/memory/test_rockset.py b/libs/langchain/tests/integration_tests/memory/test_rockset.py
index 7d6a9d62db..ce220aea72 100644
--- a/libs/langchain/tests/integration_tests/memory/test_rockset.py
+++ b/libs/langchain/tests/integration_tests/memory/test_rockset.py
@@ -8,10 +8,10 @@ and ROCKSET_REGION environment variables set.
 import json
 import os

+from langchain_community.chat_message_histories import RocksetChatMessageHistory
 from langchain_core.messages import message_to_dict

 from langchain.memory import ConversationBufferMemory
-from langchain.memory.chat_message_histories import RocksetChatMessageHistory

 collection_name = "langchain_demo"
 session_id = "MySession"
diff --git a/libs/langchain/tests/integration_tests/memory/test_upstash_redis.py b/libs/langchain/tests/integration_tests/memory/test_upstash_redis.py
index eed8b094f9..f03aa5fca1 100644
--- a/libs/langchain/tests/integration_tests/memory/test_upstash_redis.py
+++ b/libs/langchain/tests/integration_tests/memory/test_upstash_redis.py
@@ -1,12 +1,12 @@
 import json

 import pytest
+from langchain_community.chat_message_histories.upstash_redis import (
+    UpstashRedisChatMessageHistory,
+)
 from langchain_core.messages import message_to_dict

 from langchain.memory import ConversationBufferMemory
-from langchain.memory.chat_message_histories.upstash_redis import (
-    UpstashRedisChatMessageHistory,
-)

 URL = ""
 TOKEN = ""
diff --git a/libs/langchain/tests/integration_tests/memory/test_xata.py b/libs/langchain/tests/integration_tests/memory/test_xata.py
index 74691b485d..bfcf90e98a 100644
--- a/libs/langchain/tests/integration_tests/memory/test_xata.py
+++ b/libs/langchain/tests/integration_tests/memory/test_xata.py
@@ -6,10 +6,10 @@ Before running this test, please create a Xata database.
 import json
 import os

+from langchain_community.chat_message_histories import XataChatMessageHistory
 from langchain_core.messages import message_to_dict

 from langchain.memory import ConversationBufferMemory
-from langchain.memory.chat_message_histories import XataChatMessageHistory


 class TestXata:
diff --git a/libs/langchain/tests/integration_tests/retrievers/document_compressors/test_base.py b/libs/langchain/tests/integration_tests/retrievers/document_compressors/test_base.py
index 3df24d4076..80ccd54f67 100644
--- a/libs/langchain/tests/integration_tests/retrievers/document_compressors/test_base.py
+++ b/libs/langchain/tests/integration_tests/retrievers/document_compressors/test_base.py
@@ -1,8 +1,8 @@
 """Integration test for compression pipelines."""
+from langchain_community.document_transformers import EmbeddingsRedundantFilter
 from langchain_community.embeddings import OpenAIEmbeddings
 from langchain_core.documents import Document

-from langchain.document_transformers import EmbeddingsRedundantFilter
 from langchain.retrievers.document_compressors import (
     DocumentCompressorPipeline,
     EmbeddingsFilter,
diff --git a/libs/langchain/tests/integration_tests/retrievers/document_compressors/test_embeddings_filter.py b/libs/langchain/tests/integration_tests/retrievers/document_compressors/test_embeddings_filter.py
index bf88058d6a..7f0cf64c74 100644
--- a/libs/langchain/tests/integration_tests/retrievers/document_compressors/test_embeddings_filter.py
+++ b/libs/langchain/tests/integration_tests/retrievers/document_compressors/test_embeddings_filter.py
@@ -1,11 +1,11 @@
 """Integration test for embedding-based relevant doc filtering."""
 import numpy as np
+from langchain_community.document_transformers.embeddings_redundant_filter import (
+    _DocumentWithState,
+)
 from langchain_community.embeddings import OpenAIEmbeddings
 from langchain_core.documents import Document

-from langchain.document_transformers.embeddings_redundant_filter import (
-    _DocumentWithState,
-)
 from langchain.retrievers.document_compressors import EmbeddingsFilter
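Note: the compression tests above exercise the post-migration import split (stores and transformers from langchain_community, retrievers from langchain). A minimal sketch of that usage, assuming an OpenAI API key and the faiss package are available (texts and threshold are placeholders, not from the patch):

    from langchain_community.embeddings import OpenAIEmbeddings
    from langchain_community.vectorstores import FAISS

    from langchain.retrievers import ContextualCompressionRetriever
    from langchain.retrievers.document_compressors import EmbeddingsFilter

    # Build a small retriever, then wrap it so low-similarity docs are dropped.
    retriever = FAISS.from_texts(["foo", "bar", "world"], OpenAIEmbeddings()).as_retriever()
    compressor = EmbeddingsFilter(embeddings=OpenAIEmbeddings(), similarity_threshold=0.75)
    compression_retriever = ContextualCompressionRetriever(
        base_compressor=compressor, base_retriever=retriever
    )
    docs = compression_retriever.get_relevant_documents("foo")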
diff --git a/libs/langchain/tests/integration_tests/retrievers/test_contextual_compression.py b/libs/langchain/tests/integration_tests/retrievers/test_contextual_compression.py
index 769a63f541..54c8a170c3 100644
--- a/libs/langchain/tests/integration_tests/retrievers/test_contextual_compression.py
+++ b/libs/langchain/tests/integration_tests/retrievers/test_contextual_compression.py
@@ -1,8 +1,8 @@
 from langchain_community.embeddings import OpenAIEmbeddings
+from langchain_community.vectorstores import FAISS

 from langchain.retrievers.contextual_compression import ContextualCompressionRetriever
 from langchain.retrievers.document_compressors import EmbeddingsFilter
-from langchain.vectorstores import FAISS


 def test_contextual_compression_retriever_get_relevant_docs() -> None:
diff --git a/libs/langchain/tests/integration_tests/retrievers/test_merger_retriever.py b/libs/langchain/tests/integration_tests/retrievers/test_merger_retriever.py
index 70a18a74a3..897931e65b 100644
--- a/libs/langchain/tests/integration_tests/retrievers/test_merger_retriever.py
+++ b/libs/langchain/tests/integration_tests/retrievers/test_merger_retriever.py
@@ -1,7 +1,7 @@
 from langchain_community.embeddings import OpenAIEmbeddings
+from langchain_community.vectorstores import Chroma

 from langchain.retrievers.merger_retriever import MergerRetriever
-from langchain.vectorstores import Chroma


 def test_merger_retriever_get_relevant_docs() -> None:
diff --git a/libs/langchain/tests/integration_tests/test_document_transformers.py b/libs/langchain/tests/integration_tests/test_document_transformers.py
index 3b95a7a221..25188f2ae7 100644
--- a/libs/langchain/tests/integration_tests/test_document_transformers.py
+++ b/libs/langchain/tests/integration_tests/test_document_transformers.py
@@ -1,12 +1,11 @@
 """Integration test for embedding-based redundant doc filtering."""
-from langchain_community.embeddings import OpenAIEmbeddings
-from langchain_core.documents import Document
-
-from langchain.document_transformers.embeddings_redundant_filter import (
+from langchain_community.document_transformers.embeddings_redundant_filter import (
     EmbeddingsClusteringFilter,
     EmbeddingsRedundantFilter,
     _DocumentWithState,
 )
+from langchain_community.embeddings import OpenAIEmbeddings
+from langchain_core.documents import Document


 def test_embeddings_redundant_filter() -> None:
diff --git a/libs/langchain/tests/integration_tests/test_long_context_reorder.py b/libs/langchain/tests/integration_tests/test_long_context_reorder.py
index 98a0230250..4649a150a5 100644
--- a/libs/langchain/tests/integration_tests/test_long_context_reorder.py
+++ b/libs/langchain/tests/integration_tests/test_long_context_reorder.py
@@ -1,8 +1,9 @@
 """Integration test for doc reordering."""
+from langchain_community.document_transformers.long_context_reorder import (
+    LongContextReorder,
+)
 from langchain_community.embeddings import OpenAIEmbeddings
-
-from langchain.document_transformers.long_context_reorder import LongContextReorder
-from langchain.vectorstores import Chroma
+from langchain_community.vectorstores import Chroma


 def test_long_context_reorder() -> None:
diff --git a/libs/langchain/tests/integration_tests/test_nuclia_transformer.py b/libs/langchain/tests/integration_tests/test_nuclia_transformer.py
index 902cf4d2ec..b80169209d 100644
--- a/libs/langchain/tests/integration_tests/test_nuclia_transformer.py
+++ b/libs/langchain/tests/integration_tests/test_nuclia_transformer.py
@@ -3,9 +3,11 @@ import json
 from typing import Any
 from unittest import mock

+from langchain_community.document_transformers.nuclia_text_transform import (
+    NucliaTextTransformer,
+)
 from langchain_core.documents import Document

-from langchain.document_transformers.nuclia_text_transform import NucliaTextTransformer
 from langchain.tools.nuclia.tool import NucliaUnderstandingAPI
diff --git a/libs/langchain/tests/integration_tests/test_pdf_pagesplitter.py b/libs/langchain/tests/integration_tests/test_pdf_pagesplitter.py
index 4cb6d73e0e..9fe48af380 100644
--- a/libs/langchain/tests/integration_tests/test_pdf_pagesplitter.py
+++ b/libs/langchain/tests/integration_tests/test_pdf_pagesplitter.py
@@ -1,10 +1,9 @@
 """Test splitting with page numbers included."""
 import os

+from langchain_community.document_loaders import PyPDFLoader
 from langchain_community.embeddings.openai import OpenAIEmbeddings
-
-from langchain.document_loaders import PyPDFLoader
-from langchain.vectorstores import FAISS
+from langchain_community.vectorstores import FAISS


 def test_pdf_pagesplitter() -> None:
diff --git a/libs/langchain/tests/unit_tests/chains/test_graph_qa.py b/libs/langchain/tests/unit_tests/chains/test_graph_qa.py
index d7655409a4..32dfbfe18b 100644
--- a/libs/langchain/tests/unit_tests/chains/test_graph_qa.py
+++ b/libs/langchain/tests/unit_tests/chains/test_graph_qa.py
@@ -1,6 +1,8 @@
 from typing import Any, Dict, List

 import pandas as pd
+from langchain_community.graphs.graph_document import GraphDocument
+from langchain_community.graphs.graph_store import GraphStore
 from langchain_core.prompts import PromptTemplate

 from langchain.chains.graph_qa.cypher import (
@@ -10,8 +12,6 @@ from langchain.chains.graph_qa.cypher import (
 )
 from langchain.chains.graph_qa.cypher_utils import CypherQueryCorrector, Schema
 from langchain.chains.graph_qa.prompts import CYPHER_GENERATION_PROMPT, CYPHER_QA_PROMPT
-from langchain.graphs.graph_document import GraphDocument
-from langchain.graphs.graph_store import GraphStore
 from langchain.memory import ConversationBufferMemory, ReadOnlySharedMemory
 from tests.unit_tests.llms.fake_llm import FakeLLM
diff --git a/libs/langchain/tests/unit_tests/document_loaders/test_base.py b/libs/langchain/tests/unit_tests/document_loaders/test_base.py
index cc3c049f9f..682978eaba 100644
--- a/libs/langchain/tests/unit_tests/document_loaders/test_base.py
+++ b/libs/langchain/tests/unit_tests/document_loaders/test_base.py
@@ -1,11 +1,10 @@
 """Test Base Schema of documents."""
 from typing import Iterator

+from langchain_community.document_loaders.base import BaseBlobParser
+from langchain_community.document_loaders.blob_loaders import Blob
 from langchain_core.documents import Document

-from langchain.document_loaders.base import BaseBlobParser
-from langchain.document_loaders.blob_loaders import Blob
-

 def test_base_blob_parser() -> None:
     """Verify that the eager method is hooked up to the lazy method by default."""
diff --git a/libs/langchain/tests/unit_tests/indexes/test_indexing.py b/libs/langchain/tests/unit_tests/indexes/test_indexing.py
index 884654f38e..352800e04d 100644
--- a/libs/langchain/tests/unit_tests/indexes/test_indexing.py
+++ b/libs/langchain/tests/unit_tests/indexes/test_indexing.py
@@ -12,14 +12,14 @@ from typing import (
 )
 from unittest.mock import patch

+import langchain_community.vectorstores
 import pytest
 import pytest_asyncio
+from langchain_community.document_loaders.base import BaseLoader
 from langchain_core.documents import Document
 from langchain_core.embeddings import Embeddings
 from langchain_core.vectorstores import VST, VectorStore

-import langchain.vectorstores
-from langchain.document_loaders.base import BaseLoader
 from langchain.indexes import aindex, index
 from langchain.indexes._api import _abatch
 from langchain.indexes._sql_record_manager import SQLRecordManager
@@ -1207,9 +1207,9 @@ def test_compatible_vectorstore_documentation() -> None:
     # Check all vector store classes for compatibility
     compatible = set()
-    for class_name in langchain.vectorstores.__all__:
+    for class_name in langchain_community.vectorstores.__all__:
         # Get the definition of the class
-        cls = getattr(langchain.vectorstores, class_name)
+        cls = getattr(langchain_community.vectorstores, class_name)

         # If the class corresponds to a vectorstore, check its compatibility
         if issubclass(cls, VectorStore):
@@ -1242,6 +1242,7 @@ def test_compatible_vectorstore_documentation() -> None:
         "ScaNN",
         "SemaDB",
         "SupabaseVectorStore",
+        "SurrealDBStore",
         "TileDB",
         "TimescaleVector",
         "Vald",
diff --git a/libs/langchain/tests/unit_tests/load/test_serializable.py b/libs/langchain/tests/unit_tests/load/test_serializable.py
index 9e356ae744..a8da39f8f7 100644
--- a/libs/langchain/tests/unit_tests/load/test_serializable.py
+++ b/libs/langchain/tests/unit_tests/load/test_serializable.py
@@ -28,7 +28,7 @@ def import_all_modules(package_name: str) -> dict:
             "langchain.chains.llm_bash",
             "langchain.chains.llm_symbolic_math",
             "langchain.tools.python",
-            "langchain.vectorstores._pgvector_data_models",
+            "langchain_community.vectorstores._pgvector_data_models",
         ):
             importlib.import_module(module_name)
             new_classes = import_all_modules(module_name)
diff --git a/libs/langchain/tests/unit_tests/retrievers/self_query/test_redis.py b/libs/langchain/tests/unit_tests/retrievers/self_query/test_redis.py
index 62c225fe02..490e247f93 100644
--- a/libs/langchain/tests/unit_tests/retrievers/self_query/test_redis.py
+++ b/libs/langchain/tests/unit_tests/retrievers/self_query/test_redis.py
@@ -1,28 +1,28 @@
 from typing import Dict, Tuple

 import pytest
-
-from langchain.chains.query_constructor.ir import (
-    Comparator,
-    Comparison,
-    Operation,
-    Operator,
-    StructuredQuery,
-)
-from langchain.retrievers.self_query.redis import RedisTranslator
-from langchain.vectorstores.redis.filters import (
+from langchain_community.vectorstores.redis.filters import (
     RedisFilterExpression,
     RedisNum,
     RedisTag,
     RedisText,
 )
-from langchain.vectorstores.redis.schema import (
+from langchain_community.vectorstores.redis.schema import (
     NumericFieldSchema,
     RedisModel,
     TagFieldSchema,
     TextFieldSchema,
 )
+from langchain.chains.query_constructor.ir import (
+    Comparator,
+    Comparison,
+    Operation,
+    Operator,
+    StructuredQuery,
+)
+from langchain.retrievers.self_query.redis import RedisTranslator
+

 @pytest.fixture
 def translator() -> RedisTranslator:
diff --git a/libs/langchain/tests/unit_tests/test_dependencies.py b/libs/langchain/tests/unit_tests/test_dependencies.py
index 590faeb3d3..a887187156 100644
--- a/libs/langchain/tests/unit_tests/test_dependencies.py
+++ b/libs/langchain/tests/unit_tests/test_dependencies.py
@@ -96,18 +96,18 @@ def test_test_group_dependencies(poetry_conf: Mapping[str, Any]) -> None:
 def test_imports() -> None:
     """Test that you can import all top level things okay."""
     from langchain_community.chat_models import ChatOpenAI  # noqa: F401
+    from langchain_community.document_loaders import BSHTMLLoader  # noqa: F401
     from langchain_community.embeddings import OpenAIEmbeddings  # noqa: F401
     from langchain_community.llms import OpenAI  # noqa: F401
+    from langchain_community.vectorstores import FAISS  # noqa: F401
     from langchain_core.prompts import BasePromptTemplate  # noqa: F401

     from langchain.agents import OpenAIFunctionsAgent  # noqa: F401
     from langchain.callbacks import OpenAICallbackHandler  # noqa: F401
     from langchain.chains import LLMChain  # noqa: F401
-    from langchain.document_loaders import BSHTMLLoader  # noqa: F401
     from langchain.retrievers import VespaRetriever  # noqa: F401
     from langchain.tools import DuckDuckGoSearchResults  # noqa: F401
     from langchain.utilities import (
         SearchApiAPIWrapper,  # noqa: F401
         SerpAPIWrapper,  # noqa: F401
     )
-    from langchain.vectorstores import FAISS  # noqa: F401
diff --git a/libs/langchain/tests/unit_tests/test_document_transformers.py b/libs/langchain/tests/unit_tests/test_document_transformers.py
index f589055999..b995de0a17 100644
--- a/libs/langchain/tests/unit_tests/test_document_transformers.py
+++ b/libs/langchain/tests/unit_tests/test_document_transformers.py
@@ -1,7 +1,8 @@
 """Unit tests for document transformers."""
-from langchain.document_transformers.embeddings_redundant_filter import (
+from langchain_community.document_transformers.embeddings_redundant_filter import (
     _filter_similar_embeddings,
 )
+
 from langchain.utils.math import cosine_similarity
diff --git a/templates/cassandra-entomology-rag/cassandra_entomology_rag/__init__.py b/templates/cassandra-entomology-rag/cassandra_entomology_rag/__init__.py
index 9751ff3705..a61aae2587 100644
--- a/templates/cassandra-entomology-rag/cassandra_entomology_rag/__init__.py
+++ b/templates/cassandra-entomology-rag/cassandra_entomology_rag/__init__.py
@@ -2,9 +2,9 @@ import os

 import cassio
 from langchain.prompts import ChatPromptTemplate
-from langchain.vectorstores import Cassandra
 from langchain_community.chat_models import ChatOpenAI
 from langchain_community.embeddings import OpenAIEmbeddings
+from langchain_community.vectorstores import Cassandra
 from langchain_core.output_parsers import StrOutputParser
 from langchain_core.runnables import RunnablePassthrough
diff --git a/templates/cohere-librarian/cohere_librarian/blurb_matcher.py b/templates/cohere-librarian/cohere_librarian/blurb_matcher.py
index 55388aa86a..ac48e683f0 100644
--- a/templates/cohere-librarian/cohere_librarian/blurb_matcher.py
+++ b/templates/cohere-librarian/cohere_librarian/blurb_matcher.py
@@ -2,8 +2,8 @@ import csv

 from langchain.chains.question_answering import load_qa_chain
 from langchain.prompts import PromptTemplate
-from langchain.vectorstores import Chroma
 from langchain_community.embeddings import CohereEmbeddings
+from langchain_community.vectorstores import Chroma

 from .chat import chat
diff --git a/templates/csv-agent/csv_agent/agent.py b/templates/csv-agent/csv_agent/agent.py
index a3c1765a09..98baf35558 100644
--- a/templates/csv-agent/csv_agent/agent.py
+++ b/templates/csv-agent/csv_agent/agent.py
@@ -4,9 +4,9 @@ import pandas as pd
 from langchain.agents import AgentExecutor, OpenAIFunctionsAgent
 from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder
 from langchain.tools.retriever import create_retriever_tool
-from langchain.vectorstores import FAISS
 from langchain_community.chat_models import ChatOpenAI
 from langchain_community.embeddings import OpenAIEmbeddings
+from langchain_community.vectorstores import FAISS
 from langchain_core.pydantic_v1 import BaseModel, Field
 from langchain_experimental.tools import PythonAstREPLTool
diff --git a/templates/csv-agent/ingest.py b/templates/csv-agent/ingest.py
index 8cc99a9a8a..44a4bc8ade 100644
--- a/templates/csv-agent/ingest.py
+++ b/templates/csv-agent/ingest.py
@@ -1,6 +1,6 @@
-from langchain.document_loaders import CSVLoader
 from langchain.indexes import VectorstoreIndexCreator
-from langchain.vectorstores import FAISS
+from langchain_community.document_loaders import CSVLoader
+from langchain_community.vectorstores import FAISS

 loader = CSVLoader("/Users/harrisonchase/Downloads/titanic.csv")
diff --git a/templates/extraction-anthropic-functions/extraction_anthropic_functions.ipynb b/templates/extraction-anthropic-functions/extraction_anthropic_functions.ipynb
index f118beb14e..a5a5400471 100644
--- a/templates/extraction-anthropic-functions/extraction_anthropic_functions.ipynb
+++ b/templates/extraction-anthropic-functions/extraction_anthropic_functions.ipynb
@@ -17,7 +17,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "from langchain.document_loaders import WebBaseLoader\n",
+    "from langchain_community.document_loaders import WebBaseLoader\n",
     "\n",
     "loader = WebBaseLoader(\"https://lilianweng.github.io/posts/2023-06-23-agent/\")\n",
     "text = loader.load()"
diff --git a/templates/extraction-openai-functions/extraction_openai_functions.ipynb b/templates/extraction-openai-functions/extraction_openai_functions.ipynb
index 8544f41366..98ee676a92 100644
--- a/templates/extraction-openai-functions/extraction_openai_functions.ipynb
+++ b/templates/extraction-openai-functions/extraction_openai_functions.ipynb
@@ -17,7 +17,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "from langchain.document_loaders import WebBaseLoader\n",
+    "from langchain_community.document_loaders import WebBaseLoader\n",
     "\n",
     "loader = WebBaseLoader(\"https://lilianweng.github.io/posts/2023-06-23-agent/\")\n",
     "text = loader.load()"
diff --git a/templates/hyde/hyde/chain.py b/templates/hyde/hyde/chain.py
index 82123b8468..1f0caa9cd8 100644
--- a/templates/hyde/hyde/chain.py
+++ b/templates/hyde/hyde/chain.py
@@ -1,7 +1,7 @@
 from langchain.prompts import ChatPromptTemplate
-from langchain.vectorstores import Chroma
 from langchain_community.chat_models import ChatOpenAI
 from langchain_community.embeddings import OpenAIEmbeddings
+from langchain_community.vectorstores import Chroma
 from langchain_core.output_parsers import StrOutputParser
 from langchain_core.pydantic_v1 import BaseModel
 from langchain_core.runnables import RunnableParallel
@@ -12,7 +12,7 @@ from hyde.prompts import hyde_prompt
 """
 # Load
-from langchain.document_loaders import WebBaseLoader
+from langchain_community.document_loaders import WebBaseLoader

 loader = WebBaseLoader("https://lilianweng.github.io/posts/2023-06-23-agent/")
 data = loader.load()
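Note: the template changes that follow repeat the same mechanical move. For context, a post-migration ingest script in the style of these templates might look like this minimal sketch (the URL mirrors the hyde docstring above; chunk sizes are placeholders, not taken from the patch):

    from langchain.text_splitter import RecursiveCharacterTextSplitter
    from langchain_community.document_loaders import WebBaseLoader
    from langchain_community.embeddings import OpenAIEmbeddings
    from langchain_community.vectorstores import Chroma

    # Load, split, and index a page; only the import paths differ from before.
    data = WebBaseLoader("https://lilianweng.github.io/posts/2023-06-23-agent/").load()
    splits = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0).split_documents(data)
    retriever = Chroma.from_documents(documents=splits, embedding=OpenAIEmbeddings()).as_retriever()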
44745743e2..2241a7868a 100644 --- a/templates/mongo-parent-document-retrieval/mongo_parent_document_retrieval/chain.py +++ b/templates/mongo-parent-document-retrieval/mongo_parent_document_retrieval/chain.py @@ -1,9 +1,9 @@ import os from langchain.prompts import ChatPromptTemplate -from langchain.vectorstores import MongoDBAtlasVectorSearch from langchain_community.chat_models import ChatOpenAI from langchain_community.embeddings import OpenAIEmbeddings +from langchain_community.vectorstores import MongoDBAtlasVectorSearch from langchain_core.documents import Document from langchain_core.output_parsers import StrOutputParser from langchain_core.pydantic_v1 import BaseModel diff --git a/templates/neo4j-advanced-rag/ingest.py b/templates/neo4j-advanced-rag/ingest.py index 83e1ece324..db9bb8b471 100644 --- a/templates/neo4j-advanced-rag/ingest.py +++ b/templates/neo4j-advanced-rag/ingest.py @@ -2,12 +2,12 @@ from pathlib import Path from typing import List from langchain.chains.openai_functions import create_structured_output_chain -from langchain.document_loaders import TextLoader -from langchain.graphs import Neo4jGraph from langchain.prompts import ChatPromptTemplate from langchain.text_splitter import TokenTextSplitter from langchain_community.chat_models import ChatOpenAI +from langchain_community.document_loaders import TextLoader from langchain_community.embeddings.openai import OpenAIEmbeddings +from langchain_community.graphs import Neo4jGraph from langchain_core.pydantic_v1 import BaseModel, Field from neo4j.exceptions import ClientError diff --git a/templates/neo4j-advanced-rag/neo4j_advanced_rag/retrievers.py b/templates/neo4j-advanced-rag/neo4j_advanced_rag/retrievers.py index eccb33e159..aa7d9f4648 100644 --- a/templates/neo4j-advanced-rag/neo4j_advanced_rag/retrievers.py +++ b/templates/neo4j-advanced-rag/neo4j_advanced_rag/retrievers.py @@ -1,5 +1,5 @@ -from langchain.vectorstores import Neo4jVector from langchain_community.embeddings import OpenAIEmbeddings +from langchain_community.vectorstores import Neo4jVector # Typical RAG retriever diff --git a/templates/neo4j-cypher-ft/ingest.py b/templates/neo4j-cypher-ft/ingest.py index 5668604fa2..99e90dfe41 100644 --- a/templates/neo4j-cypher-ft/ingest.py +++ b/templates/neo4j-cypher-ft/ingest.py @@ -1,4 +1,4 @@ -from langchain.graphs import Neo4jGraph +from langchain_community.graphs import Neo4jGraph graph = Neo4jGraph() diff --git a/templates/neo4j-cypher-ft/neo4j_cypher_ft/chain.py b/templates/neo4j-cypher-ft/neo4j_cypher_ft/chain.py index 49c6ed278c..396d51ec86 100644 --- a/templates/neo4j-cypher-ft/neo4j_cypher_ft/chain.py +++ b/templates/neo4j-cypher-ft/neo4j_cypher_ft/chain.py @@ -2,9 +2,9 @@ from typing import List, Optional from langchain.chains.graph_qa.cypher_utils import CypherQueryCorrector, Schema from langchain.chains.openai_functions import create_structured_output_chain -from langchain.graphs import Neo4jGraph from langchain.prompts import ChatPromptTemplate from langchain_community.chat_models import ChatOpenAI +from langchain_community.graphs import Neo4jGraph from langchain_core.output_parsers import StrOutputParser from langchain_core.pydantic_v1 import BaseModel, Field from langchain_core.runnables import RunnablePassthrough diff --git a/templates/neo4j-cypher-memory/ingest.py b/templates/neo4j-cypher-memory/ingest.py index 7fb01dc413..bbd4c6f57f 100644 --- a/templates/neo4j-cypher-memory/ingest.py +++ b/templates/neo4j-cypher-memory/ingest.py @@ -1,4 +1,4 @@ -from langchain.graphs import Neo4jGraph +from 
langchain_community.graphs import Neo4jGraph graph = Neo4jGraph() diff --git a/templates/neo4j-cypher-memory/neo4j_cypher_memory/chain.py b/templates/neo4j-cypher-memory/neo4j_cypher_memory/chain.py index dbfb54b78c..ec63766dea 100644 --- a/templates/neo4j-cypher-memory/neo4j_cypher_memory/chain.py +++ b/templates/neo4j-cypher-memory/neo4j_cypher_memory/chain.py @@ -1,10 +1,10 @@ from typing import Any, Dict, List from langchain.chains.graph_qa.cypher_utils import CypherQueryCorrector, Schema -from langchain.graphs import Neo4jGraph from langchain.memory import ChatMessageHistory from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder from langchain_community.chat_models import ChatOpenAI +from langchain_community.graphs import Neo4jGraph from langchain_core.output_parsers import StrOutputParser from langchain_core.pydantic_v1 import BaseModel from langchain_core.runnables import RunnablePassthrough diff --git a/templates/neo4j-cypher/ingest.py b/templates/neo4j-cypher/ingest.py index 61adaa5135..3b097b9462 100644 --- a/templates/neo4j-cypher/ingest.py +++ b/templates/neo4j-cypher/ingest.py @@ -1,4 +1,4 @@ -from langchain.graphs import Neo4jGraph +from langchain_community.graphs import Neo4jGraph graph = Neo4jGraph() diff --git a/templates/neo4j-cypher/neo4j_cypher/chain.py b/templates/neo4j-cypher/neo4j_cypher/chain.py index a08942320a..18ec68b08b 100644 --- a/templates/neo4j-cypher/neo4j_cypher/chain.py +++ b/templates/neo4j-cypher/neo4j_cypher/chain.py @@ -1,7 +1,7 @@ from langchain.chains.graph_qa.cypher_utils import CypherQueryCorrector, Schema -from langchain.graphs import Neo4jGraph from langchain.prompts import ChatPromptTemplate from langchain_community.chat_models import ChatOpenAI +from langchain_community.graphs import Neo4jGraph from langchain_core.output_parsers import StrOutputParser from langchain_core.pydantic_v1 import BaseModel from langchain_core.runnables import RunnablePassthrough diff --git a/templates/neo4j-generation/neo4j_generation/chain.py b/templates/neo4j-generation/neo4j_generation/chain.py index 8c6459343a..21a6116e49 100644 --- a/templates/neo4j-generation/neo4j_generation/chain.py +++ b/templates/neo4j-generation/neo4j_generation/chain.py @@ -3,11 +3,11 @@ from typing import List, Optional from langchain.chains.openai_functions import ( create_structured_output_chain, ) -from langchain.graphs import Neo4jGraph -from langchain.graphs.graph_document import GraphDocument from langchain.prompts import ChatPromptTemplate from langchain.schema import Document from langchain_community.chat_models import ChatOpenAI +from langchain_community.graphs import Neo4jGraph +from langchain_community.graphs.graph_document import GraphDocument from neo4j_generation.utils import ( KnowledgeGraph, diff --git a/templates/neo4j-generation/neo4j_generation/utils.py b/templates/neo4j-generation/neo4j_generation/utils.py index d888db37db..a226940081 100644 --- a/templates/neo4j-generation/neo4j_generation/utils.py +++ b/templates/neo4j-generation/neo4j_generation/utils.py @@ -1,7 +1,7 @@ from typing import List, Optional -from langchain.graphs.graph_document import Node as BaseNode -from langchain.graphs.graph_document import Relationship as BaseRelationship +from langchain_community.graphs.graph_document import Node as BaseNode +from langchain_community.graphs.graph_document import Relationship as BaseRelationship from langchain_core.pydantic_v1 import BaseModel, Field diff --git a/templates/neo4j-parent/ingest.py b/templates/neo4j-parent/ingest.py index 
8a5d82f4df..1ac3d6bb42 100644 --- a/templates/neo4j-parent/ingest.py +++ b/templates/neo4j-parent/ingest.py @@ -1,10 +1,10 @@ from pathlib import Path -from langchain.document_loaders import TextLoader -from langchain.graphs import Neo4jGraph from langchain.text_splitter import TokenTextSplitter -from langchain.vectorstores import Neo4jVector +from langchain_community.document_loaders import TextLoader from langchain_community.embeddings.openai import OpenAIEmbeddings +from langchain_community.graphs import Neo4jGraph +from langchain_community.vectorstores import Neo4jVector txt_path = Path(__file__).parent / "dune.txt" diff --git a/templates/neo4j-parent/neo4j_parent/chain.py b/templates/neo4j-parent/neo4j_parent/chain.py index 736b64dea2..c4507863c9 100644 --- a/templates/neo4j-parent/neo4j_parent/chain.py +++ b/templates/neo4j-parent/neo4j_parent/chain.py @@ -1,7 +1,7 @@ from langchain.prompts import ChatPromptTemplate -from langchain.vectorstores import Neo4jVector from langchain_community.chat_models import ChatOpenAI from langchain_community.embeddings import OpenAIEmbeddings +from langchain_community.vectorstores import Neo4jVector from langchain_core.output_parsers import StrOutputParser from langchain_core.pydantic_v1 import BaseModel from langchain_core.runnables import RunnableParallel, RunnablePassthrough diff --git a/templates/neo4j-vector-memory/ingest.py b/templates/neo4j-vector-memory/ingest.py index 3c24b66c67..5a8be54a56 100644 --- a/templates/neo4j-vector-memory/ingest.py +++ b/templates/neo4j-vector-memory/ingest.py @@ -1,9 +1,9 @@ from pathlib import Path -from langchain.document_loaders import TextLoader from langchain.text_splitter import TokenTextSplitter -from langchain.vectorstores import Neo4jVector +from langchain_community.document_loaders import TextLoader from langchain_community.embeddings.openai import OpenAIEmbeddings +from langchain_community.vectorstores import Neo4jVector txt_path = Path(__file__).parent / "dune.txt" diff --git a/templates/neo4j-vector-memory/neo4j_vector_memory/chain.py b/templates/neo4j-vector-memory/neo4j_vector_memory/chain.py index fd0a629930..3977124e40 100644 --- a/templates/neo4j-vector-memory/neo4j_vector_memory/chain.py +++ b/templates/neo4j-vector-memory/neo4j_vector_memory/chain.py @@ -1,9 +1,9 @@ from operator import itemgetter from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder, PromptTemplate -from langchain.vectorstores import Neo4jVector from langchain_community.chat_models import ChatOpenAI from langchain_community.embeddings import OpenAIEmbeddings +from langchain_community.vectorstores import Neo4jVector from langchain_core.output_parsers import StrOutputParser from langchain_core.pydantic_v1 import BaseModel from langchain_core.runnables import RunnablePassthrough diff --git a/templates/neo4j-vector-memory/neo4j_vector_memory/history.py b/templates/neo4j-vector-memory/neo4j_vector_memory/history.py index f9d17a015d..9fccc10978 100644 --- a/templates/neo4j-vector-memory/neo4j_vector_memory/history.py +++ b/templates/neo4j-vector-memory/neo4j_vector_memory/history.py @@ -1,8 +1,8 @@ from typing import Any, Dict, List, Union -from langchain.graphs import Neo4jGraph from langchain.memory import ChatMessageHistory from langchain.schema import AIMessage, HumanMessage +from langchain_community.graphs import Neo4jGraph graph = Neo4jGraph() diff --git a/templates/openai-functions-tool-retrieval-agent/openai_functions_tool_retrieval_agent/agent.py 
diff --git a/templates/openai-functions-tool-retrieval-agent/openai_functions_tool_retrieval_agent/agent.py b/templates/openai-functions-tool-retrieval-agent/openai_functions_tool_retrieval_agent/agent.py
index ef2aa3a8d0..090a832635 100644
--- a/templates/openai-functions-tool-retrieval-agent/openai_functions_tool_retrieval_agent/agent.py
+++ b/templates/openai-functions-tool-retrieval-agent/openai_functions_tool_retrieval_agent/agent.py
@@ -14,9 +14,9 @@ from langchain.schema import Document
 from langchain.tools.render import format_tool_to_openai_function
 from langchain.tools.tavily_search import TavilySearchResults
 from langchain.utilities.tavily_search import TavilySearchAPIWrapper
-from langchain.vectorstores import FAISS
 from langchain_community.chat_models import ChatOpenAI
 from langchain_community.embeddings import OpenAIEmbeddings
+from langchain_community.vectorstores import FAISS
 from langchain_core.messages import AIMessage, HumanMessage
 from langchain_core.pydantic_v1 import BaseModel, Field
 from langchain_core.runnables import Runnable, RunnableLambda, RunnableParallel
diff --git a/templates/rag-astradb/astradb_entomology_rag/__init__.py b/templates/rag-astradb/astradb_entomology_rag/__init__.py
index ff99f159de..df10e4ac2e 100644
--- a/templates/rag-astradb/astradb_entomology_rag/__init__.py
+++ b/templates/rag-astradb/astradb_entomology_rag/__init__.py
@@ -1,9 +1,9 @@
 import os

 from langchain.prompts import ChatPromptTemplate
-from langchain.vectorstores import AstraDB
 from langchain_community.chat_models import ChatOpenAI
 from langchain_community.embeddings import OpenAIEmbeddings
+from langchain_community.vectorstores import AstraDB
 from langchain_core.output_parsers import StrOutputParser
 from langchain_core.runnables import RunnablePassthrough
diff --git a/templates/rag-aws-bedrock/rag_aws_bedrock/chain.py b/templates/rag-aws-bedrock/rag_aws_bedrock/chain.py
index 09dd778479..82f63f796a 100644
--- a/templates/rag-aws-bedrock/rag_aws_bedrock/chain.py
+++ b/templates/rag-aws-bedrock/rag_aws_bedrock/chain.py
@@ -1,9 +1,9 @@
 import os

 from langchain.prompts import ChatPromptTemplate
-from langchain.vectorstores import FAISS
 from langchain_community.embeddings import BedrockEmbeddings
 from langchain_community.llms.bedrock import Bedrock
+from langchain_community.vectorstores import FAISS
 from langchain_core.output_parsers import StrOutputParser
 from langchain_core.pydantic_v1 import BaseModel
 from langchain_core.runnables import RunnableParallel, RunnablePassthrough
diff --git a/templates/rag-chroma-multi-modal-multi-vector/ingest.py b/templates/rag-chroma-multi-modal-multi-vector/ingest.py
index 5700676e14..9447ca478e 100644
--- a/templates/rag-chroma-multi-modal-multi-vector/ingest.py
+++ b/templates/rag-chroma-multi-modal-multi-vector/ingest.py
@@ -8,9 +8,9 @@ from pathlib import Path
 import pypdfium2 as pdfium
 from langchain.retrievers.multi_vector import MultiVectorRetriever
 from langchain.storage import LocalFileStore, UpstashRedisByteStore
-from langchain.vectorstores import Chroma
 from langchain_community.chat_models import ChatOpenAI
 from langchain_community.embeddings import OpenAIEmbeddings
+from langchain_community.vectorstores import Chroma
 from langchain_core.documents import Document
 from langchain_core.messages import HumanMessage
 from PIL import Image
diff --git a/templates/rag-chroma-multi-modal-multi-vector/rag_chroma_multi_modal_multi_vector/chain.py b/templates/rag-chroma-multi-modal-multi-vector/rag_chroma_multi_modal_multi_vector/chain.py
index 6432c78903..7650b337dc 100644
--- a/templates/rag-chroma-multi-modal-multi-vector/rag_chroma_multi_modal_multi_vector/chain.py
+++ b/templates/rag-chroma-multi-modal-multi-vector/rag_chroma_multi_modal_multi_vector/chain.py
@@ -6,9 +6,9 @@ from pathlib import Path
 from langchain.pydantic_v1 import BaseModel
 from langchain.retrievers.multi_vector import MultiVectorRetriever
 from langchain.storage import LocalFileStore, UpstashRedisByteStore
-from langchain.vectorstores import Chroma
 from langchain_community.chat_models import ChatOpenAI
 from langchain_community.embeddings import OpenAIEmbeddings
+from langchain_community.vectorstores import Chroma
 from langchain_core.documents import Document
 from langchain_core.messages import HumanMessage
 from langchain_core.output_parsers import StrOutputParser
diff --git a/templates/rag-chroma-multi-modal/ingest.py b/templates/rag-chroma-multi-modal/ingest.py
index 443c387cac..67c5f070c5 100644
--- a/templates/rag-chroma-multi-modal/ingest.py
+++ b/templates/rag-chroma-multi-modal/ingest.py
@@ -2,7 +2,7 @@ import os
 from pathlib import Path

 import pypdfium2 as pdfium
-from langchain.vectorstores import Chroma
+from langchain_community.vectorstores import Chroma
 from langchain_experimental.open_clip import OpenCLIPEmbeddings
diff --git a/templates/rag-chroma-multi-modal/rag_chroma_multi_modal/chain.py b/templates/rag-chroma-multi-modal/rag_chroma_multi_modal/chain.py
index a45b3e1fe0..4773494af0 100644
--- a/templates/rag-chroma-multi-modal/rag_chroma_multi_modal/chain.py
+++ b/templates/rag-chroma-multi-modal/rag_chroma_multi_modal/chain.py
@@ -2,8 +2,8 @@ import base64
 import io
 from pathlib import Path

-from langchain.vectorstores import Chroma
 from langchain_community.chat_models import ChatOpenAI
+from langchain_community.vectorstores import Chroma
 from langchain_core.documents import Document
 from langchain_core.messages import HumanMessage
 from langchain_core.output_parsers import StrOutputParser
diff --git a/templates/rag-chroma-private/rag_chroma_private/chain.py b/templates/rag-chroma-private/rag_chroma_private/chain.py
index 874a40efe1..b917060f82 100644
--- a/templates/rag-chroma-private/rag_chroma_private/chain.py
+++ b/templates/rag-chroma-private/rag_chroma_private/chain.py
@@ -1,10 +1,10 @@
 # Load
-from langchain.document_loaders import WebBaseLoader
 from langchain.prompts import ChatPromptTemplate
 from langchain.text_splitter import RecursiveCharacterTextSplitter
-from langchain.vectorstores import Chroma
 from langchain_community.chat_models import ChatOllama
+from langchain_community.document_loaders import WebBaseLoader
 from langchain_community.embeddings import GPT4AllEmbeddings
+from langchain_community.vectorstores import Chroma
 from langchain_core.output_parsers import StrOutputParser
 from langchain_core.pydantic_v1 import BaseModel
 from langchain_core.runnables import RunnableParallel, RunnablePassthrough
diff --git a/templates/rag-chroma/rag_chroma/chain.py b/templates/rag-chroma/rag_chroma/chain.py
index 799a1e0079..88ade854f9 100644
--- a/templates/rag-chroma/rag_chroma/chain.py
+++ b/templates/rag-chroma/rag_chroma/chain.py
@@ -1,7 +1,7 @@
 from langchain.prompts import ChatPromptTemplate
-from langchain.vectorstores import Chroma
 from langchain_community.chat_models import ChatOpenAI
 from langchain_community.embeddings import OpenAIEmbeddings
+from langchain_community.vectorstores import Chroma
 from langchain_core.output_parsers import StrOutputParser
 from langchain_core.pydantic_v1 import BaseModel
 from langchain_core.runnables import RunnableParallel, RunnablePassthrough
@@ -10,7 +10,7 @@ from langchain_core.runnables import RunnableParallel, RunnablePassthrough

 """
 # Load
-from langchain.document_loaders import WebBaseLoader
+from langchain_community.document_loaders import WebBaseLoader
 loader = WebBaseLoader("https://lilianweng.github.io/posts/2023-06-23-agent/")
 data = loader.load()
diff --git a/templates/rag-codellama-fireworks/rag_codellama_fireworks/chain.py b/templates/rag-codellama-fireworks/rag_codellama_fireworks/chain.py
index 65b442ed16..b578157b73 100644
--- a/templates/rag-codellama-fireworks/rag_codellama_fireworks/chain.py
+++ b/templates/rag-codellama-fireworks/rag_codellama_fireworks/chain.py
@@ -1,13 +1,13 @@
 import os

 from git import Repo
-from langchain.document_loaders.generic import GenericLoader
-from langchain.document_loaders.parsers import LanguageParser
 from langchain.prompts import ChatPromptTemplate
 from langchain.text_splitter import Language, RecursiveCharacterTextSplitter
-from langchain.vectorstores import Chroma
+from langchain_community.document_loaders.generic import GenericLoader
+from langchain_community.document_loaders.parsers import LanguageParser
 from langchain_community.embeddings import GPT4AllEmbeddings
 from langchain_community.llms.fireworks import Fireworks
+from langchain_community.vectorstores import Chroma
 from langchain_core.output_parsers import StrOutputParser
 from langchain_core.pydantic_v1 import BaseModel
 from langchain_core.runnables import RunnableParallel, RunnablePassthrough
diff --git a/templates/rag-conversation-zep/ingest.py b/templates/rag-conversation-zep/ingest.py
index d9caaa5228..86809a7a08 100644
--- a/templates/rag-conversation-zep/ingest.py
+++ b/templates/rag-conversation-zep/ingest.py
@@ -1,10 +1,10 @@
 # Ingest Documents into a Zep Collection
 import os

-from langchain.document_loaders import WebBaseLoader
 from langchain.text_splitter import RecursiveCharacterTextSplitter
-from langchain.vectorstores.zep import CollectionConfig, ZepVectorStore
+from langchain_community.document_loaders import WebBaseLoader
 from langchain_community.embeddings import FakeEmbeddings
+from langchain_community.vectorstores.zep import CollectionConfig, ZepVectorStore

 ZEP_API_URL = os.environ.get("ZEP_API_URL", "http://localhost:8000")
 ZEP_API_KEY = os.environ.get("ZEP_API_KEY", None)
diff --git a/templates/rag-conversation-zep/rag_conversation_zep/chain.py b/templates/rag-conversation-zep/rag_conversation_zep/chain.py
index 860e5641ef..c303fc3658 100644
--- a/templates/rag-conversation-zep/rag_conversation_zep/chain.py
+++ b/templates/rag-conversation-zep/rag_conversation_zep/chain.py
@@ -5,8 +5,8 @@ from typing import List, Tuple
 from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder
 from langchain.prompts.prompt import PromptTemplate
 from langchain.schema import AIMessage, HumanMessage, format_document
-from langchain.vectorstores.zep import CollectionConfig, ZepVectorStore
 from langchain_community.chat_models import ChatOpenAI
+from langchain_community.vectorstores.zep import CollectionConfig, ZepVectorStore
 from langchain_core.documents import Document
 from langchain_core.messages import BaseMessage
 from langchain_core.output_parsers import StrOutputParser
diff --git a/templates/rag-conversation/rag_conversation/chain.py b/templates/rag-conversation/rag_conversation/chain.py
index 1e84ccc243..b1005aa806 100644
--- a/templates/rag-conversation/rag_conversation/chain.py
+++ b/templates/rag-conversation/rag_conversation/chain.py
@@ -5,9 +5,9 @@ from typing import List, Tuple
 from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder
 from langchain.prompts.prompt import PromptTemplate
 from langchain.schema import AIMessage, HumanMessage, format_document
-from langchain.vectorstores import Pinecone
 from langchain_community.chat_models import ChatOpenAI
 from langchain_community.embeddings import OpenAIEmbeddings
+from langchain_community.vectorstores import Pinecone
 from langchain_core.output_parsers import StrOutputParser
 from langchain_core.pydantic_v1 import BaseModel, Field
 from langchain_core.runnables import (
@@ -27,7 +27,7 @@ PINECONE_INDEX_NAME = os.environ.get("PINECONE_INDEX", "langchain-test")

 ### Ingest code - you may need to run this the first time
 # # Load
-# from langchain.document_loaders import WebBaseLoader
+# from langchain_community.document_loaders import WebBaseLoader
 # loader = WebBaseLoader("https://lilianweng.github.io/posts/2023-06-23-agent/")
 # data = loader.load()
diff --git a/templates/rag-elasticsearch/ingest.py b/templates/rag-elasticsearch/ingest.py
index a799d1a97e..e0393c78a4 100644
--- a/templates/rag-elasticsearch/ingest.py
+++ b/templates/rag-elasticsearch/ingest.py
@@ -1,9 +1,9 @@
 import os

-from langchain.document_loaders import JSONLoader
 from langchain.text_splitter import RecursiveCharacterTextSplitter
-from langchain.vectorstores.elasticsearch import ElasticsearchStore
+from langchain_community.document_loaders import JSONLoader
 from langchain_community.embeddings import HuggingFaceEmbeddings
+from langchain_community.vectorstores.elasticsearch import ElasticsearchStore

 ELASTIC_CLOUD_ID = os.getenv("ELASTIC_CLOUD_ID")
 ELASTIC_USERNAME = os.getenv("ELASTIC_USERNAME", "elastic")
diff --git a/templates/rag-elasticsearch/rag_elasticsearch/chain.py b/templates/rag-elasticsearch/rag_elasticsearch/chain.py
index b40b5ac819..a28060e0e6 100644
--- a/templates/rag-elasticsearch/rag_elasticsearch/chain.py
+++ b/templates/rag-elasticsearch/rag_elasticsearch/chain.py
@@ -2,9 +2,9 @@ from operator import itemgetter
 from typing import List, Optional, Tuple

 from langchain.schema import BaseMessage, format_document
-from langchain.vectorstores.elasticsearch import ElasticsearchStore
 from langchain_community.chat_models import ChatOpenAI
 from langchain_community.embeddings import HuggingFaceEmbeddings
+from langchain_community.vectorstores.elasticsearch import ElasticsearchStore
 from langchain_core.output_parsers import StrOutputParser
 from langchain_core.pydantic_v1 import BaseModel, Field
 from langchain_core.runnables import RunnableParallel, RunnablePassthrough
diff --git a/templates/rag-fusion/ingest.py b/templates/rag-fusion/ingest.py
index f93faee19c..071a35a846 100644
--- a/templates/rag-fusion/ingest.py
+++ b/templates/rag-fusion/ingest.py
@@ -1,5 +1,5 @@
-from langchain.vectorstores import Pinecone
 from langchain_community.embeddings import OpenAIEmbeddings
+from langchain_community.vectorstores import Pinecone

 all_documents = {
     "doc1": "Climate change and economic impact.",
diff --git a/templates/rag-fusion/rag_fusion/chain.py b/templates/rag-fusion/rag_fusion/chain.py
index 1ca7bbd4c9..1f19a0c150 100644
--- a/templates/rag-fusion/rag_fusion/chain.py
+++ b/templates/rag-fusion/rag_fusion/chain.py
@@ -1,8 +1,8 @@
 from langchain import hub
 from langchain.load import dumps, loads
-from langchain.vectorstores import Pinecone
 from langchain_community.chat_models import ChatOpenAI
 from langchain_community.embeddings import OpenAIEmbeddings
+from langchain_community.vectorstores import Pinecone
 from langchain_core.output_parsers import StrOutputParser
 from langchain_core.pydantic_v1 import BaseModel
diff --git a/templates/rag-gemini-multi-modal/ingest.py b/templates/rag-gemini-multi-modal/ingest.py
index 443c387cac..67c5f070c5 100644
--- a/templates/rag-gemini-multi-modal/ingest.py
+++ b/templates/rag-gemini-multi-modal/ingest.py
@@ -2,7 +2,7 @@ import os
 from pathlib import Path

 import pypdfium2 as pdfium
-from langchain.vectorstores import Chroma
+from langchain_community.vectorstores import Chroma
 from langchain_experimental.open_clip import OpenCLIPEmbeddings
diff --git a/templates/rag-gemini-multi-modal/rag_gemini_multi_modal/chain.py b/templates/rag-gemini-multi-modal/rag_gemini_multi_modal/chain.py
index c4057a913e..51991d066a 100644
--- a/templates/rag-gemini-multi-modal/rag_gemini_multi_modal/chain.py
+++ b/templates/rag-gemini-multi-modal/rag_gemini_multi_modal/chain.py
@@ -2,7 +2,7 @@ import base64
 import io
 from pathlib import Path

-from langchain.vectorstores import Chroma
+from langchain_community.vectorstores import Chroma
 from langchain_core.documents import Document
 from langchain_core.messages import HumanMessage
 from langchain_core.output_parsers import StrOutputParser
diff --git a/templates/rag-gpt-crawler/rag_gpt_crawler/chain.py b/templates/rag-gpt-crawler/rag_gpt_crawler/chain.py
index 663a404103..4040342d81 100644
--- a/templates/rag-gpt-crawler/rag_gpt_crawler/chain.py
+++ b/templates/rag-gpt-crawler/rag_gpt_crawler/chain.py
@@ -4,9 +4,9 @@ from pathlib import Path
 from langchain.prompts import ChatPromptTemplate
 from langchain.schema import Document
 from langchain.text_splitter import RecursiveCharacterTextSplitter
-from langchain.vectorstores import Chroma
 from langchain_community.chat_models import ChatOpenAI
 from langchain_community.embeddings import OpenAIEmbeddings
+from langchain_community.vectorstores import Chroma
 from langchain_core.output_parsers import StrOutputParser
 from langchain_core.pydantic_v1 import BaseModel
 from langchain_core.runnables import RunnableParallel, RunnablePassthrough
diff --git a/templates/rag-matching-engine/rag_matching_engine/chain.py b/templates/rag-matching-engine/rag_matching_engine/chain.py
index 83ed057920..0b9a3aadf7 100644
--- a/templates/rag-matching-engine/rag_matching_engine/chain.py
+++ b/templates/rag-matching-engine/rag_matching_engine/chain.py
@@ -1,9 +1,9 @@
 import os

 from langchain.prompts import PromptTemplate
-from langchain.vectorstores import MatchingEngine
 from langchain_community.embeddings import VertexAIEmbeddings
 from langchain_community.llms import VertexAI
+from langchain_community.vectorstores import MatchingEngine
 from langchain_core.output_parsers import StrOutputParser
 from langchain_core.pydantic_v1 import BaseModel
 from langchain_core.runnables import RunnableParallel, RunnablePassthrough
diff --git a/templates/rag-momento-vector-index/rag_momento_vector_index/chain.py b/templates/rag-momento-vector-index/rag_momento_vector_index/chain.py
index 8d99565456..1cda0e6220 100644
--- a/templates/rag-momento-vector-index/rag_momento_vector_index/chain.py
+++ b/templates/rag-momento-vector-index/rag_momento_vector_index/chain.py
@@ -1,9 +1,9 @@
 import os

 from langchain.prompts import ChatPromptTemplate
-from langchain.vectorstores import MomentoVectorIndex
 from langchain_community.chat_models import ChatOpenAI
 from langchain_community.embeddings import OpenAIEmbeddings
+from langchain_community.vectorstores import MomentoVectorIndex
 from langchain_core.output_parsers import StrOutputParser
 from langchain_core.pydantic_v1 import BaseModel
 from langchain_core.runnables import RunnablePassthrough
diff --git a/templates/rag-momento-vector-index/rag_momento_vector_index/ingest.py b/templates/rag-momento-vector-index/rag_momento_vector_index/ingest.py
index f15d856493..ca5eb0dc41 100644
--- a/templates/rag-momento-vector-index/rag_momento_vector_index/ingest.py
+++ b/templates/rag-momento-vector-index/rag_momento_vector_index/ingest.py
@@ -1,10 +1,10 @@
 ### Ingest code - you may need to run this the first time
 import os

-from langchain.document_loaders import WebBaseLoader
 from langchain.text_splitter import RecursiveCharacterTextSplitter
-from langchain.vectorstores import MomentoVectorIndex
+from langchain_community.document_loaders import WebBaseLoader
 from langchain_community.embeddings import OpenAIEmbeddings
+from langchain_community.vectorstores import MomentoVectorIndex
 from momento import (
     CredentialProvider,
     PreviewVectorIndexClient,
diff --git a/templates/rag-mongo/ingest.py b/templates/rag-mongo/ingest.py
index 07ce77ebe5..e396d0d187 100644
--- a/templates/rag-mongo/ingest.py
+++ b/templates/rag-mongo/ingest.py
@@ -1,9 +1,9 @@
 import os

-from langchain.document_loaders import PyPDFLoader
 from langchain.text_splitter import RecursiveCharacterTextSplitter
-from langchain.vectorstores import MongoDBAtlasVectorSearch
+from langchain_community.document_loaders import PyPDFLoader
 from langchain_community.embeddings import OpenAIEmbeddings
+from langchain_community.vectorstores import MongoDBAtlasVectorSearch
 from pymongo import MongoClient

 MONGO_URI = os.environ["MONGO_URI"]
diff --git a/templates/rag-mongo/rag_mongo/chain.py b/templates/rag-mongo/rag_mongo/chain.py
index e6eafb8546..cdbd8be3b0 100644
--- a/templates/rag-mongo/rag_mongo/chain.py
+++ b/templates/rag-mongo/rag_mongo/chain.py
@@ -1,11 +1,11 @@
 import os

-from langchain.document_loaders import PyPDFLoader
 from langchain.prompts import ChatPromptTemplate
 from langchain.text_splitter import RecursiveCharacterTextSplitter
-from langchain.vectorstores import MongoDBAtlasVectorSearch
 from langchain_community.chat_models import ChatOpenAI
+from langchain_community.document_loaders import PyPDFLoader
 from langchain_community.embeddings import OpenAIEmbeddings
+from langchain_community.vectorstores import MongoDBAtlasVectorSearch
 from langchain_core.output_parsers import StrOutputParser
 from langchain_core.pydantic_v1 import BaseModel
 from langchain_core.runnables import (
diff --git a/templates/rag-multi-modal-local/ingest.py b/templates/rag-multi-modal-local/ingest.py
index 34e215ad81..9aad0cf656 100644
--- a/templates/rag-multi-modal-local/ingest.py
+++ b/templates/rag-multi-modal-local/ingest.py
@@ -1,7 +1,7 @@
 import os
 from pathlib import Path

-from langchain.vectorstores import Chroma
+from langchain_community.vectorstores import Chroma
 from langchain_experimental.open_clip import OpenCLIPEmbeddings

 # Load images
diff --git a/templates/rag-multi-modal-local/rag_multi_modal_local/chain.py b/templates/rag-multi-modal-local/rag_multi_modal_local/chain.py
index d84370b6aa..ce09cee074 100644
--- a/templates/rag-multi-modal-local/rag_multi_modal_local/chain.py
+++ b/templates/rag-multi-modal-local/rag_multi_modal_local/chain.py
@@ -2,8 +2,8 @@ import base64
 import io
 from pathlib import Path

-from langchain.vectorstores import Chroma
 from langchain_community.chat_models import ChatOllama
+from langchain_community.vectorstores import Chroma
 from langchain_core.documents import Document
 from langchain_core.messages import HumanMessage
 from langchain_core.output_parsers import StrOutputParser
diff --git a/templates/rag-multi-modal-mv-local/ingest.py b/templates/rag-multi-modal-mv-local/ingest.py
index 896e9ae949..4e3f711bd2 100644
--- a/templates/rag-multi-modal-mv-local/ingest.py
+++ b/templates/rag-multi-modal-mv-local/ingest.py
@@ -7,9 +7,9 @@ from pathlib import Path
 from langchain.retrievers.multi_vector import MultiVectorRetriever
 from langchain.storage import LocalFileStore
-from langchain.vectorstores import Chroma
 from langchain_community.chat_models import ChatOllama
 from langchain_community.embeddings import OllamaEmbeddings
+from langchain_community.vectorstores import Chroma
 from langchain_core.documents import Document
 from langchain_core.messages import HumanMessage
 from PIL import Image
diff --git a/templates/rag-multi-modal-mv-local/rag_multi_modal_mv_local/chain.py b/templates/rag-multi-modal-mv-local/rag_multi_modal_mv_local/chain.py
index 079b385add..c91f7598e8 100644
--- a/templates/rag-multi-modal-mv-local/rag_multi_modal_mv_local/chain.py
+++ b/templates/rag-multi-modal-mv-local/rag_multi_modal_mv_local/chain.py
@@ -5,9 +5,9 @@ from pathlib import Path
 from langchain.pydantic_v1 import BaseModel
 from langchain.retrievers.multi_vector import MultiVectorRetriever
 from langchain.storage import LocalFileStore
-from langchain.vectorstores import Chroma
 from langchain_community.chat_models import ChatOllama
 from langchain_community.embeddings import OllamaEmbeddings
+from langchain_community.vectorstores import Chroma
 from langchain_core.documents import Document
 from langchain_core.messages import HumanMessage
 from langchain_core.output_parsers import StrOutputParser
diff --git a/templates/rag-ollama-multi-query/rag_ollama_multi_query/chain.py b/templates/rag-ollama-multi-query/rag_ollama_multi_query/chain.py
index 1e1fb1c3ac..7846e9d923 100644
--- a/templates/rag-ollama-multi-query/rag_ollama_multi_query/chain.py
+++ b/templates/rag-ollama-multi-query/rag_ollama_multi_query/chain.py
@@ -1,14 +1,14 @@
 from typing import List

 from langchain.chains import LLMChain
-from langchain.document_loaders import WebBaseLoader
 from langchain.output_parsers import PydanticOutputParser
 from langchain.prompts import ChatPromptTemplate, PromptTemplate
 from langchain.retrievers.multi_query import MultiQueryRetriever
 from langchain.text_splitter import RecursiveCharacterTextSplitter
-from langchain.vectorstores import Chroma
 from langchain_community.chat_models import ChatOllama, ChatOpenAI
+from langchain_community.document_loaders import WebBaseLoader
 from langchain_community.embeddings import OpenAIEmbeddings
+from langchain_community.vectorstores import Chroma
 from langchain_core.output_parsers import StrOutputParser
 from langchain_core.pydantic_v1 import BaseModel, Field
 from langchain_core.runnables import RunnableParallel, RunnablePassthrough
diff --git a/templates/rag-opensearch/rag_opensearch/chain.py b/templates/rag-opensearch/rag_opensearch/chain.py
index 39b7c2f104..59ff70643f 100644
--- a/templates/rag-opensearch/rag_opensearch/chain.py
+++ b/templates/rag-opensearch/rag_opensearch/chain.py
@@ -1,9 +1,11 @@
 import os

 from langchain.prompts import ChatPromptTemplate
-from langchain.vectorstores.opensearch_vector_search import OpenSearchVectorSearch
 from langchain_community.chat_models import ChatOpenAI
 from langchain_community.embeddings import OpenAIEmbeddings
+from langchain_community.vectorstores.opensearch_vector_search import (
+    OpenSearchVectorSearch,
+)
 from langchain_core.output_parsers import StrOutputParser
 from langchain_core.pydantic_v1 import BaseModel
 from langchain_core.runnables import RunnableParallel, RunnablePassthrough
diff --git a/templates/rag-pinecone-multi-query/rag_pinecone_multi_query/chain.py b/templates/rag-pinecone-multi-query/rag_pinecone_multi_query/chain.py
index 2d13373ab7..32cf3ba0e5 100644
--- a/templates/rag-pinecone-multi-query/rag_pinecone_multi_query/chain.py
+++ b/templates/rag-pinecone-multi-query/rag_pinecone_multi_query/chain.py
@@ -2,9 +2,9 @@ import os

 from langchain.prompts import ChatPromptTemplate
 from langchain.retrievers.multi_query import MultiQueryRetriever
-from langchain.vectorstores import Pinecone
 from langchain_community.chat_models import ChatOpenAI
 from langchain_community.embeddings import OpenAIEmbeddings
+from langchain_community.vectorstores import Pinecone
 from langchain_core.output_parsers import StrOutputParser
 from langchain_core.pydantic_v1 import BaseModel
 from langchain_core.runnables import RunnableParallel, RunnablePassthrough
@@ -19,7 +19,7 @@ PINECONE_INDEX_NAME = os.environ.get("PINECONE_INDEX", "langchain-test")

 ### Ingest code - you may need to run this the first time
 # Load
-# from langchain.document_loaders import WebBaseLoader
+# from langchain_community.document_loaders import WebBaseLoader
 # loader = WebBaseLoader("https://lilianweng.github.io/posts/2023-06-23-agent/")
 # data = loader.load()
diff --git a/templates/rag-pinecone-rerank/rag_pinecone_rerank/chain.py b/templates/rag-pinecone-rerank/rag_pinecone_rerank/chain.py
index 57b11a98b7..9e9a9aa322 100644
--- a/templates/rag-pinecone-rerank/rag_pinecone_rerank/chain.py
+++ b/templates/rag-pinecone-rerank/rag_pinecone_rerank/chain.py
@@ -3,9 +3,9 @@ import os
 from langchain.prompts import ChatPromptTemplate
 from langchain.retrievers import ContextualCompressionRetriever
 from langchain.retrievers.document_compressors import CohereRerank
-from langchain.vectorstores import Pinecone
 from langchain_community.chat_models import ChatOpenAI
 from langchain_community.embeddings import OpenAIEmbeddings
+from langchain_community.vectorstores import Pinecone
 from langchain_core.output_parsers import StrOutputParser
 from langchain_core.pydantic_v1 import BaseModel
 from langchain_core.runnables import RunnableParallel, RunnablePassthrough
@@ -20,7 +20,7 @@ PINECONE_INDEX_NAME = os.environ.get("PINECONE_INDEX", "langchain-test")

 ### Ingest code - you may need to run this the first time
 # # Load
-# from langchain.document_loaders import WebBaseLoader
+# from langchain_community.document_loaders import WebBaseLoader
 # loader = WebBaseLoader("https://lilianweng.github.io/posts/2023-06-23-agent/")
 # data = loader.load()
diff --git a/templates/rag-pinecone/rag_pinecone/chain.py b/templates/rag-pinecone/rag_pinecone/chain.py
index c78c454238..8755eded48 100644
--- a/templates/rag-pinecone/rag_pinecone/chain.py
+++ b/templates/rag-pinecone/rag_pinecone/chain.py
@@ -1,9 +1,9 @@
 import os

 from langchain.prompts import ChatPromptTemplate
-from langchain.vectorstores import Pinecone
 from langchain_community.chat_models import ChatOpenAI
 from langchain_community.embeddings import OpenAIEmbeddings
+from langchain_community.vectorstores import Pinecone
 from langchain_core.output_parsers import StrOutputParser
 from langchain_core.pydantic_v1 import BaseModel
 from langchain_core.runnables import RunnableParallel, RunnablePassthrough
@@ -18,7 +18,7 @@ PINECONE_INDEX_NAME = os.environ.get("PINECONE_INDEX", "langchain-test")

 ### Ingest code - you may need to run this the first time
 # Load
-# from langchain.document_loaders import WebBaseLoader
+# from langchain_community.document_loaders import WebBaseLoader
 # loader = WebBaseLoader("https://lilianweng.github.io/posts/2023-06-23-agent/")
 # data = loader.load()
diff --git a/templates/rag-redis/ingest.py b/templates/rag-redis/ingest.py
index 3d65a17c15..89ffa40019 100644
--- a/templates/rag-redis/ingest.py
+++ b/templates/rag-redis/ingest.py
@@ -1,9 +1,9 @@
 import os

-from langchain.document_loaders import UnstructuredFileLoader
 from langchain.text_splitter import RecursiveCharacterTextSplitter
-from langchain.vectorstores import Redis
+from langchain_community.document_loaders import UnstructuredFileLoader
 from langchain_community.embeddings import HuggingFaceEmbeddings
+from langchain_community.vectorstores import Redis

 from rag_redis.config import EMBED_MODEL, INDEX_NAME, INDEX_SCHEMA, REDIS_URL
diff --git a/templates/rag-redis/rag_redis/chain.py b/templates/rag-redis/rag_redis/chain.py
index 13219c2180..fcbd772259 100644
--- a/templates/rag-redis/rag_redis/chain.py
+++ b/templates/rag-redis/rag_redis/chain.py
@@ -1,7 +1,7 @@
 from langchain.prompts import ChatPromptTemplate
-from langchain.vectorstores import Redis
 from langchain_community.chat_models import ChatOpenAI
 from langchain_community.embeddings import HuggingFaceEmbeddings
+from langchain_community.vectorstores import Redis
 from langchain_core.output_parsers import StrOutputParser
 from langchain_core.pydantic_v1 import BaseModel
 from langchain_core.runnables import RunnableParallel, RunnablePassthrough
diff --git a/templates/rag-self-query/ingest.py b/templates/rag-self-query/ingest.py
index 02d0ed63e6..a4be4e970c 100644
--- a/templates/rag-self-query/ingest.py
+++ b/templates/rag-self-query/ingest.py
@@ -1,9 +1,9 @@
 import os

-from langchain.document_loaders import JSONLoader
 from langchain.text_splitter import RecursiveCharacterTextSplitter
-from langchain.vectorstores import ElasticsearchStore
+from langchain_community.document_loaders import JSONLoader
 from langchain_community.embeddings import OpenAIEmbeddings
+from langchain_community.vectorstores import ElasticsearchStore

 ELASTIC_CLOUD_ID = os.getenv("ELASTIC_CLOUD_ID")
 ELASTIC_USERNAME = os.getenv("ELASTIC_USERNAME", "elastic")
diff --git a/templates/rag-self-query/rag_self_query/chain.py b/templates/rag-self-query/rag_self_query/chain.py
index f1dd70f6ee..340ec88886 100644
--- a/templates/rag-self-query/rag_self_query/chain.py
+++ b/templates/rag-self-query/rag_self_query/chain.py
@@ -4,9 +4,9 @@ from typing import List, Tuple

 from langchain.retrievers import SelfQueryRetriever
 from langchain.schema import format_document
-from langchain.vectorstores.elasticsearch import ElasticsearchStore
 from langchain_community.chat_models import ChatOpenAI
 from langchain_community.embeddings import OpenAIEmbeddings
+from langchain_community.vectorstores.elasticsearch import ElasticsearchStore
 from langchain_core.output_parsers import StrOutputParser
 from langchain_core.pydantic_v1 import BaseModel, Field
 from langchain_core.runnables import RunnableParallel, RunnablePassthrough
diff --git a/templates/rag-semi-structured/rag_semi_structured/chain.py b/templates/rag-semi-structured/rag_semi_structured/chain.py
index 3467d536bb..34770b012e 100644
--- a/templates/rag-semi-structured/rag_semi_structured/chain.py
+++ b/templates/rag-semi-structured/rag_semi_structured/chain.py
@@ -4,9 +4,9 @@ import uuid

 from langchain.prompts import ChatPromptTemplate
 from langchain.retrievers.multi_vector import MultiVectorRetriever
 from langchain.storage import InMemoryStore
-from langchain.vectorstores import Chroma
 from langchain_community.chat_models import ChatOpenAI
 from langchain_community.embeddings import OpenAIEmbeddings
+from langchain_community.vectorstores import Chroma
 from langchain_core.documents import Document
 from langchain_core.output_parsers import StrOutputParser
 from langchain_core.pydantic_v1 import BaseModel
diff --git a/templates/rag-singlestoredb/rag_singlestoredb/chain.py b/templates/rag-singlestoredb/rag_singlestoredb/chain.py
index 5f691aaee9..ae0d07a160 100644
--- a/templates/rag-singlestoredb/rag_singlestoredb/chain.py
+++ b/templates/rag-singlestoredb/rag_singlestoredb/chain.py
@@ -1,9 +1,9 @@
 import os

 from langchain.prompts import ChatPromptTemplate
-from langchain.vectorstores import SingleStoreDB
 from langchain_community.chat_models import ChatOpenAI
 from langchain_community.embeddings import OpenAIEmbeddings
+from langchain_community.vectorstores import SingleStoreDB
 from langchain_core.output_parsers import StrOutputParser
 from langchain_core.pydantic_v1 import BaseModel
 from langchain_core.runnables import RunnableParallel, RunnablePassthrough
@@ -15,7 +15,7 @@ if os.environ.get("SINGLESTOREDB_URL", None) is None:

 ## Ingest code - you may need to run this the first time
 # # Load
-# from langchain.document_loaders import WebBaseLoader
+# from langchain_community.document_loaders import WebBaseLoader
 # loader = WebBaseLoader("https://lilianweng.github.io/posts/2023-06-23-agent/")
 # data = loader.load()
diff --git a/templates/rag-supabase/rag_supabase/chain.py b/templates/rag-supabase/rag_supabase/chain.py
index 20de7d165f..e8bd91cd99 100644
--- a/templates/rag-supabase/rag_supabase/chain.py
+++ b/templates/rag-supabase/rag_supabase/chain.py
@@ -1,9 +1,9 @@
 import os

 from langchain.prompts import ChatPromptTemplate
-from langchain.vectorstores.supabase import SupabaseVectorStore
 from langchain_community.chat_models import ChatOpenAI
 from langchain_community.embeddings import OpenAIEmbeddings
+from langchain_community.vectorstores.supabase import SupabaseVectorStore
 from langchain_core.output_parsers import StrOutputParser
 from langchain_core.pydantic_v1 import BaseModel
 from langchain_core.runnables import RunnableParallel, RunnablePassthrough
diff --git a/templates/rag-timescale-conversation/rag_timescale_conversation/chain.py b/templates/rag-timescale-conversation/rag_timescale_conversation/chain.py
index 531d7e1ce4..4696e990ff 100644
--- a/templates/rag-timescale-conversation/rag_timescale_conversation/chain.py
+++ b/templates/rag-timescale-conversation/rag_timescale_conversation/chain.py
@@ -7,9 +7,9 @@ from dotenv import find_dotenv, load_dotenv
 from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder
 from langchain.prompts.prompt import PromptTemplate
 from langchain.schema import AIMessage, HumanMessage, format_document
-from langchain.vectorstores.timescalevector import TimescaleVector
 from langchain_community.chat_models import ChatOpenAI
 from langchain_community.embeddings import OpenAIEmbeddings
+from langchain_community.vectorstores.timescalevector import TimescaleVector
 from langchain_core.output_parsers import StrOutputParser
 from langchain_core.pydantic_v1 import BaseModel, Field
 from langchain_core.runnables import (
diff --git a/templates/rag-timescale-conversation/rag_timescale_conversation/load_sample_dataset.py b/templates/rag-timescale-conversation/rag_timescale_conversation/load_sample_dataset.py
index cee2e04ffd..b540a72cdc 100644
--- a/templates/rag-timescale-conversation/rag_timescale_conversation/load_sample_dataset.py
+++ b/templates/rag-timescale-conversation/rag_timescale_conversation/load_sample_dataset.py
@@ -3,10 +3,10 @@ import tempfile
 from datetime import datetime, timedelta

 import requests
-from langchain.document_loaders import JSONLoader
 from langchain.text_splitter import CharacterTextSplitter
-from langchain.vectorstores.timescalevector import TimescaleVector
+from langchain_community.document_loaders import JSONLoader
 from langchain_community.embeddings.openai import OpenAIEmbeddings
+from langchain_community.vectorstores.timescalevector import TimescaleVector
 from timescale_vector import client
diff --git a/templates/rag-timescale-hybrid-search-time/rag_timescale_hybrid_search_time/chain.py b/templates/rag-timescale-hybrid-search-time/rag_timescale_hybrid_search_time/chain.py
index d14206ad5b..0891c35c78 100644
--- a/templates/rag-timescale-hybrid-search-time/rag_timescale_hybrid_search_time/chain.py
+++ b/templates/rag-timescale-hybrid-search-time/rag_timescale_hybrid_search_time/chain.py
@@ -6,10 +6,10 @@ from datetime import timedelta
 from langchain.chains.query_constructor.base import AttributeInfo
 from langchain.prompts import ChatPromptTemplate
 from langchain.retrievers.self_query.base import SelfQueryRetriever
-from langchain.vectorstores.timescalevector import TimescaleVector
 from langchain_community.chat_models import ChatOpenAI
 from langchain_community.embeddings.openai import OpenAIEmbeddings
 from langchain_community.llms import OpenAI
+from langchain_community.vectorstores.timescalevector import TimescaleVector
 from langchain_core.output_parsers import StrOutputParser
 from langchain_core.pydantic_v1 import BaseModel
 from langchain_core.runnables import RunnableParallel, RunnablePassthrough
diff --git a/templates/rag-timescale-hybrid-search-time/rag_timescale_hybrid_search_time/load_sample_dataset.py b/templates/rag-timescale-hybrid-search-time/rag_timescale_hybrid_search_time/load_sample_dataset.py
index cee2e04ffd..b540a72cdc 100644
--- a/templates/rag-timescale-hybrid-search-time/rag_timescale_hybrid_search_time/load_sample_dataset.py
+++ b/templates/rag-timescale-hybrid-search-time/rag_timescale_hybrid_search_time/load_sample_dataset.py
@@ -3,10 +3,10 @@ import tempfile
 from datetime import datetime, timedelta

 import requests
-from langchain.document_loaders import JSONLoader
 from langchain.text_splitter import CharacterTextSplitter
-from langchain.vectorstores.timescalevector import TimescaleVector
+from langchain_community.document_loaders import JSONLoader
 from langchain_community.embeddings.openai import OpenAIEmbeddings
+from langchain_community.vectorstores.timescalevector import TimescaleVector
 from timescale_vector import client
diff --git a/templates/rag-vectara-multiquery/rag_vectara_multiquery/chain.py b/templates/rag-vectara-multiquery/rag_vectara_multiquery/chain.py
index eb863b6064..9b769e9bd0 100644
--- a/templates/rag-vectara-multiquery/rag_vectara_multiquery/chain.py
+++ b/templates/rag-vectara-multiquery/rag_vectara_multiquery/chain.py
@@ -1,8 +1,8 @@
 import os

 from langchain.retrievers.multi_query import MultiQueryRetriever
-from langchain.vectorstores import Vectara
 from langchain_community.chat_models import ChatOpenAI
+from langchain_community.vectorstores import Vectara
 from langchain_core.output_parsers import StrOutputParser
 from langchain_core.pydantic_v1 import BaseModel
 from langchain_core.runnables import RunnableParallel, RunnablePassthrough
diff --git a/templates/rag-vectara/rag_vectara/chain.py b/templates/rag-vectara/rag_vectara/chain.py
index 972bd24dae..b2cf9e8475 100644
--- a/templates/rag-vectara/rag_vectara/chain.py
+++ b/templates/rag-vectara/rag_vectara/chain.py
@@ -1,6 +1,6 @@
 import os

-from langchain.vectorstores import Vectara
+from langchain_community.vectorstores import Vectara
 from langchain_core.output_parsers import StrOutputParser
 from langchain_core.pydantic_v1 import BaseModel
 from langchain_core.runnables import RunnableParallel, RunnablePassthrough
diff --git a/templates/rag-weaviate/rag_weaviate/chain.py b/templates/rag-weaviate/rag_weaviate/chain.py
index 2d31bd6fab..1e0ac3f7ea 100644
--- a/templates/rag-weaviate/rag_weaviate/chain.py
+++ b/templates/rag-weaviate/rag_weaviate/chain.py
@@ -1,11 +1,11 @@
 import os

-from langchain.document_loaders import WebBaseLoader
 from langchain.prompts import ChatPromptTemplate
 from langchain.text_splitter import RecursiveCharacterTextSplitter
-from langchain.vectorstores import Weaviate
 from langchain_community.chat_models import ChatOpenAI
+from langchain_community.document_loaders import WebBaseLoader
 from langchain_community.embeddings import OpenAIEmbeddings
+from langchain_community.vectorstores import Weaviate
 from langchain_core.output_parsers import StrOutputParser
 from langchain_core.pydantic_v1 import BaseModel
 from langchain_core.runnables import RunnableParallel, RunnablePassthrough
diff --git a/templates/self-query-qdrant/self_query_qdrant/chain.py b/templates/self-query-qdrant/self_query_qdrant/chain.py
index d6b00bee57..799d8816b8 100644
--- a/templates/self-query-qdrant/self_query_qdrant/chain.py
+++ b/templates/self-query-qdrant/self_query_qdrant/chain.py
@@ -4,10 +4,10 @@ from typing import List, Optional
 from langchain.chains.query_constructor.schema import AttributeInfo
 from langchain.retrievers import SelfQueryRetriever
 from langchain.schema import Document, StrOutputParser
-from langchain.vectorstores.qdrant import Qdrant
 from langchain_community.embeddings import OpenAIEmbeddings
 from langchain_community.llms import BaseLLM
 from langchain_community.llms.openai import OpenAI
+from langchain_community.vectorstores.qdrant import Qdrant
 from langchain_core.embeddings import Embeddings
 from langchain_core.pydantic_v1 import BaseModel
 from langchain_core.runnables import RunnableParallel, RunnablePassthrough
diff --git a/templates/self-query-supabase/self_query_supabase/chain.py b/templates/self-query-supabase/self_query_supabase/chain.py
index c7cec49a27..15e668869e 100644
--- a/templates/self-query-supabase/self_query_supabase/chain.py
+++ b/templates/self-query-supabase/self_query_supabase/chain.py
@@ -2,9 +2,9 @@ import os

 from langchain.chains.query_constructor.base import AttributeInfo
 from langchain.retrievers.self_query.base import SelfQueryRetriever
-from langchain.vectorstores.supabase import SupabaseVectorStore
 from langchain_community.embeddings import OpenAIEmbeddings
 from langchain_community.llms.openai import OpenAI
+from langchain_community.vectorstores.supabase import SupabaseVectorStore
 from langchain_core.runnables import RunnableParallel, RunnablePassthrough
 from supabase.client import create_client
diff --git a/templates/summarize-anthropic/summarize_anthropic.ipynb b/templates/summarize-anthropic/summarize_anthropic.ipynb
index c8a093cc47..12615ed04b 100644
--- a/templates/summarize-anthropic/summarize_anthropic.ipynb
+++ b/templates/summarize-anthropic/summarize_anthropic.ipynb
@@ -37,7 +37,7 @@
    "source": [
     "import arxiv\n",
     "from langchain_community.chat_models import ChatAnthropic\n",
-    "from langchain.document_loaders import ArxivLoader, UnstructuredPDFLoader\n",
+    "from langchain_community.document_loaders import ArxivLoader, UnstructuredPDFLoader\n",
     "\n",
     "# Load a paper to use\n",
     "paper = next(arxiv.Search(query=\"Visual Instruction Tuning\").results())\n",
@@ -61,7 +61,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "from langchain.document_loaders import WebBaseLoader\n",
+    "from langchain_community.document_loaders import WebBaseLoader\n",
     "\n",
     "loader = WebBaseLoader(\"https://lilianweng.github.io/posts/2023-06-23-agent/\")\n",
     "text = loader.load()"
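Every hunk in this patch is the same mechanical rewrite: vector stores, document loaders, and graph utilities move from langchain.* to langchain_community.*, with call sites left untouched. A minimal before/after sketch of the pattern, reusing the WebBaseLoader/Chroma pairing that recurs in the templates above (the snippet is illustrative and not part of the patch):

```python
# Before (deprecated import paths):
# from langchain.document_loaders import WebBaseLoader
# from langchain.vectorstores import Chroma

# After: the same classes, now imported from langchain_community.
from langchain_community.document_loaders import WebBaseLoader
from langchain_community.embeddings import OpenAIEmbeddings
from langchain_community.vectorstores import Chroma

# Usage is unchanged; only the import path moves.
loader = WebBaseLoader("https://lilianweng.github.io/posts/2023-06-23-agent/")
data = loader.load()
vectorstore = Chroma.from_documents(data, embedding=OpenAIEmbeddings())
retriever = vectorstore.as_retriever()
```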