mirror of https://github.com/hwchase17/langchain
added `Wikipedia` document loader (#4141)
- Added the `Wikipedia` document loader, based on the existing `utilities/WikipediaAPIWrapper`.
- Added respective unit tests and an example notebook.
- Sorted the list of classes in `__init__`.
parent 423f497168
commit 9544b30821
@@ -0,0 +1,130 @@
{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "bda1f3f5",
   "metadata": {},
   "source": [
    "# Wikipedia\n",
    "\n",
    ">[Wikipedia](https://wikipedia.org/) is a multilingual free online encyclopedia written and maintained by a community of volunteers, known as Wikipedians, through open collaboration and using a wiki-based editing system called MediaWiki. `Wikipedia` is the largest and most-read reference work in history.\n",
    "\n",
    "This notebook shows how to load wiki pages from `wikipedia.org` into the Document format that we use downstream."
   ]
  },
  {
   "cell_type": "markdown",
   "id": "1b7a1eef-7bf7-4e7d-8bfc-c4e27c9488cb",
   "metadata": {},
   "source": [
    "## Installation"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "2abd5578-aa3d-46b9-99af-8b262f0b3df8",
   "metadata": {},
   "source": [
    "First, you need to install the `wikipedia` Python package."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b674aaea-ed3a-4541-8414-260a8f67f623",
   "metadata": {
    "tags": []
   },
   "outputs": [],
   "source": [
    "#!pip install wikipedia"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "95f05e1c-195e-4e2b-ae8e-8d6637f15be6",
   "metadata": {},
   "source": [
    "## Examples"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "e29b954c-1407-4797-ae21-6ba8937156be",
   "metadata": {},
   "source": [
    "`WikipediaLoader` has these arguments:\n",
    "- `query`: free text used to find documents in Wikipedia\n",
    "- optional `lang`: default=\"en\". Use it to search in a specific language edition of Wikipedia\n",
    "- optional `load_max_docs`: default=100. Use it to limit the number of downloaded documents. It takes time to download all 100 documents, so use a small number for experiments. There is a hard limit of 300 for now.\n",
    "- optional `load_all_available_meta`: default=False. By default, only the most important fields are downloaded: `Published` (the date the document was published or last updated), `title`, and `Summary`. If True, the other fields are also downloaded."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "9bfd5e46",
   "metadata": {},
   "outputs": [],
   "source": [
    "from langchain.document_loaders import WikipediaLoader"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "700e4ef2",
   "metadata": {},
   "outputs": [],
   "source": [
    "docs = WikipediaLoader(query='HUNTER X HUNTER', load_max_docs=2).load()\n",
    "len(docs)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "8977bac0-0042-4f23-9754-247dbd32439b",
   "metadata": {
    "tags": []
   },
   "outputs": [],
   "source": [
    "docs[0].metadata  # meta-information of the Document"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "46969806-45a9-4c4d-a61b-cfb9658fc9de",
   "metadata": {
    "tags": []
   },
   "outputs": [],
   "source": [
    "docs[0].page_content[:400]  # the content of the Document"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.6"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
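The notebook lists `lang` and `load_all_available_meta` but only demonstrates `load_max_docs`. Here is a minimal sketch exercising all of the arguments together (it assumes the `wikipedia` package is installed and network access is available; "de" is just an example language code):

from langchain.document_loaders import WikipediaLoader

# Search the German-language Wikipedia, cap the download at two pages,
# and request every metadata field the API exposes.
docs = WikipediaLoader(
    query="HUNTER X HUNTER",
    lang="de",
    load_max_docs=2,
    load_all_available_meta=True,
).load()

for doc in docs:
    # "title" and "summary" are always present in metadata; with
    # load_all_available_meta=True, additional fields appear as well.
    print(doc.metadata["title"], len(doc.page_content))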
@@ -0,0 +1,34 @@
from typing import List, Optional

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.utilities.wikipedia import WikipediaAPIWrapper


class WikipediaLoader(BaseLoader):
    """Loads a query result from www.wikipedia.org into a list of Documents.

    The hard limit on the number of downloaded Documents is 300 for now.

    Each wiki page represents one Document.
    """

    def __init__(
        self,
        query: str,
        lang: str = "en",
        load_max_docs: Optional[int] = 100,
        load_all_available_meta: Optional[bool] = False,
    ):
        self.query = query
        self.lang = lang
        self.load_max_docs = load_max_docs
        self.load_all_available_meta = load_all_available_meta

    def load(self) -> List[Document]:
        client = WikipediaAPIWrapper(
            lang=self.lang,
            top_k_results=self.load_max_docs,
            load_all_available_meta=self.load_all_available_meta,
        )
        docs = client.load(self.query)
        return docs
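`WikipediaLoader` is a thin shim over `WikipediaAPIWrapper`: `load()` simply forwards the constructor settings and the query. A sketch of the equivalent direct call, using the `langchain.utilities` import path the test file below relies on:

from langchain.utilities import WikipediaAPIWrapper

# Equivalent to WikipediaLoader(query="HUNTER X HUNTER", load_max_docs=2).load().
# Note how the loader's load_max_docs maps onto the wrapper's top_k_results.
wrapper = WikipediaAPIWrapper(
    lang="en",
    top_k_results=2,
    load_all_available_meta=False,
)
docs = wrapper.load("HUNTER X HUNTER")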
@@ -1,19 +1,56 @@
 """Integration test for Wikipedia API Wrapper."""
+from typing import List
+
+import pytest
+
+from langchain.schema import Document
 from langchain.utilities import WikipediaAPIWrapper


-def test_call() -> None:
-    """Test that WikipediaAPIWrapper returns correct answer"""
+@pytest.fixture
+def api_client() -> WikipediaAPIWrapper:
+    return WikipediaAPIWrapper()

-    wikipedia = WikipediaAPIWrapper()
-    output = wikipedia.run("HUNTER X HUNTER")
+
+def test_run_success(api_client: WikipediaAPIWrapper) -> None:
+    output = api_client.run("HUNTER X HUNTER")
     assert "Yoshihiro Togashi" in output


-def test_no_result_call() -> None:
-    """Test that call gives no result."""
-    wikipedia = WikipediaAPIWrapper()
-    output = wikipedia.run(
+def test_run_no_result(api_client: WikipediaAPIWrapper) -> None:
+    output = api_client.run(
         "NORESULTCALL_NORESULTCALL_NORESULTCALL_NORESULTCALL_NORESULTCALL_NORESULTCALL"
     )
     assert "No good Wikipedia Search Result was found" == output
+
+
+def assert_docs(docs: List[Document], all_meta: bool = False) -> None:
+    for doc in docs:
+        assert doc.page_content
+        assert doc.metadata
+        main_meta = {"title", "summary"}
+        assert set(doc.metadata).issuperset(main_meta)
+        if all_meta:
+            assert len(set(doc.metadata)) > len(main_meta)
+        else:
+            assert len(set(doc.metadata)) == len(main_meta)
+
+
+def test_load_success(api_client: WikipediaAPIWrapper) -> None:
+    docs = api_client.load("HUNTER X HUNTER")
+    assert len(docs) > 1
+    assert_docs(docs, all_meta=False)
+
+
+def test_load_success_all_meta(api_client: WikipediaAPIWrapper) -> None:
+    api_client.load_all_available_meta = True
+    docs = api_client.load("HUNTER X HUNTER")
+    assert len(docs) > 1
+    assert_docs(docs, all_meta=True)
+
+
+def test_load_no_result(api_client: WikipediaAPIWrapper) -> None:
+    docs = api_client.load(
+        "NORESULTCALL_NORESULTCALL_NORESULTCALL_NORESULTCALL_NORESULTCALL_NORESULTCALL"
+    )
+    assert not docs
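The two no-result tests pin down the wrapper's failure modes, which calling code can rely on. A small sketch, assuming exactly the behavior asserted above:

from langchain.utilities import WikipediaAPIWrapper

wrapper = WikipediaAPIWrapper()

# On a miss, run() returns a fixed sentinel string instead of raising,
# so callers should compare against it before using the result.
NO_RESULT = "No good Wikipedia Search Result was found"
summary = wrapper.run("NORESULTCALL_NORESULTCALL_NORESULTCALL")
if summary == NO_RESULT:
    summary = None

# load() signals the same condition with an empty list.
docs = wrapper.load("NORESULTCALL_NORESULTCALL_NORESULTCALL")
print(f"{len(docs)} documents loaded")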