mirror of https://github.com/hwchase17/langchain
`PubMed` document loader (#8893)
- added `PubMed Document Loader` artifacts; ut-s; examples - fixed `PubMed utility`; ut-s @hwchase17pull/8721/head
parent
a7824f16f2
commit
2d078c7767
@ -0,0 +1,139 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "3df0dcf8",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# PubMed\n",
|
||||
"\n",
|
||||
">[PubMed®](https://pubmed.ncbi.nlm.nih.gov/) by `The National Center for Biotechnology Information, National Library of Medicine` comprises more than 35 million citations for biomedical literature from `MEDLINE`, life science journals, and online books. Citations may include links to full text content from `PubMed Central` and publisher web sites."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "aecaff63",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.document_loaders import PubMedLoader"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "f2f7e8d3",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"loader = PubMedLoader(\"chatgpt\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "ed115aa1",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"docs = loader.load()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "b68d3264-b893-45e4-8ab0-077b25a586dc",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"3"
|
||||
]
|
||||
},
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"len(docs)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "9f4626d2-068d-4aed-9ffe-ad754ad4b4cd",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'uid': '37548997',\n",
|
||||
" 'Title': 'Performance of ChatGPT on the Situational Judgement Test-A Professional Dilemmas-Based Examination for Doctors in the United Kingdom.',\n",
|
||||
" 'Published': '2023-08-07',\n",
|
||||
" 'Copyright Information': '©Robin J Borchert, Charlotte R Hickman, Jack Pepys, Timothy J Sadler. Originally published in JMIR Medical Education (https://mededu.jmir.org), 07.08.2023.'}"
|
||||
]
|
||||
},
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"docs[1].metadata"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "8000f687-b500-4cce-841b-70d6151304da",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"\"BACKGROUND: ChatGPT is a large language model that has performed well on professional examinations in the fields of medicine, law, and business. However, it is unclear how ChatGPT would perform on an examination assessing professionalism and situational judgement for doctors.\\nOBJECTIVE: We evaluated the performance of ChatGPT on the Situational Judgement Test (SJT): a national examination taken by all final-year medical students in the United Kingdom. This examination is designed to assess attributes such as communication, teamwork, patient safety, prioritization skills, professionalism, and ethics.\\nMETHODS: All questions from the UK Foundation Programme Office's (UKFPO's) 2023 SJT practice examination were inputted into ChatGPT. For each question, ChatGPT's answers and rationales were recorded and assessed on the basis of the official UK Foundation Programme Office scoring template. Questions were categorized into domains of Good Medical Practice on the basis of the domains referenced in the rationales provided in the scoring sheet. Questions without clear domain links were screened by reviewers and assigned one or multiple domains. ChatGPT's overall performance, as well as its performance across the domains of Good Medical Practice, was evaluated.\\nRESULTS: Overall, ChatGPT performed well, scoring 76% on the SJT but scoring full marks on only a few questions (9%), which may reflect possible flaws in ChatGPT's situational judgement or inconsistencies in the reasoning across questions (or both) in the examination itself. ChatGPT demonstrated consistent performance across the 4 outlined domains in Good Medical Practice for doctors.\\nCONCLUSIONS: Further research is needed to understand the potential applications of large language models, such as ChatGPT, in medical education for standardizing questions and providing consistent rationales for examinations assessing professionalism and ethics.\""
|
||||
]
|
||||
},
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"docs[1].page_content"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "1070e571-697d-4c33-9a4f-0b2dd6909629",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.12"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
@ -0,0 +1,30 @@
|
||||
# PubMed
|
||||
|
||||
>[PubMed®](https://pubmed.ncbi.nlm.nih.gov/) by `The National Center for Biotechnology Information, National Library of Medicine`
|
||||
> comprises more than 35 million citations for biomedical literature from `MEDLINE`, life science journals, and online books.
|
||||
> Citations may include links to full text content from `PubMed Central` and publisher web sites.
|
||||
|
||||
## Setup
|
||||
You need to install a python package.
|
||||
|
||||
```bash
|
||||
pip install xmltodict
|
||||
```
|
||||
|
||||
### Retriever
|
||||
|
||||
See a [usage example](/docs/integrations/retrievers/pubmed).
|
||||
|
||||
```python
|
||||
from langchain.retrievers import PubMedRetriever
|
||||
```
|
||||
|
||||
### Document Loader
|
||||
|
||||
See a [usage example](/docs/integrations/document_loaders/pubmed).
|
||||
|
||||
```python
|
||||
from langchain.document_loaders import PubMedLoader
|
||||
```
|
@ -0,0 +1,39 @@
|
||||
from typing import Iterator, List, Optional
|
||||
|
||||
from langchain.docstore.document import Document
|
||||
from langchain.document_loaders.base import BaseLoader
|
||||
from langchain.utilities.pubmed import PubMedAPIWrapper
|
||||
|
||||
|
||||
class PubMedLoader(BaseLoader):
    """Load a query result from the PubMed biomedical library into Documents.

    Attributes:
        query: The query to be passed to the PubMed API.
        load_max_docs: The maximum number of documents to load.
    """

    def __init__(
        self,
        query: str,
        load_max_docs: Optional[int] = 3,
    ):
        """Initialize the PubMedLoader.

        Args:
            query: The query to be passed to the PubMed API.
            load_max_docs: The maximum number of documents to load.
                Defaults to 3.
        """
        self.query = query
        self.load_max_docs = load_max_docs
        # The wrapper performs the actual PubMed API calls; capping its
        # top_k_results ensures we never fetch more than load_max_docs.
        self._client = PubMedAPIWrapper(
            top_k_results=load_max_docs,
        )

    def load(self) -> List[Document]:
        """Eagerly fetch all matching documents as a list."""
        # Delegate to lazy_load so both entry points share one code path.
        return list(self.lazy_load())

    def lazy_load(self) -> Iterator[Document]:
        """Lazily yield matching documents one at a time."""
        yield from self._client.lazy_load_docs(self.query)
|
File diff suppressed because it is too large
Load Diff
@ -0,0 +1,54 @@
|
||||
"""Integration test for PubMed API Wrapper."""
|
||||
from typing import List
|
||||
|
||||
import pytest
|
||||
|
||||
from langchain.document_loaders import PubMedLoader
|
||||
from langchain.schema import Document
|
||||
|
||||
xmltodict = pytest.importorskip("xmltodict")
|
||||
|
||||
|
||||
def test_load_success() -> None:
    """A broad query returns the default maximum number of documents."""
    api_client = PubMedLoader(query="chatgpt")
    docs = api_client.load()
    # The loader defaults to load_max_docs=3, so a common query must
    # return exactly that many documents.  (Removed leftover debug print.)
    assert len(docs) == api_client.load_max_docs == 3
    assert_docs(docs)
|
||||
|
||||
|
||||
def test_load_success_load_max_docs() -> None:
    """An explicit load_max_docs limit is honored by load()."""
    api_client = PubMedLoader(query="chatgpt", load_max_docs=2)
    docs = api_client.load()
    # Removed leftover debug print; the assertion alone documents intent.
    assert len(docs) == api_client.load_max_docs == 2
    assert_docs(docs)
|
||||
|
||||
|
||||
def test_load_returns_no_result() -> None:
    """A nonsense query yields an empty document list."""
    loader = PubMedLoader(query="1605.08386WWW")
    results = loader.load()
    assert len(results) == 0
|
||||
|
||||
|
||||
def test_load_no_content() -> None:
    """A UID-style query returns a Document whose page_content is empty.

    This particular record has no abstract text, so the loader is expected
    to produce metadata with an empty body.  (Removed leftover debug print.)
    """
    api_client = PubMedLoader(query="37548971")
    docs = api_client.load()
    assert len(docs) > 0
    assert docs[0].page_content == ""
|
||||
|
||||
|
||||
def assert_docs(docs: List[Document]) -> None:
    """Check that every document carries exactly the expected metadata keys."""
    expected_keys = {"uid", "Title", "Published", "Copyright Information"}
    for doc in docs:
        assert doc.metadata
        assert set(doc.metadata) == expected_keys
|
@ -0,0 +1,41 @@
|
||||
"""Integration test for PubMed API Wrapper."""
|
||||
from typing import List
|
||||
|
||||
import pytest
|
||||
|
||||
from langchain.retrievers import PubMedRetriever
|
||||
from langchain.schema import Document
|
||||
|
||||
|
||||
# Shared fixture: a fresh, default-configured retriever for each test.
@pytest.fixture
def retriever() -> PubMedRetriever:
    """Return a PubMedRetriever constructed with default arguments."""
    return PubMedRetriever()
|
||||
|
||||
|
||||
def assert_docs(docs: List[Document]) -> None:
    """Verify each document exposes exactly the four core metadata keys."""
    required = {"uid", "Title", "Published", "Copyright Information"}
    for document in docs:
        assert document.metadata
        assert set(document.metadata) == required
|
||||
|
||||
|
||||
def test_load_success(retriever: PubMedRetriever) -> None:
    """A broad query returns the default three well-formed documents."""
    results = retriever.get_relevant_documents(query="chatgpt")
    assert len(results) == 3
    assert_docs(results)
|
||||
|
||||
|
||||
def test_load_success_top_k_results(retriever: PubMedRetriever) -> None:
    """An overridden top_k_results limit is honored by the retriever."""
    retriever.top_k_results = 2
    results = retriever.get_relevant_documents(query="chatgpt")
    assert len(results) == retriever.top_k_results
    assert_docs(results)
|
||||
|
||||
|
||||
def test_load_no_result(retriever: PubMedRetriever) -> None:
    """A gibberish query comes back empty."""
    results = retriever.get_relevant_documents("1605.08386WWW")
    assert not results
|
@ -1,50 +0,0 @@
|
||||
"""Integration test for PubMed API Wrapper."""
|
||||
from typing import List
|
||||
|
||||
import pytest
|
||||
|
||||
from langchain.retrievers import PubMedRetriever
|
||||
from langchain.schema import Document
|
||||
|
||||
|
||||
# Shared fixture: a fresh, default-configured retriever for each test.
@pytest.fixture
def retriever() -> PubMedRetriever:
    """Return a PubMedRetriever constructed with default arguments."""
    return PubMedRetriever()
|
||||
|
||||
|
||||
def assert_docs(docs: List[Document], all_meta: bool = False) -> None:
    """Verify content and metadata on every returned document.

    When ``all_meta`` is True, each document must expose extra metadata
    fields beyond the core set; otherwise exactly the core set is expected.
    """
    core_fields = {"Published", "Title", "Authors", "Summary"}
    for doc in docs:
        assert doc.page_content
        assert doc.metadata
        present = set(doc.metadata)
        assert present.issuperset(core_fields)
        if all_meta:
            assert len(present) > len(core_fields)
        else:
            assert len(present) == len(core_fields)
|
||||
|
||||
|
||||
def test_load_success(retriever: PubMedRetriever) -> None:
    """A specific identifier query returns one document with core metadata."""
    results = retriever.get_relevant_documents(query="1605.08386")
    assert len(results) == 1
    assert_docs(results, all_meta=False)
|
||||
|
||||
|
||||
def test_load_success_all_meta(retriever: PubMedRetriever) -> None:
    """Enabling all available metadata yields extra fields on each document."""
    retriever.load_all_available_meta = True
    retriever.load_max_docs = 2
    results = retriever.get_relevant_documents(query="ChatGPT")
    assert len(results) > 1
    assert_docs(results, all_meta=True)
|
||||
|
||||
|
||||
def test_load_success_init_args() -> None:
    """Constructor arguments configure document count and metadata verbosity."""
    configured = PubMedRetriever(load_max_docs=1, load_all_available_meta=True)
    results = configured.get_relevant_documents(query="ChatGPT")
    assert len(results) == 1
    assert_docs(results, all_meta=True)
|
||||
|
||||
|
||||
def test_load_no_result(retriever: PubMedRetriever) -> None:
    """A gibberish query yields no documents."""
    assert not retriever.get_relevant_documents("1605.08386WWW")
|
Loading…
Reference in New Issue