mirror of
https://github.com/hwchase17/langchain
synced 2024-11-10 01:10:59 +00:00
7ffc431b3a
`SpacyTextSplitter` currently uses spacy's statistics-based `en_core_web_sm` model for sentence splitting. This is a good splitter, but it's also pretty slow, and in this case it's doing a lot of work that's not needed given that the spacy parse is then just thrown away. However, there is also a simple rules-based spacy sentencizer. Using this is at least an order of magnitude faster than using `en_core_web_sm` according to my local tests. Also, spacy sentence tokenization based on `en_core_web_sm` can be sped up in this case by not doing the NER stage. This shaves some cycles too, both when loading the model and when parsing the text. Consequently, this PR adds the option to use the basic spacy sentencizer, and it disables the NER stage for the current approach, *which is kept as the default*. Lastly, when extracting the tokenized sentences, the `text` attribute is called directly instead of doing the string conversion, which is IMO a bit more idiomatic.
38 lines
1.4 KiB
Python
"""Test text splitting functionality using NLTK and Spacy based sentence splitters."""
import pytest
from langchain.text_splitter import NLTKTextSplitter, SpacyTextSplitter


def test_nltk_text_splitting_args() -> None:
    """Constructing an NLTK splitter with invalid arguments raises.

    A chunk overlap larger than the chunk size is inconsistent, so the
    constructor is expected to raise ``ValueError``.
    """
    with pytest.raises(ValueError):
        # overlap (4) exceeds chunk size (2) -> rejected at construction
        NLTKTextSplitter(chunk_size=2, chunk_overlap=4)


def test_spacy_text_splitting_args() -> None:
    """Constructing a Spacy splitter with invalid arguments raises.

    A chunk overlap larger than the chunk size is inconsistent, so the
    constructor is expected to raise ``ValueError``.
    """
    with pytest.raises(ValueError):
        # overlap (4) exceeds chunk size (2) -> rejected at construction
        SpacyTextSplitter(chunk_size=2, chunk_overlap=4)


def test_nltk_text_splitter() -> None:
    """Sentences tokenized by NLTK are rejoined with the given separator."""
    sep = "|||"
    sample = "This is sentence one. And this is sentence two."
    chunks = NLTKTextSplitter(separator=sep).split_text(sample)
    # Both sentences fit within one chunk, joined by the separator.
    assert chunks == [f"This is sentence one.{sep}And this is sentence two."]


@pytest.mark.parametrize("pipeline", ["sentencizer", "en_core_web_sm"])
def test_spacy_text_splitter(pipeline: str) -> None:
    """Sentences tokenized by Spacy are rejoined with the given separator.

    Runs once per supported pipeline: the rule-based ``sentencizer`` and
    the statistical ``en_core_web_sm`` model.
    """
    sep = "|||"
    sample = "This is sentence one. And this is sentence two."
    splitter = SpacyTextSplitter(separator=sep, pipeline=pipeline)
    # Both sentences fit within one chunk, joined by the separator.
    assert splitter.split_text(sample) == [
        f"This is sentence one.{sep}And this is sentence two."
    ]