From f7d724458895c81a4438cb924267dfe46e8c276b Mon Sep 17 00:00:00 2001
From: Alex
Date: Wed, 8 Mar 2023 00:07:53 +0000
Subject: [PATCH] chunks rst

---
 scripts/parser/file/bulk.py       |  1 +
 scripts/parser/file/rst_parser.py | 25 ++++++++++++++++++++++---
 2 files changed, 23 insertions(+), 3 deletions(-)

diff --git a/scripts/parser/file/bulk.py b/scripts/parser/file/bulk.py
index 4fdea6f..fb0c0ea 100644
--- a/scripts/parser/file/bulk.py
+++ b/scripts/parser/file/bulk.py
@@ -61,6 +61,7 @@ class SimpleDirectoryReader(BaseReader):
         file_extractor: Optional[Dict[str, BaseParser]] = None,
         num_files_limit: Optional[int] = None,
         file_metadata: Optional[Callable[[str], Dict]] = None,
+        chunk_size_max: int = 2048,
     ) -> None:
         """Initialize with parameters."""
         super().__init__()

diff --git a/scripts/parser/file/rst_parser.py b/scripts/parser/file/rst_parser.py
index 7c97b32..182f943 100644
--- a/scripts/parser/file/rst_parser.py
+++ b/scripts/parser/file/rst_parser.py
@@ -8,7 +8,7 @@ from pathlib import Path
 from typing import Any, Dict, List, Optional, Tuple, Union, cast
 
 from parser.file.base_parser import BaseParser
-
+import tiktoken
 
 class RstParser(BaseParser):
     """reStructuredText parser.
@@ -29,6 +29,7 @@ class RstParser(BaseParser):
         remove_whitespaces_excess: bool = True,
         #Be carefull with remove_characters_excess, might cause data loss
         remove_characters_excess: bool = True,
+        max_tokens: int = 2048,
         **kwargs: Any,
     ) -> None:
         """Init params."""
@@ -40,6 +41,22 @@ class RstParser(BaseParser):
         self._remove_directives = remove_directives
         self._remove_whitespaces_excess = remove_whitespaces_excess
         self._remove_characters_excess = remove_characters_excess
+        self._max_tokens = max_tokens
+
+    def tups_chunk_append(self, tups: List[Tuple[Optional[str], str]], current_header: Optional[str], current_text: str):
+        """Append a (header, text) tuple, splitting the text when it exceeds the token budget."""
+        if current_header is not None:
+            if current_text is None or current_text == "":
+                return tups
+            num_tokens = len(tiktoken.get_encoding("cl100k_base").encode(current_text))
+            if num_tokens > self._max_tokens:
+                chunks = [current_text[i:i + self._max_tokens] for i in range(0, len(current_text), self._max_tokens)]
+                for chunk in chunks:
+                    tups.append((current_header, chunk))
+            else:
+                tups.append((current_header, current_text))
+        return tups
+
 
     def rst_to_tups(self, rst_text: str) -> List[Tuple[Optional[str], str]]:
         """Convert a reStructuredText file to a dictionary.
@@ -52,6 +69,7 @@ class RstParser(BaseParser):
 
         current_header = None
         current_text = ""
+        encoding = tiktoken.get_encoding("cl100k_base")
 
         for i, line in enumerate(lines):
             header_match = re.match(r"^[^\S\n]*[-=]+[^\S\n]*$", line)
@@ -62,13 +80,14 @@ class RstParser(BaseParser):
                 # removes the next heading from current Document
                 if current_text.endswith(lines[i - 1] + "\n"):
                     current_text = current_text[:len(current_text) - len(lines[i - 1] + "\n")]
-                rst_tups.append((current_header, current_text))
+                rst_tups = self.tups_chunk_append(rst_tups, current_header, current_text)
                 current_header = lines[i - 1]
                 current_text = ""
             else:
                 current_text += line + "\n"
 
-        rst_tups.append((current_header, current_text))
+
+        rst_tups = self.tups_chunk_append(rst_tups, current_header, current_text)
 
         #TODO: Format for rst
         #
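
Note on the chunking logic (not part of the patch above): tups_chunk_append counts tokens with tiktoken's cl100k_base encoding, and when a section exceeds max_tokens it falls back to slicing the text by character offsets of length max_tokens, so each emitted piece stays under the token budget. The snippet below is a minimal standalone sketch of the same idea that splits on token boundaries instead of character offsets; the helper name chunk_section and the example values are illustrative assumptions, not code from this commit.

import tiktoken
from typing import List, Optional, Tuple

def chunk_section(text: str, max_tokens: int = 2048) -> List[str]:
    """Split one section into pieces of at most max_tokens tokens each."""
    # Same encoding the patch uses for counting tokens.
    enc = tiktoken.get_encoding("cl100k_base")
    tokens = enc.encode(text)
    if len(tokens) <= max_tokens:
        return [text]
    # Decode each max_tokens-sized run of token ids back into text, so every
    # chunk fits the budget exactly (at the cost of possibly cutting mid-sentence).
    return [enc.decode(tokens[i:i + max_tokens]) for i in range(0, len(tokens), max_tokens)]

# Usage sketch: re-chunk (header, text) tuples shaped like the output of rst_to_tups.
tups: List[Tuple[Optional[str], str]] = [("Usage", "word " * 10000)]
chunked = [(header, piece) for header, text in tups for piece in chunk_section(text)]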