token ingest

This commit is contained in:
Pavel
2023-03-14 13:32:29 +04:00
parent 20a0800aa7
commit b6c02c850a
5 changed files with 80 additions and 78 deletions

View File

@@ -29,7 +29,6 @@ class RstParser(BaseParser):
remove_whitespaces_excess: bool = True,
    #Be careful with remove_characters_excess, might cause data loss
remove_characters_excess: bool = True,
# max_tokens: int = 2048,
**kwargs: Any,
) -> None:
"""Init params."""
@@ -41,18 +40,6 @@ class RstParser(BaseParser):
self._remove_directives = remove_directives
self._remove_whitespaces_excess = remove_whitespaces_excess
self._remove_characters_excess = remove_characters_excess
# self._max_tokens = max_tokens
# def tups_chunk_append(self, tups: List[Tuple[Optional[str], str]], current_header: Optional[str], current_text: str):
# """Append to tups chunk."""
# num_tokens = len(tiktoken.get_encoding("cl100k_base").encode(current_text))
# if num_tokens > self._max_tokens:
# chunks = [current_text[i:i + self._max_tokens] for i in range(0, len(current_text), self._max_tokens)]
# for chunk in chunks:
# tups.append((current_header, chunk))
# else:
# tups.append((current_header, current_text))
# return tups
def rst_to_tups(self, rst_text: str) -> List[Tuple[Optional[str], str]]: