diff --git a/application/parser/file/html_parser.py b/application/parser/file/html_parser.py
index f6f885fc..7c192b6a 100644
--- a/application/parser/file/html_parser.py
+++ b/application/parser/file/html_parser.py
@@ -69,10 +69,10 @@ class HTMLParser(BaseParser):
Chunks.append([])
Chunks[-1].append(isd_el['text'])
- # Removing all the chunks with sum of lenth of all the strings in the chunk < 25
+ # Removing all the chunks with sum of length of all the strings in the chunk < 25
# TODO: This value can be an user defined variable
for chunk in Chunks:
- # sum of lenth of all the strings in the chunk
+ # sum of length of all the strings in the chunk
sum = 0
sum += len(str(chunk))
if sum < 25:
diff --git a/application/parser/file/rst_parser.py b/application/parser/file/rst_parser.py
index 4bd0e6f4..633ec844 100644
--- a/application/parser/file/rst_parser.py
+++ b/application/parser/file/rst_parser.py
@@ -27,7 +27,7 @@ class RstParser(BaseParser):
remove_interpreters: bool = True,
remove_directives: bool = True,
remove_whitespaces_excess: bool = True,
- # Be carefull with remove_characters_excess, might cause data loss
+ # Be careful with remove_characters_excess, might cause data loss
remove_characters_excess: bool = True,
**kwargs: Any,
) -> None:
diff --git a/docs/pages/Deploying/Quickstart.md b/docs/pages/Deploying/Quickstart.md
index a022e884..54a608ad 100644
--- a/docs/pages/Deploying/Quickstart.md
+++ b/docs/pages/Deploying/Quickstart.md
@@ -1,7 +1,7 @@
## Launching Web App
Note: Make sure you have docker installed
-1. Open dowload this repository with `git clone https://github.com/arc53/DocsGPT.git`
+1. Download this repository with `git clone https://github.com/arc53/DocsGPT.git`
2. Create .env file in your root directory and set your `OPENAI_API_KEY` with your openai api key
3. Run `docker-compose build && docker-compose up`
4. Navigate to `http://localhost:5173/`
diff --git a/docs/pages/Developing/API-docs.md b/docs/pages/Developing/API-docs.md
index ff6d0530..4109eb10 100644
--- a/docs/pages/Developing/API-docs.md
+++ b/docs/pages/Developing/API-docs.md
@@ -2,7 +2,7 @@ App currently has two main api endpoints:
### /api/answer
Its a POST request that sends a JSON in body with 4 values. Here is a JavaScript fetch example
-It will recieve an answer for a user provided question
+It will receive an answer for a user provided question
```js
// answer (POST http://127.0.0.1:5000/api/answer)
@@ -29,7 +29,7 @@ In response you will get a json document like this one:
```
### /api/docs_check
-It will make sure documentation is loaded on a server (just run it everytime user is switching between libraries (documentations)
+It will make sure documentation is loaded on a server (just run it every time the user is switching between libraries (documentations))
Its a POST request that sends a JSON in body with 1 value. Here is a JavaScript fetch example
```js
@@ -104,7 +104,7 @@ fetch("http://localhost:5001/api/task_status?task_id=b2d2a0f4-387c-44fd-a443-e4f
```
Responses:
-There are two types of repsonses:
+There are two types of responses:
1. while task it still running, where "current" will show progress from 0 - 100
```json
{
diff --git a/docs/pages/Guides/How-to-use-different-LLM.md b/docs/pages/Guides/How-to-use-different-LLM.md
index fb7c4e01..07c0346d 100644
--- a/docs/pages/Guides/How-to-use-different-LLM.md
+++ b/docs/pages/Guides/How-to-use-different-LLM.md
@@ -24,9 +24,9 @@ Options:
LLM_NAME (openai, manifest, cohere, Arc53/docsgpt-14b, Arc53/docsgpt-7b-falcon)
EMBEDDINGS_NAME (openai_text-embedding-ada-002, huggingface_sentence-transformers/all-mpnet-base-v2, huggingface_hkunlp/instructor-large, cohere_medium)
-Thats it!
+That's it!
### Hosting everything locally and privately (for using our optimised open-source models)
If you are working with important data and dont want anything to leave your premises.
-Make sure you set SELF_HOSTED_MODEL as true in you .env variable and for your LLM_NAME you can use anything thats on Huggingface
+Make sure you set SELF_HOSTED_MODEL as true in your .env variable and for your LLM_NAME you can use anything that's on Huggingface
diff --git a/scripts/parser/file/html_parser.py b/scripts/parser/file/html_parser.py
index 523d09ec..dfe2f8f5 100644
--- a/scripts/parser/file/html_parser.py
+++ b/scripts/parser/file/html_parser.py
@@ -69,10 +69,10 @@ class HTMLParser(BaseParser):
Chunks.append([])
Chunks[-1].append(isd_el['text'])
- # Removing all the chunks with sum of lenth of all the strings in the chunk < 25
+ # Removing all the chunks with sum of length of all the strings in the chunk < 25
# TODO: This value can be a user defined variable
for chunk in Chunks:
- # sum of lenth of all the strings in the chunk
+ # sum of length of all the strings in the chunk
sum = 0
sum += len(str(chunk))
if sum < 25:
diff --git a/scripts/parser/file/rst_parser.py b/scripts/parser/file/rst_parser.py
index f8feff70..887571b5 100644
--- a/scripts/parser/file/rst_parser.py
+++ b/scripts/parser/file/rst_parser.py
@@ -27,7 +27,7 @@ class RstParser(BaseParser):
remove_interpreters: bool = True,
remove_directives: bool = True,
remove_whitespaces_excess: bool = True,
- # Be carefull with remove_characters_excess, might cause data loss
+ # Be careful with remove_characters_excess, might cause data loss
remove_characters_excess: bool = True,
**kwargs: Any,
) -> None: