Compare commits
1189 Commits
The compare view lists the 1189 commits by abbreviated SHA only (first listed: 2be523cf77; last listed: d14438bf54); the author, date, and message columns of the commit table were not captured. The file-level diff follows.
.env-template — new file (9 lines added)

```shell
API_KEY=<LLM api key (for example, open ai key)>
LLM_NAME=docsgpt
VITE_API_STREAMING=true

#For Azure (you can delete it if you don't use Azure)
OPENAI_API_BASE=
OPENAI_API_VERSION=
AZURE_DEPLOYMENT_NAME=
AZURE_EMBEDDINGS_DEPLOYMENT_NAME=
```
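For reference, a filled-in `.env` based on this template might look like the sketch below. The API key value is a placeholder, the other values keep the defaults from the template, and the Azure block is left empty because it is only needed for Azure deployments.

```shell
# Hypothetical .env built from the template above — the API key is a placeholder.
API_KEY=sk-your-openai-key-here
LLM_NAME=docsgpt
VITE_API_STREAMING=true

# Azure settings are only needed when using Azure OpenAI; otherwise they can be deleted.
OPENAI_API_BASE=
OPENAI_API_VERSION=
AZURE_DEPLOYMENT_NAME=
AZURE_EMBEDDINGS_DEPLOYMENT_NAME=
```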
.github/ISSUE_TEMPLATE/bug_report.yml — new file (138 lines added)

```yaml
name: "🐛 Bug Report"
description: "Submit a bug report to help us improve"
title: "🐛 Bug Report: "
labels: ["type: bug"]
body:
  - type: markdown
    attributes:
      value: We value your time and your efforts to submit this bug report is appreciated. 🙏

  - type: textarea
    id: description
    validations:
      required: true
    attributes:
      label: "📜 Description"
      description: "A clear and concise description of what the bug is."
      placeholder: "It bugs out when ..."

  - type: textarea
    id: steps-to-reproduce
    validations:
      required: true
    attributes:
      label: "👟 Reproduction steps"
      description: "How do you trigger this bug? Please walk us through it step by step."
      placeholder: "1. Go to '...'
        2. Click on '....'
        3. Scroll down to '....'
        4. See error"

  - type: textarea
    id: expected-behavior
    validations:
      required: true
    attributes:
      label: "👍 Expected behavior"
      description: "What did you think should happen?"
      placeholder: "It should ..."

  - type: textarea
    id: actual-behavior
    validations:
      required: true
    attributes:
      label: "👎 Actual Behavior with Screenshots"
      description: "What did actually happen? Add screenshots, if applicable."
      placeholder: "It actually ..."

  - type: dropdown
    id: operating-system
    attributes:
      label: "💻 Operating system"
      description: "What OS is your app running on?"
      options:
        - Linux
        - MacOS
        - Windows
        - Something else
    validations:
      required: true

  - type: dropdown
    id: browsers
    attributes:
      label: What browsers are you seeing the problem on?
      multiple: true
      options:
        - Firefox
        - Chrome
        - Safari
        - Microsoft Edge
        - Something else

  - type: dropdown
    id: dev-environment
    validations:
      required: true
    attributes:
      label: "🤖 What development environment are you experiencing this bug on?"
      options:
        - Docker
        - Local dev server

  - type: textarea
    id: env-vars
    validations:
      required: false
    attributes:
      label: "🔒 Did you set the correct environment variables in the right path? List the environment variable names (not values please!)"
      description: "Please refer to the [Project setup instructions](https://github.com/arc53/DocsGPT#quickstart) if you are unsure."
      placeholder: "It actually ..."

  - type: textarea
    id: additional-context
    validations:
      required: false
    attributes:
      label: "📃 Provide any additional context for the Bug."
      description: "Add any other context about the problem here."
      placeholder: "It actually ..."

  - type: textarea
    id: logs
    validations:
      required: false
    attributes:
      label: 📖 Relevant log output
      description: Please copy and paste any relevant log output. This will be automatically formatted into code, so no need for backticks.
      render: shell

  - type: checkboxes
    id: no-duplicate-issues
    attributes:
      label: "👀 Have you spent some time to check if this bug has been raised before?"
      options:
        - label: "I checked and didn't find similar issue"
          required: true

  - type: dropdown
    id: willing-to-submit-pr
    attributes:
      label: 🔗 Are you willing to submit PR?
      description: This is absolutely not required, but we are happy to guide you in the contribution process.
      options: # Added options key
        - "Yes, I am willing to submit a PR!"
        - "No"
    validations:
      required: false

  - type: checkboxes
    id: terms
    attributes:
      label: 🧑‍⚖️ Code of Conduct
      description: By submitting this issue, you agree to follow our [Code of Conduct](https://github.com/arc53/DocsGPT/blob/main/CODE_OF_CONDUCT.md)
      options:
        - label: I agree to follow this project's Code of Conduct
          required: true
```
.github/ISSUE_TEMPLATE/feature_request.yml — new file (54 lines added)

```yaml
name: 🚀 Feature
description: "Submit a proposal for a new feature"
title: "🚀 Feature: "
labels: [feature]
body:
  - type: markdown
    attributes:
      value: We value your time and your efforts to submit this bug report is appreciated. 🙏
  - type: textarea
    id: feature-description
    validations:
      required: true
    attributes:
      label: "🔖 Feature description"
      description: "A clear and concise description of what the feature is."
      placeholder: "You should add ..."
  - type: textarea
    id: pitch
    validations:
      required: true
    attributes:
      label: "🎤 Why is this feature needed ?"
      description: "Please explain why this feature should be implemented and how it would be used. Add examples, if applicable."
      placeholder: "In my use-case, ..."
  - type: textarea
    id: solution
    validations:
      required: true
    attributes:
      label: "✌️ How do you aim to achieve this?"
      description: "A clear and concise description of what you want to happen."
      placeholder: "I want this feature to, ..."
  - type: textarea
    id: alternative
    validations:
      required: false
    attributes:
      label: "🔄️ Additional Information"
      description: "A clear and concise description of any alternative solutions or additional solutions you've considered."
      placeholder: "I tried, ..."
  - type: checkboxes
    id: no-duplicate-issues
    attributes:
      label: "👀 Have you spent some time to check if this feature request has been raised before?"
      options:
        - label: "I checked and didn't find similar issue"
          required: true
  - type: dropdown
    id: willing-to-submit-pr
    attributes:
      label: Are you willing to submit PR?
      description: This is absolutely not required, but we are happy to guide you in the contribution process.
      options:
        - "Yes I am willing to submit a PR!"
```
.github/PULL_REQUEST_TEMPLATE.md — new file (5 lines added)

```markdown
- **What kind of change does this PR introduce?** (Bug fix, feature, docs update, ...)

- **Why was this change needed?** (You can also link to an open issue here)

- **Other information**:
```
.github/holopin.yml — new file (5 lines added)

```yaml
organization: arc53
defaultSticker: clqmdf0ed34290glbvqh0kzxd
stickers:
  - id: clqmdf0ed34290glbvqh0kzxd
    alias: festive
```
.github/labeler.yml — new file (23 lines added)

```yaml
repo:
  - '*'

github:
  - .github/**/*

application:
  - application/**/*

docs:
  - docs/**/*

extensions:
  - extensions/**/*

frontend:
  - frontend/**/*

scripts:
  - scripts/**/*

tests:
  - tests/**/*
```
.github/workflows/ci.yml — modified (11 changed lines; the hunks below show old and new lines as they appeared in the compare view)

```
@@ -8,7 +8,12 @@ on:

jobs:
  deploy:
    if: github.repository == 'arc53/DocsGPT'
    runs-on: ubuntu-latest
    permissions:
      contents: read
      packages: write

    steps:
      - uses: actions/checkout@v3

@@ -23,17 +28,17 @@ jobs:
        with:
          username: ${{ secrets.DOCKER_USERNAME }}
          password: ${{ secrets.DOCKER_PASSWORD }}

      - name: Login to ghcr.io
        uses: docker/login-action@v2
        with:
          registry: ghcr.io
          username: ${{ github.repository_owner }}
          password: ${{ secrets.GHCR_TOKEN }}
          password: ${{ secrets.GITHUB_TOKEN }}

      # Runs a single command using the runners shell
      - name: Build and push Docker images to docker.io and ghcr.io
        uses: docker/build-push-action@v2
        uses: docker/build-push-action@v4
        with:
          file: './application/Dockerfile'
          platforms: linux/amd64
```
.github/workflows/cife.yml — new file (48 lines added)

```yaml
name: Build and push DocsGPT-FE Docker image

on:
  workflow_dispatch:
  push:
    branches:
      - main

jobs:
  deploy:
    runs-on: ubuntu-latest
    permissions:
      contents: read
      packages: write

    steps:
      - uses: actions/checkout@v3

      - name: Set up QEMU
        uses: docker/setup-qemu-action@v1

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1

      - name: Login to DockerHub
        uses: docker/login-action@v2
        with:
          username: ${{ secrets.DOCKER_USERNAME }}
          password: ${{ secrets.DOCKER_PASSWORD }}

      - name: Login to ghcr.io
        uses: docker/login-action@v2
        with:
          registry: ghcr.io
          username: ${{ github.repository_owner }}
          password: ${{ secrets.GITHUB_TOKEN }}

      # Runs a single command using the runners shell
      - name: Build and push Docker images to docker.io and ghcr.io
        uses: docker/build-push-action@v4
        with:
          file: './frontend/Dockerfile'
          platforms: linux/amd64
          context: ./frontend
          push: true
          tags: |
            ${{ secrets.DOCKER_USERNAME }}/docsgpt-fe:latest
            ghcr.io/${{ github.repository_owner }}/docsgpt-fe:latest
```
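As a rough local equivalent of this workflow's build step, the frontend image can be built with plain Docker using the same `file` and `context` inputs the workflow passes to `docker/build-push-action`; the tag name below is only an illustration.

```shell
# Sketch of a local build mirroring the workflow above (tag name is illustrative).
docker build -f ./frontend/Dockerfile -t docsgpt-fe:latest ./frontend
```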
.github/workflows/labeler.yml — new file (15 lines added)

```yaml
# https://github.com/actions/labeler
name: Pull Request Labeler
on:
  - pull_request_target
jobs:
  triage:
    permissions:
      contents: read
      pull-requests: write
    runs-on: ubuntu-latest
    steps:
      - uses: actions/labeler@v4
        with:
          repo-token: "${{ secrets.GITHUB_TOKEN }}"
          sync-labels: true
```
.github/workflows/lint.yml — new file (17 lines added)

```yaml
name: Python linting

on:
  push:
    branches:
      - '*'
  pull_request:
    types: [ opened, synchronize ]

jobs:
  ruff:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3

      - name: Lint with Ruff
        uses: chartboost/ruff-action@v1
```
.github/workflows/pytest.yml — new file (30 lines added)

```yaml
name: Run python tests with pytest
on: [push, pull_request]
jobs:
  pytest_and_coverage:
    name: Run tests and count coverage
    runs-on: ubuntu-latest
    strategy:
      matrix:
        python-version: ["3.9", "3.10", "3.11"]
    steps:
      - uses: actions/checkout@v3
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v4
        with:
          python-version: ${{ matrix.python-version }}
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install pytest pytest-cov
          cd application
          if [ -f requirements.txt ]; then pip install -r requirements.txt; fi
      - name: Test with pytest and generate coverage report
        run: |
          python -m pytest --cov=application --cov=scripts --cov=extensions --cov-report=xml
      - name: Upload coverage reports to Codecov
        if: github.event_name == 'pull_request' && matrix.python-version == '3.11'
        uses: codecov/codecov-action@v3
        env:
          CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
```
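The test-and-coverage steps above can be reproduced locally with essentially the same commands the workflow runs; this sketch assumes you are at the repository root with a supported Python version (3.9–3.11) on your PATH.

```shell
# Local mirror of the workflow's install and test steps.
python -m pip install --upgrade pip
pip install pytest pytest-cov
(cd application && if [ -f requirements.txt ]; then pip install -r requirements.txt; fi)
python -m pytest --cov=application --cov=scripts --cov=extensions --cov-report=xml
```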
.github/workflows/sync_fork.yaml — new file (41 lines added)

```yaml
name: Upstream Sync

permissions:
  contents: write

on:
  schedule:
    - cron: "0 0 * * *" # every hour
  workflow_dispatch:

jobs:
  sync_latest_from_upstream:
    name: Sync latest commits from upstream repo
    runs-on: ubuntu-latest
    if: ${{ github.event.repository.fork }}

    steps:
      # Step 1: run a standard checkout action
      - name: Checkout target repo
        uses: actions/checkout@v3

      # Step 2: run the sync action
      - name: Sync upstream changes
        id: sync
        uses: aormsby/Fork-Sync-With-Upstream-action@v3.4
        with:
          # set your upstream repo and branch
          upstream_sync_repo: arc53/DocsGPT
          upstream_sync_branch: main
          target_sync_branch: main
          target_repo_token: ${{ secrets.GITHUB_TOKEN }} # automatically generated, no need to set

          # Set test_mode true to run tests instead of the true action!!
          test_mode: false

      - name: Sync check
        if: failure()
        run: |
          echo "::error::由于权限不足,导致同步失败(这是预期的行为),请前往仓库首页手动执行[Sync fork]。"
          echo "::error::Due to insufficient permissions, synchronization failed (as expected). Please go to the repository homepage and manually perform [Sync fork]."
          exit 1
```
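If the scheduled sync fails, as the final step warns it can, the same result can be achieved manually from a fork's clone with standard git commands; the `upstream` remote name and URL below match the ones used in the CONTRIBUTING.md workflow further down.

```shell
# Manual fallback for the upstream sync, run from inside a fork's local clone.
git remote add upstream https://github.com/arc53/DocsGPT.git
git fetch upstream
git checkout main
git merge upstream/main
git push origin main
```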
.gitignore — modified (12 changed lines)

```
@@ -5,7 +5,7 @@ __pycache__/

# C extensions
*.so

*.next
# Distribution / packaging
.Python
build/

@@ -162,3 +162,13 @@ frontend/*.sw?
application/vectors/

**/inputs

**/indexes

**/temp

**/yarn.lock

node_modules/
.vscode/settings.json
models/
```
.ruff.toml — new file (2 lines added)

```toml
# Allow lines to be as long as 120 characters.
line-length = 120
```
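This config pairs with the `Python linting` workflow above; a local check using the same 120-character limit could look like the sketch below, assuming ruff is installed (the exact subcommand may vary with the ruff version).

```shell
# Local lint run that picks up .ruff.toml from the repository root (sketch).
pip install ruff
ruff check .
```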
Assets/DocsGPT tee-back.jpeg — new binary file (88 KiB)

Assets/DocsGPT tee-front.jpeg — new binary file (21 KiB)
CODE_OF_CONDUCT.md — modified (the hunks below show old and new lines as they appeared in the compare view)

@@ -2,58 +2,58 @@

## Our Pledge

We as members, contributors, and leaders pledge to make participation in our
community a harassment-free experience for everyone, regardless of age, body
We as members, contributors and leaders pledge to make participation in our
community, a harassment-free experience for everyone, regardless of age, body
size, visible or invisible disability, ethnicity, sex characteristics, gender
identity and expression, level of experience, education, socio-economic status,
nationality, personal appearance, race, religion, or sexual identity
nationality, personal appearance, race, religion or sexual identity
and orientation.

We pledge to act and interact in ways that contribute to an open, welcoming,
diverse, inclusive, and healthy community.
diverse, inclusive and a healthy community.

## Our Standards

Examples of behavior that contributes to a positive environment for our
Examples of behavior that contribute to a positive environment for our
community include:

* Demonstrating empathy and kindness toward other people
* Being respectful of differing opinions, viewpoints, and experiences
* Giving and gracefully accepting constructive feedback
* Accepting responsibility and apologizing to those affected by our mistakes,
and learning from the experience
* Focusing on what is best not just for us as individuals, but for the
overall community
## Demonstrating empathy and kindness towards other people
1. Being respectful and open to differing opinions, viewpoints, and experiences
2. Giving and gracefully accepting constructive feedback
3. Taking accountability and offering apologies to those who have been impacted by our errors,
while also gaining insights from the situation
4. Focusing on what is best not just for us as individuals but for the
community as a whole

Examples of unacceptable behavior include:

* The use of sexualized language or imagery, and sexual attention or
1. The use of sexualized language or imagery, and sexual attention or
advances of any kind
* Trolling, insulting or derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or email
2. Trolling, insulting or derogatory comments, and personal or political attacks
3. Public or private harassment
4. Publishing other's private information, such as a physical or email
address, without their explicit permission
* Other conduct which could reasonably be considered inappropriate in a
5. Other conduct which could reasonably be considered inappropriate in a
professional setting

## Enforcement Responsibilities

Community leaders are responsible for clarifying and enforcing our standards of
acceptable behavior and will take appropriate and fair corrective action in
response to any behavior that they deem inappropriate, threatening, offensive,
response to any behavior that they deem inappropriate, threatening, offensive
or harmful.

Community leaders have the right and responsibility to remove, edit, or reject
comments, commits, code, wiki edits, issues, and other contributions that are
not aligned to this Code of Conduct, and will communicate reasons for moderation
not aligned to this Code of Conduct and will communicate reasons for moderation
decisions when appropriate.

## Scope

This Code of Conduct applies within all community spaces, and also applies when
This Code of Conduct applies within all community spaces and also applies when
an individual is officially representing the community in public spaces.
Examples of representing our community include using an official e-mail address,
posting via an official social media account, or acting as an appointed
posting via an official social media account or acting as an appointed
representative at an online or offline event.

## Enforcement

@@ -63,29 +63,27 @@ reported to the community leaders responsible for enforcement at
contact@arc53.com.
All complaints will be reviewed and investigated promptly and fairly.

All community leaders are obligated to respect the privacy and security of the
All community leaders are obligated to be respectful towards the privacy and security of the
reporter of any incident.

## Enforcement Guidelines

Community leaders will follow these Community Impact Guidelines in determining
the consequences for any action they deem in violation of this Code of Conduct:
the consequences for any action that they deem in violation of this Code of Conduct:

### 1. Correction
* **Community Impact**: Use of inappropriate language or other behavior deemed
unprofessional or unwelcome in the community space.

**Community Impact**: Use of inappropriate language or other behavior deemed
unprofessional or unwelcome in the community.

**Consequence**: A private, written warning from community leaders, providing
* **Consequence**: A private, written warning from community leaders, providing
clarity around the nature of the violation and an explanation of why the
behavior was inappropriate. A public apology may be requested.

### 2. Warning

**Community Impact**: A violation through a single incident or series
* **Community Impact**: A violation through a single incident or series
of actions.

**Consequence**: A warning with consequences for continued behavior. No
* **Consequence**: A warning with consequences for continued behavior. No
interaction with the people involved, including unsolicited interaction with
those enforcing the Code of Conduct, for a specified period of time. This
includes avoiding interactions in community spaces as well as external channels

@@ -93,23 +91,21 @@ like social media. Violating these terms may lead to a temporary or
permanent ban.

### 3. Temporary Ban

**Community Impact**: A serious violation of community standards, including
* **Community Impact**: A serious violation of community standards, including
sustained inappropriate behavior.

**Consequence**: A temporary ban from any sort of interaction or public
* **Consequence**: A temporary ban from any sort of interaction or public
communication with the community for a specified period of time. No public or
private interaction with the people involved, including unsolicited interaction
with those enforcing the Code of Conduct, is allowed during this period.
Violating these terms may lead to a permanent ban.

### 4. Permanent Ban
* **Community Impact**: Demonstrating a pattern of violation of community
standards, including sustained inappropriate behavior,harassment of an
individual or aggression towards or disparagement of classes of individuals.

**Community Impact**: Demonstrating a pattern of violation of community
standards, including sustained inappropriate behavior, harassment of an
individual, or aggression toward or disparagement of classes of individuals.

**Consequence**: A permanent ban from any sort of public interaction within
* **Consequence**: A permanent ban from any sort of public interaction within
the community.

## Attribution
CONTRIBUTING.md — modified (134 changed lines; the hunk below shows old and new lines as they appeared in the compare view)

@@ -1,38 +1,128 @@
# Welcome to DocsGPT Contributing guideline
# Welcome to DocsGPT Contributing Guidelines

Thank you for choosing this project to contribute to, we are all very grateful!
Thank you for choosing to contribute to DocsGPT! We are all very grateful!

# We accept different types of contributions

📣 Discussions - where you can start a new topic or answer some questions
📣 **Discussions** - Engage in conversations, start new topics, or help answer questions.

🐞 Issues - Is how we track tasks, sometimes its bugs that need fixing, sometimes its new features
🐞 **Issues** - This is where we keep track of tasks. It could be bugs,fixes or suggestions for new features.

🛠️ Pull requests - Is how you can suggest changes to our repository, to work on existing issue or to add new features
🛠️ **Pull requests** - Suggest changes to our repository, either by working on existing issues or adding new features.

📚 Wiki - where we have our documentation
📚 **Wiki** - This is where our documentation resides.

## 🐞 Issues and Pull requests

We value contributions to our issues in form of discussion or suggestion, we recommend that you check out existing issues and our [Roadmap](https://github.com/orgs/arc53/projects/2)

If you want to contribute by writing code there are few things that you should know before doing it:
We have frontend (React, Vite) and Backend (python)

### If you are looking to contribute to Frontend (⚛️React, Vite):
Current frontend is being migrated from /application to /frontend with a new design, so please contribute to the new on. Check out this [Milestone](https://github.com/arc53/DocsGPT/milestone/1) and its issues also [Figma](https://www.figma.com/file/OXLtrl1EAy885to6S69554/DocsGPT?node-id=0%3A1&t=hjWVuxRg9yi5YkJ9-1)
Please try to follow guidelines
- We value contributions in the form of discussions or suggestions. We recommend taking a look at existing issues and our [roadmap](https://github.com/orgs/arc53/projects/2).

### If you are looking to contribute to Backend (🐍Python):
Check out our issues, and contribute to /application or /scripts (ignore old ingest_rst.py ingest_rst_sphinx.py files, they will be deprecated soon)
Currently we don't have any tests(which would be useful😉) but before submitting you PR make sure that after you ingested some test data its queryable
- If you're interested in contributing code, here are some important things to know:

### Workflow:
Create a fork, make changes on your forked repository, submit changes in a form of pull request
- We have a frontend built on React (Vite) and a backend in Python.
=======
Before creating issues, please check out how the latest version of our app looks and works by launching it via [Quickstart](https://github.com/arc53/DocsGPT#quickstart) the version on our live demo is slightly modified with login. Your issues should relate to the version that you can launch via [Quickstart](https://github.com/arc53/DocsGPT#quickstart).

## Questions / collaboration
Please join our [Discord](https://discord.gg/n5BX8dh8rU) don't hesitate, we are very friendly and welcoming to new contributors.
### 👨‍💻 If you're interested in contributing code, here are some important things to know:

# Thank you so much for considering to contribute to DocsGPT!🙏

Tech Stack Overview:

- 🌐 Frontend: Built with React (Vite) ⚛️,

- 🖥 Backend: Developed in Python 🐍

### 🌐 If you are looking to contribute to frontend (⚛️React, Vite):

- The current frontend is being migrated from [`/application`](https://github.com/arc53/DocsGPT/tree/main/application) to [`/frontend`](https://github.com/arc53/DocsGPT/tree/main/frontend) with a new design, so please contribute to the new one.
- Check out this [milestone](https://github.com/arc53/DocsGPT/milestone/1) and its issues.
- The updated Figma design can be found [here](https://www.figma.com/file/OXLtrl1EAy885to6S69554/DocsGPT?node-id=0%3A1&t=hjWVuxRg9yi5YkJ9-1).

Please try to follow the guidelines.

### 🖥 If you are looking to contribute to Backend (🐍 Python):

- Review our issues and contribute to [`/application`](https://github.com/arc53/DocsGPT/tree/main/application) or [`/scripts`](https://github.com/arc53/DocsGPT/tree/main/scripts) (please disregard old [`ingest_rst.py`](https://github.com/arc53/DocsGPT/blob/main/scripts/old/ingest_rst.py) [`ingest_rst_sphinx.py`](https://github.com/arc53/DocsGPT/blob/main/scripts/old/ingest_rst_sphinx.py) files; they will be deprecated soon).
- All new code should be covered with unit tests ([pytest](https://github.com/pytest-dev/pytest)). Please find tests under [`/tests`](https://github.com/arc53/DocsGPT/tree/main/tests) folder.
- Before submitting your Pull Request, ensure it can be queried after ingesting some test data.

### Testing

To run unit tests from the root of the repository, execute:
```
python -m pytest
```

## Workflow 📈

Here's a step-by-step guide on how to contribute to DocsGPT:

1. **Fork the Repository:**
   - Click the "Fork" button at the top-right of this repository to create your fork.

2. **Clone the Forked Repository:**
   - Clone the repository using:
   ``` shell
   git clone https://github.com/<your-github-username>/DocsGPT.git
   ```

3. **Keep your Fork in Sync:**
   - Before you make any changes, make sure that your fork is in sync to avoid merge conflicts using:
   ```shell
   git remote add upstream https://github.com/arc53/DocsGPT.git
   git pull upstream main
   ```

4. **Create and Switch to a New Branch:**
   - Create a new branch for your contribution using:
   ```shell
   git checkout -b your-branch-name
   ```

5. **Make Changes:**
   - Make the required changes in your branch.

6. **Add Changes to the Staging Area:**
   - Add your changes to the staging area using:
   ```shell
   git add .
   ```

7. **Commit Your Changes:**
   - Commit your changes with a descriptive commit message using:
   ```shell
   git commit -m "Your descriptive commit message"
   ```

8. **Push Your Changes to the Remote Repository:**
   - Push your branch with changes to your fork on GitHub using:
   ```shell
   git push origin your-branch-name
   ```

9. **Submit a Pull Request (PR):**
   - Create a Pull Request from your branch to the main repository. Make sure to include a detailed description of your changes and reference any related issues.

10. **Collaborate:**
    - Be responsive to comments and feedback on your PR.
    - Make necessary updates as suggested.
    - Once your PR is approved, it will be merged into the main repository.

11. **Testing:**
    - Before submitting a Pull Request, ensure your code passes all unit tests.
    - To run unit tests from the root of the repository, execute:
    ```shell
    python -m pytest
    ```

*Note: You should run the unit test only after making the changes to the backend code.*

12. **Questions and Collaboration:**
    - Feel free to join our Discord. We're very friendly and welcoming to new contributors, so don't hesitate to reach out.

Thank you for considering contributing to DocsGPT! 🙏

## Questions/collaboration
Feel free to join our [Discord](https://discord.gg/n5BX8dh8rU). We're very friendly and welcoming to new contributors, so don't hesitate to reach out.
# Thank you so much for considering to contribute DocsGPT!🙏
190
README.md
@@ -7,65 +7,193 @@
|
||||
</p>
|
||||
|
||||
<p align="left">
|
||||
<strong>DocsGPT</strong> is a cutting-edge open-source solution that streamlines the process of finding information in project documentation. With its integration of the powerful <strong>GPT</strong> models, developers can easily ask questions about a project and receive accurate answers.
|
||||
<strong><a href="https://docsgpt.arc53.com/">DocsGPT</a></strong> is a cutting-edge open-source solution that streamlines the process of finding information in the project documentation. With its integration of the powerful <strong>GPT</strong> models, developers can easily ask questions about a project and receive accurate answers.
|
||||
|
||||
Say goodbye to time-consuming manual searches, and let <strong>DocsGPT</strong> help you quickly find the information you need. Try it out and see how it revolutionizes your project documentation experience. Contribute to its development and be a part of the future of AI-powered assistance.
|
||||
Say goodbye to time-consuming manual searches, and let <strong><a href="https://docsgpt.arc53.com/">DocsGPT</a></strong> help you quickly find the information you need. Try it out and see how it revolutionizes your project documentation experience. Contribute to its development and be a part of the future of AI-powered assistance.
|
||||
</p>
|
||||
|
||||
<div align="center">
|
||||
|
||||
<a href="https://discord.gg/n5BX8dh8rU"></a>
|
||||
<a href="https://discord.gg/n5BX8dh8rU"></a>
|
||||
<a href="https://discord.gg/n5BX8dh8rU"></a>
|
||||
<a href="https://discord.gg/n5BX8dh8rU"></a>
|
||||
|
||||
<a href="https://github.com/arc53/DocsGPT"></a>
|
||||
<a href="https://github.com/arc53/DocsGPT"></a>
|
||||
<a href="https://github.com/arc53/DocsGPT/blob/main/LICENSE"></a>
|
||||
<a href="https://discord.gg/n5BX8dh8rU"></a>
|
||||
<a href="https://twitter.com/docsgptai"></a>
|
||||
|
||||
|
||||
</div>
|
||||
|
||||

|
||||
### Production Support / Help for Companies:
|
||||
|
||||
We're eager to provide personalized assistance when deploying your DocsGPT to a live environment.
|
||||
|
||||
- [Book Demo :wave:](https://airtable.com/appdeaL0F1qV8Bl2C/shrrJF1Ll7btCJRbP)
|
||||
- [Send Email :email:](mailto:contact@arc53.com?subject=DocsGPT%20support%2Fsolutions)
|
||||
|
||||

|
||||
|
||||
## Roadmap
|
||||
|
||||
You can find our [Roadmap](https://github.com/orgs/arc53/projects/2) here, please don't hesitate contributing or creating issues, it helps us make DocsGPT better!
|
||||
You can find our roadmap [here](https://github.com/orgs/arc53/projects/2). Please don't hesitate to contribute or create issues, it helps us improve DocsGPT!
|
||||
|
||||
## Preview
|
||||

|
||||
## Our Open-Source Models Optimized for DocsGPT:
|
||||
|
||||
## [Live preview](https://docsgpt.arc53.com/)
|
||||
| Name | Base Model | Requirements (or similar) |
|
||||
| --------------------------------------------------------------------- | ----------- | ------------------------- |
|
||||
| [Docsgpt-7b-falcon](https://huggingface.co/Arc53/docsgpt-7b-falcon) | Falcon-7b | 1xA10G gpu |
|
||||
| [Docsgpt-14b](https://huggingface.co/Arc53/docsgpt-14b) | llama-2-14b | 2xA10 gpu's |
|
||||
| [Docsgpt-40b-falcon](https://huggingface.co/Arc53/docsgpt-40b-falcon) | falcon-40b | 8xA10G gpu's |
|
||||
|
||||
## [Join Our Discord](https://discord.gg/n5BX8dh8rU)
|
||||
If you don't have enough resources to run it, you can use bitsandbytes to quantize.
|
||||
|
||||
## Features
|
||||
|
||||
## Project structure
|
||||
- Application - flask app (main application)
|
||||

|
||||
|
||||
- Extensions - chrome extension
|
||||
## Useful Links
|
||||
|
||||
- Scripts - script that creates similarity search index and store for other libraries.
|
||||
- :mag: :fire: [Live preview](https://docsgpt.arc53.com/)
|
||||
|
||||
- :speech_balloon: :tada: [Join our Discord](https://discord.gg/n5BX8dh8rU)
|
||||
|
||||
- :books: :sunglasses: [Guides](https://docs.docsgpt.co.uk/)
|
||||
|
||||
- :couple: [Interested in contributing?](https://github.com/arc53/DocsGPT/blob/main/CONTRIBUTING.md)
|
||||
|
||||
- :file_folder: :rocket: [How to use any other documentation](https://docs.docsgpt.co.uk/Guides/How-to-train-on-other-documentation)
|
||||
|
||||
- :house: :closed_lock_with_key: [How to host it locally (so all data will stay on-premises)](https://docs.docsgpt.co.uk/Guides/How-to-use-different-LLM)
|
||||
|
||||
## Project Structure
|
||||
|
||||
- Application - Flask app (main application).
|
||||
|
||||
- Extensions - Chrome extension.
|
||||
|
||||
- Scripts - Script that creates similarity search index for other libraries.
|
||||
|
||||
- Frontend - built with <a href="https://vitejs.dev/">Vite</a> and <a href="https://react.dev/">React</a>.
|
||||
|
||||
## QuickStart
|
||||
Please note: the current vector database uses the pandas Python documentation, so responses will be related to it. If you want to use other docs, please follow the guide below.
|
||||
|
||||
1. Navigate to `/application` folder
|
||||
2. Install dependencies
|
||||
`pip install -r requirements.txt`
|
||||
3. Prepare .env file
|
||||
Copy `.env_sample` and create `.env` with your OpenAI API token
|
||||
4. Run the app
|
||||
`python app.py`
|
||||
> [!Note]
|
||||
> Make sure you have [Docker](https://docs.docker.com/engine/install/) installed
|
||||
|
||||
On macOS or Linux, run:
|
||||
|
||||
[How to install the Chrome extension](https://github.com/arc53/docsgpt/wiki#launch-chrome-extension)
|
||||
`./setup.sh`
|
||||
|
||||
It will install all the dependencies and let you download the local model, use OpenAI, or use our LLM API.
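
For reference, the typical flow looks like this (a minimal sketch; the script walks you through the remaining choices interactively):

```shell
git clone https://github.com/arc53/DocsGPT.git
cd DocsGPT
./setup.sh
```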
|
||||
|
||||
## [Guides](https://github.com/arc53/docsgpt/wiki)
|
||||
Otherwise, refer to this Guide:
|
||||
|
||||
## [Interested in contributing?](https://github.com/arc53/DocsGPT/blob/main/CONTRIBUTING.md)
|
||||
1. Download and open this repository with `git clone https://github.com/arc53/DocsGPT.git`
|
||||
2. Create a `.env` file in your root directory and set the env variables, including `VITE_API_STREAMING` (true or false, depending on whether you want streaming answers).
|
||||
It should look like this inside:
|
||||
|
||||
## [How to use any other documentation](https://github.com/arc53/docsgpt/wiki/How-to-train-on-other-documentation)
|
||||
```
|
||||
LLM_NAME=[docsgpt or openai or others]
|
||||
VITE_API_STREAMING=true
|
||||
API_KEY=[if LLM_NAME is openai]
|
||||
```
|
||||
|
||||
## [How to host it locally (so all data will stay on-premises)](https://github.com/arc53/DocsGPT/wiki/How-to-use-different-LLM's#hosting-everything-locally)
|
||||
See optional environment variables in the [/.env-template](https://github.com/arc53/DocsGPT/blob/main/.env-template) and [/application/.env_sample](https://github.com/arc53/DocsGPT/blob/main/application/.env_sample) files.
|
||||
|
||||
Built with [🦜️🔗 LangChain](https://github.com/hwchase17/langchain)
|
||||
3. Run [./run-with-docker-compose.sh](https://github.com/arc53/DocsGPT/blob/main/run-with-docker-compose.sh).
|
||||
4. Navigate to http://localhost:5173/.
|
||||
|
||||
To stop, just press `Ctrl + C`.
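
If any containers are left running afterwards, you can tear them down explicitly (assuming the script is a thin wrapper around Docker Compose with the default compose file):

```shell
docker compose down
```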
|
||||
|
||||
## Development Environments
|
||||
|
||||
### Spin up Mongo and Redis
|
||||
|
||||
For development, only two containers from [docker-compose.yaml](https://github.com/arc53/DocsGPT/blob/main/docker-compose.yaml) are used (all services except Redis and Mongo are removed).
|
||||
See file [docker-compose-dev.yaml](./docker-compose-dev.yaml).
|
||||
|
||||
Run
|
||||
|
||||
```
|
||||
docker compose -f docker-compose-dev.yaml build
|
||||
docker compose -f docker-compose-dev.yaml up -d
|
||||
```
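
As a quick sanity check, you can confirm that both containers came up:

```shell
docker compose -f docker-compose-dev.yaml ps
```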
|
||||
|
||||
### Run the Backend
|
||||
|
||||
> [!Note]
|
||||
> Make sure you have Python 3.10 or 3.11 installed.
|
||||
|
||||
1. Export required environment variables or prepare a `.env` file in the `/application` folder:
|
||||
- Copy [.env_sample](https://github.com/arc53/DocsGPT/blob/main/application/.env_sample) and create `.env`.
|
||||
|
||||
(check out [`application/core/settings.py`](application/core/settings.py) if you want to see more config options.)
|
||||
|
||||
2. (optional) Create a Python virtual environment:
|
||||
You can follow the [Python official documentation](https://docs.python.org/3/tutorial/venv.html) for virtual environments.
|
||||
|
||||
a) On macOS and Linux
|
||||
|
||||
```commandline
|
||||
python -m venv venv
|
||||
. venv/bin/activate
|
||||
```
|
||||
|
||||
b) On Windows
|
||||
|
||||
```commandline
|
||||
python -m venv venv
|
||||
venv/Scripts/activate
|
||||
```
|
||||
|
||||
3. Download the embedding model and save it in the `model/` folder:
|
||||
You can use the script below, or download it manually from [here](https://d3dg1063dc54p9.cloudfront.net/models/embeddings/mpnet-base-v2.zip), unzip it and save it in the `model/` folder.
|
||||
|
||||
```commandline
|
||||
wget https://d3dg1063dc54p9.cloudfront.net/models/embeddings/mpnet-base-v2.zip
|
||||
unzip mpnet-base-v2.zip -d model
|
||||
rm mpnet-base-v2.zip
```
|
||||
|
||||
4. Change to the `application/` subdirectory with `cd application/` and install the backend dependencies:
|
||||
|
||||
```commandline
|
||||
pip install -r requirements.txt
|
||||
```
|
||||
|
||||
5. Run the app using `flask --app application/app.py run --host=0.0.0.0 --port=7091`.
|
||||
6. Start the worker with `celery -A application.app.celery worker -l INFO`.
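
With the backend and the worker running, you can sanity-check the API directly. A minimal sketch (it assumes `API_KEY` and `EMBEDDINGS_KEY` are set in your `.env`; otherwise include them in the JSON body):

```shell
curl -X POST http://localhost:7091/api/answer \
  -H "Content-Type: application/json" \
  -d '{"question": "What is DocsGPT?", "history": [], "active_docs": "default"}'
```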
|
||||
|
||||
### Start Frontend
|
||||
|
||||
> [!Note]
|
||||
> Make sure you have Node version 16 or higher.
|
||||
|
||||
1. Navigate to the [/frontend](https://github.com/arc53/DocsGPT/tree/main/frontend) folder.
|
||||
2. Install the required packages `husky` and `vite` (ignore if already installed).
|
||||
|
||||
```commandline
|
||||
npm install husky -g
|
||||
npm install vite -g
|
||||
```
|
||||
|
||||
3. Install dependencies by running `npm install --include=dev`.
|
||||
4. Run the app using `npm run dev`.
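
If you need a production bundle instead of the dev server, the standard Vite build should work (assuming the default scripts in `package.json`):

```shell
npm run build
```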
|
||||
|
||||
## Contributing
|
||||
|
||||
Please refer to the [CONTRIBUTING.md](CONTRIBUTING.md) file for information about how to get involved. We welcome issues, questions, and pull requests.
|
||||
|
||||
## Code Of Conduct
|
||||
|
||||
We as members, contributors, and leaders, pledge to make participation in our community a harassment-free experience for everyone, regardless of age, body size, visible or invisible disability, ethnicity, sex characteristics, gender identity and expression, level of experience, education, socio-economic status, nationality, personal appearance, race, religion, or sexual identity and orientation. Please refer to the [CODE_OF_CONDUCT.md](CODE_OF_CONDUCT.md) file for more information about contributing.
|
||||
|
||||
## Many Thanks To Our Contributors⚡
|
||||
|
||||
<a href="https://github.com/arc53/DocsGPT/graphs/contributors" alt="View Contributors">
|
||||
<img src="https://contrib.rocks/image?repo=arc53/DocsGPT" alt="Contributors" />
|
||||
</a>
|
||||
|
||||
## License
|
||||
|
||||
The source code license is [MIT](https://opensource.org/license/mit/), as described in the [LICENSE](LICENSE) file.
|
||||
|
||||
Built with [:bird: :link: LangChain](https://github.com/hwchase17/langchain)
|
||||
|
||||
@@ -1 +1,11 @@
|
||||
OPENAI_API_KEY=your_api_key
|
||||
API_KEY=your_api_key
|
||||
EMBEDDINGS_KEY=your_api_key
|
||||
API_URL=http://localhost:7091
|
||||
FLASK_APP=application/app.py
|
||||
FLASK_DEBUG=true
|
||||
|
||||
#For OPENAI on Azure
|
||||
OPENAI_API_BASE=
|
||||
OPENAI_API_VERSION=
|
||||
AZURE_DEPLOYMENT_NAME=
|
||||
AZURE_EMBEDDINGS_DEPLOYMENT_NAME=
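
For reference, a hypothetical filled-in Azure section might look like this (all values are placeholders for your own deployment):

```
OPENAI_API_BASE=https://your-resource-name.openai.azure.com/
OPENAI_API_VERSION=2023-05-15
AZURE_DEPLOYMENT_NAME=your-gpt-deployment
AZURE_EMBEDDINGS_DEPLOYMENT_NAME=your-embeddings-deployment
```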
|
||||
@@ -1,20 +1,29 @@
|
||||
FROM python:3.10-slim-bullseye as builder
|
||||
FROM python:3.11-slim-bullseye as builder
|
||||
|
||||
# Tiktoken requires Rust toolchain, so build it in a separate stage
|
||||
RUN apt-get update && apt-get install -y gcc curl
|
||||
RUN curl https://sh.rustup.rs -sSf | sh -s -- -y && apt-get install --reinstall libc6-dev -y
|
||||
ENV PATH="/root/.cargo/bin:${PATH}"
|
||||
RUN pip install --upgrade pip && pip install tiktoken==0.1.2
|
||||
RUN pip install --upgrade pip && pip install tiktoken==0.5.2
|
||||
COPY requirements.txt .
|
||||
RUN pip install -r requirements.txt
|
||||
RUN apt-get install -y wget unzip
|
||||
RUN wget https://d3dg1063dc54p9.cloudfront.net/models/embeddings/mpnet-base-v2.zip
|
||||
RUN unzip mpnet-base-v2.zip -d model
|
||||
RUN rm mpnet-base-v2.zip
|
||||
|
||||
FROM python:3.11-slim-bullseye
|
||||
|
||||
# Copy pre-built packages and binaries from builder stage
|
||||
COPY --from=builder /usr/local/ /usr/local/
|
||||
|
||||
FROM python:3.10-slim-bullseye
|
||||
# Copy pre-built packages from builder stage
|
||||
COPY --from=builder /usr/local/lib/python3.10/site-packages/ /usr/local/lib/python3.10/site-packages/
|
||||
WORKDIR /app
|
||||
COPY . /app
|
||||
COPY --from=builder /model /app/model
|
||||
|
||||
COPY . /app/application
|
||||
ENV FLASK_APP=app.py
|
||||
ENV FLASK_DEBUG=true
|
||||
RUN pip install -r requirements.txt
|
||||
|
||||
EXPOSE 5001
|
||||
EXPOSE 7091
|
||||
|
||||
CMD ["gunicorn", "-w", "6", "--bind", "0.0.0.0:5001", "wsgi:app"]
|
||||
CMD ["gunicorn", "-w", "2", "--timeout", "120", "--bind", "0.0.0.0:7091", "application.wsgi:app"]
|
||||
|
||||
0
application/__init__.py
Normal file
0
application/api/__init__.py
Normal file
0
application/api/answer/__init__.py
Normal file
374
application/api/answer/routes.py
Normal file
@@ -0,0 +1,374 @@
|
||||
import asyncio
|
||||
import os
|
||||
from flask import Blueprint, request, Response
|
||||
import json
|
||||
import datetime
|
||||
import logging
|
||||
import traceback
|
||||
|
||||
from pymongo import MongoClient
|
||||
from bson.objectid import ObjectId
|
||||
from transformers import GPT2TokenizerFast
|
||||
|
||||
|
||||
|
||||
from application.core.settings import settings
|
||||
from application.vectorstore.vector_creator import VectorCreator
|
||||
from application.llm.llm_creator import LLMCreator
|
||||
from application.error import bad_request
|
||||
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
mongo = MongoClient(settings.MONGO_URI)
|
||||
db = mongo["docsgpt"]
|
||||
conversations_collection = db["conversations"]
|
||||
vectors_collection = db["vectors"]
|
||||
prompts_collection = db["prompts"]
|
||||
answer = Blueprint('answer', __name__)
|
||||
|
||||
if settings.LLM_NAME == "gpt4":
|
||||
gpt_model = 'gpt-4'
|
||||
elif settings.LLM_NAME == "anthropic":
|
||||
gpt_model = 'claude-2'
|
||||
else:
|
||||
gpt_model = 'gpt-3.5-turbo'
|
||||
|
||||
# load the prompts
|
||||
current_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
||||
with open(os.path.join(current_dir, "prompts", "chat_combine_default.txt"), "r") as f:
|
||||
chat_combine_template = f.read()
|
||||
|
||||
with open(os.path.join(current_dir, "prompts", "chat_reduce_prompt.txt"), "r") as f:
|
||||
chat_reduce_template = f.read()
|
||||
|
||||
with open(os.path.join(current_dir, "prompts", "chat_combine_creative.txt"), "r") as f:
|
||||
chat_combine_creative = f.read()
|
||||
|
||||
with open(os.path.join(current_dir, "prompts", "chat_combine_strict.txt"), "r") as f:
|
||||
chat_combine_strict = f.read()
|
||||
|
||||
api_key_set = settings.API_KEY is not None
|
||||
embeddings_key_set = settings.EMBEDDINGS_KEY is not None
|
||||
|
||||
|
||||
async def async_generate(chain, question, chat_history):
|
||||
result = await chain.arun({"question": question, "chat_history": chat_history})
|
||||
return result
|
||||
|
||||
|
||||
def count_tokens(string):
|
||||
tokenizer = GPT2TokenizerFast.from_pretrained('gpt2')
|
||||
return len(tokenizer(string)['input_ids'])
|
||||
|
||||
|
||||
def run_async_chain(chain, question, chat_history):
|
||||
loop = asyncio.new_event_loop()
|
||||
asyncio.set_event_loop(loop)
|
||||
result = {}
|
||||
try:
|
||||
answer = loop.run_until_complete(async_generate(chain, question, chat_history))
|
||||
finally:
|
||||
loop.close()
|
||||
result["answer"] = answer
|
||||
return result
|
||||
|
||||
|
||||
def get_vectorstore(data):
|
||||
if "active_docs" in data:
|
||||
if data["active_docs"].split("/")[0] == "default":
|
||||
vectorstore = ""
|
||||
elif data["active_docs"].split("/")[0] == "local":
|
||||
vectorstore = "indexes/" + data["active_docs"]
|
||||
else:
|
||||
vectorstore = "vectors/" + data["active_docs"]
|
||||
if data["active_docs"] == "default":
|
||||
vectorstore = ""
|
||||
else:
|
||||
vectorstore = ""
|
||||
vectorstore = os.path.join("application", vectorstore)
|
||||
return vectorstore
|
||||
|
||||
|
||||
def is_azure_configured():
|
||||
return settings.OPENAI_API_BASE and settings.OPENAI_API_VERSION and settings.AZURE_DEPLOYMENT_NAME
|
||||
|
||||
|
||||
def complete_stream(question, docsearch, chat_history, api_key, prompt_id, conversation_id):
|
||||
llm = LLMCreator.create_llm(settings.LLM_NAME, api_key=api_key)
|
||||
|
||||
if prompt_id == 'default':
|
||||
prompt = chat_combine_template
|
||||
elif prompt_id == 'creative':
|
||||
prompt = chat_combine_creative
|
||||
elif prompt_id == 'strict':
|
||||
prompt = chat_combine_strict
|
||||
else:
|
||||
prompt = prompts_collection.find_one({"_id": ObjectId(prompt_id)})["content"]
|
||||
|
||||
docs = docsearch.search(question, k=2)
|
||||
if settings.LLM_NAME == "llama.cpp":
|
||||
docs = [docs[0]]
|
||||
# join all page_content together with a newline
|
||||
docs_together = "\n".join([doc.page_content for doc in docs])
|
||||
p_chat_combine = prompt.replace("{summaries}", docs_together)
|
||||
messages_combine = [{"role": "system", "content": p_chat_combine}]
|
||||
source_log_docs = []
|
||||
for doc in docs:
|
||||
if doc.metadata:
|
||||
source_log_docs.append({"title": doc.metadata['title'].split('/')[-1], "text": doc.page_content})
|
||||
else:
|
||||
source_log_docs.append({"title": doc.page_content, "text": doc.page_content})
|
||||
|
||||
if len(chat_history) > 1:
|
||||
tokens_current_history = 0
|
||||
# count tokens in history
|
||||
chat_history.reverse()
|
||||
for i in chat_history:
|
||||
if "prompt" in i and "response" in i:
|
||||
tokens_batch = count_tokens(i["prompt"]) + count_tokens(i["response"])
|
||||
if tokens_current_history + tokens_batch < settings.TOKENS_MAX_HISTORY:
|
||||
tokens_current_history += tokens_batch
|
||||
messages_combine.append({"role": "user", "content": i["prompt"]})
|
||||
messages_combine.append({"role": "system", "content": i["response"]})
|
||||
messages_combine.append({"role": "user", "content": question})
|
||||
|
||||
response_full = ""
|
||||
completion = llm.gen_stream(model=gpt_model, engine=settings.AZURE_DEPLOYMENT_NAME,
|
||||
messages=messages_combine)
|
||||
for line in completion:
|
||||
data = json.dumps({"answer": str(line)})
|
||||
response_full += str(line)
|
||||
yield f"data: {data}\n\n"
|
||||
|
||||
# save conversation to database
|
||||
if conversation_id is not None:
|
||||
conversations_collection.update_one(
|
||||
{"_id": ObjectId(conversation_id)},
|
||||
{"$push": {"queries": {"prompt": question, "response": response_full, "sources": source_log_docs}}},
|
||||
)
|
||||
|
||||
else:
|
||||
# create new conversation
|
||||
# generate summary
|
||||
messages_summary = [{"role": "assistant", "content": "Summarise following conversation in no more than 3 "
|
||||
"words, respond ONLY with the summary, use the same "
|
||||
"language as the system \n\nUser: " + question + "\n\n" +
|
||||
"AI: " +
|
||||
response_full},
|
||||
{"role": "user", "content": "Summarise following conversation in no more than 3 words, "
|
||||
"respond ONLY with the summary, use the same language as the "
|
||||
"system"}]
|
||||
|
||||
completion = llm.gen(model=gpt_model, engine=settings.AZURE_DEPLOYMENT_NAME,
|
||||
messages=messages_summary, max_tokens=30)
|
||||
conversation_id = conversations_collection.insert_one(
|
||||
{"user": "local",
|
||||
"date": datetime.datetime.utcnow(),
|
||||
"name": completion,
|
||||
"queries": [{"prompt": question, "response": response_full, "sources": source_log_docs}]}
|
||||
).inserted_id
|
||||
|
||||
# send data.type = "end" to indicate that the stream has ended as json
|
||||
data = json.dumps({"type": "id", "id": str(conversation_id)})
|
||||
yield f"data: {data}\n\n"
|
||||
data = json.dumps({"type": "end"})
|
||||
yield f"data: {data}\n\n"
|
||||
|
||||
|
||||
@answer.route("/stream", methods=["POST"])
|
||||
def stream():
|
||||
data = request.get_json()
|
||||
# get parameter from url question
|
||||
question = data["question"]
|
||||
history = data["history"]
|
||||
# history to json object from string
|
||||
history = json.loads(history)
|
||||
conversation_id = data["conversation_id"]
|
||||
if 'prompt_id' in data:
|
||||
prompt_id = data["prompt_id"]
|
||||
else:
|
||||
prompt_id = 'default'
|
||||
|
||||
# check if active_docs is set
|
||||
|
||||
if not api_key_set:
|
||||
api_key = data["api_key"]
|
||||
else:
|
||||
api_key = settings.API_KEY
|
||||
if not embeddings_key_set:
|
||||
embeddings_key = data["embeddings_key"]
|
||||
else:
|
||||
embeddings_key = settings.EMBEDDINGS_KEY
|
||||
if "active_docs" in data:
|
||||
vectorstore = get_vectorstore({"active_docs": data["active_docs"]})
|
||||
else:
|
||||
vectorstore = ""
|
||||
docsearch = VectorCreator.create_vectorstore(settings.VECTOR_STORE, vectorstore, embeddings_key)
|
||||
|
||||
return Response(
|
||||
complete_stream(question, docsearch,
|
||||
chat_history=history, api_key=api_key,
|
||||
prompt_id=prompt_id,
|
||||
conversation_id=conversation_id), mimetype="text/event-stream"
|
||||
)
|
||||
|
||||
|
||||
@answer.route("/api/answer", methods=["POST"])
|
||||
def api_answer():
|
||||
data = request.get_json()
|
||||
question = data["question"]
|
||||
history = data["history"]
|
||||
if "conversation_id" not in data:
|
||||
conversation_id = None
|
||||
else:
|
||||
conversation_id = data["conversation_id"]
|
||||
print("-" * 5)
|
||||
if not api_key_set:
|
||||
api_key = data["api_key"]
|
||||
else:
|
||||
api_key = settings.API_KEY
|
||||
if not embeddings_key_set:
|
||||
embeddings_key = data["embeddings_key"]
|
||||
else:
|
||||
embeddings_key = settings.EMBEDDINGS_KEY
|
||||
if 'prompt_id' in data:
|
||||
prompt_id = data["prompt_id"]
|
||||
else:
|
||||
prompt_id = 'default'
|
||||
|
||||
if prompt_id == 'default':
|
||||
prompt = chat_combine_template
|
||||
elif prompt_id == 'creative':
|
||||
prompt = chat_combine_creative
|
||||
elif prompt_id == 'strict':
|
||||
prompt = chat_combine_strict
|
||||
else:
|
||||
prompt = prompts_collection.find_one({"_id": ObjectId(prompt_id)})["content"]
|
||||
|
||||
# use try and except to check for exception
|
||||
try:
|
||||
# check if the vectorstore is set
|
||||
vectorstore = get_vectorstore(data)
|
||||
# loading the index and the store and the prompt template
|
||||
# Note if you have used other embeddings than OpenAI, you need to change the embeddings
|
||||
docsearch = VectorCreator.create_vectorstore(settings.VECTOR_STORE, vectorstore, embeddings_key)
|
||||
|
||||
|
||||
llm = LLMCreator.create_llm(settings.LLM_NAME, api_key=api_key)
|
||||
|
||||
|
||||
|
||||
docs = docsearch.search(question, k=2)
|
||||
# join all page_content together with a newline
|
||||
docs_together = "\n".join([doc.page_content for doc in docs])
|
||||
p_chat_combine = prompt.replace("{summaries}", docs_together)
|
||||
messages_combine = [{"role": "system", "content": p_chat_combine}]
|
||||
source_log_docs = []
|
||||
for doc in docs:
|
||||
if doc.metadata:
|
||||
source_log_docs.append({"title": doc.metadata['title'].split('/')[-1], "text": doc.page_content})
|
||||
else:
|
||||
source_log_docs.append({"title": doc.page_content, "text": doc.page_content})
|
||||
# join all page_content together with a newline
|
||||
|
||||
|
||||
if len(history) > 1:
|
||||
tokens_current_history = 0
|
||||
# count tokens in history
|
||||
history.reverse()
|
||||
for i in history:
|
||||
if "prompt" in i and "response" in i:
|
||||
tokens_batch = count_tokens(i["prompt"]) + count_tokens(i["response"])
|
||||
if tokens_current_history + tokens_batch < settings.TOKENS_MAX_HISTORY:
|
||||
tokens_current_history += tokens_batch
|
||||
messages_combine.append({"role": "user", "content": i["prompt"]})
|
||||
messages_combine.append({"role": "system", "content": i["response"]})
|
||||
messages_combine.append({"role": "user", "content": question})
|
||||
|
||||
|
||||
completion = llm.gen(model=gpt_model, engine=settings.AZURE_DEPLOYMENT_NAME,
|
||||
messages=messages_combine)
|
||||
|
||||
|
||||
result = {"answer": completion, "sources": source_log_docs}
|
||||
logger.debug(result)
|
||||
|
||||
# generate conversationId
|
||||
if conversation_id is not None:
|
||||
conversations_collection.update_one(
|
||||
{"_id": ObjectId(conversation_id)},
|
||||
{"$push": {"queries": {"prompt": question,
|
||||
"response": result["answer"], "sources": result['sources']}}},
|
||||
)
|
||||
|
||||
else:
|
||||
# create new conversation
|
||||
# generate summary
|
||||
messages_summary = [
|
||||
{"role": "assistant", "content": "Summarise following conversation in no more than 3 words, "
|
||||
"respond ONLY with the summary, use the same language as the system \n\n"
|
||||
"User: " + question + "\n\n" + "AI: " + result["answer"]},
|
||||
{"role": "user", "content": "Summarise following conversation in no more than 3 words, "
|
||||
"respond ONLY with the summary, use the same language as the system"}
|
||||
]
|
||||
|
||||
completion = llm.gen(
|
||||
model=gpt_model,
|
||||
engine=settings.AZURE_DEPLOYMENT_NAME,
|
||||
messages=messages_summary,
|
||||
max_tokens=30
|
||||
)
|
||||
conversation_id = conversations_collection.insert_one(
|
||||
{"user": "local",
|
||||
"date": datetime.datetime.utcnow(),
|
||||
"name": completion,
|
||||
"queries": [{"prompt": question, "response": result["answer"], "sources": source_log_docs}]}
|
||||
).inserted_id
|
||||
|
||||
result["conversation_id"] = str(conversation_id)
|
||||
|
||||
# mock result
|
||||
# result = {
|
||||
# "answer": "The answer is 42",
|
||||
# "sources": ["https://en.wikipedia.org/wiki/42_(number)", "https://en.wikipedia.org/wiki/42_(number)"]
|
||||
# }
|
||||
return result
|
||||
except Exception as e:
|
||||
# print whole traceback
|
||||
traceback.print_exc()
|
||||
print(str(e))
|
||||
return bad_request(500, str(e))
|
||||
|
||||
|
||||
@answer.route("/api/search", methods=["POST"])
|
||||
def api_search():
|
||||
data = request.get_json()
|
||||
# get parameter from url question
|
||||
question = data["question"]
|
||||
|
||||
if not embeddings_key_set:
|
||||
if "embeddings_key" in data:
|
||||
embeddings_key = data["embeddings_key"]
|
||||
else:
|
||||
embeddings_key = settings.EMBEDDINGS_KEY
|
||||
else:
|
||||
embeddings_key = settings.EMBEDDINGS_KEY
|
||||
if "active_docs" in data:
|
||||
vectorstore = get_vectorstore({"active_docs": data["active_docs"]})
|
||||
else:
|
||||
vectorstore = ""
|
||||
docsearch = VectorCreator.create_vectorstore(settings.VECTOR_STORE, vectorstore, embeddings_key)
|
||||
|
||||
docs = docsearch.search(question, k=2)
|
||||
|
||||
source_log_docs = []
|
||||
for doc in docs:
|
||||
if doc.metadata:
|
||||
source_log_docs.append({"title": doc.metadata['title'].split('/')[-1], "text": doc.page_content})
|
||||
else:
|
||||
source_log_docs.append({"title": doc.page_content, "text": doc.page_content})
|
||||
#yield f"data:{data}\n\n"
|
||||
return source_log_docs
|
||||
|
||||
0
application/api/internal/__init__.py
Normal file
69
application/api/internal/routes.py
Normal file
@@ -0,0 +1,69 @@
|
||||
import os
|
||||
import datetime
|
||||
from flask import Blueprint, request, send_from_directory
|
||||
from pymongo import MongoClient
|
||||
from werkzeug.utils import secure_filename
|
||||
|
||||
|
||||
from application.core.settings import settings
|
||||
mongo = MongoClient(settings.MONGO_URI)
|
||||
db = mongo["docsgpt"]
|
||||
conversations_collection = db["conversations"]
|
||||
vectors_collection = db["vectors"]
|
||||
|
||||
current_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
||||
|
||||
|
||||
internal = Blueprint('internal', __name__)
|
||||
@internal.route("/api/download", methods=["get"])
|
||||
def download_file():
|
||||
user = secure_filename(request.args.get("user"))
|
||||
job_name = secure_filename(request.args.get("name"))
|
||||
filename = secure_filename(request.args.get("file"))
|
||||
save_dir = os.path.join(current_dir, settings.UPLOAD_FOLDER, user, job_name)
|
||||
return send_from_directory(save_dir, filename, as_attachment=True)
|
||||
|
||||
|
||||
|
||||
@internal.route("/api/upload_index", methods=["POST"])
|
||||
def upload_index_files():
|
||||
"""Upload two files(index.faiss, index.pkl) to the user's folder."""
|
||||
if "user" not in request.form:
|
||||
return {"status": "no user"}
|
||||
user = secure_filename(request.form["user"])
|
||||
if "name" not in request.form:
|
||||
return {"status": "no name"}
|
||||
job_name = secure_filename(request.form["name"])
|
||||
save_dir = os.path.join(current_dir, "indexes", user, job_name)
|
||||
if settings.VECTOR_STORE == "faiss":
|
||||
if "file_faiss" not in request.files:
|
||||
print("No file part")
|
||||
return {"status": "no file"}
|
||||
file_faiss = request.files["file_faiss"]
|
||||
if file_faiss.filename == "":
|
||||
return {"status": "no file name"}
|
||||
if "file_pkl" not in request.files:
|
||||
print("No file part")
|
||||
return {"status": "no file"}
|
||||
file_pkl = request.files["file_pkl"]
|
||||
if file_pkl.filename == "":
|
||||
return {"status": "no file name"}
|
||||
# saves index files
|
||||
|
||||
if not os.path.exists(save_dir):
|
||||
os.makedirs(save_dir)
|
||||
file_faiss.save(os.path.join(save_dir, "index.faiss"))
|
||||
file_pkl.save(os.path.join(save_dir, "index.pkl"))
|
||||
# create entry in vectors_collection
|
||||
vectors_collection.insert_one(
|
||||
{
|
||||
"user": user,
|
||||
"name": job_name,
|
||||
"language": job_name,
|
||||
"location": save_dir,
|
||||
"date": datetime.datetime.now().strftime("%d/%m/%Y %H:%M:%S"),
|
||||
"model": settings.EMBEDDINGS_NAME,
|
||||
"type": "local",
|
||||
}
|
||||
)
|
||||
return {"status": "ok"}
|
||||
0
application/api/user/__init__.py
Normal file
321
application/api/user/routes.py
Normal file
@@ -0,0 +1,321 @@
|
||||
import os
|
||||
from flask import Blueprint, request, jsonify
|
||||
import requests
|
||||
from pymongo import MongoClient
|
||||
from bson.objectid import ObjectId
|
||||
from werkzeug.utils import secure_filename
|
||||
|
||||
from application.api.user.tasks import ingest
|
||||
|
||||
from application.core.settings import settings
|
||||
from application.vectorstore.vector_creator import VectorCreator
|
||||
|
||||
mongo = MongoClient(settings.MONGO_URI)
|
||||
db = mongo["docsgpt"]
|
||||
conversations_collection = db["conversations"]
|
||||
vectors_collection = db["vectors"]
|
||||
prompts_collection = db["prompts"]
|
||||
feedback_collection = db["feedback"]
|
||||
user = Blueprint('user', __name__)
|
||||
|
||||
current_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
||||
|
||||
@user.route("/api/delete_conversation", methods=["POST"])
|
||||
def delete_conversation():
|
||||
# deletes a conversation from the database
|
||||
conversation_id = request.args.get("id")
|
||||
# write to mongodb
|
||||
conversations_collection.delete_one(
|
||||
{
|
||||
"_id": ObjectId(conversation_id),
|
||||
}
|
||||
)
|
||||
|
||||
return {"status": "ok"}
|
||||
|
||||
@user.route("/api/get_conversations", methods=["get"])
|
||||
def get_conversations():
|
||||
# provides a list of conversations
|
||||
conversations = conversations_collection.find().sort("date", -1)
|
||||
list_conversations = []
|
||||
for conversation in conversations:
|
||||
list_conversations.append({"id": str(conversation["_id"]), "name": conversation["name"]})
|
||||
|
||||
#list_conversations = [{"id": "default", "name": "default"}, {"id": "jeff", "name": "jeff"}]
|
||||
|
||||
return jsonify(list_conversations)
|
||||
|
||||
|
||||
@user.route("/api/get_single_conversation", methods=["get"])
|
||||
def get_single_conversation():
|
||||
# provides data for a conversation
|
||||
conversation_id = request.args.get("id")
|
||||
conversation = conversations_collection.find_one({"_id": ObjectId(conversation_id)})
|
||||
return jsonify(conversation['queries'])
|
||||
|
||||
@user.route("/api/update_conversation_name", methods=["POST"])
|
||||
def update_conversation_name():
|
||||
# update data for a conversation
|
||||
data = request.get_json()
|
||||
id = data["id"]
|
||||
name = data["name"]
|
||||
conversations_collection.update_one({"_id": ObjectId(id)},{"$set":{"name":name}})
|
||||
return {"status": "ok"}
|
||||
|
||||
|
||||
@user.route("/api/feedback", methods=["POST"])
|
||||
def api_feedback():
|
||||
data = request.get_json()
|
||||
question = data["question"]
|
||||
answer = data["answer"]
|
||||
feedback = data["feedback"]
|
||||
|
||||
|
||||
feedback_collection.insert_one(
|
||||
{
|
||||
"question": question,
|
||||
"answer": answer,
|
||||
"feedback": feedback,
|
||||
}
|
||||
)
|
||||
return {"status": "ok"}
|
||||
|
||||
@user.route("/api/delete_by_ids", methods=["get"])
|
||||
def delete_by_ids():
|
||||
"""Delete by ID. These are the IDs in the vectorstore"""
|
||||
|
||||
ids = request.args.get("path")
|
||||
if not ids:
|
||||
return {"status": "error"}
|
||||
|
||||
if settings.VECTOR_STORE == "faiss":
|
||||
result = vectors_collection.delete_index(ids=ids)
|
||||
if result:
|
||||
return {"status": "ok"}
|
||||
return {"status": "error"}
|
||||
|
||||
@user.route("/api/delete_old", methods=["get"])
|
||||
def delete_old():
|
||||
"""Delete old indexes."""
|
||||
import shutil
|
||||
|
||||
path = request.args.get("path")
|
||||
dirs = path.split("/")
|
||||
dirs_clean = []
|
||||
for i in range(0, len(dirs)):
|
||||
dirs_clean.append(secure_filename(dirs[i]))
|
||||
# check that path starts with indexes or vectors
|
||||
|
||||
if dirs_clean[0] not in ["indexes", "vectors"]:
|
||||
return {"status": "error"}
|
||||
path_clean = "/".join(dirs_clean)
|
||||
vectors_collection.delete_one({"name": dirs_clean[-1], 'user': dirs_clean[-2]})
|
||||
if settings.VECTOR_STORE == "faiss":
|
||||
try:
|
||||
shutil.rmtree(os.path.join(current_dir, path_clean))
|
||||
except FileNotFoundError:
|
||||
pass
|
||||
else:
|
||||
vetorstore = VectorCreator.create_vectorstore(
|
||||
settings.VECTOR_STORE, path=os.path.join(current_dir, path_clean)
|
||||
)
|
||||
vetorstore.delete_index()
|
||||
|
||||
return {"status": "ok"}
|
||||
|
||||
@user.route("/api/upload", methods=["POST"])
|
||||
def upload_file():
|
||||
"""Upload a file to get vectorized and indexed."""
|
||||
if "user" not in request.form:
|
||||
return {"status": "no user"}
|
||||
user = secure_filename(request.form["user"])
|
||||
if "name" not in request.form:
|
||||
return {"status": "no name"}
|
||||
job_name = secure_filename(request.form["name"])
|
||||
# check if the post request has the file part
|
||||
if "file" not in request.files:
|
||||
print("No file part")
|
||||
return {"status": "no file"}
|
||||
file = request.files["file"]
|
||||
if file.filename == "":
|
||||
return {"status": "no file name"}
|
||||
|
||||
if file:
|
||||
filename = secure_filename(file.filename)
|
||||
# save dir
|
||||
save_dir = os.path.join(current_dir, settings.UPLOAD_FOLDER, user, job_name)
|
||||
# create dir if not exists
|
||||
if not os.path.exists(save_dir):
|
||||
os.makedirs(save_dir)
|
||||
|
||||
file.save(os.path.join(save_dir, filename))
|
||||
task = ingest.delay(settings.UPLOAD_FOLDER, [".rst", ".md", ".pdf", ".txt", ".docx",
|
||||
".csv", ".epub", ".html", ".mdx"],
|
||||
job_name, filename, user)
|
||||
# task id
|
||||
task_id = task.id
|
||||
return {"status": "ok", "task_id": task_id}
|
||||
else:
|
||||
return {"status": "error"}
|
||||
|
||||
@user.route("/api/task_status", methods=["GET"])
|
||||
def task_status():
|
||||
"""Get celery job status."""
|
||||
task_id = request.args.get("task_id")
|
||||
from application.celery import celery
|
||||
task = celery.AsyncResult(task_id)
|
||||
task_meta = task.info
|
||||
return {"status": task.status, "result": task_meta}
|
||||
|
||||
|
||||
@user.route("/api/combine", methods=["GET"])
|
||||
def combined_json():
|
||||
user = "local"
|
||||
"""Provide json file with combined available indexes."""
|
||||
# get json from https://d3dg1063dc54p9.cloudfront.net/combined.json
|
||||
|
||||
data = [
|
||||
{
|
||||
"name": "default",
|
||||
"language": "default",
|
||||
"version": "",
|
||||
"description": "default",
|
||||
"fullName": "default",
|
||||
"date": "default",
|
||||
"docLink": "default",
|
||||
"model": settings.EMBEDDINGS_NAME,
|
||||
"location": "remote",
|
||||
}
|
||||
]
|
||||
# structure: name, language, version, description, fullName, date, docLink
|
||||
# append data from vectors_collection
|
||||
for index in vectors_collection.find({"user": user}):
|
||||
data.append(
|
||||
{
|
||||
"name": index["name"],
|
||||
"language": index["language"],
|
||||
"version": "",
|
||||
"description": index["name"],
|
||||
"fullName": index["name"],
|
||||
"date": index["date"],
|
||||
"docLink": index["location"],
|
||||
"model": settings.EMBEDDINGS_NAME,
|
||||
"location": "local",
|
||||
}
|
||||
)
|
||||
if settings.VECTOR_STORE == "faiss":
|
||||
data_remote = requests.get("https://d3dg1063dc54p9.cloudfront.net/combined.json").json()
|
||||
for index in data_remote:
|
||||
index["location"] = "remote"
|
||||
data.append(index)
|
||||
|
||||
return jsonify(data)
|
||||
|
||||
|
||||
@user.route("/api/docs_check", methods=["POST"])
|
||||
def check_docs():
|
||||
# check if docs exist in a vectorstore folder
|
||||
data = request.get_json()
|
||||
# split docs on / and take first part
|
||||
if data["docs"].split("/")[0] == "local":
|
||||
return {"status": "exists"}
|
||||
vectorstore = "vectors/" + data["docs"]
|
||||
base_path = "https://raw.githubusercontent.com/arc53/DocsHUB/main/"
|
||||
if os.path.exists(vectorstore) or data["docs"] == "default":
|
||||
return {"status": "exists"}
|
||||
else:
|
||||
r = requests.get(base_path + vectorstore + "index.faiss")
|
||||
|
||||
if r.status_code != 200:
|
||||
return {"status": "null"}
|
||||
else:
|
||||
if not os.path.exists(vectorstore):
|
||||
os.makedirs(vectorstore)
|
||||
with open(vectorstore + "index.faiss", "wb") as f:
|
||||
f.write(r.content)
|
||||
|
||||
# download the store
|
||||
r = requests.get(base_path + vectorstore + "index.pkl")
|
||||
with open(vectorstore + "index.pkl", "wb") as f:
|
||||
f.write(r.content)
|
||||
|
||||
return {"status": "loaded"}
|
||||
|
||||
@user.route("/api/create_prompt", methods=["POST"])
|
||||
def create_prompt():
|
||||
data = request.get_json()
|
||||
content = data["content"]
|
||||
name = data["name"]
|
||||
if name == "":
|
||||
return {"status": "error"}
|
||||
user = "local"
|
||||
resp = prompts_collection.insert_one(
|
||||
{
|
||||
"name": name,
|
||||
"content": content,
|
||||
"user": user,
|
||||
}
|
||||
)
|
||||
new_id = str(resp.inserted_id)
|
||||
return {"id": new_id}
|
||||
|
||||
@user.route("/api/get_prompts", methods=["GET"])
|
||||
def get_prompts():
|
||||
user = "local"
|
||||
prompts = prompts_collection.find({"user": user})
|
||||
list_prompts = []
|
||||
list_prompts.append({"id": "default", "name": "default", "type": "public"})
|
||||
list_prompts.append({"id": "creative", "name": "creative", "type": "public"})
|
||||
list_prompts.append({"id": "strict", "name": "strict", "type": "public"})
|
||||
for prompt in prompts:
|
||||
list_prompts.append({"id": str(prompt["_id"]), "name": prompt["name"], "type": "private"})
|
||||
|
||||
return jsonify(list_prompts)
|
||||
|
||||
@user.route("/api/get_single_prompt", methods=["GET"])
|
||||
def get_single_prompt():
|
||||
prompt_id = request.args.get("id")
|
||||
if prompt_id == 'default':
|
||||
with open(os.path.join(current_dir, "prompts", "chat_combine_default.txt"), "r") as f:
|
||||
chat_combine_template = f.read()
|
||||
return jsonify({"content": chat_combine_template})
|
||||
elif prompt_id == 'creative':
|
||||
with open(os.path.join(current_dir, "prompts", "chat_combine_creative.txt"), "r") as f:
|
||||
chat_reduce_creative = f.read()
|
||||
return jsonify({"content": chat_reduce_creative})
|
||||
elif prompt_id == 'strict':
|
||||
with open(os.path.join(current_dir, "prompts", "chat_combine_strict.txt"), "r") as f:
|
||||
chat_reduce_strict = f.read()
|
||||
return jsonify({"content": chat_reduce_strict})
|
||||
|
||||
|
||||
prompt = prompts_collection.find_one({"_id": ObjectId(prompt_id)})
|
||||
return jsonify({"content": prompt["content"]})
|
||||
|
||||
@user.route("/api/delete_prompt", methods=["POST"])
|
||||
def delete_prompt():
|
||||
data = request.get_json()
|
||||
id = data["id"]
|
||||
prompts_collection.delete_one(
|
||||
{
|
||||
"_id": ObjectId(id),
|
||||
}
|
||||
)
|
||||
return {"status": "ok"}
|
||||
|
||||
@user.route("/api/update_prompt", methods=["POST"])
|
||||
def update_prompt_name():
|
||||
data = request.get_json()
|
||||
id = data["id"]
|
||||
name = data["name"]
|
||||
content = data["content"]
|
||||
# check if name is null
|
||||
if name == "":
|
||||
return {"status": "error"}
|
||||
prompts_collection.update_one({"_id": ObjectId(id)},{"$set":{"name":name, "content": content}})
|
||||
return {"status": "ok"}
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
7
application/api/user/tasks.py
Normal file
@@ -0,0 +1,7 @@
|
||||
from application.worker import ingest_worker
|
||||
from application.celery import celery
|
||||
|
||||
@celery.task(bind=True)
|
||||
def ingest(self, directory, formats, name_job, filename, user):
|
||||
resp = ingest_worker(self, directory, formats, name_job, filename, user)
|
||||
return resp
|
||||
@@ -1,202 +1,44 @@
|
||||
import os
|
||||
import json
|
||||
import traceback
|
||||
|
||||
import dotenv
|
||||
import requests
|
||||
from flask import Flask, request, render_template
|
||||
from langchain import FAISS
|
||||
from langchain.llms import OpenAIChat
|
||||
from langchain import VectorDBQA, HuggingFaceHub, Cohere, OpenAI
|
||||
from langchain.chains.question_answering import load_qa_chain
|
||||
from langchain.embeddings import OpenAIEmbeddings, HuggingFaceHubEmbeddings, CohereEmbeddings, \
|
||||
HuggingFaceInstructEmbeddings
|
||||
from langchain.prompts import PromptTemplate
|
||||
from error import bad_request
|
||||
|
||||
os.environ["LANGCHAIN_HANDLER"] = "langchain"
|
||||
|
||||
if os.getenv("LLM_NAME") is not None:
|
||||
llm_choice = os.getenv("LLM_NAME")
|
||||
else:
|
||||
llm_choice = "openai"
|
||||
|
||||
if os.getenv("EMBEDDINGS_NAME") is not None:
|
||||
embeddings_choice = os.getenv("EMBEDDINGS_NAME")
|
||||
else:
|
||||
embeddings_choice = "openai_text-embedding-ada-002"
|
||||
|
||||
if llm_choice == "manifest":
|
||||
from manifest import Manifest
|
||||
from langchain.llms.manifest import ManifestWrapper
|
||||
|
||||
manifest = Manifest(
|
||||
client_name="huggingface",
|
||||
client_connection="http://127.0.0.1:5000"
|
||||
)
|
||||
|
||||
# Redirect PosixPath to WindowsPath on Windows
|
||||
import platform
|
||||
import dotenv
|
||||
from application.celery import celery
|
||||
from flask import Flask, request, redirect
|
||||
from application.core.settings import settings
|
||||
from application.api.user.routes import user
|
||||
from application.api.answer.routes import answer
|
||||
from application.api.internal.routes import internal
|
||||
|
||||
if platform.system() == "Windows":
|
||||
import pathlib
|
||||
|
||||
temp = pathlib.PosixPath
|
||||
pathlib.PosixPath = pathlib.WindowsPath
|
||||
|
||||
# loading the .env file
|
||||
dotenv.load_dotenv()
|
||||
|
||||
with open("combine_prompt.txt", "r") as f:
|
||||
template = f.read()
|
||||
|
||||
with open("combine_prompt_hist.txt", "r") as f:
|
||||
template_hist = f.read()
|
||||
|
||||
with open("question_prompt.txt", "r") as f:
|
||||
template_quest = f.read()
|
||||
|
||||
if os.getenv("API_KEY") is not None:
|
||||
api_key_set = True
|
||||
else:
|
||||
api_key_set = False
|
||||
if os.getenv("EMBEDDINGS_KEY") is not None:
|
||||
embeddings_key_set = True
|
||||
else:
|
||||
embeddings_key_set = False
|
||||
|
||||
app = Flask(__name__)
|
||||
|
||||
app.register_blueprint(user)
|
||||
app.register_blueprint(answer)
|
||||
app.register_blueprint(internal)
|
||||
app.config.update(
|
||||
UPLOAD_FOLDER="inputs",
|
||||
CELERY_BROKER_URL=settings.CELERY_BROKER_URL,
|
||||
CELERY_RESULT_BACKEND=settings.CELERY_RESULT_BACKEND,
|
||||
MONGO_URI=settings.MONGO_URI
|
||||
)
|
||||
celery.config_from_object("application.celeryconfig")
|
||||
|
||||
@app.route("/")
|
||||
def home():
|
||||
return render_template("index.html", api_key_set=api_key_set, llm_choice=llm_choice,
|
||||
embeddings_choice=embeddings_choice)
|
||||
|
||||
|
||||
@app.route("/api/answer", methods=["POST"])
|
||||
def api_answer():
|
||||
data = request.get_json()
|
||||
question = data["question"]
|
||||
history = data["history"]
|
||||
print('-' * 5)
|
||||
if not api_key_set:
|
||||
api_key = data["api_key"]
|
||||
if request.remote_addr in ('0.0.0.0', '127.0.0.1', 'localhost', '172.18.0.1'):
|
||||
return redirect('http://localhost:5173')
|
||||
else:
|
||||
api_key = os.getenv("API_KEY")
|
||||
if not embeddings_key_set:
|
||||
embeddings_key = data["embeddings_key"]
|
||||
else:
|
||||
embeddings_key = os.getenv("EMBEDDINGS_KEY")
|
||||
return 'Welcome to DocsGPT Backend!'
|
||||
|
||||
# use try and except to check for exception
|
||||
try:
|
||||
# check if the vectorstore is set
|
||||
if "active_docs" in data:
|
||||
vectorstore = "vectors/" + data["active_docs"]
|
||||
if data['active_docs'] == "default":
|
||||
vectorstore = ""
|
||||
else:
|
||||
vectorstore = ""
|
||||
#vectorstore = "outputs/inputs/"
|
||||
# loading the index and the store and the prompt template
|
||||
# Note if you have used other embeddings than OpenAI, you need to change the embeddings
|
||||
if embeddings_choice == "openai_text-embedding-ada-002":
|
||||
docsearch = FAISS.load_local(vectorstore, OpenAIEmbeddings(openai_api_key=embeddings_key))
|
||||
elif embeddings_choice == "huggingface_sentence-transformers/all-mpnet-base-v2":
|
||||
docsearch = FAISS.load_local(vectorstore, HuggingFaceHubEmbeddings())
|
||||
elif embeddings_choice == "huggingface_hkunlp/instructor-large":
|
||||
docsearch = FAISS.load_local(vectorstore, HuggingFaceInstructEmbeddings())
|
||||
elif embeddings_choice == "cohere_medium":
|
||||
docsearch = FAISS.load_local(vectorstore, CohereEmbeddings(cohere_api_key=embeddings_key))
|
||||
|
||||
# create a prompt template
|
||||
if history:
|
||||
history = json.loads(history)
|
||||
template_temp = template_hist.replace("{historyquestion}", history[0]).replace("{historyanswer}",
|
||||
history[1])
|
||||
c_prompt = PromptTemplate(input_variables=["summaries", "question"], template=template_temp,
|
||||
template_format="jinja2")
|
||||
else:
|
||||
c_prompt = PromptTemplate(input_variables=["summaries", "question"], template=template,
|
||||
template_format="jinja2")
|
||||
|
||||
q_prompt = PromptTemplate(input_variables=["context", "question"], template=template_quest,
|
||||
template_format="jinja2")
|
||||
if llm_choice == "openai":
|
||||
llm = OpenAIChat(openai_api_key=api_key, temperature=0)
|
||||
#llm = OpenAI(openai_api_key=api_key, temperature=0)
|
||||
elif llm_choice == "manifest":
|
||||
llm = ManifestWrapper(client=manifest, llm_kwargs={"temperature": 0.001, "max_tokens": 2048})
|
||||
elif llm_choice == "huggingface":
|
||||
llm = HuggingFaceHub(repo_id="bigscience/bloom", huggingfacehub_api_token=api_key)
|
||||
elif llm_choice == "cohere":
|
||||
llm = Cohere(model="command-xlarge-nightly", cohere_api_key=api_key)
|
||||
|
||||
qa_chain = load_qa_chain(llm=llm, chain_type="map_reduce",
|
||||
combine_prompt=c_prompt, question_prompt=q_prompt)
|
||||
|
||||
chain = VectorDBQA(combine_documents_chain=qa_chain, vectorstore=docsearch, k=10)
|
||||
|
||||
# fetch the answer
|
||||
result = chain({"query": question})
|
||||
|
||||
# some formatting for the frontend
|
||||
result['answer'] = result['result']
|
||||
result['answer'] = result['answer'].replace("\\n", "\n")
|
||||
try:
|
||||
result['answer'] = result['answer'].split("SOURCES:")[0]
|
||||
except:
|
||||
pass
|
||||
|
||||
# mock result
|
||||
# result = {
|
||||
# "answer": "The answer is 42",
|
||||
# "sources": ["https://en.wikipedia.org/wiki/42_(number)", "https://en.wikipedia.org/wiki/42_(number)"]
|
||||
# }
|
||||
return result
|
||||
except Exception as e:
|
||||
# print whole traceback
|
||||
traceback.print_exc()
|
||||
print(str(e))
|
||||
return bad_request(500, str(e))
|
||||
|
||||
|
||||
@app.route("/api/docs_check", methods=["POST"])
|
||||
def check_docs():
|
||||
# check if docs exist in a vectorstore folder
|
||||
data = request.get_json()
|
||||
vectorstore = "vectors/" + data["docs"]
|
||||
base_path = 'https://raw.githubusercontent.com/arc53/DocsHUB/main/'
|
||||
if os.path.exists(vectorstore) or data["docs"] == "default":
|
||||
return {"status": 'exists'}
|
||||
else:
|
||||
r = requests.get(base_path + vectorstore + "index.faiss")
|
||||
|
||||
if r.status_code != 200:
|
||||
return {"status": 'null'}
|
||||
else:
|
||||
if not os.path.exists(vectorstore):
|
||||
os.makedirs(vectorstore)
|
||||
with open(vectorstore + "index.faiss", "wb") as f:
|
||||
f.write(r.content)
|
||||
|
||||
# download the store
|
||||
r = requests.get(base_path + vectorstore + "index.pkl")
|
||||
with open(vectorstore + "index.pkl", "wb") as f:
|
||||
f.write(r.content)
|
||||
|
||||
return {"status": 'loaded'}
|
||||
|
||||
|
||||
# handling CORS
|
||||
@app.after_request
|
||||
def after_request(response):
|
||||
response.headers.add('Access-Control-Allow-Origin', '*')
|
||||
response.headers.add('Access-Control-Allow-Headers', 'Content-Type,Authorization')
|
||||
response.headers.add('Access-Control-Allow-Methods', 'GET,PUT,POST,DELETE,OPTIONS')
|
||||
response.headers.add("Access-Control-Allow-Origin", "*")
|
||||
response.headers.add("Access-Control-Allow-Headers", "Content-Type,Authorization")
|
||||
response.headers.add("Access-Control-Allow-Methods", "GET,PUT,POST,DELETE,OPTIONS")
|
||||
return response
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
app.run(debug=True, port=5001)
|
||||
app.run(debug=True, port=7091)
|
||||
|
||||
|
||||
9
application/celery.py
Normal file
@@ -0,0 +1,9 @@
|
||||
from celery import Celery
|
||||
from application.core.settings import settings
|
||||
|
||||
def make_celery(app_name=__name__):
|
||||
celery = Celery(app_name, broker=settings.CELERY_BROKER_URL, backend=settings.CELERY_RESULT_BACKEND)
|
||||
celery.conf.update(settings)
|
||||
return celery
|
||||
|
||||
celery = make_celery()
|
||||
8
application/celeryconfig.py
Normal file
@@ -0,0 +1,8 @@
|
||||
import os
|
||||
|
||||
broker_url = os.getenv("CELERY_BROKER_URL")
|
||||
result_backend = os.getenv("CELERY_RESULT_BACKEND")
|
||||
|
||||
task_serializer = 'json'
|
||||
result_serializer = 'json'
|
||||
accept_content = ['json']
|
||||
@@ -1,25 +0,0 @@
|
||||
You are a DocsGPT, friendly and helpful AI assistant by Arc53 that provides help with documents. You give thorough answers with code examples if possible.
|
||||
|
||||
QUESTION: How to merge tables in pandas?
|
||||
=========
|
||||
Content: pandas provides various facilities for easily combining together Series or DataFrame with various kinds of set logic for the indexes and relational algebra functionality in the case of join / merge-type operations.
|
||||
Source: 28-pl
|
||||
Content: pandas provides a single function, merge(), as the entry point for all standard database join operations between DataFrame or named Series objects: \n\npandas.merge(left, right, how='inner', on=None, left_on=None, right_on=None, left_index=False, right_index=False, sort=False, suffixes=('_x', '_y'), copy=True, indicator=False, validate=None)
|
||||
Source: 30-pl
|
||||
=========
|
||||
FINAL ANSWER: To merge two tables in pandas, you can use the pd.merge() function. The basic syntax is: \n\npd.merge(left, right, on, how) \n\nwhere left and right are the two tables to merge, on is the column to merge on, and how is the type of merge to perform. \n\nFor example, to merge the two tables df1 and df2 on the column 'id', you can use: \n\npd.merge(df1, df2, on='id', how='inner')
|
||||
SOURCES: 28-pl 30-pl
|
||||
|
||||
QUESTION: How are you?
|
||||
=========
|
||||
CONTENT:
|
||||
SOURCE:
|
||||
=========
|
||||
FINAL ANSWER: I am fine, thank you. How are you?
|
||||
SOURCES:
|
||||
|
||||
QUESTION: {{ question }}
|
||||
=========
|
||||
{{ summaries }}
|
||||
=========
|
||||
FINAL ANSWER:
|
||||
@@ -1,33 +0,0 @@
|
||||
You are a DocsGPT, friendly and helpful AI assistant by Arc53 that provides help with documents. You give thorough answers with code examples if possible.
|
||||
|
||||
QUESTION: How to merge tables in pandas?
|
||||
=========
|
||||
Content: pandas provides various facilities for easily combining together Series or DataFrame with various kinds of set logic for the indexes and relational algebra functionality in the case of join / merge-type operations.
|
||||
Source: 28-pl
|
||||
Content: pandas provides a single function, merge(), as the entry point for all standard database join operations between DataFrame or named Series objects: \n\npandas.merge(left, right, how='inner', on=None, left_on=None, right_on=None, left_index=False, right_index=False, sort=False, suffixes=('_x', '_y'), copy=True, indicator=False, validate=None)
|
||||
Source: 30-pl
|
||||
=========
|
||||
FINAL ANSWER: To merge two tables in pandas, you can use the pd.merge() function. The basic syntax is: \n\npd.merge(left, right, on, how) \n\nwhere left and right are the two tables to merge, on is the column to merge on, and how is the type of merge to perform. \n\nFor example, to merge the two tables df1 and df2 on the column 'id', you can use: \n\npd.merge(df1, df2, on='id', how='inner')
|
||||
SOURCES: 28-pl 30-pl
|
||||
|
||||
QUESTION: How are you?
|
||||
=========
|
||||
CONTENT:
|
||||
SOURCE:
|
||||
=========
|
||||
FINAL ANSWER: I am fine, thank you. How are you?
|
||||
SOURCES:
|
||||
|
||||
QUESTION: {{ historyquestion }}
|
||||
=========
|
||||
CONTENT:
|
||||
SOURCE:
|
||||
=========
|
||||
FINAL ANSWER: {{ historyanswer }}
|
||||
SOURCES:
|
||||
|
||||
QUESTION: {{ question }}
|
||||
=========
|
||||
{{ summaries }}
|
||||
=========
|
||||
FINAL ANSWER:
|
||||
0
application/core/__init__.py
Normal file
44
application/core/settings.py
Normal file
@@ -0,0 +1,44 @@
|
||||
from pathlib import Path
|
||||
from typing import Optional
|
||||
import os
|
||||
|
||||
from pydantic_settings import BaseSettings
|
||||
current_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
||||
|
||||
|
||||
class Settings(BaseSettings):
|
||||
LLM_NAME: str = "docsgpt"
|
||||
EMBEDDINGS_NAME: str = "huggingface_sentence-transformers/all-mpnet-base-v2"
|
||||
CELERY_BROKER_URL: str = "redis://localhost:6379/0"
|
||||
CELERY_RESULT_BACKEND: str = "redis://localhost:6379/1"
|
||||
MONGO_URI: str = "mongodb://localhost:27017/docsgpt"
|
||||
MODEL_PATH: str = os.path.join(current_dir, "models/docsgpt-7b-f16.gguf")
|
||||
TOKENS_MAX_HISTORY: int = 150
|
||||
UPLOAD_FOLDER: str = "inputs"
|
||||
VECTOR_STORE: str = "faiss" # "faiss" or "elasticsearch"
|
||||
|
||||
API_URL: str = "http://localhost:7091" # backend url for celery worker
|
||||
|
||||
API_KEY: Optional[str] = None # LLM api key
|
||||
EMBEDDINGS_KEY: Optional[str] = None # api key for embeddings (if using openai, just copy API_KEY)
|
||||
OPENAI_API_BASE: Optional[str] = None # azure openai api base url
|
||||
OPENAI_API_VERSION: Optional[str] = None # azure openai api version
|
||||
AZURE_DEPLOYMENT_NAME: Optional[str] = None # azure deployment name for answering
|
||||
AZURE_EMBEDDINGS_DEPLOYMENT_NAME: Optional[str] = None # azure deployment name for embeddings
|
||||
|
||||
# elasticsearch
|
||||
ELASTIC_CLOUD_ID: Optional[str] = None # cloud id for elasticsearch
|
||||
ELASTIC_USERNAME: Optional[str] = None # username for elasticsearch
|
||||
ELASTIC_PASSWORD: Optional[str] = None # password for elasticsearch
|
||||
ELASTIC_URL: Optional[str] = None # url for elasticsearch
|
||||
ELASTIC_INDEX: Optional[str] = "docsgpt" # index name for elasticsearch
|
||||
|
||||
# SageMaker config
|
||||
SAGEMAKER_ENDPOINT: Optional[str] = None # SageMaker endpoint name
|
||||
SAGEMAKER_REGION: Optional[str] = None # SageMaker region name
|
||||
SAGEMAKER_ACCESS_KEY: Optional[str] = None # SageMaker access key
|
||||
SAGEMAKER_SECRET_KEY: Optional[str] = None # SageMaker secret key
|
||||
|
||||
|
||||
path = Path(__file__).parent.parent.absolute()
|
||||
settings = Settings(_env_file=path.joinpath(".env"), _env_file_encoding="utf-8")
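
Since these settings come from pydantic's `BaseSettings`, any of them can be overridden through `/application/.env` or plain environment variables without code changes. A hypothetical example (values are placeholders):

```shell
export LLM_NAME=openai
export API_KEY=your_api_key
export EMBEDDINGS_KEY=your_api_key
export VECTOR_STORE=elasticsearch   # defaults to "faiss"
```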
|
||||
@@ -1,13 +1,15 @@
|
||||
from flask import jsonify
|
||||
from werkzeug.http import HTTP_STATUS_CODES
|
||||
|
||||
def response_error(code_status,message=None):
|
||||
payload = {'error':HTTP_STATUS_CODES.get(code_status,"something went wrong")}
|
||||
|
||||
def response_error(code_status, message=None):
|
||||
payload = {'error': HTTP_STATUS_CODES.get(code_status, "something went wrong")}
|
||||
if message:
|
||||
payload['message'] = message
|
||||
response = jsonify(payload)
|
||||
response.status_code = code_status
|
||||
return response
|
||||
|
||||
def bad_request(status_code=400,message=''):
|
||||
return response_error(code_status=status_code,message=message)
|
||||
|
||||
def bad_request(status_code=400, message=''):
|
||||
return response_error(code_status=status_code, message=message)
|
||||
|
||||
0
application/llm/__init__.py
Normal file
40
application/llm/anthropic.py
Normal file
@@ -0,0 +1,40 @@
|
||||
from application.llm.base import BaseLLM
|
||||
from application.core.settings import settings
|
||||
|
||||
class AnthropicLLM(BaseLLM):
|
||||
|
||||
def __init__(self, api_key=None):
|
||||
from anthropic import Anthropic, HUMAN_PROMPT, AI_PROMPT
|
||||
self.api_key = api_key or settings.ANTHROPIC_API_KEY # If not provided, use a default from settings
|
||||
self.anthropic = Anthropic(api_key=self.api_key)
|
||||
self.HUMAN_PROMPT = HUMAN_PROMPT
|
||||
self.AI_PROMPT = AI_PROMPT
|
||||
|
||||
def gen(self, model, messages, engine=None, max_tokens=300, stream=False, **kwargs):
|
||||
context = messages[0]['content']
|
||||
user_question = messages[-1]['content']
|
||||
prompt = f"### Context \n {context} \n ### Question \n {user_question}"
|
||||
if stream:
|
||||
return self.gen_stream(model, prompt, max_tokens, **kwargs)
|
||||
|
||||
completion = self.anthropic.completions.create(
|
||||
model=model,
|
||||
max_tokens_to_sample=max_tokens,
|
||||
stream=stream,
|
||||
prompt=f"{self.HUMAN_PROMPT} {prompt}{self.AI_PROMPT}",
|
||||
)
|
||||
return completion.completion
|
||||
|
||||
def gen_stream(self, model, messages, engine=None, max_tokens=300, **kwargs):
|
||||
context = messages[0]['content']
|
||||
user_question = messages[-1]['content']
|
||||
prompt = f"### Context \n {context} \n ### Question \n {user_question}"
|
||||
stream_response = self.anthropic.completions.create(
|
||||
model=model,
|
||||
prompt=f"{self.HUMAN_PROMPT} {prompt}{self.AI_PROMPT}",
|
||||
max_tokens_to_sample=max_tokens,
|
||||
stream=True,
|
||||
)
|
||||
|
||||
for completion in stream_response:
|
||||
yield completion.completion
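A usage sketch under the message convention the class assumes (first message carries the retrieved context, last message the user question); the API key and model name below are placeholder values.
```
llm = AnthropicLLM(api_key="sk-ant-...")  # placeholder key; falls back to settings.ANTHROPIC_API_KEY
messages = [
    {"role": "system", "content": "Retrieved documentation snippets go here."},
    {"role": "user", "content": "How do I point DocsGPT at Elasticsearch?"},
]

# Non-streaming call returns the full completion text.
answer = llm.gen(model="claude-2", messages=messages, max_tokens=300)

# Streaming call yields text fragments as they arrive.
for fragment in llm.gen_stream(model="claude-2", messages=messages, max_tokens=300):
    print(fragment, end="")
```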
|
||||
14
application/llm/base.py
Normal file
@@ -0,0 +1,14 @@
|
||||
from abc import ABC, abstractmethod
|
||||
|
||||
|
||||
class BaseLLM(ABC):
|
||||
def __init__(self):
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def gen(self, *args, **kwargs):
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def gen_stream(self, *args, **kwargs):
|
||||
pass
|
||||
49
application/llm/docsgpt_provider.py
Normal file
@@ -0,0 +1,49 @@
|
||||
from application.llm.base import BaseLLM
|
||||
import json
|
||||
import requests
|
||||
|
||||
class DocsGPTAPILLM(BaseLLM):
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
self.endpoint = "https://llm.docsgpt.co.uk"
|
||||
|
||||
|
||||
def gen(self, model, engine, messages, stream=False, **kwargs):
|
||||
context = messages[0]['content']
|
||||
user_question = messages[-1]['content']
|
||||
prompt = f"### Instruction \n {user_question} \n ### Context \n {context} \n ### Answer \n"
|
||||
|
||||
response = requests.post(
|
||||
f"{self.endpoint}/answer",
|
||||
json={
|
||||
"prompt": prompt,
|
||||
"max_new_tokens": 30
|
||||
}
|
||||
)
|
||||
response_clean = response.json()['a'].split("###")[0]
|
||||
|
||||
return response_clean
|
||||
|
||||
def gen_stream(self, model, engine, messages, stream=True, **kwargs):
|
||||
context = messages[0]['content']
|
||||
user_question = messages[-1]['content']
|
||||
prompt = f"### Instruction \n {user_question} \n ### Context \n {context} \n ### Answer \n"
|
||||
|
||||
# send prompt to endpoint /stream
|
||||
response = requests.post(
|
||||
f"{self.endpoint}/stream",
|
||||
json={
|
||||
"prompt": prompt,
|
||||
"max_new_tokens": 256
|
||||
},
|
||||
stream=True
|
||||
)
|
||||
|
||||
for line in response.iter_lines():
|
||||
if line:
|
||||
#data = json.loads(line)
|
||||
data_str = line.decode('utf-8')
|
||||
if data_str.startswith("data: "):
|
||||
data = json.loads(data_str[6:])
|
||||
yield data['a']
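For reference, a sketch of the server-sent-event style lines the `/stream` endpoint is assumed to emit and of how `gen_stream` surfaces them; the payload content is illustrative.
```
# Each streamed line is assumed to look roughly like:
#   data: {"a": " partial"}
#   data: {"a": " answer"}
# gen_stream strips the "data: " prefix, parses the JSON and yields the "a" field.
llm = DocsGPTAPILLM()
messages = [
    {"role": "system", "content": "Context from the vector store."},
    {"role": "user", "content": "What does the bulk reader do?"},
]
for token in llm.gen_stream(model=None, engine=None, messages=messages):
    print(token, end="")
```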
|
||||
|
||||
44
application/llm/huggingface.py
Normal file
@@ -0,0 +1,44 @@
|
||||
from application.llm.base import BaseLLM
|
||||
|
||||
class HuggingFaceLLM(BaseLLM):
|
||||
|
||||
def __init__(self, api_key, llm_name='Arc53/DocsGPT-7B',q=False):
|
||||
global hf
|
||||
|
||||
from langchain.llms import HuggingFacePipeline
|
||||
if q:
|
||||
import torch
|
||||
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline, BitsAndBytesConfig
|
||||
tokenizer = AutoTokenizer.from_pretrained(llm_name)
|
||||
bnb_config = BitsAndBytesConfig(
|
||||
load_in_4bit=True,
|
||||
bnb_4bit_use_double_quant=True,
|
||||
bnb_4bit_quant_type="nf4",
|
||||
bnb_4bit_compute_dtype=torch.bfloat16
|
||||
)
|
||||
model = AutoModelForCausalLM.from_pretrained(llm_name,quantization_config=bnb_config)
|
||||
else:
|
||||
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
|
||||
tokenizer = AutoTokenizer.from_pretrained(llm_name)
|
||||
model = AutoModelForCausalLM.from_pretrained(llm_name)
|
||||
|
||||
pipe = pipeline(
|
||||
"text-generation", model=model,
|
||||
tokenizer=tokenizer, max_new_tokens=2000,
|
||||
device_map="auto", eos_token_id=tokenizer.eos_token_id
|
||||
)
|
||||
hf = HuggingFacePipeline(pipeline=pipe)
|
||||
|
||||
def gen(self, model, engine, messages, stream=False, **kwargs):
|
||||
context = messages[0]['content']
|
||||
user_question = messages[-1]['content']
|
||||
prompt = f"### Instruction \n {user_question} \n ### Context \n {context} \n ### Answer \n"
|
||||
|
||||
result = hf(prompt)
|
||||
|
||||
        return result  # HuggingFacePipeline returns the generated text as a plain string
|
||||
|
||||
def gen_stream(self, model, engine, messages, stream=True, **kwargs):
|
||||
|
||||
raise NotImplementedError("HuggingFaceLLM Streaming is not implemented yet.")
|
||||
|
||||
39
application/llm/llama_cpp.py
Normal file
@@ -0,0 +1,39 @@
|
||||
from application.llm.base import BaseLLM
|
||||
from application.core.settings import settings
|
||||
|
||||
class LlamaCpp(BaseLLM):
|
||||
|
||||
def __init__(self, api_key, llm_name=settings.MODEL_PATH, **kwargs):
|
||||
global llama
|
||||
try:
|
||||
from llama_cpp import Llama
|
||||
except ImportError:
|
||||
raise ImportError("Please install llama_cpp using pip install llama-cpp-python")
|
||||
|
||||
llama = Llama(model_path=llm_name, n_ctx=2048)
|
||||
|
||||
def gen(self, model, engine, messages, stream=False, **kwargs):
|
||||
context = messages[0]['content']
|
||||
user_question = messages[-1]['content']
|
||||
prompt = f"### Instruction \n {user_question} \n ### Context \n {context} \n ### Answer \n"
|
||||
|
||||
result = llama(prompt, max_tokens=150, echo=False)
|
||||
|
||||
# import sys
|
||||
# print(result['choices'][0]['text'].split('### Answer \n')[-1], file=sys.stderr)
|
||||
|
||||
return result['choices'][0]['text'].split('### Answer \n')[-1]
|
||||
|
||||
def gen_stream(self, model, engine, messages, stream=True, **kwargs):
|
||||
context = messages[0]['content']
|
||||
user_question = messages[-1]['content']
|
||||
prompt = f"### Instruction \n {user_question} \n ### Context \n {context} \n ### Answer \n"
|
||||
|
||||
result = llama(prompt, max_tokens=150, echo=False, stream=stream)
|
||||
|
||||
# import sys
|
||||
# print(list(result), file=sys.stderr)
|
||||
|
||||
for item in result:
|
||||
for choice in item['choices']:
|
||||
yield choice['text']
|
||||
26
application/llm/llm_creator.py
Normal file
@@ -0,0 +1,26 @@
|
||||
from application.llm.openai import OpenAILLM, AzureOpenAILLM
from application.llm.sagemaker import SagemakerAPILLM
from application.llm.huggingface import HuggingFaceLLM
from application.llm.llama_cpp import LlamaCpp
from application.llm.anthropic import AnthropicLLM
from application.llm.docsgpt_provider import DocsGPTAPILLM


class LLMCreator:
    llms = {
        'openai': OpenAILLM,
        'azure_openai': AzureOpenAILLM,
        'sagemaker': SagemakerAPILLM,
        'huggingface': HuggingFaceLLM,
        'llama.cpp': LlamaCpp,
        'anthropic': AnthropicLLM,
        'docsgpt': DocsGPTAPILLM
    }

    @classmethod
    def create_llm(cls, type, *args, **kwargs):
        llm_class = cls.llms.get(type.lower())
        if not llm_class:
            raise ValueError(f"No LLM class found for type {type}")
        return llm_class(*args, **kwargs)
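A short sketch of how the factory is meant to be consumed; the API key is a placeholder and the failing identifier is just an example.
```
from application.llm.llm_creator import LLMCreator

# 'docsgpt' needs no key; 'openai' / 'anthropic' expect an api_key argument.
llm = LLMCreator.create_llm("openai", api_key="sk-...")  # placeholder key
local_llm = LLMCreator.create_llm("docsgpt")

# Unknown identifiers fail fast:
# LLMCreator.create_llm("mistral")  ->  ValueError("No LLM class found for type mistral")
```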
|
||||
60
application/llm/openai.py
Normal file
@@ -0,0 +1,60 @@
|
||||
from application.llm.base import BaseLLM
|
||||
from application.core.settings import settings
|
||||
|
||||
class OpenAILLM(BaseLLM):
|
||||
|
||||
def __init__(self, api_key):
|
||||
global openai
|
||||
from openai import OpenAI
|
||||
|
||||
self.client = OpenAI(
|
||||
api_key=api_key,
|
||||
)
|
||||
self.api_key = api_key
|
||||
|
||||
def _get_openai(self):
|
||||
# Import openai when needed
|
||||
import openai
|
||||
|
||||
return openai
|
||||
|
||||
def gen(self, model, engine, messages, stream=False, **kwargs):
|
||||
response = self.client.chat.completions.create(model=model,
|
||||
messages=messages,
|
||||
stream=stream,
|
||||
**kwargs)
|
||||
|
||||
return response.choices[0].message.content
|
||||
|
||||
def gen_stream(self, model, engine, messages, stream=True, **kwargs):
|
||||
response = self.client.chat.completions.create(model=model,
|
||||
messages=messages,
|
||||
stream=stream,
|
||||
**kwargs)
|
||||
|
||||
for line in response:
|
||||
# import sys
|
||||
# print(line.choices[0].delta.content, file=sys.stderr)
|
||||
if line.choices[0].delta.content is not None:
|
||||
yield line.choices[0].delta.content
|
||||
|
||||
|
||||
class AzureOpenAILLM(OpenAILLM):
|
||||
|
||||
def __init__(self, openai_api_key, openai_api_base, openai_api_version, deployment_name):
|
||||
super().__init__(openai_api_key)
|
||||
        self.api_base = settings.OPENAI_API_BASE
        self.api_version = settings.OPENAI_API_VERSION
        self.deployment_name = settings.AZURE_DEPLOYMENT_NAME
        from openai import AzureOpenAI
        # openai>=1.x exposes these as azure_endpoint / azure_deployment
        self.client = AzureOpenAI(
            api_key=openai_api_key,
            api_version=settings.OPENAI_API_VERSION,
            azure_endpoint=settings.OPENAI_API_BASE,
            azure_deployment=settings.AZURE_DEPLOYMENT_NAME,
        )
|
||||
|
||||
def _get_openai(self):
|
||||
openai = super()._get_openai()
|
||||
|
||||
return openai
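Streaming usage sketch for the plain OpenAI variant; the model name, key and messages are example values.
```
llm = OpenAILLM(api_key="sk-...")  # placeholder key
messages = [
    {"role": "system", "content": "You answer from the provided documentation."},
    {"role": "user", "content": "Summarise the settings module."},
]
for delta in llm.gen_stream(model="gpt-3.5-turbo", engine=None, messages=messages):
    print(delta, end="")
```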
|
||||
139
application/llm/sagemaker.py
Normal file
@@ -0,0 +1,139 @@
|
||||
from application.llm.base import BaseLLM
|
||||
from application.core.settings import settings
|
||||
import json
|
||||
import io
|
||||
|
||||
|
||||
|
||||
class LineIterator:
|
||||
"""
|
||||
A helper class for parsing the byte stream input.
|
||||
|
||||
The output of the model will be in the following format:
|
||||
```
|
||||
b'{"outputs": [" a"]}\n'
|
||||
b'{"outputs": [" challenging"]}\n'
|
||||
b'{"outputs": [" problem"]}\n'
|
||||
...
|
||||
```
|
||||
|
||||
While usually each PayloadPart event from the event stream will contain a byte array
|
||||
with a full json, this is not guaranteed and some of the json objects may be split across
|
||||
PayloadPart events. For example:
|
||||
```
|
||||
{'PayloadPart': {'Bytes': b'{"outputs": '}}
|
||||
{'PayloadPart': {'Bytes': b'[" problem"]}\n'}}
|
||||
```
|
||||
|
||||
This class accounts for this by concatenating bytes written via the 'write' function
|
||||
and then exposing a method which will return lines (ending with a '\n' character) within
|
||||
the buffer via the 'scan_lines' function. It maintains the position of the last read
|
||||
position to ensure that previous bytes are not exposed again.
|
||||
"""
|
||||
|
||||
def __init__(self, stream):
|
||||
self.byte_iterator = iter(stream)
|
||||
self.buffer = io.BytesIO()
|
||||
self.read_pos = 0
|
||||
|
||||
def __iter__(self):
|
||||
return self
|
||||
|
||||
def __next__(self):
|
||||
while True:
|
||||
self.buffer.seek(self.read_pos)
|
||||
line = self.buffer.readline()
|
||||
if line and line[-1] == ord('\n'):
|
||||
self.read_pos += len(line)
|
||||
return line[:-1]
|
||||
try:
|
||||
chunk = next(self.byte_iterator)
|
||||
except StopIteration:
|
||||
if self.read_pos < self.buffer.getbuffer().nbytes:
|
||||
continue
|
||||
raise
|
||||
if 'PayloadPart' not in chunk:
|
||||
                print('Unknown event type:' + str(chunk))
|
||||
continue
|
||||
self.buffer.seek(0, io.SEEK_END)
|
||||
self.buffer.write(chunk['PayloadPart']['Bytes'])
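A self-contained sketch of the buffering behaviour described in the docstring, using a fake event stream in place of the SageMaker response body.
```
# Two PayloadPart events that split one JSON line across chunk boundaries.
fake_event_stream = [
    {'PayloadPart': {'Bytes': b'{"outputs": '}},
    {'PayloadPart': {'Bytes': b'[" problem"]}\n'}},
]

for raw_line in LineIterator(fake_event_stream):
    print(raw_line)  # b'{"outputs": [" problem"]}'
```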
|
||||
|
||||
class SagemakerAPILLM(BaseLLM):
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
import boto3
|
||||
        # credentials come from core settings rather than hard-coded placeholders
        runtime = boto3.client(
            'runtime.sagemaker',
            aws_access_key_id=settings.SAGEMAKER_ACCESS_KEY,
            aws_secret_access_key=settings.SAGEMAKER_SECRET_KEY,
            region_name=settings.SAGEMAKER_REGION
        )
|
||||
|
||||
|
||||
self.endpoint = settings.SAGEMAKER_ENDPOINT
|
||||
self.runtime = runtime
|
||||
|
||||
|
||||
def gen(self, model, engine, messages, stream=False, **kwargs):
|
||||
context = messages[0]['content']
|
||||
user_question = messages[-1]['content']
|
||||
prompt = f"### Instruction \n {user_question} \n ### Context \n {context} \n ### Answer \n"
|
||||
|
||||
|
||||
# Construct payload for endpoint
|
||||
payload = {
|
||||
"inputs": prompt,
|
||||
"stream": False,
|
||||
"parameters": {
|
||||
"do_sample": True,
|
||||
"temperature": 0.1,
|
||||
"max_new_tokens": 30,
|
||||
"repetition_penalty": 1.03,
|
||||
"stop": ["</s>", "###"]
|
||||
}
|
||||
}
|
||||
body_bytes = json.dumps(payload).encode('utf-8')
|
||||
|
||||
# Invoke the endpoint
|
||||
response = self.runtime.invoke_endpoint(EndpointName=self.endpoint,
|
||||
ContentType='application/json',
|
||||
Body=body_bytes)
|
||||
result = json.loads(response['Body'].read().decode())
|
||||
import sys
|
||||
print(result[0]['generated_text'], file=sys.stderr)
|
||||
return result[0]['generated_text'][len(prompt):]
|
||||
|
||||
def gen_stream(self, model, engine, messages, stream=True, **kwargs):
|
||||
context = messages[0]['content']
|
||||
user_question = messages[-1]['content']
|
||||
prompt = f"### Instruction \n {user_question} \n ### Context \n {context} \n ### Answer \n"
|
||||
|
||||
|
||||
# Construct payload for endpoint
|
||||
payload = {
|
||||
"inputs": prompt,
|
||||
"stream": True,
|
||||
"parameters": {
|
||||
"do_sample": True,
|
||||
"temperature": 0.1,
|
||||
"max_new_tokens": 512,
|
||||
"repetition_penalty": 1.03,
|
||||
"stop": ["</s>", "###"]
|
||||
}
|
||||
}
|
||||
body_bytes = json.dumps(payload).encode('utf-8')
|
||||
|
||||
# Invoke the endpoint
|
||||
response = self.runtime.invoke_endpoint_with_response_stream(EndpointName=self.endpoint,
|
||||
ContentType='application/json',
|
||||
Body=body_bytes)
|
||||
#result = json.loads(response['Body'].read().decode())
|
||||
event_stream = response['Body']
|
||||
start_json = b'{'
|
||||
for line in LineIterator(event_stream):
|
||||
if line != b'' and start_json in line:
|
||||
#print(line)
|
||||
data = json.loads(line[line.find(start_json):].decode('utf-8'))
|
||||
if data['token']['text'] not in ["</s>", "###"]:
|
||||
print(data['token']['text'],end='')
|
||||
yield data['token']['text']
|
||||
@@ -1,5 +0,0 @@
|
||||
{
|
||||
"devDependencies": {
|
||||
"tailwindcss": "^3.2.4"
|
||||
}
|
||||
}
|
||||
1
application/parser/__init__.py
Normal file
@@ -0,0 +1 @@
|
||||
|
||||
1
application/parser/file/__init__.py
Normal file
@@ -0,0 +1 @@
|
||||
|
||||
19
application/parser/file/base.py
Normal file
@@ -0,0 +1,19 @@
|
||||
"""Base reader class."""
|
||||
from abc import abstractmethod
|
||||
from typing import Any, List
|
||||
|
||||
from langchain.docstore.document import Document as LCDocument
|
||||
from application.parser.schema.base import Document
|
||||
|
||||
|
||||
class BaseReader:
|
||||
"""Utilities for loading data from a directory."""
|
||||
|
||||
@abstractmethod
|
||||
def load_data(self, *args: Any, **load_kwargs: Any) -> List[Document]:
|
||||
"""Load data from the input directory."""
|
||||
|
||||
def load_langchain_documents(self, **load_kwargs: Any) -> List[LCDocument]:
|
||||
"""Load data in LangChain document format."""
|
||||
docs = self.load_data(**load_kwargs)
|
||||
return [d.to_langchain_format() for d in docs]
|
||||
38
application/parser/file/base_parser.py
Normal file
@@ -0,0 +1,38 @@
|
||||
"""Base parser and config class."""
|
||||
|
||||
from abc import abstractmethod
|
||||
from pathlib import Path
|
||||
from typing import Dict, List, Optional, Union
|
||||
|
||||
|
||||
class BaseParser:
|
||||
"""Base class for all parsers."""
|
||||
|
||||
def __init__(self, parser_config: Optional[Dict] = None):
|
||||
"""Init params."""
|
||||
self._parser_config = parser_config
|
||||
|
||||
def init_parser(self) -> None:
|
||||
"""Init parser and store it."""
|
||||
parser_config = self._init_parser()
|
||||
self._parser_config = parser_config
|
||||
|
||||
@property
|
||||
def parser_config_set(self) -> bool:
|
||||
"""Check if parser config is set."""
|
||||
return self._parser_config is not None
|
||||
|
||||
@property
|
||||
def parser_config(self) -> Dict:
|
||||
"""Check if parser config is set."""
|
||||
if self._parser_config is None:
|
||||
raise ValueError("Parser config not set.")
|
||||
return self._parser_config
|
||||
|
||||
@abstractmethod
|
||||
def _init_parser(self) -> Dict:
|
||||
"""Initialize the parser with the config."""
|
||||
|
||||
@abstractmethod
|
||||
def parse_file(self, file: Path, errors: str = "ignore") -> Union[str, List[str]]:
|
||||
"""Parse file."""
|
||||
162
application/parser/file/bulk.py
Normal file
@@ -0,0 +1,162 @@
|
||||
"""Simple reader that reads files of different formats from a directory."""
|
||||
import logging
|
||||
from pathlib import Path
|
||||
from typing import Callable, Dict, List, Optional, Union
|
||||
|
||||
from application.parser.file.base import BaseReader
|
||||
from application.parser.file.base_parser import BaseParser
|
||||
from application.parser.file.docs_parser import DocxParser, PDFParser
|
||||
from application.parser.file.epub_parser import EpubParser
|
||||
from application.parser.file.html_parser import HTMLParser
|
||||
from application.parser.file.markdown_parser import MarkdownParser
|
||||
from application.parser.file.rst_parser import RstParser
|
||||
from application.parser.file.tabular_parser import PandasCSVParser
|
||||
from application.parser.schema.base import Document
|
||||
|
||||
DEFAULT_FILE_EXTRACTOR: Dict[str, BaseParser] = {
|
||||
".pdf": PDFParser(),
|
||||
".docx": DocxParser(),
|
||||
".csv": PandasCSVParser(),
|
||||
".epub": EpubParser(),
|
||||
".md": MarkdownParser(),
|
||||
".rst": RstParser(),
|
||||
".html": HTMLParser(),
|
||||
".mdx": MarkdownParser(),
|
||||
}
|
||||
|
||||
|
||||
class SimpleDirectoryReader(BaseReader):
|
||||
"""Simple directory reader.
|
||||
|
||||
Can read files into separate documents, or concatenates
|
||||
files into one document text.
|
||||
|
||||
Args:
|
||||
input_dir (str): Path to the directory.
|
||||
input_files (List): List of file paths to read (Optional; overrides input_dir)
|
||||
exclude_hidden (bool): Whether to exclude hidden files (dotfiles).
|
||||
errors (str): how encoding and decoding errors are to be handled,
|
||||
see https://docs.python.org/3/library/functions.html#open
|
||||
recursive (bool): Whether to recursively search in subdirectories.
|
||||
False by default.
|
||||
required_exts (Optional[List[str]]): List of required extensions.
|
||||
Default is None.
|
||||
file_extractor (Optional[Dict[str, BaseParser]]): A mapping of file
|
||||
extension to a BaseParser class that specifies how to convert that file
|
||||
to text. See DEFAULT_FILE_EXTRACTOR.
|
||||
num_files_limit (Optional[int]): Maximum number of files to read.
|
||||
Default is None.
|
||||
file_metadata (Optional[Callable[str, Dict]]): A function that takes
|
||||
in a filename and returns a Dict of metadata for the Document.
|
||||
Default is None.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
input_dir: Optional[str] = None,
|
||||
input_files: Optional[List] = None,
|
||||
exclude_hidden: bool = True,
|
||||
errors: str = "ignore",
|
||||
recursive: bool = True,
|
||||
required_exts: Optional[List[str]] = None,
|
||||
file_extractor: Optional[Dict[str, BaseParser]] = None,
|
||||
num_files_limit: Optional[int] = None,
|
||||
file_metadata: Optional[Callable[[str], Dict]] = None,
|
||||
) -> None:
|
||||
"""Initialize with parameters."""
|
||||
super().__init__()
|
||||
|
||||
if not input_dir and not input_files:
|
||||
raise ValueError("Must provide either `input_dir` or `input_files`.")
|
||||
|
||||
self.errors = errors
|
||||
|
||||
self.recursive = recursive
|
||||
self.exclude_hidden = exclude_hidden
|
||||
self.required_exts = required_exts
|
||||
self.num_files_limit = num_files_limit
|
||||
|
||||
if input_files:
|
||||
self.input_files = []
|
||||
for path in input_files:
|
||||
print(path)
|
||||
input_file = Path(path)
|
||||
self.input_files.append(input_file)
|
||||
elif input_dir:
|
||||
self.input_dir = Path(input_dir)
|
||||
self.input_files = self._add_files(self.input_dir)
|
||||
|
||||
self.file_extractor = file_extractor or DEFAULT_FILE_EXTRACTOR
|
||||
self.file_metadata = file_metadata
|
||||
|
||||
def _add_files(self, input_dir: Path) -> List[Path]:
|
||||
"""Add files."""
|
||||
input_files = sorted(input_dir.iterdir())
|
||||
new_input_files = []
|
||||
dirs_to_explore = []
|
||||
for input_file in input_files:
|
||||
if input_file.is_dir():
|
||||
if self.recursive:
|
||||
dirs_to_explore.append(input_file)
|
||||
elif self.exclude_hidden and input_file.name.startswith("."):
|
||||
continue
|
||||
elif (
|
||||
self.required_exts is not None
|
||||
and input_file.suffix not in self.required_exts
|
||||
):
|
||||
continue
|
||||
else:
|
||||
new_input_files.append(input_file)
|
||||
|
||||
for dir_to_explore in dirs_to_explore:
|
||||
sub_input_files = self._add_files(dir_to_explore)
|
||||
new_input_files.extend(sub_input_files)
|
||||
|
||||
if self.num_files_limit is not None and self.num_files_limit > 0:
|
||||
new_input_files = new_input_files[0: self.num_files_limit]
|
||||
|
||||
# print total number of files added
|
||||
logging.debug(
|
||||
f"> [SimpleDirectoryReader] Total files added: {len(new_input_files)}"
|
||||
)
|
||||
|
||||
return new_input_files
|
||||
|
||||
def load_data(self, concatenate: bool = False) -> List[Document]:
|
||||
"""Load data from the input directory.
|
||||
|
||||
Args:
|
||||
concatenate (bool): whether to concatenate all files into one document.
|
||||
If set to True, file metadata is ignored.
|
||||
False by default.
|
||||
|
||||
Returns:
|
||||
List[Document]: A list of documents.
|
||||
|
||||
"""
|
||||
data: Union[str, List[str]] = ""
|
||||
data_list: List[str] = []
|
||||
metadata_list = []
|
||||
for input_file in self.input_files:
|
||||
if input_file.suffix in self.file_extractor:
|
||||
parser = self.file_extractor[input_file.suffix]
|
||||
if not parser.parser_config_set:
|
||||
parser.init_parser()
|
||||
data = parser.parse_file(input_file, errors=self.errors)
|
||||
else:
|
||||
# do standard read
|
||||
with open(input_file, "r", errors=self.errors) as f:
|
||||
data = f.read()
|
||||
if isinstance(data, List):
|
||||
data_list.extend(data)
|
||||
else:
|
||||
data_list.append(str(data))
|
||||
if self.file_metadata is not None:
|
||||
metadata_list.append(self.file_metadata(str(input_file)))
|
||||
|
||||
if concatenate:
|
||||
return [Document("\n".join(data_list))]
|
||||
elif self.file_metadata is not None:
|
||||
return [Document(d, extra_info=m) for d, m in zip(data_list, metadata_list)]
|
||||
else:
|
||||
return [Document(d) for d in data_list]
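Usage sketch for the directory reader; the path and the metadata callable are example values.
```
reader = SimpleDirectoryReader(
    input_dir="./docs",                      # example path
    required_exts=[".md", ".rst", ".pdf"],
    recursive=True,
    file_metadata=lambda path: {"source": path},
)
docs = reader.load_data()                    # one Document per parsed chunk/file
lc_docs = reader.load_langchain_documents()  # same content as LangChain Documents
```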
|
||||
59
application/parser/file/docs_parser.py
Normal file
@@ -0,0 +1,59 @@
|
||||
"""Docs parser.
|
||||
|
||||
Contains parsers for docx, pdf files.
|
||||
|
||||
"""
|
||||
from pathlib import Path
|
||||
from typing import Dict
|
||||
|
||||
from application.parser.file.base_parser import BaseParser
|
||||
|
||||
|
||||
class PDFParser(BaseParser):
|
||||
"""PDF parser."""
|
||||
|
||||
def _init_parser(self) -> Dict:
|
||||
"""Init parser."""
|
||||
return {}
|
||||
|
||||
def parse_file(self, file: Path, errors: str = "ignore") -> str:
|
||||
"""Parse file."""
|
||||
try:
|
||||
import PyPDF2
|
||||
except ImportError:
|
||||
raise ValueError("PyPDF2 is required to read PDF files.")
|
||||
text_list = []
|
||||
with open(file, "rb") as fp:
|
||||
# Create a PDF object
|
||||
pdf = PyPDF2.PdfReader(fp)
|
||||
|
||||
# Get the number of pages in the PDF document
|
||||
num_pages = len(pdf.pages)
|
||||
|
||||
# Iterate over every page
|
||||
for page in range(num_pages):
|
||||
# Extract the text from the page
|
||||
page_text = pdf.pages[page].extract_text()
|
||||
text_list.append(page_text)
|
||||
text = "\n".join(text_list)
|
||||
|
||||
return text
|
||||
|
||||
|
||||
class DocxParser(BaseParser):
|
||||
"""Docx parser."""
|
||||
|
||||
def _init_parser(self) -> Dict:
|
||||
"""Init parser."""
|
||||
return {}
|
||||
|
||||
def parse_file(self, file: Path, errors: str = "ignore") -> str:
|
||||
"""Parse file."""
|
||||
try:
|
||||
import docx2txt
|
||||
except ImportError:
|
||||
raise ValueError("docx2txt is required to read Microsoft Word files.")
|
||||
|
||||
text = docx2txt.process(file)
|
||||
|
||||
return text
|
||||
43
application/parser/file/epub_parser.py
Normal file
@@ -0,0 +1,43 @@
|
||||
"""Epub parser.
|
||||
|
||||
Contains parsers for epub files.
|
||||
"""
|
||||
|
||||
from pathlib import Path
|
||||
from typing import Dict
|
||||
|
||||
from application.parser.file.base_parser import BaseParser
|
||||
|
||||
|
||||
class EpubParser(BaseParser):
|
||||
"""Epub Parser."""
|
||||
|
||||
def _init_parser(self) -> Dict:
|
||||
"""Init parser."""
|
||||
return {}
|
||||
|
||||
def parse_file(self, file: Path, errors: str = "ignore") -> str:
|
||||
"""Parse file."""
|
||||
try:
|
||||
import ebooklib
|
||||
from ebooklib import epub
|
||||
except ImportError:
|
||||
raise ValueError("`EbookLib` is required to read Epub files.")
|
||||
try:
|
||||
import html2text
|
||||
except ImportError:
|
||||
raise ValueError("`html2text` is required to parse Epub files.")
|
||||
|
||||
text_list = []
|
||||
book = epub.read_epub(file, options={"ignore_ncx": True})
|
||||
|
||||
# Iterate through all chapters.
|
||||
for item in book.get_items():
|
||||
# Chapters are typically located in epub documents items.
|
||||
if item.get_type() == ebooklib.ITEM_DOCUMENT:
|
||||
text_list.append(
|
||||
html2text.html2text(item.get_content().decode("utf-8"))
|
||||
)
|
||||
|
||||
text = "\n".join(text_list)
|
||||
return text
|
||||
83
application/parser/file/html_parser.py
Normal file
@@ -0,0 +1,83 @@
|
||||
"""HTML parser.
|
||||
|
||||
Contains parser for html files.
|
||||
|
||||
"""
|
||||
import re
|
||||
from pathlib import Path
|
||||
from typing import Dict, Union
|
||||
|
||||
from application.parser.file.base_parser import BaseParser
|
||||
|
||||
|
||||
class HTMLParser(BaseParser):
|
||||
"""HTML parser."""
|
||||
|
||||
def _init_parser(self) -> Dict:
|
||||
"""Init parser."""
|
||||
return {}
|
||||
|
||||
def parse_file(self, file: Path, errors: str = "ignore") -> Union[str, list[str]]:
|
||||
"""Parse file.
|
||||
|
||||
Returns:
|
||||
Union[str, List[str]]: a string or a List of strings.
|
||||
"""
|
||||
try:
|
||||
from unstructured.partition.html import partition_html
|
||||
from unstructured.staging.base import convert_to_isd
|
||||
from unstructured.cleaners.core import clean
|
||||
except ImportError:
|
||||
raise ValueError("unstructured package is required to parse HTML files.")
|
||||
|
||||
# Using the unstructured library to convert the html to isd format
|
||||
# isd sample : isd = [
|
||||
# {"text": "My Title", "type": "Title"},
|
||||
# {"text": "My Narrative", "type": "NarrativeText"}
|
||||
# ]
|
||||
with open(file, "r", encoding="utf-8") as fp:
|
||||
elements = partition_html(file=fp)
|
||||
isd = convert_to_isd(elements)
|
||||
|
||||
        # Removing non-ASCII characters from isd_el['text']
|
||||
for isd_el in isd:
|
||||
isd_el['text'] = isd_el['text'].encode("ascii", "ignore").decode()
|
||||
|
||||
# Removing all the \n characters from isd_el['text'] using regex and replace with single space
|
||||
# Removing all the extra spaces from isd_el['text'] using regex and replace with single space
|
||||
for isd_el in isd:
|
||||
isd_el['text'] = re.sub(r'\n', ' ', isd_el['text'], flags=re.MULTILINE | re.DOTALL)
|
||||
isd_el['text'] = re.sub(r"\s{2,}", " ", isd_el['text'], flags=re.MULTILINE | re.DOTALL)
|
||||
|
||||
# more cleaning: extra_whitespaces, dashes, bullets, trailing_punctuation
|
||||
for isd_el in isd:
|
||||
clean(isd_el['text'], extra_whitespace=True, dashes=True, bullets=True, trailing_punctuation=True)
|
||||
|
||||
# Creating a list of all the indexes of isd_el['type'] = 'Title'
|
||||
title_indexes = [i for i, isd_el in enumerate(isd) if isd_el['type'] == 'Title']
|
||||
|
||||
# Creating 'Chunks' - List of lists of strings
|
||||
# each list starting with isd_el['type'] = 'Title' and all the data till the next 'Title'
|
||||
# Each Chunk can be thought of as an individual set of data, which can be sent to the model
|
||||
# Where Each Title is grouped together with the data under it
|
||||
|
||||
        Chunks = [[]]
        final_chunks = []

        for i, isd_el in enumerate(isd):
            if i in title_indexes:
                Chunks.append([])
            Chunks[-1].append(isd_el['text'])

        # Keep only the chunks whose combined text length is at least 25 characters.
        # (Removing items from a list while iterating over it skips elements, so the
        # approved chunks are collected into final_chunks instead.)
        # TODO: This value can be a user defined variable
        for chunk in Chunks:
            if len(str(chunk)) >= 25:
                # join all the strings of an approved chunk into a single string
                final_chunks.append(" ".join([str(item) for item in chunk]))
|
||||
return final_chunks
|
||||
145
application/parser/file/markdown_parser.py
Normal file
@@ -0,0 +1,145 @@
|
||||
"""Markdown parser.
|
||||
|
||||
Contains parser for md files.
|
||||
|
||||
"""
|
||||
import re
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict, List, Optional, Tuple, Union, cast
|
||||
|
||||
import tiktoken
|
||||
from application.parser.file.base_parser import BaseParser
|
||||
|
||||
|
||||
class MarkdownParser(BaseParser):
|
||||
"""Markdown parser.
|
||||
|
||||
Extract text from markdown files.
|
||||
Returns dictionary with keys as headers and values as the text between headers.
|
||||
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
*args: Any,
|
||||
remove_hyperlinks: bool = True,
|
||||
remove_images: bool = True,
|
||||
max_tokens: int = 2048,
|
||||
# remove_tables: bool = True,
|
||||
**kwargs: Any,
|
||||
) -> None:
|
||||
"""Init params."""
|
||||
super().__init__(*args, **kwargs)
|
||||
self._remove_hyperlinks = remove_hyperlinks
|
||||
self._remove_images = remove_images
|
||||
self._max_tokens = max_tokens
|
||||
# self._remove_tables = remove_tables
|
||||
|
||||
def tups_chunk_append(self, tups: List[Tuple[Optional[str], str]], current_header: Optional[str],
|
||||
current_text: str):
|
||||
"""Append to tups chunk."""
|
||||
num_tokens = len(tiktoken.get_encoding("cl100k_base").encode(current_text))
|
||||
if num_tokens > self._max_tokens:
|
||||
chunks = [current_text[i:i + self._max_tokens] for i in range(0, len(current_text), self._max_tokens)]
|
||||
for chunk in chunks:
|
||||
tups.append((current_header, chunk))
|
||||
else:
|
||||
tups.append((current_header, current_text))
|
||||
return tups
|
||||
|
||||
def markdown_to_tups(self, markdown_text: str) -> List[Tuple[Optional[str], str]]:
|
||||
"""Convert a markdown file to a dictionary.
|
||||
|
||||
The keys are the headers and the values are the text under each header.
|
||||
|
||||
"""
|
||||
markdown_tups: List[Tuple[Optional[str], str]] = []
|
||||
lines = markdown_text.split("\n")
|
||||
|
||||
current_header = None
|
||||
current_text = ""
|
||||
|
||||
for line in lines:
|
||||
header_match = re.match(r"^#+\s", line)
|
||||
if header_match:
|
||||
if current_header is not None:
|
||||
if current_text == "" or None:
|
||||
continue
|
||||
markdown_tups = self.tups_chunk_append(markdown_tups, current_header, current_text)
|
||||
|
||||
current_header = line
|
||||
current_text = ""
|
||||
else:
|
||||
current_text += line + "\n"
|
||||
markdown_tups = self.tups_chunk_append(markdown_tups, current_header, current_text)
|
||||
|
||||
if current_header is not None:
|
||||
# pass linting, assert keys are defined
|
||||
markdown_tups = [
|
||||
(re.sub(r"#", "", cast(str, key)).strip(), re.sub(r"<.*?>", "", value))
|
||||
for key, value in markdown_tups
|
||||
]
|
||||
else:
|
||||
markdown_tups = [
|
||||
(key, re.sub("\n", "", value)) for key, value in markdown_tups
|
||||
]
|
||||
|
||||
return markdown_tups
|
||||
|
||||
def remove_images(self, content: str) -> str:
|
||||
"""Get a dictionary of a markdown file from its path."""
|
||||
pattern = r"!{1}\[\[(.*)\]\]"
|
||||
content = re.sub(pattern, "", content)
|
||||
return content
|
||||
|
||||
# def remove_tables(self, content: str) -> List[List[str]]:
|
||||
# """Convert markdown tables to nested lists."""
|
||||
# table_rows_pattern = r"((\r?\n){2}|^)([^\r\n]*\|[^\r\n]*(\r?\n)?)+(?=(\r?\n){2}|$)"
|
||||
# table_cells_pattern = r"([^\|\r\n]*)\|"
|
||||
#
|
||||
# table_rows = re.findall(table_rows_pattern, content, re.MULTILINE)
|
||||
# table_lists = []
|
||||
# for row in table_rows:
|
||||
# cells = re.findall(table_cells_pattern, row[2])
|
||||
# cells = [cell.strip() for cell in cells if cell.strip()]
|
||||
# table_lists.append(cells)
|
||||
# return str(table_lists)
|
||||
|
||||
def remove_hyperlinks(self, content: str) -> str:
|
||||
"""Get a dictionary of a markdown file from its path."""
|
||||
pattern = r"\[(.*?)\]\((.*?)\)"
|
||||
content = re.sub(pattern, r"\1", content)
|
||||
return content
|
||||
|
||||
def _init_parser(self) -> Dict:
|
||||
"""Initialize the parser with the config."""
|
||||
return {}
|
||||
|
||||
def parse_tups(
|
||||
self, filepath: Path, errors: str = "ignore"
|
||||
) -> List[Tuple[Optional[str], str]]:
|
||||
"""Parse file into tuples."""
|
||||
with open(filepath, "r") as f:
|
||||
content = f.read()
|
||||
if self._remove_hyperlinks:
|
||||
content = self.remove_hyperlinks(content)
|
||||
if self._remove_images:
|
||||
content = self.remove_images(content)
|
||||
# if self._remove_tables:
|
||||
# content = self.remove_tables(content)
|
||||
markdown_tups = self.markdown_to_tups(content)
|
||||
return markdown_tups
|
||||
|
||||
def parse_file(
|
||||
self, filepath: Path, errors: str = "ignore"
|
||||
) -> Union[str, List[str]]:
|
||||
"""Parse file into string."""
|
||||
tups = self.parse_tups(filepath, errors=errors)
|
||||
results = []
|
||||
# TODO: don't include headers right now
|
||||
for header, value in tups:
|
||||
if header is None:
|
||||
results.append(value)
|
||||
else:
|
||||
results.append(f"\n\n{header}\n{value}")
|
||||
return results
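A small sketch of the header/body pairs produced by `markdown_to_tups`; the markdown snippet is illustrative.
```
parser = MarkdownParser()
tups = parser.markdown_to_tups(
    "# Install\nRun pip install docsgpt.\n\n# Usage\nStart the server.\n"
)
# -> [("Install", "Run pip install docsgpt.\n\n"), ("Usage", "Start the server.\n\n")]
```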
|
||||
51
application/parser/file/openapi3_parser.py
Normal file
@@ -0,0 +1,51 @@
|
||||
from urllib.parse import urlparse
|
||||
|
||||
from openapi_parser import parse
|
||||
|
||||
try:
|
||||
from application.parser.file.base_parser import BaseParser
|
||||
except ModuleNotFoundError:
|
||||
from base_parser import BaseParser
|
||||
|
||||
|
||||
class OpenAPI3Parser(BaseParser):
|
||||
def init_parser(self) -> None:
|
||||
return super().init_parser()
|
||||
|
||||
def get_base_urls(self, urls):
|
||||
base_urls = []
|
||||
for i in urls:
|
||||
parsed_url = urlparse(i)
|
||||
base_url = parsed_url.scheme + "://" + parsed_url.netloc
|
||||
if base_url not in base_urls:
|
||||
base_urls.append(base_url)
|
||||
return base_urls
|
||||
|
||||
def get_info_from_paths(self, path):
|
||||
info = ""
|
||||
if path.operations:
|
||||
for operation in path.operations:
|
||||
info += (
|
||||
f"\n{operation.method.value}="
|
||||
f"{operation.responses[0].description}"
|
||||
)
|
||||
return info
|
||||
|
||||
def parse_file(self, file_path):
|
||||
data = parse(file_path)
|
||||
results = ""
|
||||
base_urls = self.get_base_urls(link.url for link in data.servers)
|
||||
base_urls = ",".join([base_url for base_url in base_urls])
|
||||
results += f"Base URL:{base_urls}\n"
|
||||
i = 1
|
||||
for path in data.paths:
|
||||
info = self.get_info_from_paths(path)
|
||||
results += (
|
||||
f"Path{i}: {path.url}\n"
|
||||
f"description: {path.description}\n"
|
||||
f"parameters: {path.parameters}\nmethods: {info}\n"
|
||||
)
|
||||
i += 1
|
||||
with open("results.txt", "w") as f:
|
||||
f.write(results)
|
||||
return results
|
||||
173
application/parser/file/rst_parser.py
Normal file
@@ -0,0 +1,173 @@
|
||||
"""reStructuredText parser.
|
||||
|
||||
Contains parser for rst files.
|
||||
|
||||
"""
|
||||
import re
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict, List, Optional, Tuple, Union
|
||||
|
||||
from application.parser.file.base_parser import BaseParser
|
||||
|
||||
|
||||
class RstParser(BaseParser):
|
||||
"""reStructuredText parser.
|
||||
|
||||
Extract text from .rst files.
|
||||
Returns dictionary with keys as headers and values as the text between headers.
|
||||
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
*args: Any,
|
||||
remove_hyperlinks: bool = True,
|
||||
remove_images: bool = True,
|
||||
remove_table_excess: bool = True,
|
||||
remove_interpreters: bool = True,
|
||||
remove_directives: bool = True,
|
||||
remove_whitespaces_excess: bool = True,
|
||||
# Be careful with remove_characters_excess, might cause data loss
|
||||
remove_characters_excess: bool = True,
|
||||
**kwargs: Any,
|
||||
) -> None:
|
||||
"""Init params."""
|
||||
super().__init__(*args, **kwargs)
|
||||
self._remove_hyperlinks = remove_hyperlinks
|
||||
self._remove_images = remove_images
|
||||
self._remove_table_excess = remove_table_excess
|
||||
self._remove_interpreters = remove_interpreters
|
||||
self._remove_directives = remove_directives
|
||||
self._remove_whitespaces_excess = remove_whitespaces_excess
|
||||
self._remove_characters_excess = remove_characters_excess
|
||||
|
||||
def rst_to_tups(self, rst_text: str) -> List[Tuple[Optional[str], str]]:
|
||||
"""Convert a reStructuredText file to a dictionary.
|
||||
|
||||
The keys are the headers and the values are the text under each header.
|
||||
|
||||
"""
|
||||
rst_tups: List[Tuple[Optional[str], str]] = []
|
||||
lines = rst_text.split("\n")
|
||||
|
||||
current_header = None
|
||||
current_text = ""
|
||||
|
||||
for i, line in enumerate(lines):
|
||||
header_match = re.match(r"^[^\S\n]*[-=]+[^\S\n]*$", line)
|
||||
if header_match and i > 0 and (
|
||||
len(lines[i - 1].strip()) == len(header_match.group().strip()) or lines[i - 2] == lines[i - 2]):
|
||||
if current_header is not None:
|
||||
if current_text == "" or None:
|
||||
continue
|
||||
# removes the next heading from current Document
|
||||
if current_text.endswith(lines[i - 1] + "\n"):
|
||||
current_text = current_text[:len(current_text) - len(lines[i - 1] + "\n")]
|
||||
rst_tups.append((current_header, current_text))
|
||||
|
||||
current_header = lines[i - 1]
|
||||
current_text = ""
|
||||
else:
|
||||
current_text += line + "\n"
|
||||
|
||||
rst_tups.append((current_header, current_text))
|
||||
|
||||
# TODO: Format for rst
|
||||
#
|
||||
# if current_header is not None:
|
||||
# # pass linting, assert keys are defined
|
||||
# rst_tups = [
|
||||
# (re.sub(r"#", "", cast(str, key)).strip(), re.sub(r"<.*?>", "", value))
|
||||
# for key, value in rst_tups
|
||||
# ]
|
||||
# else:
|
||||
# rst_tups = [
|
||||
# (key, re.sub("\n", "", value)) for key, value in rst_tups
|
||||
# ]
|
||||
|
||||
if current_header is None:
|
||||
rst_tups = [
|
||||
(key, re.sub("\n", "", value)) for key, value in rst_tups
|
||||
]
|
||||
return rst_tups
|
||||
|
||||
def remove_images(self, content: str) -> str:
|
||||
pattern = r"\.\. image:: (.*)"
|
||||
content = re.sub(pattern, "", content)
|
||||
return content
|
||||
|
||||
def remove_hyperlinks(self, content: str) -> str:
|
||||
pattern = r"`(.*?) <(.*?)>`_"
|
||||
content = re.sub(pattern, r"\1", content)
|
||||
return content
|
||||
|
||||
def remove_directives(self, content: str) -> str:
|
||||
"""Removes reStructuredText Directives"""
|
||||
pattern = r"`\.\.([^:]+)::"
|
||||
content = re.sub(pattern, "", content)
|
||||
return content
|
||||
|
||||
def remove_interpreters(self, content: str) -> str:
|
||||
"""Removes reStructuredText Interpreted Text Roles"""
|
||||
pattern = r":(\w+):"
|
||||
content = re.sub(pattern, "", content)
|
||||
return content
|
||||
|
||||
def remove_table_excess(self, content: str) -> str:
|
||||
"""Pattern to remove grid table separators"""
|
||||
pattern = r"^\+[-]+\+[-]+\+$"
|
||||
content = re.sub(pattern, "", content, flags=re.MULTILINE)
|
||||
return content
|
||||
|
||||
def remove_whitespaces_excess(self, content: List[Tuple[str, Any]]) -> List[Tuple[str, Any]]:
|
||||
"""Pattern to match 2 or more consecutive whitespaces"""
|
||||
pattern = r"\s{2,}"
|
||||
content = [(key, re.sub(pattern, " ", value)) for key, value in content]
|
||||
return content
|
||||
|
||||
def remove_characters_excess(self, content: List[Tuple[str, Any]]) -> List[Tuple[str, Any]]:
|
||||
"""Pattern to match 2 or more consecutive characters"""
|
||||
pattern = r"(\S)\1{2,}"
|
||||
content = [(key, re.sub(pattern, r"\1\1\1", value, flags=re.MULTILINE)) for key, value in content]
|
||||
return content
|
||||
|
||||
def _init_parser(self) -> Dict:
|
||||
"""Initialize the parser with the config."""
|
||||
return {}
|
||||
|
||||
def parse_tups(
|
||||
self, filepath: Path, errors: str = "ignore"
|
||||
) -> List[Tuple[Optional[str], str]]:
|
||||
"""Parse file into tuples."""
|
||||
with open(filepath, "r") as f:
|
||||
content = f.read()
|
||||
if self._remove_hyperlinks:
|
||||
content = self.remove_hyperlinks(content)
|
||||
if self._remove_images:
|
||||
content = self.remove_images(content)
|
||||
if self._remove_table_excess:
|
||||
content = self.remove_table_excess(content)
|
||||
if self._remove_directives:
|
||||
content = self.remove_directives(content)
|
||||
if self._remove_interpreters:
|
||||
content = self.remove_interpreters(content)
|
||||
rst_tups = self.rst_to_tups(content)
|
||||
if self._remove_whitespaces_excess:
|
||||
rst_tups = self.remove_whitespaces_excess(rst_tups)
|
||||
if self._remove_characters_excess:
|
||||
rst_tups = self.remove_characters_excess(rst_tups)
|
||||
return rst_tups
|
||||
|
||||
def parse_file(
|
||||
self, filepath: Path, errors: str = "ignore"
|
||||
) -> Union[str, List[str]]:
|
||||
"""Parse file into string."""
|
||||
tups = self.parse_tups(filepath, errors=errors)
|
||||
results = []
|
||||
# TODO: don't include headers right now
|
||||
for header, value in tups:
|
||||
if header is None:
|
||||
results.append(value)
|
||||
else:
|
||||
results.append(f"\n\n{header}\n{value}")
|
||||
return results
|
||||
115
application/parser/file/tabular_parser.py
Normal file
@@ -0,0 +1,115 @@
|
||||
"""Tabular parser.
|
||||
|
||||
Contains parsers for tabular data files.
|
||||
|
||||
"""
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict, List, Union
|
||||
|
||||
from application.parser.file.base_parser import BaseParser
|
||||
|
||||
|
||||
class CSVParser(BaseParser):
|
||||
"""CSV parser.
|
||||
|
||||
Args:
|
||||
concat_rows (bool): whether to concatenate all rows into one document.
|
||||
If set to False, a Document will be created for each row.
|
||||
True by default.
|
||||
|
||||
"""
|
||||
|
||||
def __init__(self, *args: Any, concat_rows: bool = True, **kwargs: Any) -> None:
|
||||
"""Init params."""
|
||||
super().__init__(*args, **kwargs)
|
||||
self._concat_rows = concat_rows
|
||||
|
||||
def _init_parser(self) -> Dict:
|
||||
"""Init parser."""
|
||||
return {}
|
||||
|
||||
def parse_file(self, file: Path, errors: str = "ignore") -> Union[str, List[str]]:
|
||||
"""Parse file.
|
||||
|
||||
Returns:
|
||||
Union[str, List[str]]: a string or a List of strings.
|
||||
|
||||
"""
|
||||
try:
|
||||
import csv
|
||||
except ImportError:
|
||||
raise ValueError("csv module is required to read CSV files.")
|
||||
text_list = []
|
||||
with open(file, "r") as fp:
|
||||
csv_reader = csv.reader(fp)
|
||||
for row in csv_reader:
|
||||
text_list.append(", ".join(row))
|
||||
if self._concat_rows:
|
||||
return "\n".join(text_list)
|
||||
else:
|
||||
return text_list
|
||||
|
||||
|
||||
class PandasCSVParser(BaseParser):
|
||||
r"""Pandas-based CSV parser.
|
||||
|
||||
    Parses CSVs using the separator detection from Pandas `read_csv` function.
|
||||
If special parameters are required, use the `pandas_config` dict.
|
||||
|
||||
Args:
|
||||
concat_rows (bool): whether to concatenate all rows into one document.
|
||||
If set to False, a Document will be created for each row.
|
||||
True by default.
|
||||
|
||||
col_joiner (str): Separator to use for joining cols per row.
|
||||
Set to ", " by default.
|
||||
|
||||
row_joiner (str): Separator to use for joining each row.
|
||||
Only used when `concat_rows=True`.
|
||||
Set to "\n" by default.
|
||||
|
||||
pandas_config (dict): Options for the `pandas.read_csv` function call.
|
||||
Refer to https://pandas.pydata.org/docs/reference/api/pandas.read_csv.html
|
||||
for more information.
|
||||
Set to empty dict by default, this means pandas will try to figure
|
||||
out the separators, table head, etc. on its own.
|
||||
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
*args: Any,
|
||||
concat_rows: bool = True,
|
||||
col_joiner: str = ", ",
|
||||
row_joiner: str = "\n",
|
||||
pandas_config: dict = {},
|
||||
**kwargs: Any
|
||||
) -> None:
|
||||
"""Init params."""
|
||||
super().__init__(*args, **kwargs)
|
||||
self._concat_rows = concat_rows
|
||||
self._col_joiner = col_joiner
|
||||
self._row_joiner = row_joiner
|
||||
self._pandas_config = pandas_config
|
||||
|
||||
def _init_parser(self) -> Dict:
|
||||
"""Init parser."""
|
||||
return {}
|
||||
|
||||
def parse_file(self, file: Path, errors: str = "ignore") -> Union[str, List[str]]:
|
||||
"""Parse file."""
|
||||
try:
|
||||
import pandas as pd
|
||||
except ImportError:
|
||||
raise ValueError("pandas module is required to read CSV files.")
|
||||
|
||||
df = pd.read_csv(file, **self._pandas_config)
|
||||
|
||||
text_list = df.apply(
|
||||
lambda row: (self._col_joiner).join(row.astype(str).tolist()), axis=1
|
||||
).tolist()
|
||||
|
||||
if self._concat_rows:
|
||||
return (self._row_joiner).join(text_list)
|
||||
else:
|
||||
return text_list
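Usage sketch; the file name and the pandas options are example values.
```
parser = PandasCSVParser(
    concat_rows=False,               # one string per row instead of one joined blob
    pandas_config={"sep": ";"},      # forwarded to pandas.read_csv
)
rows = parser.parse_file(Path("data/example.csv"))  # hypothetical file
```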
|
||||
66
application/parser/java2doc.py
Normal file
@@ -0,0 +1,66 @@
|
||||
import os
|
||||
|
||||
import javalang
|
||||
|
||||
|
||||
def find_files(directory):
|
||||
files_list = []
|
||||
for root, dirs, files in os.walk(directory):
|
||||
for file in files:
|
||||
if file.endswith('.java'):
|
||||
files_list.append(os.path.join(root, file))
|
||||
return files_list
|
||||
|
||||
|
||||
def extract_functions(file_path):
|
||||
with open(file_path, "r") as file:
|
||||
java_code = file.read()
|
||||
methods = {}
|
||||
tree = javalang.parse.parse(java_code)
|
||||
for _, node in tree.filter(javalang.tree.MethodDeclaration):
|
||||
method_name = node.name
|
||||
start_line = node.position.line - 1
|
||||
end_line = start_line
|
||||
brace_count = 0
|
||||
for line in java_code.splitlines()[start_line:]:
|
||||
end_line += 1
|
||||
brace_count += line.count("{") - line.count("}")
|
||||
if brace_count == 0:
|
||||
break
|
||||
method_source_code = "\n".join(java_code.splitlines()[start_line:end_line])
|
||||
methods[method_name] = method_source_code
|
||||
return methods
|
||||
|
||||
|
||||
def extract_classes(file_path):
|
||||
with open(file_path, 'r') as file:
|
||||
source_code = file.read()
|
||||
classes = {}
|
||||
tree = javalang.parse.parse(source_code)
|
||||
for class_decl in tree.types:
|
||||
class_name = class_decl.name
|
||||
declarations = []
|
||||
methods = []
|
||||
for field_decl in class_decl.fields:
|
||||
field_name = field_decl.declarators[0].name
|
||||
field_type = field_decl.type.name
|
||||
declarations.append(f"{field_type} {field_name}")
|
||||
for method_decl in class_decl.methods:
|
||||
methods.append(method_decl.name)
|
||||
class_string = "Declarations: " + ", ".join(declarations) + "\n Method name: " + ", ".join(methods)
|
||||
classes[class_name] = class_string
|
||||
return classes
|
||||
|
||||
|
||||
def extract_functions_and_classes(directory):
|
||||
files = find_files(directory)
|
||||
functions_dict = {}
|
||||
classes_dict = {}
|
||||
for file in files:
|
||||
functions = extract_functions(file)
|
||||
if functions:
|
||||
functions_dict[file] = functions
|
||||
classes = extract_classes(file)
|
||||
if classes:
|
||||
classes_dict[file] = classes
|
||||
return functions_dict, classes_dict
|
||||
70
application/parser/js2doc.py
Normal file
@@ -0,0 +1,70 @@
|
||||
import os
|
||||
|
||||
import escodegen
|
||||
import esprima
|
||||
|
||||
|
||||
def find_files(directory):
|
||||
files_list = []
|
||||
for root, dirs, files in os.walk(directory):
|
||||
for file in files:
|
||||
if file.endswith('.js'):
|
||||
files_list.append(os.path.join(root, file))
|
||||
return files_list
|
||||
|
||||
|
||||
def extract_functions(file_path):
|
||||
with open(file_path, 'r') as file:
|
||||
source_code = file.read()
|
||||
functions = {}
|
||||
tree = esprima.parseScript(source_code)
|
||||
for node in tree.body:
|
||||
if node.type == 'FunctionDeclaration':
|
||||
func_name = node.id.name if node.id else '<anonymous>'
|
||||
functions[func_name] = escodegen.generate(node)
|
||||
elif node.type == 'VariableDeclaration':
|
||||
for declaration in node.declarations:
|
||||
if declaration.init and declaration.init.type == 'FunctionExpression':
|
||||
func_name = declaration.id.name if declaration.id else '<anonymous>'
|
||||
functions[func_name] = escodegen.generate(declaration.init)
|
||||
elif node.type == 'ClassDeclaration':
|
||||
for subnode in node.body.body:
|
||||
if subnode.type == 'MethodDefinition':
|
||||
func_name = subnode.key.name
|
||||
functions[func_name] = escodegen.generate(subnode.value)
|
||||
elif subnode.type == 'VariableDeclaration':
|
||||
for declaration in subnode.declarations:
|
||||
if declaration.init and declaration.init.type == 'FunctionExpression':
|
||||
func_name = declaration.id.name if declaration.id else '<anonymous>'
|
||||
functions[func_name] = escodegen.generate(declaration.init)
|
||||
return functions
|
||||
|
||||
|
||||
def extract_classes(file_path):
|
||||
with open(file_path, 'r') as file:
|
||||
source_code = file.read()
|
||||
classes = {}
|
||||
tree = esprima.parseScript(source_code)
|
||||
for node in tree.body:
|
||||
if node.type == 'ClassDeclaration':
|
||||
class_name = node.id.name
|
||||
function_names = []
|
||||
for subnode in node.body.body:
|
||||
if subnode.type == 'MethodDefinition':
|
||||
function_names.append(subnode.key.name)
|
||||
classes[class_name] = ", ".join(function_names)
|
||||
return classes
|
||||
|
||||
|
||||
def extract_functions_and_classes(directory):
|
||||
files = find_files(directory)
|
||||
functions_dict = {}
|
||||
classes_dict = {}
|
||||
for file in files:
|
||||
functions = extract_functions(file)
|
||||
if functions:
|
||||
functions_dict[file] = functions
|
||||
classes = extract_classes(file)
|
||||
if classes:
|
||||
classes_dict[file] = classes
|
||||
return functions_dict, classes_dict
|
||||
94
application/parser/open_ai_func.py
Normal file
@@ -0,0 +1,94 @@
|
||||
import os
|
||||
|
||||
import tiktoken
|
||||
from application.vectorstore.vector_creator import VectorCreator
|
||||
from application.core.settings import settings
|
||||
from retry import retry
|
||||
|
||||
|
||||
# from langchain_community.embeddings import HuggingFaceEmbeddings
|
||||
# from langchain_community.embeddings import HuggingFaceInstructEmbeddings
|
||||
# from langchain_community.embeddings import CohereEmbeddings
|
||||
|
||||
|
||||
def num_tokens_from_string(string: str, encoding_name: str) -> tuple:
|
||||
# Function to convert string to tokens and estimate user cost.
|
||||
encoding = tiktoken.get_encoding(encoding_name)
|
||||
num_tokens = len(encoding.encode(string))
|
||||
total_price = ((num_tokens / 1000) * 0.0004)
|
||||
return num_tokens, total_price
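A quick sanity check of the cost arithmetic (the $0.0004 per 1K tokens rate is the one hard-coded above); the sample string is arbitrary.
```
tokens, price = num_tokens_from_string("DocsGPT indexes your documentation.", "cl100k_base")
print(tokens, price)  # price is simply (tokens / 1000) * 0.0004
```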
|
||||
|
||||
|
||||
@retry(tries=10, delay=60)
|
||||
def store_add_texts_with_retry(store, i):
|
||||
store.add_texts([i.page_content], metadatas=[i.metadata])
|
||||
# store_pine.add_texts([i.page_content], metadatas=[i.metadata])
|
||||
|
||||
|
||||
def call_openai_api(docs, folder_name, task_status):
|
||||
# Function to create a vector store from the documents and save it to disk.
|
||||
|
||||
# create output folder if it doesn't exist
|
||||
if not os.path.exists(f"{folder_name}"):
|
||||
os.makedirs(f"{folder_name}")
|
||||
|
||||
from tqdm import tqdm
|
||||
c1 = 0
|
||||
if settings.VECTOR_STORE == "faiss":
|
||||
docs_init = [docs[0]]
|
||||
docs.pop(0)
|
||||
|
||||
store = VectorCreator.create_vectorstore(
|
||||
settings.VECTOR_STORE,
|
||||
            docs_init=docs_init,
|
||||
path=f"{folder_name}",
|
||||
embeddings_key=os.getenv("EMBEDDINGS_KEY")
|
||||
)
|
||||
else:
|
||||
store = VectorCreator.create_vectorstore(
|
||||
settings.VECTOR_STORE,
|
||||
path=f"{folder_name}",
|
||||
embeddings_key=os.getenv("EMBEDDINGS_KEY")
|
||||
)
|
||||
# Uncomment for MPNet embeddings
|
||||
# model_name = "sentence-transformers/all-mpnet-base-v2"
|
||||
# hf = HuggingFaceEmbeddings(model_name=model_name)
|
||||
# store = FAISS.from_documents(docs_test, hf)
|
||||
s1 = len(docs)
|
||||
for i in tqdm(docs, desc="Embedding 🦖", unit="docs", total=len(docs),
|
||||
bar_format='{l_bar}{bar}| Time Left: {remaining}'):
|
||||
try:
|
||||
task_status.update_state(state='PROGRESS', meta={'current': int((c1 / s1) * 100)})
|
||||
store_add_texts_with_retry(store, i)
|
||||
except Exception as e:
|
||||
print(e)
|
||||
print("Error on ", i)
|
||||
print("Saving progress")
|
||||
print(f"stopped at {c1} out of {len(docs)}")
|
||||
store.save_local(f"{folder_name}")
|
||||
break
|
||||
c1 += 1
|
||||
if settings.VECTOR_STORE == "faiss":
|
||||
store.save_local(f"{folder_name}")
|
||||
|
||||
|
||||
def get_user_permission(docs, folder_name):
|
||||
# Function to ask user permission to call the OpenAI api and spend their OpenAI funds.
|
||||
# Here we convert the docs list to a string and calculate the number of OpenAI tokens the string represents.
|
||||
# docs_content = (" ".join(docs))
|
||||
docs_content = ""
|
||||
for doc in docs:
|
||||
docs_content += doc.page_content
|
||||
|
||||
tokens, total_price = num_tokens_from_string(string=docs_content, encoding_name="cl100k_base")
|
||||
# Here we print the number of tokens and the approx user cost with some visually appealing formatting.
|
||||
print(f"Number of Tokens = {format(tokens, ',d')}")
|
||||
print(f"Approx Cost = ${format(total_price, ',.2f')}")
|
||||
# Here we check for user permission before calling the API.
|
||||
user_input = input("Price Okay? (Y/N) \n").lower()
|
||||
if user_input == "y":
|
||||
call_openai_api(docs, folder_name)
|
||||
elif user_input == "":
|
||||
call_openai_api(docs, folder_name)
|
||||
else:
|
||||
print("The API was not called. No money was spent.")
|
||||
121
application/parser/py2doc.py
Normal file
@@ -0,0 +1,121 @@
|
||||
import ast
|
||||
import os
|
||||
from pathlib import Path
|
||||
|
||||
import tiktoken
|
||||
from langchain.llms import OpenAI
|
||||
from langchain.prompts import PromptTemplate
|
||||
|
||||
|
||||
def find_files(directory):
|
||||
files_list = []
|
||||
for root, dirs, files in os.walk(directory):
|
||||
for file in files:
|
||||
if file.endswith('.py'):
|
||||
files_list.append(os.path.join(root, file))
|
||||
return files_list
|
||||
|
||||
|
||||
def extract_functions(file_path):
|
||||
with open(file_path, 'r') as file:
|
||||
source_code = file.read()
|
||||
functions = {}
|
||||
tree = ast.parse(source_code)
|
||||
for node in ast.walk(tree):
|
||||
if isinstance(node, ast.FunctionDef):
|
||||
func_name = node.name
|
||||
func_def = ast.get_source_segment(source_code, node)
|
||||
functions[func_name] = func_def
|
||||
return functions
|
||||
|
||||
|
||||
def extract_classes(file_path):
|
||||
with open(file_path, 'r') as file:
|
||||
source_code = file.read()
|
||||
classes = {}
|
||||
tree = ast.parse(source_code)
|
||||
for node in ast.walk(tree):
|
||||
if isinstance(node, ast.ClassDef):
|
||||
class_name = node.name
|
||||
function_names = []
|
||||
for subnode in ast.walk(node):
|
||||
if isinstance(subnode, ast.FunctionDef):
|
||||
function_names.append(subnode.name)
|
||||
classes[class_name] = ", ".join(function_names)
|
||||
return classes
|
||||
|
||||
|
||||
def extract_functions_and_classes(directory):
|
||||
files = find_files(directory)
|
||||
functions_dict = {}
|
||||
classes_dict = {}
|
||||
for file in files:
|
||||
functions = extract_functions(file)
|
||||
if functions:
|
||||
functions_dict[file] = functions
|
||||
classes = extract_classes(file)
|
||||
if classes:
|
||||
classes_dict[file] = classes
|
||||
return functions_dict, classes_dict
|
||||
|
||||
|
||||
def parse_functions(functions_dict, formats, dir):
|
||||
c1 = len(functions_dict)
|
||||
for i, (source, functions) in enumerate(functions_dict.items(), start=1):
|
||||
print(f"Processing file {i}/{c1}")
|
||||
source_w = source.replace(dir + "/", "").replace("." + formats, ".md")
|
||||
subfolders = "/".join(source_w.split("/")[:-1])
|
||||
Path(f"outputs/{subfolders}").mkdir(parents=True, exist_ok=True)
|
||||
for j, (name, function) in enumerate(functions.items(), start=1):
|
||||
print(f"Processing function {j}/{len(functions)}")
|
||||
prompt = PromptTemplate(
|
||||
input_variables=["code"],
|
||||
template="Code: \n{code}, \nDocumentation: ",
|
||||
)
|
||||
llm = OpenAI(temperature=0)
|
||||
response = llm(prompt.format(code=function))
|
||||
mode = "a" if Path(f"outputs/{source_w}").exists() else "w"
|
||||
with open(f"outputs/{source_w}", mode) as f:
|
||||
f.write(
|
||||
f"\n\n# Function name: {name} \n\nFunction: \n```\n{function}\n```, \nDocumentation: \n{response}")
|
||||
|
||||
|
||||
def parse_classes(classes_dict, formats, dir):
|
||||
c1 = len(classes_dict)
|
||||
for i, (source, classes) in enumerate(classes_dict.items()):
|
||||
print(f"Processing file {i + 1}/{c1}")
|
||||
source_w = source.replace(dir + "/", "").replace("." + formats, ".md")
|
||||
subfolders = "/".join(source_w.split("/")[:-1])
|
||||
Path(f"outputs/{subfolders}").mkdir(parents=True, exist_ok=True)
|
||||
for j, (name, function_names) in enumerate(classes.items(), start=1):
|
||||
print(f"Processing Class {i + 1}/{c1}")
|
||||
prompt = PromptTemplate(
|
||||
input_variables=["class_name", "functions_names"],
|
||||
template="Class name: {class_name} \nFunctions: {functions_names}, \nDocumentation: ",
|
||||
)
|
||||
llm = OpenAI(temperature=0)
|
||||
response = llm(prompt.format(class_name=name, functions_names=function_names))
|
||||
|
||||
with open(f"outputs/{source_w}", "a" if Path(f"outputs/{source_w}").exists() else "w") as f:
|
||||
f.write(f"\n\n# Class name: {name} \n\nFunctions: \n{function_names}, \nDocumentation: \n{response}")
|
||||
|
||||
|
||||
def transform_to_docs(functions_dict, classes_dict, formats, dir):
|
||||
docs_content = ''.join([str(key) + str(value) for key, value in functions_dict.items()])
|
||||
docs_content += ''.join([str(key) + str(value) for key, value in classes_dict.items()])
|
||||
|
||||
num_tokens = len(tiktoken.get_encoding("cl100k_base").encode(docs_content))
|
||||
total_price = ((num_tokens / 1000) * 0.02)
|
||||
|
||||
print(f"Number of Tokens = {num_tokens:,d}")
|
||||
print(f"Approx Cost = ${total_price:,.2f}")
|
||||
|
||||
user_input = input("Price Okay? (Y/N)\n").lower()
|
||||
if user_input == "y" or user_input == "":
|
||||
if not Path("outputs").exists():
|
||||
Path("outputs").mkdir()
|
||||
parse_functions(functions_dict, formats, dir)
|
||||
parse_classes(classes_dict, formats, dir)
|
||||
print("All done!")
|
||||
else:
|
||||
print("The API was not called. No money was spent.")
|
||||
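A minimal driver sketch for py2doc.py above (illustrative only; the directory name and format string are placeholders, not part of this changeset):

# Assumes OPENAI_API_KEY is set in the environment.
functions_dict, classes_dict = extract_functions_and_classes("my_project")
# At the $0.02-per-1K-token rate used above, 50,000 tokens comes to (50000 / 1000) * 0.02 = $1.00.
transform_to_docs(functions_dict, classes_dict, "py", "my_project")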
1
application/parser/schema/__init__.py
Normal file
@@ -0,0 +1 @@
|
||||
|
||||
34
application/parser/schema/base.py
Normal file
@@ -0,0 +1,34 @@
|
||||
"""Base schema for readers."""
|
||||
from dataclasses import dataclass
|
||||
|
||||
from langchain.docstore.document import Document as LCDocument
|
||||
from application.parser.schema.schema import BaseDocument
|
||||
|
||||
|
||||
@dataclass
|
||||
class Document(BaseDocument):
|
||||
"""Generic interface for a data document.
|
||||
|
||||
This document connects to data sources.
|
||||
|
||||
"""
|
||||
|
||||
def __post_init__(self) -> None:
|
||||
"""Post init."""
|
||||
if self.text is None:
|
||||
raise ValueError("text field not set.")
|
||||
|
||||
@classmethod
|
||||
def get_type(cls) -> str:
|
||||
"""Get Document type."""
|
||||
return "Document"
|
||||
|
||||
def to_langchain_format(self) -> LCDocument:
|
||||
"""Convert struct to LangChain document format."""
|
||||
metadata = self.extra_info or {}
|
||||
return LCDocument(page_content=self.text, metadata=metadata)
|
||||
|
||||
@classmethod
|
||||
def from_langchain_format(cls, doc: LCDocument) -> "Document":
|
||||
"""Convert struct from LangChain document format."""
|
||||
return cls(text=doc.page_content, extra_info=doc.metadata)
|
||||
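A short usage sketch for the Document wrapper above (illustrative; not part of this changeset):

doc = Document(text="hello world", extra_info={"source": "example.md"})
lc_doc = doc.to_langchain_format()      # LCDocument(page_content="hello world", metadata={"source": "example.md"})
restored = Document.from_langchain_format(lc_doc)
assert restored.text == "hello world"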
64
application/parser/schema/schema.py
Normal file
@@ -0,0 +1,64 @@
|
||||
"""Base schema for data structures."""
|
||||
from abc import abstractmethod
|
||||
from dataclasses import dataclass
|
||||
from typing import Any, Dict, List, Optional
|
||||
|
||||
from dataclasses_json import DataClassJsonMixin
|
||||
|
||||
|
||||
@dataclass
|
||||
class BaseDocument(DataClassJsonMixin):
|
||||
"""Base document.
|
||||
|
||||
Generic abstract interfaces that captures both index structs
|
||||
as well as documents.
|
||||
|
||||
"""
|
||||
|
||||
# TODO: consolidate fields from Document/IndexStruct into base class
|
||||
text: Optional[str] = None
|
||||
doc_id: Optional[str] = None
|
||||
embedding: Optional[List[float]] = None
|
||||
|
||||
# extra fields
|
||||
extra_info: Optional[Dict[str, Any]] = None
|
||||
|
||||
@classmethod
|
||||
@abstractmethod
|
||||
def get_type(cls) -> str:
|
||||
"""Get Document type."""
|
||||
|
||||
def get_text(self) -> str:
|
||||
"""Get text."""
|
||||
if self.text is None:
|
||||
raise ValueError("text field not set.")
|
||||
return self.text
|
||||
|
||||
def get_doc_id(self) -> str:
|
||||
"""Get doc_id."""
|
||||
if self.doc_id is None:
|
||||
raise ValueError("doc_id not set.")
|
||||
return self.doc_id
|
||||
|
||||
@property
|
||||
def is_doc_id_none(self) -> bool:
|
||||
"""Check if doc_id is None."""
|
||||
return self.doc_id is None
|
||||
|
||||
def get_embedding(self) -> List[float]:
|
||||
"""Get embedding.
|
||||
|
||||
Errors if embedding is None.
|
||||
|
||||
"""
|
||||
if self.embedding is None:
|
||||
raise ValueError("embedding not set.")
|
||||
return self.embedding
|
||||
|
||||
@property
|
||||
def extra_info_str(self) -> Optional[str]:
|
||||
"""Extra info string."""
|
||||
if self.extra_info is None:
|
||||
return None
|
||||
|
||||
return "\n".join([f"{k}: {str(v)}" for k, v in self.extra_info.items()])
|
||||
77
application/parser/token_func.py
Normal file
@@ -0,0 +1,77 @@
|
||||
import re
|
||||
from math import ceil
|
||||
from typing import List
|
||||
|
||||
import tiktoken
|
||||
from application.parser.schema.base import Document
|
||||
|
||||
|
||||
def separate_header_and_body(text):
|
||||
header_pattern = r"^(.*?\n){3}"
|
||||
match = re.match(header_pattern, text)
|
||||
header = match.group(0) if match else ""
|
||||
body = text[len(header):]
|
||||
return header, body
|
||||
|
||||
|
||||
def group_documents(documents: List[Document], min_tokens: int, max_tokens: int) -> List[Document]:
|
||||
docs = []
|
||||
current_group = None
|
||||
|
||||
for doc in documents:
|
||||
doc_len = len(tiktoken.get_encoding("cl100k_base").encode(doc.text))
|
||||
|
||||
if current_group is None:
|
||||
current_group = Document(text=doc.text, doc_id=doc.doc_id, embedding=doc.embedding,
|
||||
extra_info=doc.extra_info)
|
||||
elif len(tiktoken.get_encoding("cl100k_base").encode(
|
||||
current_group.text)) + doc_len < max_tokens and doc_len < min_tokens:
|
||||
current_group.text += " " + doc.text
|
||||
else:
|
||||
docs.append(current_group)
|
||||
current_group = Document(text=doc.text, doc_id=doc.doc_id, embedding=doc.embedding,
|
||||
extra_info=doc.extra_info)
|
||||
|
||||
if current_group is not None:
|
||||
docs.append(current_group)
|
||||
|
||||
return docs
|
||||
|
||||
|
||||
def split_documents(documents: List[Document], max_tokens: int) -> List[Document]:
|
||||
docs = []
|
||||
for doc in documents:
|
||||
token_length = len(tiktoken.get_encoding("cl100k_base").encode(doc.text))
|
||||
if token_length <= max_tokens:
|
||||
docs.append(doc)
|
||||
else:
|
||||
header, body = separate_header_and_body(doc.text)
|
||||
if len(tiktoken.get_encoding("cl100k_base").encode(header)) > max_tokens:
|
||||
body = doc.text
|
||||
header = ""
|
||||
num_body_parts = ceil(token_length / max_tokens)
|
||||
part_length = ceil(len(body) / num_body_parts)
|
||||
body_parts = [body[i:i + part_length] for i in range(0, len(body), part_length)]
|
||||
for i, body_part in enumerate(body_parts):
|
||||
new_doc = Document(text=header + body_part.strip(),
|
||||
doc_id=f"{doc.doc_id}-{i}",
|
||||
embedding=doc.embedding,
|
||||
extra_info=doc.extra_info)
|
||||
docs.append(new_doc)
|
||||
return docs
|
||||
|
||||
|
||||
def group_split(documents: List[Document], max_tokens: int = 2000, min_tokens: int = 150, token_check: bool = True):
|
||||
if not token_check:
|
||||
return documents
|
||||
print("Grouping small documents")
|
||||
try:
|
||||
documents = group_documents(documents=documents, min_tokens=min_tokens, max_tokens=max_tokens)
|
||||
except Exception:
|
||||
print("Grouping failed, try running without token_check")
|
||||
print("Separating large documents")
|
||||
try:
|
||||
documents = split_documents(documents=documents, max_tokens=max_tokens)
|
||||
except Exception:
|
||||
print("Grouping failed, try running without token_check")
|
||||
return documents
|
||||
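A brief sketch of calling the chunking entry point above (illustrative; the documents are placeholders):

docs = [Document(text="a short paragraph", doc_id="a"),
        Document(text="another short paragraph", doc_id="b")]
chunks = group_split(documents=docs, max_tokens=2000, min_tokens=150, token_check=True)
# Small documents are merged until max_tokens would be exceeded; oversized ones are split.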
9
application/prompts/chat_combine_creative.txt
Normal file
@@ -0,0 +1,9 @@
|
||||
You are DocsGPT, a friendly and helpful AI assistant by Arc53 that provides help with documents. You give thorough answers with code examples if possible.
|
||||
Use the following pieces of context to help answer the user's question. If it's not relevant to the question, provide friendly responses.
|
||||
You have access to chat history, and can use it to help answer the question.
|
||||
When using code examples, use the following format:
|
||||
```(language)
|
||||
(code)
|
||||
```
|
||||
----------------
|
||||
{summaries}
|
||||
9
application/prompts/chat_combine_default.txt
Normal file
@@ -0,0 +1,9 @@
|
||||
You are a helpful AI assistant, DocsGPT, specializing in document assistance, designed to offer detailed and informative responses.
|
||||
If appropriate, your answers can include code examples, formatted as follows:
|
||||
```(language)
|
||||
(code)
|
||||
```
|
||||
You effectively utilize chat history, ensuring relevant and tailored responses.
|
||||
If a question doesn't align with your context, you provide friendly and helpful replies.
|
||||
----------------
|
||||
{summaries}
|
||||
13
application/prompts/chat_combine_strict.txt
Normal file
@@ -0,0 +1,13 @@
|
||||
You are an AI Assistant, DocsGPT, adept at offering document assistance.
|
||||
Your expertise lies in providing answers on top of the provided context.
|
||||
You can leverage the chat history if needed.
|
||||
Answer the question based on the context below.
|
||||
Keep the answer concise. Respond "Irrelevant context" if you are not sure about the answer.
|
||||
If the question is not related to the context, respond "Irrelevant context".
|
||||
When using code examples, use the following format:
|
||||
```(language)
|
||||
(code)
|
||||
```
|
||||
----------------
|
||||
Context:
|
||||
{summaries}
|
||||
3
application/prompts/chat_reduce_prompt.txt
Normal file
@@ -0,0 +1,3 @@
|
||||
Use the following pieces of context to help answer the user's question. If it's not relevant to the question, respond with "-"
|
||||
----------------
|
||||
{context}
|
||||
@@ -1,4 +0,0 @@
|
||||
Use the following portion of a long document to see if any of the text is relevant to answer the question.
|
||||
{{ context }}
|
||||
Question: {{ question }}
|
||||
Provide all relevant text to the question verbatim. Summarize if needed. If nothing relevant return "-".
|
||||
@@ -1,126 +1,33 @@
|
||||
aiodns==3.0.0
|
||||
aiohttp==3.8.3
|
||||
aiohttp-retry==2.8.3
|
||||
aiosignal==1.3.1
|
||||
alabaster==0.7.13
|
||||
aleph-alpha-client==2.16.0
|
||||
anyio==3.6.2
|
||||
argilla==1.3.0
|
||||
async-timeout==4.0.2
|
||||
attrs==22.2.0
|
||||
Babel==2.11.0
|
||||
backoff==2.2.1
|
||||
blobfile==2.0.1
|
||||
boto3==1.26.82
|
||||
botocore==1.29.82
|
||||
cffi==1.15.1
|
||||
charset-normalizer==2.1.1
|
||||
click==8.1.3
|
||||
cohere==3.4.0
|
||||
dataclasses-json==0.5.7
|
||||
decorator==5.1.1
|
||||
deeplake==3.2.12
|
||||
Deprecated==1.2.13
|
||||
dill==0.3.6
|
||||
docutils==0.19
|
||||
anthropic==0.12.0
|
||||
boto3==1.34.6
|
||||
celery==5.3.6
|
||||
dataclasses_json==0.6.3
|
||||
docx2txt==0.8
|
||||
entrypoints==0.4
|
||||
escodegen==1.0.10
|
||||
EbookLib==0.18
|
||||
elasticsearch==8.12.0
|
||||
escodegen==1.0.11
|
||||
esprima==4.0.1
|
||||
esutils==1.0.1
|
||||
et-xmlfile==1.1.0
|
||||
faiss-cpu==1.7.3
|
||||
filelock==3.9.0
|
||||
Flask==2.2.2
|
||||
frozenlist==1.3.3
|
||||
greenlet==2.0.2
|
||||
gunicorn==20.1.0
|
||||
h11==0.14.0
|
||||
httpcore==0.16.3
|
||||
httpx==0.23.3
|
||||
hub==3.0.1
|
||||
huggingface-hub==0.12.0
|
||||
humbug==0.2.8
|
||||
idna==3.4
|
||||
imagesize==1.4.1
|
||||
itsdangerous==2.1.2
|
||||
faiss-cpu==1.7.4
|
||||
Flask==3.0.1
|
||||
gunicorn==21.2.0
|
||||
html2text==2020.1.16
|
||||
javalang==0.13.0
|
||||
Jinja2==3.1.2
|
||||
jmespath==1.0.1
|
||||
joblib==1.2.0
|
||||
langchain==0.0.98
|
||||
lxml==4.9.2
|
||||
manifest-ml==0.1.1
|
||||
MarkupSafe==2.1.2
|
||||
marshmallow==3.19.0
|
||||
marshmallow-enum==1.5.1
|
||||
monotonic==1.6
|
||||
multidict==6.0.4
|
||||
multiprocess==0.70.14
|
||||
mypy-extensions==0.4.3
|
||||
langchain==0.1.4
|
||||
langchain-openai==0.0.5
|
||||
nltk==3.8.1
|
||||
numcodecs==0.11.0
|
||||
numpy==1.23.5
|
||||
openai==0.27.0
|
||||
openpyxl==3.1.1
|
||||
packaging==23.0
|
||||
pandas==1.5.3
|
||||
pathos==0.3.0
|
||||
Pillow==9.4.0
|
||||
pox==0.3.2
|
||||
ppft==1.7.6.6
|
||||
py==1.11.0
|
||||
pycares==4.3.0
|
||||
pycparser==2.21
|
||||
pycryptodomex==3.17
|
||||
pydantic==1.10.4
|
||||
Pygments==2.14.0
|
||||
PyJWT==2.6.0
|
||||
openapi3_parser==1.1.16
|
||||
pandas==2.2.0
|
||||
pydantic_settings==2.1.0
|
||||
pymongo==4.6.1
|
||||
PyPDF2==3.0.1
|
||||
python-dateutil==2.8.2
|
||||
python-docx==0.8.11
|
||||
python-dotenv==0.21.1
|
||||
python-magic==0.4.27
|
||||
python-pptx==0.6.21
|
||||
pytz==2022.7.1
|
||||
PyYAML==6.0
|
||||
redis==4.5.1
|
||||
regex==2022.10.31
|
||||
requests==2.28.2
|
||||
python-dotenv==1.0.1
|
||||
redis==5.0.1
|
||||
Requests==2.31.0
|
||||
retry==0.9.2
|
||||
rfc3986==1.5.0
|
||||
s3transfer==0.6.0
|
||||
scikit-learn==1.2.1
|
||||
scipy==1.10.0
|
||||
sentence-transformers==2.2.2
|
||||
sentencepiece==0.1.97
|
||||
six==1.16.0
|
||||
sniffio==1.3.0
|
||||
snowballstemmer==2.2.0
|
||||
Sphinx==6.1.3
|
||||
sphinxcontrib-applehelp==1.0.4
|
||||
sphinxcontrib-devhelp==1.0.2
|
||||
sphinxcontrib-htmlhelp==2.0.1
|
||||
sphinxcontrib-jsmath==1.0.1
|
||||
sphinxcontrib-qthelp==1.0.3
|
||||
sphinxcontrib-serializinghtml==1.1.5
|
||||
SQLAlchemy==1.4.46
|
||||
sqlitedict==2.1.0
|
||||
tenacity==8.2.1
|
||||
threadpoolctl==3.1.0
|
||||
tiktoken==0.1.2
|
||||
tokenizers==0.13.2
|
||||
torch==1.13.1
|
||||
torchvision==0.14.1
|
||||
tqdm==4.64.1
|
||||
transformers==4.26.0
|
||||
typer==0.7.0
|
||||
typing-inspect==0.8.0
|
||||
typing_extensions==4.4.0
|
||||
unstructured==0.4.11
|
||||
urllib3==1.26.14
|
||||
Werkzeug==2.2.3
|
||||
wrapt==1.14.1
|
||||
XlsxWriter==3.0.8
|
||||
xxhash==3.2.0
|
||||
yarl==1.8.2
|
||||
sentence-transformers
|
||||
tiktoken==0.5.2
|
||||
torch==2.1.2
|
||||
tqdm==4.66.1
|
||||
transformers==4.36.2
|
||||
unstructured==0.12.2
|
||||
Werkzeug==3.0.1
|
||||
|
||||
|
Before Width: | Height: | Size: 37 KiB |
|
Before Width: | Height: | Size: 352 KiB |
|
Before Width: | Height: | Size: 34 KiB |
|
Before Width: | Height: | Size: 631 B |
|
Before Width: | Height: | Size: 1.7 KiB |
|
Before Width: | Height: | Size: 15 KiB |
@@ -1 +0,0 @@
|
||||
{"name":"","short_name":"","icons":[{"src":"/android-chrome-192x192.png","sizes":"192x192","type":"image/png"},{"src":"/android-chrome-512x512.png","sizes":"512x512","type":"image/png"}],"theme_color":"#ffffff","background_color":"#ffffff","display":"standalone"}
|
||||
@@ -1,19 +0,0 @@
|
||||
function resetApiKey() {
|
||||
const modal = document.getElementById("modal");
|
||||
modal.classList.toggle("hidden");
|
||||
}
|
||||
|
||||
const apiKeyForm = document.getElementById("api-key-form");
|
||||
if (apiKeyForm) {
|
||||
apiKeyForm.addEventListener("submit", function(event) {
|
||||
event.preventDefault();
|
||||
|
||||
const apiKeyInput = document.getElementById("api-key-input");
|
||||
const apiKey = apiKeyInput.value;
|
||||
|
||||
localStorage.setItem("apiKey", apiKey);
|
||||
|
||||
apiKeyInput.value = "";
|
||||
modal.classList.toggle("hidden");
|
||||
});
|
||||
}
|
||||
@@ -1,74 +0,0 @@
|
||||
var form = document.getElementById('message-form');
|
||||
var errorModal = document.getElementById('error-alert')
|
||||
document.getElementById('close').addEventListener('click',()=>{
|
||||
errorModal.classList.toggle('hidden')
|
||||
})
|
||||
|
||||
|
||||
function submitForm(event){
|
||||
event.preventDefault()
|
||||
var message = document.getElementById("message-input").value;
|
||||
console.log(message.length)
|
||||
if(message.length === 0){
|
||||
return
|
||||
}
|
||||
msg_html = '<div class="bg-blue-500 text-white p-2 rounded-lg mb-2 self-end"><p class="text-sm">'
|
||||
msg_html += message
|
||||
msg_html += '</p></div>'
|
||||
document.getElementById("messages").innerHTML += msg_html;
|
||||
let chatWindow = document.getElementById("messages-container");
|
||||
chatWindow.scrollTop = chatWindow.scrollHeight;
|
||||
document.getElementById("message-input").value = "";
|
||||
document.getElementById("button-submit").innerHTML = '<i class="fa fa-circle-o-notch fa-spin"></i> Thinking...';
|
||||
document.getElementById("button-submit").disabled = true;
|
||||
if (localStorage.getItem('activeDocs') == null) {
|
||||
localStorage.setItem('activeDocs', 'default')
|
||||
}
|
||||
|
||||
|
||||
fetch('/api/answer', {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
},
|
||||
|
||||
body: JSON.stringify({question: message,
|
||||
api_key: localStorage.getItem('apiKey'),
|
||||
embeddings_key: localStorage.getItem('apiKey'),
|
||||
history: localStorage.getItem('chatHistory'),
|
||||
active_docs: localStorage.getItem('activeDocs')}),
|
||||
}).then((response)=> response.json())
|
||||
.then(data => {
|
||||
console.log('Success:', data);
|
||||
if(data.error){
|
||||
document.getElementById('text-error').textContent = `Error : ${JSON.stringify(data.message)}`
|
||||
errorModal.classList.toggle('hidden')
|
||||
}
|
||||
if(data.answer){
|
||||
msg_html = '<div class="bg-indigo-500 text-white p-2 rounded-lg mb-2 self-start"><code class="text-sm">'
|
||||
data.answer = data.answer.replace(/\n/g, "<br>");
|
||||
msg_html += data.answer
|
||||
msg_html += '</code></div>'
|
||||
document.getElementById("messages").innerHTML += msg_html;
|
||||
let chatWindow = document.getElementById("messages-container");
|
||||
chatWindow.scrollTop = chatWindow.scrollHeight;
|
||||
}
|
||||
document.getElementById("button-submit").innerHTML = 'Send';
|
||||
document.getElementById("button-submit").disabled = false;
|
||||
let chatHistory = [message, data.answer || ''];
|
||||
localStorage.setItem('chatHistory', JSON.stringify(chatHistory));
|
||||
|
||||
|
||||
|
||||
|
||||
})
|
||||
.catch((error) => {
|
||||
console.error('Error:', error);
|
||||
// console.log(error);
|
||||
// document.getElementById("button-submit").innerHTML = 'Send';
|
||||
// document.getElementById("button-submit").disabled = false;
|
||||
|
||||
});
|
||||
}
|
||||
|
||||
window.addEventListener('submit',submitForm)
|
||||
@@ -1,15 +0,0 @@
|
||||
document.getElementById("select-docs").addEventListener("change", function() {
|
||||
localStorage.setItem('activeDocs', this.value)
|
||||
fetch('/api/docs_check', {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
},
|
||||
body: JSON.stringify({docs: this.value}),
|
||||
}).then(response => response.json()).then(
|
||||
data => {
|
||||
console.log('Success:', data);
|
||||
}
|
||||
)
|
||||
});
|
||||
|
||||
@@ -1,37 +0,0 @@
|
||||
@tailwind base;
|
||||
@tailwind components;
|
||||
@tailwind utilities;
|
||||
|
||||
|
||||
|
||||
|
||||
@media screen and (max-width: 1024px) {
|
||||
.text-lg {
|
||||
font-size: 3.125rem;
|
||||
margin: 2rem;
|
||||
line-height: inherit;
|
||||
}
|
||||
.text-sm {
|
||||
font-size: 2.5rem;
|
||||
margin: 1.5rem;
|
||||
line-height: inherit;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
|
||||
.loader {
|
||||
border: 16px solid #f3f3f3; /* Light grey */
|
||||
border-top: 16px solid #3498db; /* Blue */
|
||||
border-radius: 50%;
|
||||
width: 120px;
|
||||
height: 120px;
|
||||
animation: spin 2s linear infinite;
|
||||
}
|
||||
|
||||
@keyframes spin {
|
||||
0% { transform: rotate(0deg); }
|
||||
100% { transform: rotate(360deg); }
|
||||
}
|
||||
|
||||
|
||||
@@ -1,195 +0,0 @@
|
||||
<!DOCTYPE html>
|
||||
<html>
|
||||
<head>
|
||||
<title>DocsGPT 🦖 Preview</title>
|
||||
<link href="{{url_for('static',filename='dist/css/output.css')}}" rel="stylesheet">
|
||||
<link rel="favicon" href="{{ url_for('static', filename='favicon/favicon.ico') }}">
|
||||
<link rel="apple-touch-icon" sizes="180x180" href="{{ url_for('static', filename='favicon/apple-touch-icon.png') }}">
|
||||
<link rel="icon" type="image/png" sizes="32x32" href="{{ url_for('static', filename='favicon/favicon-32x32.png') }}">
|
||||
<link rel="icon" type="image/png" sizes="16x16" href="{{ url_for('static', filename='favicon/favicon-16x16.png') }}">
|
||||
<link rel="manifest" href="{{ url_for('static', filename='favicon//site.webmanifest') }}">
|
||||
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/4.7.0/css/font-awesome.min.css">
|
||||
|
||||
|
||||
|
||||
</head>
|
||||
|
||||
|
||||
<body>
|
||||
|
||||
|
||||
|
||||
<header class="bg-white p-2 flex justify-between items-center">
|
||||
<h1 class="text-lg font-medium">DocsGPT 🦖 Preview</h1>
|
||||
<div>
|
||||
<a href="https://github.com/arc53/docsgpt" class="text-blue-500 hover:text-blue-800 text-sm">About</a>
|
||||
{% if not api_key_set %}
|
||||
<button class="text-sm text-yellow-500 hover:text-yellow-800" onclick="resetApiKey()">Reset Key</button>
|
||||
{% endif %}
|
||||
</div>
|
||||
</header>
|
||||
|
||||
|
||||
<!-- Alert Info -->
|
||||
<div class="border flex justify-between
|
||||
w-auto px-4 py-3 rounded relative
|
||||
hidden" style="background-color: rgb(197, 51, 51);color: white;" id="error-alert" role="alert">
|
||||
<span class="block sm:inline" id="text-error"></span>
|
||||
<strong class="text-xl align-center alert-del" style="cursor: pointer;" id="close">×</strong>
|
||||
</div>
|
||||
|
||||
|
||||
<div class="lg:flex ml-2 mr-2">
|
||||
<div class="lg:w-3/4 min-h-screen max-h-screen">
|
||||
<div class="w-full flex flex-col h-5/6">
|
||||
<div id="messages-container" style="overflow: auto;" class="sm:max-lg:mb-[12rem]">
|
||||
|
||||
<div id="messages" class="w-full flex flex-col mt-2" >
|
||||
<div class="bg-indigo-500 text-white p-2 rounded-lg mb-2 self-start">
|
||||
<p class="text-sm">Hello, ask me anything about this library. Im here to help</p>
|
||||
</div>
|
||||
<div class="bg-blue-500 text-white p-2 rounded-lg mb-2 self-end">
|
||||
<p class="text-sm">How to merge tables?</p>
|
||||
</div>
|
||||
<div class="bg-indigo-500 text-white p-2 rounded-lg mb-2 self-start">
|
||||
<p class="text-sm">To merge two tables in pandas, you can use the pd.merge() function. The basic syntax is:<br>
|
||||
pd.merge(left, right, on, how)<br>
|
||||
where left and right are the two tables to merge, on is the column to merge on, and how is the type of merge to perform.<br>
|
||||
For example, to merge the two tables df1 and df2 on the column 'key', you can use:<br>
|
||||
pd.merge(df1, df2, on='key', how='left')<br>
|
||||
This will return a new DataFrame with all the columns from both tables, and only the rows that match the 'key' column. </p>
|
||||
</div>
|
||||
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="fixed bottom-0 w-full mt-4 mb-2 lg:w-3/4">
|
||||
<form id="message-form" autocomplete="off" class="flex items-stretch">
|
||||
<input autocomplete="off" id="message-input" class="bg-white p-2 rounded-lg ml-2 text-sm w-full" type="text" placeholder="Type your message here...">
|
||||
<button id="button-submit" class="bg-blue-500 text-white p-2 rounded-lg ml-2 mr-2 text-sm sm:max-lg:p-5" type="submit">Send</button>
|
||||
</form>
|
||||
</div>
|
||||
|
||||
|
||||
|
||||
|
||||
</div>
|
||||
</div>
|
||||
<div class="lg:w-1/4 p-2 sm:max-lg:hidden">
|
||||
<p class="text-sm">This is a chatbot that uses the GPT-3, Faiss and <a href="https://github.com/hwchase17/langchain" class="text-blue-500 hover:text-blue-800">LangChain</a> to answer questions</p>
|
||||
<br>
|
||||
<p class="text-sm">The source code is available on <a href="https://github.com/arc53/docsgpt" class="text-blue-500 hover:text-blue-800">Github</a></p><br>
|
||||
<p class="text-sm">Currently It uses python pandas documentation, so it will respond to information relevant to pandas. If you want to train it on different documentation - <a href="https://github.com/arc53/docsgpt/wiki/How-to-train-on-other-documentation" class="text-blue-500 hover:text-blue-800"> please follow this guide </a></p><br>
|
||||
<p class="text-sm">If you want to launch it on your own server - <a href="https://github.com/arc53/docsgpt/wiki/How-to-train-on-other-documentation" class="text-blue-500 hover:text-blue-800"> follow this guide </a></p><br>
|
||||
<label class="block mb-2 text-sm font-medium text-gray-900">Select documentation from DocsHUB</label>
|
||||
<select id="select-docs" class="bg-gray-50 border border-gray-300 text-gray-900 text-sm rounded-lg focus:ring-blue-500 focus:border-blue-500 block w-full p-2.5">
|
||||
<option selected value="default">Choose documentation</option>
|
||||
<option value="default">Default</option>
|
||||
</select>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="flex items-center justify-center h-full">
|
||||
|
||||
|
||||
</div>
|
||||
|
||||
|
||||
|
||||
|
||||
{% if not api_key_set %}
|
||||
|
||||
<div class="fixed z-10 overflow-y-auto top-0 w-full left-0 show" id="modal">
|
||||
<div class="flex items-center justify-center min-height-100vh pt-4 px-4 pb-20 text-center sm:block sm:p-0">
|
||||
<div class="fixed inset-0 transition-opacity">
|
||||
<div class="absolute inset-0 bg-gray-900 opacity-75" />
|
||||
</div>
|
||||
<span class="hidden sm:inline-block sm:align-middle sm:h-screen">​</span>
|
||||
<div class=" text-sm inline-block align-center bg-white rounded-lg text-left overflow-hidden shadow-xl transform transition-all sm:my-8 sm:align-middle sm:max-w-lg sm:w-full" role="dialog" aria-modal="true" aria-labelledby="modal-headline">
|
||||
<form id="api-key-form">
|
||||
<div class="bg-white px-4 pt-5 pb-4 sm:p-6 sm:pb-4">
|
||||
<h2>Before you can start using DocsGPT we need you to provide an API key for llm. Currently, we support only OpenAI but soon many more. You can find it <a class="text-blue-500 hover:text-blue-800" href="https://platform.openai.com/account/api-keys">here</a></h2><br>
|
||||
<label>OpenAI API key:</label>
|
||||
|
||||
<input id="api-key-input" type="password" class="w-full bg-gray-100 p-2 mt-2 mb-3" placeholder="Paste you Api Key here">
|
||||
|
||||
</div>
|
||||
<div class="bg-gray-200 px-4 py-3 text-right">
|
||||
<button type="submit" class="py-2 px-4 bg-blue-500 text-white rounded hover:bg-blue-700 mr-2">Save</button>
|
||||
|
||||
</div>
|
||||
</form>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
{% endif %}
|
||||
|
||||
|
||||
|
||||
<script>
|
||||
function docsIndex() {
|
||||
// loads latest index from https://raw.githubusercontent.com/arc53/DocsHUB/main/combined.json
|
||||
// and stores it in localStorage
|
||||
fetch('https://d3dg1063dc54p9.cloudfront.net/combined.json')
|
||||
.then(response => response.json())
|
||||
.then(data => {
|
||||
localStorage.setItem("docsIndex", JSON.stringify(data));
|
||||
localStorage.setItem("docsIndexDate", Date.now());
|
||||
generateOptions()
|
||||
}
|
||||
|
||||
)
|
||||
|
||||
}
|
||||
function generateOptions(){
|
||||
docsIndex = localStorage.getItem('docsIndex')
|
||||
// create option on select with id select-docs
|
||||
var select = document.getElementById("select-docs");
|
||||
// convert docsIndex to json
|
||||
docsIndex = JSON.parse(docsIndex)
|
||||
// create option for each key in docsIndex
|
||||
for (var key in docsIndex) {
|
||||
var option = document.createElement("option");
|
||||
if (docsIndex[key].name == docsIndex[key].language) {
|
||||
option.text = docsIndex[key].name + " " + docsIndex[key].version;
|
||||
option.value = docsIndex[key].name + "/" + ".project" + "/" + docsIndex[key].version + "/{{ embeddings_choice }}/";
|
||||
if (docsIndex[key].model == "{{ embeddings_choice }}") {
|
||||
select.add(option);
|
||||
}
|
||||
}
|
||||
else {
|
||||
option.text = docsIndex[key].name + " " + docsIndex[key].version;
|
||||
option.value = docsIndex[key].language + "/" + docsIndex[key].name + "/" + docsIndex[key].version + "/{{ embeddings_choice }}/";
|
||||
if (docsIndex[key].model == "{{ embeddings_choice }}") {
|
||||
select.add(option);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
{% if not api_key_set %}
|
||||
if (localStorage.getItem('apiKey') === null) {
|
||||
console.log("apiKey is not set")
|
||||
document.getElementById('modal').classList.toggle('hidden')
|
||||
}
|
||||
{% endif %}
|
||||
if (localStorage.getItem('docsIndex') === null) {
|
||||
console.log("docsIndex is not set")
|
||||
docsIndex()
|
||||
}
|
||||
else if (localStorage.getItem("docsIndexDate") < Date.now() - 900000) {
|
||||
console.log("docsIndex is older than 15 minutes")
|
||||
docsIndex()
|
||||
}
|
||||
|
||||
generateOptions()
|
||||
|
||||
</script>
|
||||
{% if not api_key_set %}
|
||||
<script src="{{url_for('static',filename='src/authapi.js')}}"></script>
|
||||
{% endif %}
|
||||
<script src="{{url_for('static',filename='src/chat.js')}}"></script>
|
||||
<script src="{{url_for('static',filename='src/choiceChange.js')}}"></script>
|
||||
|
||||
</body>
|
||||
</html>
|
||||
0
application/vectorstore/__init__.py
Normal file
56
application/vectorstore/base.py
Normal file
@@ -0,0 +1,56 @@
|
||||
from abc import ABC, abstractmethod
|
||||
import os
|
||||
from langchain_community.embeddings import (
|
||||
HuggingFaceEmbeddings,
|
||||
CohereEmbeddings,
|
||||
HuggingFaceInstructEmbeddings,
|
||||
)
|
||||
from langchain_openai import OpenAIEmbeddings
|
||||
from application.core.settings import settings
|
||||
|
||||
class BaseVectorStore(ABC):
|
||||
def __init__(self):
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def search(self, *args, **kwargs):
|
||||
pass
|
||||
|
||||
def is_azure_configured(self):
|
||||
return settings.OPENAI_API_BASE and settings.OPENAI_API_VERSION and settings.AZURE_DEPLOYMENT_NAME
|
||||
|
||||
def _get_embeddings(self, embeddings_name, embeddings_key=None):
|
||||
embeddings_factory = {
|
||||
"openai_text-embedding-ada-002": OpenAIEmbeddings,
|
||||
"huggingface_sentence-transformers/all-mpnet-base-v2": HuggingFaceEmbeddings,
|
||||
"huggingface_hkunlp/instructor-large": HuggingFaceInstructEmbeddings,
|
||||
"cohere_medium": CohereEmbeddings
|
||||
}
|
||||
|
||||
if embeddings_name not in embeddings_factory:
|
||||
raise ValueError(f"Invalid embeddings_name: {embeddings_name}")
|
||||
|
||||
if embeddings_name == "openai_text-embedding-ada-002":
|
||||
if self.is_azure_configured():
|
||||
os.environ["OPENAI_API_TYPE"] = "azure"
|
||||
embedding_instance = embeddings_factory[embeddings_name](
|
||||
model=settings.AZURE_EMBEDDINGS_DEPLOYMENT_NAME
|
||||
)
|
||||
else:
|
||||
embedding_instance = embeddings_factory[embeddings_name](
|
||||
openai_api_key=embeddings_key
|
||||
)
|
||||
elif embeddings_name == "cohere_medium":
|
||||
embedding_instance = embeddings_factory[embeddings_name](
|
||||
cohere_api_key=embeddings_key
|
||||
)
|
||||
elif embeddings_name == "huggingface_sentence-transformers/all-mpnet-base-v2":
|
||||
embedding_instance = embeddings_factory[embeddings_name](
|
||||
#model_name="./model/all-mpnet-base-v2",
|
||||
model_kwargs={"device": "cpu"},
|
||||
)
|
||||
else:
|
||||
embedding_instance = embeddings_factory[embeddings_name]()
|
||||
|
||||
return embedding_instance
|
||||
|
||||
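A hypothetical subclass sketch (not part of this changeset) showing the minimum the abstract base above expects from an implementation:

class InMemoryStore(BaseVectorStore):
    """Toy store that returns the first k pre-loaded documents for any query."""

    def __init__(self, docs):
        super().__init__()
        self.docs = docs

    def search(self, question, k=2, *args, **kwargs):
        return self.docs[:k]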
8
application/vectorstore/document_class.py
Normal file
@@ -0,0 +1,8 @@
|
||||
class Document(str):
|
||||
"""Class for storing a piece of text and associated metadata."""
|
||||
|
||||
def __new__(cls, page_content: str, metadata: dict):
|
||||
instance = super().__new__(cls, page_content)
|
||||
instance.page_content = page_content
|
||||
instance.metadata = metadata
|
||||
return instance
|
||||
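A small usage sketch (illustrative):

d = Document("some passage of text", {"store": "local/default"})
d.upper()       # behaves like a plain str
d.metadata      # {"store": "local/default"}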
213
application/vectorstore/elasticsearch.py
Normal file
@@ -0,0 +1,213 @@
|
||||
from application.vectorstore.base import BaseVectorStore
|
||||
from application.core.settings import settings
|
||||
from application.vectorstore.document_class import Document
|
||||
import elasticsearch
|
||||
|
||||
|
||||
|
||||
|
||||
class ElasticsearchStore(BaseVectorStore):
|
||||
_es_connection = None # Class attribute to hold the Elasticsearch connection
|
||||
|
||||
def __init__(self, path, embeddings_key, index_name=settings.ELASTIC_INDEX):
|
||||
super().__init__()
|
||||
self.path = path.replace("application/indexes/", "").rstrip("/")
|
||||
self.embeddings_key = embeddings_key
|
||||
self.index_name = index_name
|
||||
|
||||
if ElasticsearchStore._es_connection is None:
|
||||
connection_params = {}
|
||||
if settings.ELASTIC_URL:
|
||||
connection_params["hosts"] = [settings.ELASTIC_URL]
|
||||
connection_params["http_auth"] = (settings.ELASTIC_USERNAME, settings.ELASTIC_PASSWORD)
|
||||
elif settings.ELASTIC_CLOUD_ID:
|
||||
connection_params["cloud_id"] = settings.ELASTIC_CLOUD_ID
|
||||
connection_params["basic_auth"] = (settings.ELASTIC_USERNAME, settings.ELASTIC_PASSWORD)
|
||||
else:
|
||||
raise ValueError("Please provide either elasticsearch_url or cloud_id.")
|
||||
|
||||
|
||||
|
||||
ElasticsearchStore._es_connection = elasticsearch.Elasticsearch(**connection_params)
|
||||
|
||||
self.docsearch = ElasticsearchStore._es_connection
|
||||
|
||||
@staticmethod
def connect_to_elasticsearch(
|
||||
*,
|
||||
es_url = None,
|
||||
cloud_id = None,
|
||||
api_key = None,
|
||||
username = None,
|
||||
password = None,
|
||||
):
|
||||
try:
|
||||
import elasticsearch
|
||||
except ImportError:
|
||||
raise ImportError(
|
||||
"Could not import elasticsearch python package. "
|
||||
"Please install it with `pip install elasticsearch`."
|
||||
)
|
||||
|
||||
if es_url and cloud_id:
|
||||
raise ValueError(
|
||||
"Both es_url and cloud_id are defined. Please provide only one."
|
||||
)
|
||||
|
||||
connection_params = {}
|
||||
|
||||
if es_url:
|
||||
connection_params["hosts"] = [es_url]
|
||||
elif cloud_id:
|
||||
connection_params["cloud_id"] = cloud_id
|
||||
else:
|
||||
raise ValueError("Please provide either elasticsearch_url or cloud_id.")
|
||||
|
||||
if api_key:
|
||||
connection_params["api_key"] = api_key
|
||||
elif username and password:
|
||||
connection_params["basic_auth"] = (username, password)
|
||||
|
||||
es_client = elasticsearch.Elasticsearch(
|
||||
**connection_params,
|
||||
)
|
||||
try:
|
||||
es_client.info()
|
||||
except Exception as e:
|
||||
raise e
|
||||
|
||||
return es_client
|
||||
|
||||
def search(self, question, k=2, index_name=settings.ELASTIC_INDEX, *args, **kwargs):
|
||||
embeddings = self._get_embeddings(settings.EMBEDDINGS_NAME, self.embeddings_key)
|
||||
vector = embeddings.embed_query(question)
|
||||
knn = {
|
||||
"filter": [{"match": {"metadata.store.keyword": self.path}}],
|
||||
"field": "vector",
|
||||
"k": k,
|
||||
"num_candidates": 100,
|
||||
"query_vector": vector,
|
||||
}
|
||||
full_query = {
|
||||
"knn": knn,
|
||||
"query": {
|
||||
"bool": {
|
||||
"must": [
|
||||
{
|
||||
"match": {
|
||||
"text": {
|
||||
"query": question,
|
||||
}
|
||||
}
|
||||
}
|
||||
],
|
||||
"filter": [{"match": {"metadata.store.keyword": self.path}}],
|
||||
}
|
||||
},
|
||||
"rank": {"rrf": {}},
|
||||
}
|
||||
resp = self.docsearch.search(index=self.index_name, query=full_query['query'], size=k, knn=full_query['knn'])
|
||||
# create Documents objects from the results page_content ['_source']['text'], metadata ['_source']['metadata']
|
||||
doc_list = []
|
||||
for hit in resp['hits']['hits']:
|
||||
|
||||
doc_list.append(Document(page_content = hit['_source']['text'], metadata = hit['_source']['metadata']))
|
||||
return doc_list
|
||||
|
||||
def _create_index_if_not_exists(
|
||||
self, index_name, dims_length
|
||||
):
|
||||
|
||||
if self._es_connection.indices.exists(index=index_name):
|
||||
print(f"Index {index_name} already exists.")
|
||||
|
||||
else:
|
||||
|
||||
indexSettings = self.index(
|
||||
dims_length=dims_length,
|
||||
)
|
||||
self._es_connection.indices.create(index=index_name, **indexSettings)
|
||||
|
||||
def index(
|
||||
self,
|
||||
dims_length,
|
||||
):
|
||||
return {
|
||||
"mappings": {
|
||||
"properties": {
|
||||
"vector": {
|
||||
"type": "dense_vector",
|
||||
"dims": dims_length,
|
||||
"index": True,
|
||||
"similarity": "cosine",
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
def add_texts(
|
||||
self,
|
||||
texts,
|
||||
metadatas = None,
|
||||
ids = None,
|
||||
refresh_indices = True,
|
||||
create_index_if_not_exists = True,
|
||||
bulk_kwargs = None,
|
||||
**kwargs,
|
||||
):
|
||||
|
||||
from elasticsearch.helpers import BulkIndexError, bulk
|
||||
|
||||
bulk_kwargs = bulk_kwargs or {}
|
||||
import uuid
|
||||
embeddings = []
|
||||
ids = ids or [str(uuid.uuid4()) for _ in texts]
|
||||
requests = []
|
||||
embeddings = self._get_embeddings(settings.EMBEDDINGS_NAME, self.embeddings_key)
|
||||
|
||||
vectors = embeddings.embed_documents(list(texts))
|
||||
|
||||
dims_length = len(vectors[0])
|
||||
|
||||
if create_index_if_not_exists:
|
||||
self._create_index_if_not_exists(
|
||||
index_name=self.index_name, dims_length=dims_length
|
||||
)
|
||||
|
||||
for i, (text, vector) in enumerate(zip(texts, vectors)):
|
||||
metadata = metadatas[i] if metadatas else {}
|
||||
|
||||
requests.append(
|
||||
{
|
||||
"_op_type": "index",
|
||||
"_index": self.index_name,
|
||||
"text": text,
|
||||
"vector": vector,
|
||||
"metadata": metadata,
|
||||
"_id": ids[i],
|
||||
}
|
||||
)
|
||||
|
||||
|
||||
if len(requests) > 0:
|
||||
try:
|
||||
success, failed = bulk(
|
||||
self._es_connection,
|
||||
requests,
|
||||
stats_only=True,
|
||||
refresh=refresh_indices,
|
||||
**bulk_kwargs,
|
||||
)
|
||||
return ids
|
||||
except BulkIndexError as e:
|
||||
print(f"Error adding texts: {e}")
|
||||
firstError = e.errors[0].get("index", {}).get("error", {})
|
||||
print(f"First error reason: {firstError.get('reason')}")
|
||||
raise e
|
||||
|
||||
else:
|
||||
return []
|
||||
|
||||
def delete_index(self):
|
||||
self._es_connection.delete_by_query(index=self.index_name, query={"match": {
|
||||
"metadata.store.keyword": self.path}},)
|
||||
|
||||
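A usage sketch (illustrative; the path and key are placeholders, and the ELASTIC_* settings are assumed to be configured):

es_store = ElasticsearchStore("application/indexes/local/default/", "my-embeddings-key")
hits = es_store.search("How do I merge tables?", k=2)
for doc in hits:
    print(doc.metadata.get("title"), doc.page_content[:80])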
46
application/vectorstore/faiss.py
Normal file
@@ -0,0 +1,46 @@
|
||||
from langchain_community.vectorstores import FAISS
|
||||
from application.vectorstore.base import BaseVectorStore
|
||||
from application.core.settings import settings
|
||||
|
||||
class FaissStore(BaseVectorStore):
|
||||
|
||||
def __init__(self, path, embeddings_key, docs_init=None):
|
||||
super().__init__()
|
||||
self.path = path
|
||||
embeddings = self._get_embeddings(settings.EMBEDDINGS_NAME, embeddings_key)
|
||||
if docs_init:
|
||||
self.docsearch = FAISS.from_documents(
|
||||
docs_init, embeddings
|
||||
)
|
||||
else:
|
||||
self.docsearch = FAISS.load_local(
|
||||
self.path, embeddings
|
||||
)
|
||||
self.assert_embedding_dimensions(embeddings)
|
||||
|
||||
def search(self, *args, **kwargs):
|
||||
return self.docsearch.similarity_search(*args, **kwargs)
|
||||
|
||||
def add_texts(self, *args, **kwargs):
|
||||
return self.docsearch.add_texts(*args, **kwargs)
|
||||
|
||||
def save_local(self, *args, **kwargs):
|
||||
return self.docsearch.save_local(*args, **kwargs)
|
||||
|
||||
def delete_index(self, *args, **kwargs):
|
||||
return self.docsearch.delete(*args, **kwargs)
|
||||
|
||||
def assert_embedding_dimensions(self, embeddings):
|
||||
"""
|
||||
Check that the word embedding dimension of the docsearch index matches
|
||||
the dimension of the word embeddings used
|
||||
"""
|
||||
if settings.EMBEDDINGS_NAME == "huggingface_sentence-transformers/all-mpnet-base-v2":
|
||||
try:
|
||||
word_embedding_dimension = embeddings.client[1].word_embedding_dimension
|
||||
except AttributeError as e:
|
||||
raise AttributeError("word_embedding_dimension not found in embeddings.client[1]") from e
|
||||
docsearch_index_dimension = self.docsearch.index.d
|
||||
if word_embedding_dimension != docsearch_index_dimension:
|
||||
raise ValueError(f"word_embedding_dimension ({word_embedding_dimension}) " +
|
||||
f"!= docsearch_index_word_embedding_dimension ({docsearch_index_dimension})")
|
||||
126
application/vectorstore/mongodb.py
Normal file
@@ -0,0 +1,126 @@
|
||||
from application.vectorstore.base import BaseVectorStore
|
||||
from application.core.settings import settings
|
||||
from application.vectorstore.document_class import Document
|
||||
|
||||
class MongoDBVectorStore(BaseVectorStore):
|
||||
def __init__(
|
||||
self,
|
||||
path: str = "",
|
||||
embeddings_key: str = "embeddings",
|
||||
collection: str = "documents",
|
||||
index_name: str = "vector_search_index",
|
||||
text_key: str = "text",
|
||||
embedding_key: str = "embedding",
|
||||
database: str = "docsgpt",
|
||||
):
|
||||
self._index_name = index_name
|
||||
self._text_key = text_key
|
||||
self._embedding_key = embedding_key
|
||||
self._embeddings_key = embeddings_key
|
||||
self._mongo_uri = settings.MONGO_URI
|
||||
self._path = path.replace("application/indexes/", "").rstrip("/")
|
||||
self._embedding = self._get_embeddings(settings.EMBEDDINGS_NAME, embeddings_key)
|
||||
|
||||
try:
|
||||
import pymongo
|
||||
except ImportError:
|
||||
raise ImportError(
|
||||
"Could not import pymongo python package. "
|
||||
"Please install it with `pip install pymongo`."
|
||||
)
|
||||
|
||||
self._client = pymongo.MongoClient(self._mongo_uri)
|
||||
self._database = self._client[database]
|
||||
self._collection = self._database[collection]
|
||||
|
||||
|
||||
def search(self, question, k=2, *args, **kwargs):
|
||||
query_vector = self._embedding.embed_query(question)
|
||||
|
||||
pipeline = [
|
||||
{
|
||||
"$vectorSearch": {
|
||||
"queryVector": query_vector,
|
||||
"path": self._embedding_key,
|
||||
"limit": k,
|
||||
"numCandidates": k * 10,
|
||||
"index": self._index_name,
|
||||
"filter": {
|
||||
"store": {"$eq": self._path}
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
|
||||
cursor = self._collection.aggregate(pipeline)
|
||||
|
||||
results = []
|
||||
for doc in cursor:
|
||||
text = doc[self._text_key]
|
||||
doc.pop("_id")
|
||||
doc.pop(self._text_key)
|
||||
doc.pop(self._embedding_key)
|
||||
metadata = doc
|
||||
results.append(Document(text, metadata))
|
||||
return results
|
||||
|
||||
def _insert_texts(self, texts, metadatas):
|
||||
if not texts:
|
||||
return []
|
||||
embeddings = self._embedding.embed_documents(texts)
|
||||
to_insert = [
|
||||
{self._text_key: t, self._embedding_key: embedding, **m}
|
||||
for t, m, embedding in zip(texts, metadatas, embeddings)
|
||||
]
|
||||
# insert the documents in MongoDB Atlas
|
||||
insert_result = self._collection.insert_many(to_insert)
|
||||
return insert_result.inserted_ids
|
||||
|
||||
def add_texts(self,
|
||||
texts,
|
||||
metadatas = None,
|
||||
ids = None,
|
||||
refresh_indices = True,
|
||||
create_index_if_not_exists = True,
|
||||
bulk_kwargs = None,
|
||||
**kwargs,):
|
||||
|
||||
|
||||
#dims = self._embedding.client[1].word_embedding_dimension
|
||||
# # check if index exists
|
||||
# if create_index_if_not_exists:
|
||||
# # check if index exists
|
||||
# info = self._collection.index_information()
|
||||
# if self._index_name not in info:
|
||||
# index_mongo = {
|
||||
# "fields": [{
|
||||
# "type": "vector",
|
||||
# "path": self._embedding_key,
|
||||
# "numDimensions": dims,
|
||||
# "similarity": "cosine",
|
||||
# },
|
||||
# {
|
||||
# "type": "filter",
|
||||
# "path": "store"
|
||||
# }]
|
||||
# }
|
||||
# self._collection.create_index(self._index_name, index_mongo)
|
||||
|
||||
batch_size = 100
|
||||
_metadatas = metadatas or ({} for _ in texts)
|
||||
texts_batch = []
|
||||
metadatas_batch = []
|
||||
result_ids = []
|
||||
for i, (text, metadata) in enumerate(zip(texts, _metadatas)):
|
||||
texts_batch.append(text)
|
||||
metadatas_batch.append(metadata)
|
||||
if (i + 1) % batch_size == 0:
|
||||
result_ids.extend(self._insert_texts(texts_batch, metadatas_batch))
|
||||
texts_batch = []
|
||||
metadatas_batch = []
|
||||
if texts_batch:
|
||||
result_ids.extend(self._insert_texts(texts_batch, metadatas_batch))
|
||||
return result_ids
|
||||
|
||||
def delete_index(self, *args, **kwargs):
|
||||
self._collection.delete_many({"store": self._path})
|
||||
18
application/vectorstore/vector_creator.py
Normal file
@@ -0,0 +1,18 @@
|
||||
from application.vectorstore.faiss import FaissStore
|
||||
from application.vectorstore.elasticsearch import ElasticsearchStore
|
||||
from application.vectorstore.mongodb import MongoDBVectorStore
|
||||
|
||||
|
||||
class VectorCreator:
|
||||
vectorstores = {
|
||||
'faiss': FaissStore,
|
||||
'elasticsearch':ElasticsearchStore,
|
||||
'mongodb': MongoDBVectorStore,
|
||||
}
|
||||
|
||||
@classmethod
|
||||
def create_vectorstore(cls, type, *args, **kwargs):
|
||||
vectorstore_class = cls.vectorstores.get(type.lower())
|
||||
if not vectorstore_class:
|
||||
raise ValueError(f"No vectorstore class found for type {type}")
|
||||
return vectorstore_class(*args, **kwargs)
|
||||
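A usage sketch for the factory above (illustrative; the path and key are placeholders):

store = VectorCreator.create_vectorstore("faiss", "application/indexes/local/default", "my-embeddings-key")
results = store.search("How do I merge tables?", k=2)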
123
application/worker.py
Normal file
@@ -0,0 +1,123 @@
|
||||
import os
|
||||
import shutil
|
||||
import random
import string
|
||||
import zipfile
|
||||
from urllib.parse import urljoin
|
||||
|
||||
import nltk
|
||||
import requests
|
||||
|
||||
from application.core.settings import settings
|
||||
from application.parser.file.bulk import SimpleDirectoryReader
|
||||
from application.parser.open_ai_func import call_openai_api
|
||||
from application.parser.schema.base import Document
|
||||
from application.parser.token_func import group_split
|
||||
|
||||
try:
|
||||
nltk.download('punkt', quiet=True)
|
||||
nltk.download('averaged_perceptron_tagger', quiet=True)
|
||||
except FileExistsError:
|
||||
pass
|
||||
|
||||
|
||||
# Define a function to extract metadata from a given filename.
|
||||
def metadata_from_filename(title):
|
||||
store = '/'.join(title.split('/')[1:3])
|
||||
return {'title': title, 'store': store}
|
||||
|
||||
|
||||
# Define a function to generate a random string of a given length.
|
||||
def generate_random_string(length):
|
||||
return ''.join(random.choice(string.ascii_letters) for _ in range(length))
|
||||
|
||||
current_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
||||
|
||||
# Define the main function for ingesting and processing documents.
|
||||
def ingest_worker(self, directory, formats, name_job, filename, user):
|
||||
"""
|
||||
Ingest and process documents.
|
||||
|
||||
Args:
|
||||
self: Reference to the instance of the task.
|
||||
directory (str): Specifies the directory for ingesting ('inputs' or 'temp').
|
||||
formats (list of str): List of file extensions to consider for ingestion (e.g., [".rst", ".md"]).
|
||||
name_job (str): Name of the job for this ingestion task.
|
||||
filename (str): Name of the file to be ingested.
|
||||
user (str): Identifier for the user initiating the ingestion.
|
||||
|
||||
Returns:
|
||||
dict: Information about the completed ingestion task, including input parameters and a "limited" flag.
|
||||
"""
|
||||
# directory = 'inputs' or 'temp'
|
||||
# formats = [".rst", ".md"]
|
||||
input_files = None
|
||||
recursive = True
|
||||
limit = None
|
||||
exclude = True
|
||||
# name_job = 'job1'
|
||||
# filename = 'install.rst'
|
||||
# user = 'local'
|
||||
sample = False
|
||||
token_check = True
|
||||
min_tokens = 150
|
||||
max_tokens = 1250
|
||||
full_path = directory + '/' + user + '/' + name_job
|
||||
import sys
|
||||
print(full_path, file=sys.stderr)
|
||||
# check if API_URL env variable is set
|
||||
file_data = {'name': name_job, 'file': filename, 'user': user}
|
||||
response = requests.get(urljoin(settings.API_URL, "/api/download"), params=file_data)
|
||||
# check if file is in the response
|
||||
print(response, file=sys.stderr)
|
||||
file = response.content
|
||||
|
||||
if not os.path.exists(full_path):
|
||||
os.makedirs(full_path)
|
||||
with open(full_path + '/' + filename, 'wb') as f:
|
||||
f.write(file)
|
||||
|
||||
# check if file is .zip and extract it
|
||||
if filename.endswith('.zip'):
|
||||
with zipfile.ZipFile(full_path + '/' + filename, 'r') as zip_ref:
|
||||
zip_ref.extractall(full_path)
|
||||
os.remove(full_path + '/' + filename)
|
||||
|
||||
self.update_state(state='PROGRESS', meta={'current': 1})
|
||||
|
||||
raw_docs = SimpleDirectoryReader(input_dir=full_path, input_files=input_files, recursive=recursive,
|
||||
required_exts=formats, num_files_limit=limit,
|
||||
exclude_hidden=exclude, file_metadata=metadata_from_filename).load_data()
|
||||
raw_docs = group_split(documents=raw_docs, min_tokens=min_tokens, max_tokens=max_tokens, token_check=token_check)
|
||||
|
||||
docs = [Document.to_langchain_format(raw_doc) for raw_doc in raw_docs]
|
||||
|
||||
call_openai_api(docs, full_path, self)
|
||||
self.update_state(state='PROGRESS', meta={'current': 100})
|
||||
|
||||
if sample:
|
||||
for i in range(min(5, len(raw_docs))):
|
||||
print(raw_docs[i].text)
|
||||
|
||||
# get files from outputs/inputs/index.faiss and outputs/inputs/index.pkl
|
||||
# and send them to the server (provide user and name in form)
|
||||
file_data = {'name': name_job, 'user': user}
|
||||
if settings.VECTOR_STORE == "faiss":
|
||||
files = {'file_faiss': open(full_path + '/index.faiss', 'rb'),
|
||||
'file_pkl': open(full_path + '/index.pkl', 'rb')}
|
||||
response = requests.post(urljoin(settings.API_URL, "/api/upload_index"), files=files, data=file_data)
|
||||
response = requests.get(urljoin(settings.API_URL, "/api/delete_old?path=" + full_path))
|
||||
else:
|
||||
response = requests.post(urljoin(settings.API_URL, "/api/upload_index"), data=file_data)
|
||||
|
||||
|
||||
# delete local
|
||||
shutil.rmtree(full_path)
|
||||
|
||||
return {
|
||||
'directory': directory,
|
||||
'formats': formats,
|
||||
'name_job': name_job,
|
||||
'filename': filename,
|
||||
'user': user,
|
||||
'limited': False
|
||||
}
|
||||
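A worked example for the metadata helper in worker.py above (illustrative path):

metadata_from_filename("inputs/local/default/install.rst")
# -> {"title": "inputs/local/default/install.rst", "store": "local/default"}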
@@ -1,4 +1,4 @@
|
||||
from app import app
|
||||
from application.app import app
|
||||
|
||||
if __name__ == "__main__":
|
||||
app.run()
|
||||
app.run(debug=True, port=7091)
|
||||
|
||||
2
codecov.yml
Normal file
@@ -0,0 +1,2 @@
|
||||
ignore:
|
||||
- "*/tests/*"
|
||||
71
docker-compose-azure.yaml
Normal file
@@ -0,0 +1,71 @@
|
||||
version: "3.9"
|
||||
|
||||
services:
|
||||
frontend:
|
||||
build: ./frontend
|
||||
environment:
|
||||
- VITE_API_HOST=http://localhost:7091
|
||||
- VITE_API_STREAMING=$VITE_API_STREAMING
|
||||
ports:
|
||||
- "5173:5173"
|
||||
depends_on:
|
||||
- backend
|
||||
|
||||
backend:
|
||||
build: ./application
|
||||
environment:
|
||||
- API_KEY=$OPENAI_API_KEY
|
||||
- EMBEDDINGS_KEY=$OPENAI_API_KEY
|
||||
- CELERY_BROKER_URL=redis://redis:6379/0
|
||||
- CELERY_RESULT_BACKEND=redis://redis:6379/1
|
||||
- MONGO_URI=mongodb://mongo:27017/docsgpt
|
||||
- OPENAI_API_KEY=$OPENAI_API_KEY
|
||||
- OPENAI_API_BASE=$OPENAI_API_BASE
|
||||
- OPENAI_API_VERSION=$OPENAI_API_VERSION
|
||||
- AZURE_DEPLOYMENT_NAME=$AZURE_DEPLOYMENT_NAME
|
||||
- AZURE_EMBEDDINGS_DEPLOYMENT_NAME=$AZURE_EMBEDDINGS_DEPLOYMENT_NAME
|
||||
ports:
|
||||
- "7091:7091"
|
||||
volumes:
|
||||
- ./application/indexes:/app/application/indexes
|
||||
- ./application/inputs:/app/application/inputs
|
||||
- ./application/vectors:/app/application/vectors
|
||||
depends_on:
|
||||
- redis
|
||||
- mongo
|
||||
|
||||
worker:
|
||||
build: ./application
|
||||
command: celery -A application.app.celery worker -l INFO
|
||||
environment:
|
||||
- API_KEY=$OPENAI_API_KEY
|
||||
- EMBEDDINGS_KEY=$OPENAI_API_KEY
|
||||
- CELERY_BROKER_URL=redis://redis:6379/0
|
||||
- CELERY_RESULT_BACKEND=redis://redis:6379/1
|
||||
- MONGO_URI=mongodb://mongo:27017/docsgpt
|
||||
- API_URL=http://backend:7091
|
||||
- OPENAI_API_KEY=$OPENAI_API_KEY
|
||||
- OPENAI_API_BASE=$OPENAI_API_BASE
|
||||
- OPENAI_API_VERSION=$OPENAI_API_VERSION
|
||||
- AZURE_DEPLOYMENT_NAME=$AZURE_DEPLOYMENT_NAME
|
||||
- AZURE_EMBEDDINGS_DEPLOYMENT_NAME=$AZURE_EMBEDDINGS_DEPLOYMENT_NAME
|
||||
depends_on:
|
||||
- redis
|
||||
- mongo
|
||||
|
||||
redis:
|
||||
image: redis:6-alpine
|
||||
ports:
|
||||
- 6379:6379
|
||||
|
||||
mongo:
|
||||
image: mongo:6
|
||||
ports:
|
||||
- 27017:27017
|
||||
volumes:
|
||||
- mongodb_data_container:/data/db
|
||||
|
||||
|
||||
|
||||
volumes:
|
||||
mongodb_data_container:
|
||||
20
docker-compose-dev.yaml
Normal file
@@ -0,0 +1,20 @@
|
||||
version: "3.9"
|
||||
|
||||
services:
|
||||
|
||||
redis:
|
||||
image: redis:6-alpine
|
||||
ports:
|
||||
- 6379:6379
|
||||
|
||||
mongo:
|
||||
image: mongo:6
|
||||
ports:
|
||||
- 27017:27017
|
||||
volumes:
|
||||
- mongodb_data_container:/data/db
|
||||
|
||||
|
||||
|
||||
volumes:
|
||||
mongodb_data_container:
|
||||