mirror of
https://github.com/arc53/DocsGPT.git
synced 2025-11-29 08:33:20 +00:00
Compare commits
3655 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
77f4f8d8b0 | ||
|
|
a2d04beaa1 | ||
|
|
7d8ed2d102 | ||
|
|
aab8d3a4f1 | ||
|
|
763aa73ea4 | ||
|
|
30c79e92d4 | ||
|
|
402d5e054b | ||
|
|
0e211df206 | ||
|
|
e24a0ac686 | ||
|
|
8c91b1c527 | ||
|
|
2b38f80d04 | ||
|
|
282bd35f52 | ||
|
|
cc9b4c2bcb | ||
|
|
068ce4970a | ||
|
|
cf19165ad8 | ||
|
|
68c479f3a5 | ||
|
|
ba496a772b | ||
|
|
3b27db36f2 | ||
|
|
f803def69b | ||
|
|
52065e69a4 | ||
|
|
50f5e8a955 | ||
|
|
2d0e97b66d | ||
|
|
5f3cc5a392 | ||
|
|
ac66d77512 | ||
|
|
50cf653d4a | ||
|
|
56256051d2 | ||
|
|
c0361ff03d | ||
|
|
f153435c08 | ||
|
|
9aa7f22fa6 | ||
|
|
52b7bda5f8 | ||
|
|
21aefa2778 | ||
|
|
a89ff71c9e | ||
|
|
4c275816be | ||
|
|
f8dfbcfc80 | ||
|
|
d317f6473d | ||
|
|
00b4e133d4 | ||
|
|
b6349e4efb | ||
|
|
6ca3d9585c | ||
|
|
5935a0283a | ||
|
|
5400a6ec06 | ||
|
|
6574d9cc84 | ||
|
|
42b83c5994 | ||
|
|
896612a5a3 | ||
|
|
0ee875bee4 | ||
|
|
8ce345cd94 | ||
|
|
da2f8477e6 | ||
|
|
82b47b5673 | ||
|
|
3369b910b4 | ||
|
|
ec0c4c3b84 | ||
|
|
f74e2c9da1 | ||
|
|
e26ad3c475 | ||
|
|
145c3b8ad0 | ||
|
|
0ff6c6a154 | ||
|
|
641cf5a4c1 | ||
|
|
09b9576eef | ||
|
|
18b71ca2f2 | ||
|
|
e0eb7f456e | ||
|
|
188d118fc0 | ||
|
|
adcdce8d76 | ||
|
|
b865a7aec1 | ||
|
|
cec8c72b46 | ||
|
|
b052e32805 | ||
|
|
816f660be3 | ||
|
|
fc8be45d5a | ||
|
|
e749c936c9 | ||
|
|
b2b9670a23 | ||
|
|
2f88890c94 | ||
|
|
6366663f03 | ||
|
|
20fe7dc6d1 | ||
|
|
4b9153069e | ||
|
|
80406d0753 | ||
|
|
35f4c11784 | ||
|
|
7896526f19 | ||
|
|
f7db22edff | ||
|
|
0e4196f036 | ||
|
|
1bf6af6eeb | ||
|
|
5a9bc6d2bf | ||
|
|
f7f6042579 | ||
|
|
c4a598f3d3 | ||
|
|
7c23f43c63 | ||
|
|
7e2cbdd88c | ||
|
|
3b3a04a249 | ||
|
|
f9b2c95695 | ||
|
|
c2c18e8319 | ||
|
|
384ad3e0ac | ||
|
|
8c986aaa7f | ||
|
|
bb4ea76d30 | ||
|
|
2868e47cf8 | ||
|
|
e0adc3e5d5 | ||
|
|
e55d1a5865 | ||
|
|
018273c6b2 | ||
|
|
44b8a11c04 | ||
|
|
56e5aba559 | ||
|
|
46904ccd54 | ||
|
|
5b7c7a4471 | ||
|
|
9da4215d1f | ||
|
|
f39ac9945f | ||
|
|
a0cc2e4d46 | ||
|
|
4065041a9f | ||
|
|
f08067a161 | ||
|
|
545caacfa3 | ||
|
|
a06f646637 | ||
|
|
578c68205a | ||
|
|
f09f1433a9 | ||
|
|
15a9e97a1e | ||
|
|
b3af4ee50b | ||
|
|
07d59b6640 | ||
|
|
e25b988dc8 | ||
|
|
2410bd8654 | ||
|
|
44d21ab703 | ||
|
|
e283957c8f | ||
|
|
b1210c4902 | ||
|
|
e7430f0fbc | ||
|
|
92d6ae54c3 | ||
|
|
f82be23ca9 | ||
|
|
8c3f75e3e2 | ||
|
|
193d59f193 | ||
|
|
c2bebbaefa | ||
|
|
7ae5a9c5a5 | ||
|
|
3b69bea23d | ||
|
|
ab05726b99 | ||
|
|
b2b04268e9 | ||
|
|
bd73fa9ae7 | ||
|
|
927d10d66e | ||
|
|
b67329623c | ||
|
|
6f47aa802b | ||
|
|
3417c73011 | ||
|
|
6a02bcf15b | ||
|
|
cd0fbf79a3 | ||
|
|
15d2d0115b | ||
|
|
d1a0fe6e91 | ||
|
|
1db80d140f | ||
|
|
896dcf1f9e | ||
|
|
819a12fb49 | ||
|
|
c68273706c | ||
|
|
6bb0cd535a | ||
|
|
cb9ec69cf6 | ||
|
|
143854fa81 | ||
|
|
2f48a3d7d5 | ||
|
|
ec95dafe1e | ||
|
|
3d1fe724e5 | ||
|
|
5c615d6f2d | ||
|
|
d72558eb36 | ||
|
|
65c33ad915 | ||
|
|
9be128a963 | ||
|
|
eb05132008 | ||
|
|
f94a093e8c | ||
|
|
0d0c2daf64 | ||
|
|
823d948b25 | ||
|
|
56831fbcf2 | ||
|
|
bf49b9cb88 | ||
|
|
e01adffbad | ||
|
|
08a5d52d82 | ||
|
|
fdae235742 | ||
|
|
9903fad1e9 | ||
|
|
14bbd5338d | ||
|
|
4a236c2f6f | ||
|
|
0a8cdbd7f1 | ||
|
|
94c49843be | ||
|
|
9281fac898 | ||
|
|
0b2736f454 | ||
|
|
ae116b0d0d | ||
|
|
ba260e3382 | ||
|
|
1282e7687f | ||
|
|
b1d8266eef | ||
|
|
7acae6935b | ||
|
|
092c01cae7 | ||
|
|
56a1066c30 | ||
|
|
1356d71839 | ||
|
|
1eb011e8c3 | ||
|
|
e349eb28b0 | ||
|
|
b000b235a2 | ||
|
|
16fe92282e | ||
|
|
e218e88cf4 | ||
|
|
888ea81a32 | ||
|
|
735fab7640 | ||
|
|
45745c2a47 | ||
|
|
4caff0fcf6 | ||
|
|
762ea6ce7f | ||
|
|
8b4f6553f3 | ||
|
|
a61e44d175 | ||
|
|
e1b1558fc9 | ||
|
|
53225bda4e | ||
|
|
5212769848 | ||
|
|
d5ded3c9f4 | ||
|
|
c92d778894 | ||
|
|
829abd1ad6 | ||
|
|
266d256a07 | ||
|
|
8380cac3e7 | ||
|
|
a24652f901 | ||
|
|
2d203d3c70 | ||
|
|
48d21600da | ||
|
|
2508d0fbb3 | ||
|
|
e90e80c289 | ||
|
|
5e4748f9d9 | ||
|
|
212952f3e9 | ||
|
|
f99b6496c5 | ||
|
|
67423d51b9 | ||
|
|
58465ece65 | ||
|
|
8ede3a0173 | ||
|
|
ad2f0f8950 | ||
|
|
76973a4b4c | ||
|
|
b198e2e029 | ||
|
|
4d6ea401b5 | ||
|
|
b00c4cc3b6 | ||
|
|
4185e64c65 | ||
|
|
6eb2c884a2 | ||
|
|
6c0362a4cf | ||
|
|
50b1755a63 | ||
|
|
ff3c7eb5fb | ||
|
|
3755316d49 | ||
|
|
f952046847 | ||
|
|
969cdb4a63 | ||
|
|
f336d44595 | ||
|
|
a53f93c195 | ||
|
|
fcb334ce33 | ||
|
|
8ddf04a904 | ||
|
|
29698ca169 | ||
|
|
a9baf7436a | ||
|
|
99a8962183 | ||
|
|
afc5b15a6b | ||
|
|
b6ab508e27 | ||
|
|
789e65557a | ||
|
|
8a7806ab2d | ||
|
|
493303e103 | ||
|
|
1d9af05e9e | ||
|
|
5b07c5f2e8 | ||
|
|
2a4ec0cf5b | ||
|
|
a00c44386e | ||
|
|
a38d71bbfb | ||
|
|
a24a3f868c | ||
|
|
f60c516185 | ||
|
|
26f4646304 | ||
|
|
3a351f67e6 | ||
|
|
e7c09cb91e | ||
|
|
ae1a6ef303 | ||
|
|
2ff477a339 | ||
|
|
793f3fb683 | ||
|
|
a472ee7602 | ||
|
|
c62040e232 | ||
|
|
2e7cb510ae | ||
|
|
dbe45904d7 | ||
|
|
5623734276 | ||
|
|
d3b592bffc | ||
|
|
4fcbdae5bf | ||
|
|
ca95d7275a | ||
|
|
61baf3701c | ||
|
|
bbce872ac5 | ||
|
|
0f7ebcd8e4 | ||
|
|
82fc19e7b7 | ||
|
|
839a12bed4 | ||
|
|
2ef23fe1b3 | ||
|
|
fd905b1a06 | ||
|
|
1372210004 | ||
|
|
ade704d065 | ||
|
|
42f48649b9 | ||
|
|
0b08e8b617 | ||
|
|
926b2f1a1b | ||
|
|
1770a1a45f | ||
|
|
50ed2a64c6 | ||
|
|
2332344988 | ||
|
|
7ccc8cdc58 | ||
|
|
ecec9f913e | ||
|
|
777f40fc5e | ||
|
|
327ae35420 | ||
|
|
0d48159da8 | ||
|
|
d36f12a4ea | ||
|
|
709488beb1 | ||
|
|
a9e4583695 | ||
|
|
4702dec933 | ||
|
|
e6352dd691 | ||
|
|
240ea3b857 | ||
|
|
f0908af3c0 | ||
|
|
6834961dd1 | ||
|
|
b404162364 | ||
|
|
e879ef805f | ||
|
|
7077ca5e98 | ||
|
|
a1e6978c8f | ||
|
|
584391dd59 | ||
|
|
bab3ae809c | ||
|
|
c78518baf0 | ||
|
|
556d7e0497 | ||
|
|
2d27936dab | ||
|
|
0cc22de545 | ||
|
|
63f6127049 | ||
|
|
f34e00c986 | ||
|
|
55f60a9fe1 | ||
|
|
7da3618e0c | ||
|
|
56bfa98633 | ||
|
|
96f6188722 | ||
|
|
aa9d359039 | ||
|
|
cef5731028 | ||
|
|
5bc28bd4fd | ||
|
|
55a1d867c3 | ||
|
|
6c3a79802e | ||
|
|
c35c5e0793 | ||
|
|
7bc83caa99 | ||
|
|
3aceca63c6 | ||
|
|
9bc166ffd4 | ||
|
|
fc01b90007 | ||
|
|
e35f1d70e4 | ||
|
|
cab1f3787a | ||
|
|
bb42f4cbc1 | ||
|
|
98dc418a51 | ||
|
|
322b4eb18c | ||
|
|
7f1cc30ed8 | ||
|
|
7b45a6b956 | ||
|
|
e36769e70f | ||
|
|
bd4a4cc4af | ||
|
|
8343fe63cb | ||
|
|
7d89fb8461 | ||
|
|
098955d230 | ||
|
|
d254d14928 | ||
|
|
0a3e8ca535 | ||
|
|
b8a10e0962 | ||
|
|
0aceda96e4 | ||
|
|
44b6ec25a2 | ||
|
|
1b84d1fa9d | ||
|
|
78d5ed2ed2 | ||
|
|
142477ab9b | ||
|
|
b414f79bc5 | ||
|
|
6e08fe21d0 | ||
|
|
9b839655a7 | ||
|
|
3353c0ee1d | ||
|
|
aaecf52c99 | ||
|
|
8b3e960be0 | ||
|
|
3351f71813 | ||
|
|
7490256303 | ||
|
|
041d600e45 | ||
|
|
b4e2588a24 | ||
|
|
68dc14c5a1 | ||
|
|
ef35864e16 | ||
|
|
c0d385b983 | ||
|
|
b2df431fa4 | ||
|
|
69a4bd415a | ||
|
|
4862548e65 | ||
|
|
50248cc9ea | ||
|
|
430822bae3 | ||
|
|
dd9d18208d | ||
|
|
e5b1a71659 | ||
|
|
35f4b13237 | ||
|
|
5f5c31cd5b | ||
|
|
e9530d5ec5 | ||
|
|
143f4aa886 | ||
|
|
ece5c8bb31 | ||
|
|
31baf181a3 | ||
|
|
3bae30c70c | ||
|
|
12b18c6bd1 | ||
|
|
787d9e3bf5 | ||
|
|
f325b54895 | ||
|
|
c5616705b0 | ||
|
|
c0f693d35d | ||
|
|
52a5f132c1 | ||
|
|
f14eac6d10 | ||
|
|
e90fe117ec | ||
|
|
381d737d24 | ||
|
|
7cab5b3b09 | ||
|
|
9f911cb5cb | ||
|
|
3da7cba06c | ||
|
|
b47af9600f | ||
|
|
92c3c707e1 | ||
|
|
5acc54e609 | ||
|
|
9c6352dd5b | ||
|
|
8e29a07df5 | ||
|
|
bd88cd3a06 | ||
|
|
f371b9702f | ||
|
|
3ff4ae29af | ||
|
|
eae0f2e7a9 | ||
|
|
305a98bb79 | ||
|
|
8040a3ed60 | ||
|
|
bb9de7d9b0 | ||
|
|
d8e8bc0068 | ||
|
|
6577e9d852 | ||
|
|
3f8625c65a | ||
|
|
92d69636a7 | ||
|
|
9c28817fba | ||
|
|
773788fb32 | ||
|
|
a393ad8e04 | ||
|
|
71d3714347 | ||
|
|
b7e1329c13 | ||
|
|
59e6d9d10e | ||
|
|
46efb446fb | ||
|
|
d31e3a54fd | ||
|
|
c4e471ac47 | ||
|
|
3b8733e085 | ||
|
|
a7c67d83ca | ||
|
|
8abc1de26d | ||
|
|
2ca9f708a6 | ||
|
|
f8f369fbb2 | ||
|
|
3e9155767b | ||
|
|
8cd4195657 | ||
|
|
ad1a944276 | ||
|
|
02ff4c5657 | ||
|
|
b1b27f2dde | ||
|
|
5097f77469 | ||
|
|
7e826d5002 | ||
|
|
fe8143a56c | ||
|
|
e5442a713a | ||
|
|
1982a46f36 | ||
|
|
c8c3640baf | ||
|
|
fdf47b3f2c | ||
|
|
93fa4b6a37 | ||
|
|
90e9ab70b0 | ||
|
|
573c2386b7 | ||
|
|
d2176aeeb9 | ||
|
|
920aec5c3e | ||
|
|
b792c5459a | ||
|
|
87fbf05fa1 | ||
|
|
67c53250c5 | ||
|
|
d657eea910 | ||
|
|
b5fbb825ed | ||
|
|
d094e7a4c6 | ||
|
|
945c155b17 | ||
|
|
f798072a1e | ||
|
|
f967214b57 | ||
|
|
d0b92e2540 | ||
|
|
8ddfe272bf | ||
|
|
b7a6bad7cd | ||
|
|
e2f6c04406 | ||
|
|
c662725955 | ||
|
|
4b66ddfdef | ||
|
|
2d55b1f592 | ||
|
|
14adfabf7e | ||
|
|
e7a76ede76 | ||
|
|
de47df3bf9 | ||
|
|
5475e6f7c5 | ||
|
|
8e3f3d74d4 | ||
|
|
046f6c66ed | ||
|
|
79f9d6552e | ||
|
|
56b4b63749 | ||
|
|
b3246a48c7 | ||
|
|
71722ef6a3 | ||
|
|
ebf8f00302 | ||
|
|
7445928c7e | ||
|
|
5ab7602f2f | ||
|
|
a340aff63a | ||
|
|
f82042ff00 | ||
|
|
920422e28c | ||
|
|
50d6b7a6f8 | ||
|
|
41d624a36a | ||
|
|
f42c37c82e | ||
|
|
119fcdf6f6 | ||
|
|
a5b093d1a9 | ||
|
|
e07cb44a3e | ||
|
|
fec1bcfd5c | ||
|
|
dbcf658343 | ||
|
|
d89e78c9ca | ||
|
|
ec50650dfa | ||
|
|
7432e551f9 | ||
|
|
4ee6bd44d1 | ||
|
|
26f819098d | ||
|
|
a1c79f93d7 | ||
|
|
9c1b202d74 | ||
|
|
8ad0f59f19 | ||
|
|
50fbe3d5af | ||
|
|
af40a77d24 | ||
|
|
8af9a5e921 | ||
|
|
9807788ecb | ||
|
|
5e2f329f15 | ||
|
|
9572a7adaa | ||
|
|
1ba94f4f5f | ||
|
|
237afa0a3a | ||
|
|
d80b7017cf | ||
|
|
56793c8db7 | ||
|
|
8edb217943 | ||
|
|
23ebcf1065 | ||
|
|
68a5a3d62a | ||
|
|
8d7236b0db | ||
|
|
96c7daf818 | ||
|
|
9d8073d468 | ||
|
|
fc4942e189 | ||
|
|
ca69d025bd | ||
|
|
ffa428e32a | ||
|
|
c24e90eaae | ||
|
|
ab32eff588 | ||
|
|
7f592f2b35 | ||
|
|
3bf7f67adf | ||
|
|
594ce05292 | ||
|
|
fe02ca68d5 | ||
|
|
21ef27ee9b | ||
|
|
09d37f669f | ||
|
|
416b776062 | ||
|
|
5ed05d4020 | ||
|
|
4004bfb5ef | ||
|
|
45aace8966 | ||
|
|
d9fc623dcb | ||
|
|
dbb822f6b0 | ||
|
|
3d64dffc32 | ||
|
|
130ece7bc0 | ||
|
|
b2809b2e9a | ||
|
|
29e89d2965 | ||
|
|
e7d54a639e | ||
|
|
22df98e9bb | ||
|
|
0d45c44c6f | ||
|
|
63c6912841 | ||
|
|
73bce73034 | ||
|
|
b2582796a2 | ||
|
|
8babb6e68f | ||
|
|
d1d28df8a1 | ||
|
|
cd556d5d43 | ||
|
|
2855283a2c | ||
|
|
06c29500f2 | ||
|
|
81104153a6 | ||
|
|
23bfd4683c | ||
|
|
a52a3e3158 | ||
|
|
44e524e3c3 | ||
|
|
9a430f73e2 | ||
|
|
fdea40ec11 | ||
|
|
526d340849 | ||
|
|
fe95f6ad81 | ||
|
|
39e73c37ab | ||
|
|
39b36b6857 | ||
|
|
44e98748c5 | ||
|
|
8a7aeee955 | ||
|
|
1c7befb8d3 | ||
|
|
d5d59ac62c | ||
|
|
562f0762a0 | ||
|
|
e46aedce21 | ||
|
|
57cc09b1d7 | ||
|
|
e1e608b744 | ||
|
|
cbfa5a5118 | ||
|
|
ea9ab5b27c | ||
|
|
357ced6cba | ||
|
|
3ffda69651 | ||
|
|
e1bf4e0762 | ||
|
|
ec7f14b82d | ||
|
|
6520be5b85 | ||
|
|
17e4fad6fb | ||
|
|
d84c416421 | ||
|
|
32803c89a3 | ||
|
|
a86bcb5c29 | ||
|
|
7d76a33790 | ||
|
|
8552e81022 | ||
|
|
eacdde829f | ||
|
|
d873539856 | ||
|
|
24bb2e469d | ||
|
|
e1aa2cc0b8 | ||
|
|
d073947f3b | ||
|
|
3243740dd1 | ||
|
|
f9bd566a3b | ||
|
|
183251487c | ||
|
|
ff532210f7 | ||
|
|
d0a04d9801 | ||
|
|
ea6533db4e | ||
|
|
89d5e7bee5 | ||
|
|
7e6cdee592 | ||
|
|
990c2fb416 | ||
|
|
09e054c6aa | ||
|
|
23f648f53a | ||
|
|
07fa656e7c | ||
|
|
7858c48f11 | ||
|
|
e56d54c3f0 | ||
|
|
f37ca95c10 | ||
|
|
72e51bb072 | ||
|
|
dcfcbf54be | ||
|
|
204936b2d0 | ||
|
|
98856b39ac | ||
|
|
ad5f707486 | ||
|
|
5ecfb0ce6d | ||
|
|
2147b3f06f | ||
|
|
7daed3daaf | ||
|
|
481df4d604 | ||
|
|
cf333873fd | ||
|
|
ae700e8f3a | ||
|
|
16386a9524 | ||
|
|
7e7ce276b2 | ||
|
|
71c6b41b83 | ||
|
|
4b2faae29a | ||
|
|
7e28e562d0 | ||
|
|
93c2e2a597 | ||
|
|
c45d13d834 | ||
|
|
330276cdf7 | ||
|
|
22c7015c69 | ||
|
|
cc67d4a1e2 | ||
|
|
eeb9da696f | ||
|
|
4979e1ac9a | ||
|
|
545353dabf | ||
|
|
545376740c | ||
|
|
8289b02ab0 | ||
|
|
fc0060662b | ||
|
|
df9d432d29 | ||
|
|
76fd6e15cc | ||
|
|
06982efda5 | ||
|
|
3cd9a72495 | ||
|
|
0ce27f274a | ||
|
|
e60f78ac4a | ||
|
|
637d3a24a1 | ||
|
|
24c8b24b1f | ||
|
|
5ad34e2216 | ||
|
|
64c42f0ddf | ||
|
|
0a31ddaae6 | ||
|
|
38476cfeb8 | ||
|
|
decc31f1f0 | ||
|
|
ea0aa64330 | ||
|
|
e9a6044645 | ||
|
|
474d700df2 | ||
|
|
c50ff6faa3 | ||
|
|
c8efef8f04 | ||
|
|
1d22f77568 | ||
|
|
5aa51f5f36 | ||
|
|
335c21c48a | ||
|
|
c35d1cecfe | ||
|
|
0d3e6157cd | ||
|
|
68e4cf4d14 | ||
|
|
9454150f7d | ||
|
|
0a0e16547e | ||
|
|
0aec1b9969 | ||
|
|
3e1ec23409 | ||
|
|
2f9f428a2f | ||
|
|
da15cde49c | ||
|
|
e6ed37139a | ||
|
|
377e33c148 | ||
|
|
e567d88951 | ||
|
|
89b2937b11 | ||
|
|
142ed75468 | ||
|
|
d80eeb044c | ||
|
|
7c69e99914 | ||
|
|
5e1aaf5a44 | ||
|
|
ad610d2f90 | ||
|
|
02934452d6 | ||
|
|
8b054010e1 | ||
|
|
5b77f3839b | ||
|
|
231b792452 | ||
|
|
b468e0c164 | ||
|
|
fa1f9d7009 | ||
|
|
c5a8f3abcd | ||
|
|
dfe6a8d3e3 | ||
|
|
292257770c | ||
|
|
b4c6b2b08b | ||
|
|
6cb4577e1b | ||
|
|
456784db48 | ||
|
|
dd9ea46e58 | ||
|
|
ed3af2fac0 | ||
|
|
02f8132f3a | ||
|
|
55bd90fad9 | ||
|
|
cd7bbb45c3 | ||
|
|
6c7fc0ed22 | ||
|
|
5421bc1386 | ||
|
|
051841e566 | ||
|
|
0c68815cf2 | ||
|
|
0c1138179b | ||
|
|
1f3d1cc73e | ||
|
|
707d1332de | ||
|
|
f6c88da81b | ||
|
|
a651e6e518 | ||
|
|
bea89b93eb | ||
|
|
244c9b96a2 | ||
|
|
a37bd76950 | ||
|
|
9d70032de8 | ||
|
|
e4945b41e9 | ||
|
|
493dc8689c | ||
|
|
bdac2ffa27 | ||
|
|
b1235f3ce0 | ||
|
|
ba4bb63a1f | ||
|
|
3227b0e69c | ||
|
|
29c899627e | ||
|
|
5923781484 | ||
|
|
8bb263a2ec | ||
|
|
94c7bba168 | ||
|
|
f9ad4c068a | ||
|
|
19d68252cd | ||
|
|
72bbe3b1ce | ||
|
|
856824316b | ||
|
|
95e189d1d8 | ||
|
|
c629460acb | ||
|
|
f235a94986 | ||
|
|
632cba86e9 | ||
|
|
6b92c7eccc | ||
|
|
ab0da1abac | ||
|
|
7f31ac7bcb | ||
|
|
57a6fb31b2 | ||
|
|
fd2b6c111c | ||
|
|
302458b505 | ||
|
|
0e31329785 | ||
|
|
8978a4cf2d | ||
|
|
57d103116f | ||
|
|
a4e9ee72d4 | ||
|
|
c70be12bfd | ||
|
|
4241307990 | ||
|
|
727a8ef13d | ||
|
|
7c92558ad1 | ||
|
|
45083d29a6 | ||
|
|
5089d86095 | ||
|
|
80e55ef385 | ||
|
|
b5ed98445f | ||
|
|
82d377abf5 | ||
|
|
2dbea5d1b2 | ||
|
|
4ba35d6189 | ||
|
|
1620b4f214 | ||
|
|
cec3f987f2 | ||
|
|
ec27445728 | ||
|
|
55050a9f58 | ||
|
|
4b1f572b04 | ||
|
|
502dc9ec52 | ||
|
|
28f925ef75 | ||
|
|
9c8999a3ae | ||
|
|
90db42ce3a | ||
|
|
551130f0e1 | ||
|
|
98abeabc0d | ||
|
|
2940a60b3c | ||
|
|
76b9bc0d56 | ||
|
|
42422ccdcd | ||
|
|
e9702ae2de | ||
|
|
5c54852ebe | ||
|
|
718a86ecda | ||
|
|
e02f19058e | ||
|
|
1223fd2149 | ||
|
|
4095b2b674 | ||
|
|
3be6e2132b | ||
|
|
b09386d102 | ||
|
|
6464698b6d | ||
|
|
9230fd3bd6 | ||
|
|
7771609ea0 | ||
|
|
561a125c92 | ||
|
|
7149461d8e | ||
|
|
02c8bd06f5 | ||
|
|
0732d9b6c8 | ||
|
|
2952c1be08 | ||
|
|
96c4a13c93 | ||
|
|
53abf1a79e | ||
|
|
f00802dd6b | ||
|
|
ab95d90284 | ||
|
|
9f17eb1d28 | ||
|
|
f4ab85a2bb | ||
|
|
5b40c5a9d7 | ||
|
|
6583aeff08 | ||
|
|
b1c531fbcc | ||
|
|
4406426515 | ||
|
|
af48782464 | ||
|
|
726d4ddd9f | ||
|
|
adc637b689 | ||
|
|
d6c9b4fbc9 | ||
|
|
e17cc8ea34 | ||
|
|
574a0e2dba | ||
|
|
fd0bd13b08 | ||
|
|
f8c92147cd | ||
|
|
8136cd78d3 | ||
|
|
d9c4331480 | ||
|
|
7af726f4b2 | ||
|
|
a50f3bc55b | ||
|
|
5438bf9754 | ||
|
|
7fd377bdbe | ||
|
|
84620a7375 | ||
|
|
6968317db2 | ||
|
|
67a92428b5 | ||
|
|
5bb639f0ad | ||
|
|
5bc758aa2d | ||
|
|
27b24f19de | ||
|
|
3dfde84827 | ||
|
|
5e39be6a2c | ||
|
|
35248991e7 | ||
|
|
b76e820122 | ||
|
|
51eced00aa | ||
|
|
079a216f5b | ||
|
|
8b5df98f57 | ||
|
|
fb6fd5b5b2 | ||
|
|
5d5ea3eb8f | ||
|
|
21360981ee | ||
|
|
0b3cad152f | ||
|
|
2c2dbe45a6 | ||
|
|
5c7a3a515c | ||
|
|
f2b05ad56d | ||
|
|
5f9702b91c | ||
|
|
93de4065c7 | ||
|
|
8e0e55fe5e | ||
|
|
a8a8585570 | ||
|
|
1f3c07979a | ||
|
|
fa07b3349d | ||
|
|
519ffe617b | ||
|
|
fe02bf9347 | ||
|
|
faa583864d | ||
|
|
1a7504eba0 | ||
|
|
46d32b4072 | ||
|
|
18d8b9c395 | ||
|
|
8b9b74464e | ||
|
|
867c375843 | ||
|
|
54ca6acf5a | ||
|
|
6ac2d6d228 | ||
|
|
10c7a5f36b | ||
|
|
4fd6c52951 | ||
|
|
93fea17918 | ||
|
|
b3f6a3aae6 | ||
|
|
773147701d | ||
|
|
d891c8dae2 | ||
|
|
101852c7d1 | ||
|
|
c1f13ba8b1 | ||
|
|
71e45860f3 | ||
|
|
25dfd63c4f | ||
|
|
fc12d7b4c8 | ||
|
|
a6eedc6d84 | ||
|
|
b523a98289 | ||
|
|
a0929c96ba | ||
|
|
ae1f25379f | ||
|
|
1e3c8cb7b1 | ||
|
|
b9f28705c8 | ||
|
|
ad4f3ce379 | ||
|
|
d4f53bf6bb | ||
|
|
2ea2819477 | ||
|
|
49a2b2ce6d | ||
|
|
06edc261c0 | ||
|
|
af69bc9d3c | ||
|
|
6eb8256220 | ||
|
|
ecf3067d67 | ||
|
|
3a7f23f75e | ||
|
|
f88c34a0be | ||
|
|
572c57e023 | ||
|
|
79cf2150d5 | ||
|
|
68b868047e | ||
|
|
377670b34a | ||
|
|
2b7f4de832 | ||
|
|
4a88a63fa0 | ||
|
|
bf195051e2 | ||
|
|
c3ccd9feff | ||
|
|
2d0f0948fb | ||
|
|
fc7a5d098d | ||
|
|
b7f766ab82 | ||
|
|
bfffd5e4b3 | ||
|
|
63ba005f4d | ||
|
|
f66ef05f2a | ||
|
|
a3b28843b6 | ||
|
|
b07ec8accb | ||
|
|
06f4b5823a | ||
|
|
99fe57f99a | ||
|
|
d1226031e1 | ||
|
|
78f3e64d5a | ||
|
|
1d98e75b92 | ||
|
|
66d8d95763 | ||
|
|
e2bf468195 | ||
|
|
b7efc16257 | ||
|
|
ec6bcdff7e | ||
|
|
3e65885e1f | ||
|
|
c6ce4d9374 | ||
|
|
0b437d0e8d | ||
|
|
e1df3be4b9 | ||
|
|
b944769f8c | ||
|
|
56b8074c22 | ||
|
|
b577f322c9 | ||
|
|
b007e2af8f | ||
|
|
df89990aa5 | ||
|
|
c108a53b11 | ||
|
|
4831f5bb5d | ||
|
|
987ef63e64 | ||
|
|
e997e12bb9 | ||
|
|
6ba0add265 | ||
|
|
9160c13039 | ||
|
|
40be9f65e4 | ||
|
|
0aae53524c | ||
|
|
1d1efc00b5 | ||
|
|
7584305159 | ||
|
|
554601d674 | ||
|
|
6caf14f4b2 | ||
|
|
edbd08be8a | ||
|
|
caed6df53b | ||
|
|
d823fba60b | ||
|
|
92c8abe65d | ||
|
|
91e966b480 | ||
|
|
1f0b779c64 | ||
|
|
0ccd76074a | ||
|
|
07c6dcab4a | ||
|
|
84cbc1201c | ||
|
|
495bbc2aba | ||
|
|
cb0bceacfa | ||
|
|
6799050718 | ||
|
|
4b892e8939 | ||
|
|
674001b499 | ||
|
|
c730777134 | ||
|
|
8148876249 | ||
|
|
4cf946f856 | ||
|
|
05706f1641 | ||
|
|
6fed84958e | ||
|
|
64011c5988 | ||
|
|
3e02d5a56f | ||
|
|
14f57bc3a4 | ||
|
|
ac8f1b9aa3 | ||
|
|
104c6ef457 | ||
|
|
84661cea36 | ||
|
|
c2b0ed85d2 | ||
|
|
5a081f2419 | ||
|
|
88016f9c35 | ||
|
|
0d56e62bb8 | ||
|
|
567756edd3 | ||
|
|
7cc0a3620e | ||
|
|
b5587e458f | ||
|
|
b22d965b7b | ||
|
|
cc0b41ddfb | ||
|
|
006aeeebb0 | ||
|
|
3cfb1abf62 | ||
|
|
e1da69040d | ||
|
|
5924693e90 | ||
|
|
9ee7d659df | ||
|
|
ac1b1c3cdd | ||
|
|
8440138ba0 | ||
|
|
877b44ec0a | ||
|
|
cc4acb8766 | ||
|
|
3aa85bb51c | ||
|
|
4e948d8bff | ||
|
|
28489d244c | ||
|
|
acf3dd2762 | ||
|
|
8589303753 | ||
|
|
0d9fc26119 | ||
|
|
9dd63c1da4 | ||
|
|
7ff03ab098 | ||
|
|
750345d209 | ||
|
|
03ee16f5ca | ||
|
|
586fc80c19 | ||
|
|
13cd221fe5 | ||
|
|
f35af54e9f | ||
|
|
67e37f1ce1 | ||
|
|
49ff27a5fe | ||
|
|
04730ba8c7 | ||
|
|
b2fcf91958 | ||
|
|
b78d2bd4b1 | ||
|
|
2612ce5ad9 | ||
|
|
798913740e | ||
|
|
7d0445cc20 | ||
|
|
361f6895ee | ||
|
|
47442f4f58 | ||
|
|
307c2e1682 | ||
|
|
2190359e4d | ||
|
|
27a933c7b7 | ||
|
|
71970a0d1d | ||
|
|
7661273cfd | ||
|
|
cd06334049 | ||
|
|
05319e36a7 | ||
|
|
200a3b81e5 | ||
|
|
5647755762 | ||
|
|
adb2947b52 | ||
|
|
7b05afab74 | ||
|
|
5cf5bed6a8 | ||
|
|
095cb58df3 | ||
|
|
181bf69994 | ||
|
|
927b513bf8 | ||
|
|
05801cd90c | ||
|
|
a8ac00469d | ||
|
|
1e3ae948a2 | ||
|
|
2d8aa229c6 | ||
|
|
84f4812189 | ||
|
|
8a3612e56c | ||
|
|
d08861fb30 | ||
|
|
ecc0f9d9f5 | ||
|
|
e209699b19 | ||
|
|
c8d8690cfd | ||
|
|
59d05b698a | ||
|
|
1bcbfc8d18 | ||
|
|
bafed63d40 | ||
|
|
828a056e21 | ||
|
|
9424f6303a | ||
|
|
c0dc5c3a4d | ||
|
|
d0fb3da285 | ||
|
|
ccce01800d | ||
|
|
b44b9d8016 | ||
|
|
7592c45bd9 | ||
|
|
b024936ad7 | ||
|
|
be2246283f | ||
|
|
a7969f6ec8 | ||
|
|
ac447dd055 | ||
|
|
28cdbe407c | ||
|
|
bf486082c9 | ||
|
|
41290b463c | ||
|
|
385ebe234e | ||
|
|
72e9fcc895 | ||
|
|
5f42e4ac3f | ||
|
|
926ec89f48 | ||
|
|
440e1b9156 | ||
|
|
ea0a6e413d | ||
|
|
0de4241b56 | ||
|
|
6e8a53a204 | ||
|
|
60772889d5 | ||
|
|
7db7c9e978 | ||
|
|
d85bf67103 | ||
|
|
926f2e9f48 | ||
|
|
2019f29e8c | ||
|
|
3b45b63d2a | ||
|
|
1c08c53121 | ||
|
|
7623bde159 | ||
|
|
1ed0f5e78d | ||
|
|
568ab33a37 | ||
|
|
f639b052e3 | ||
|
|
56f91948f8 | ||
|
|
6c5e481318 | ||
|
|
f487f1e8c1 | ||
|
|
68ee9743fe | ||
|
|
f4cb48ed0d | ||
|
|
ad77fe1116 | ||
|
|
28a0667da6 | ||
|
|
1f0366c989 | ||
|
|
3a51922650 | ||
|
|
82b2be5046 | ||
|
|
0fc9718c35 | ||
|
|
976733a3c3 | ||
|
|
5d17072709 | ||
|
|
fbad183d39 | ||
|
|
7356a2ff07 | ||
|
|
6ff948c107 | ||
|
|
e3ebce117b | ||
|
|
ce69b09730 | ||
|
|
c823cef405 | ||
|
|
0379b81d43 | ||
|
|
6a997163fd | ||
|
|
93f8466230 | ||
|
|
114c8d3c22 | ||
|
|
3e77e79194 | ||
|
|
ca91d36979 | ||
|
|
d47232246a | ||
|
|
d819222cf7 | ||
|
|
0c4c4d5622 | ||
|
|
ad051ed083 | ||
|
|
1aa0af3e58 | ||
|
|
72556b37f5 | ||
|
|
0bddae5775 | ||
|
|
1f1e710a6d | ||
|
|
b57d418b98 | ||
|
|
0913c43219 | ||
|
|
d754a43fba | ||
|
|
f97b56a87b | ||
|
|
2f78398914 | ||
|
|
81b9a34e5e | ||
|
|
73ba078efc | ||
|
|
1ffe0ad85c | ||
|
|
797b36a81e | ||
|
|
b82c14892e | ||
|
|
a8891dabec | ||
|
|
86ba797665 | ||
|
|
3830dcb3f3 | ||
|
|
c20fe7a773 | ||
|
|
fa01f86b19 | ||
|
|
9583095734 | ||
|
|
a5b2eb3a28 | ||
|
|
72f2784588 | ||
|
|
5c5b730bb8 | ||
|
|
b9ec6b4315 | ||
|
|
4b83fa3549 | ||
|
|
a69e81076a | ||
|
|
4cd2b73f19 | ||
|
|
4ea0bebd92 | ||
|
|
bbcdae25a1 | ||
|
|
220a801138 | ||
|
|
c6821d9cc3 | ||
|
|
8b59245e6a | ||
|
|
9b5ee2e694 | ||
|
|
e932d86b69 | ||
|
|
96f05311b8 | ||
|
|
3e2d68782c | ||
|
|
db2a4349cb | ||
|
|
2014fe83a3 | ||
|
|
55439aab5e | ||
|
|
8c91864f1c | ||
|
|
9319ec5bb2 | ||
|
|
83e4023c19 | ||
|
|
a14701bdd2 | ||
|
|
379dd011ff | ||
|
|
49b3ccfe2b | ||
|
|
16608370a6 | ||
|
|
53015c9d8e | ||
|
|
6d68b89ea0 | ||
|
|
254582da89 | ||
|
|
af54b7cfef | ||
|
|
f13149db8e | ||
|
|
79912a4067 | ||
|
|
c0b6b85ec0 | ||
|
|
a4895f5166 | ||
|
|
4d7670a12e | ||
|
|
8c21954049 | ||
|
|
132fab1c03 | ||
|
|
e7b8d71010 | ||
|
|
fff8cfdee0 | ||
|
|
3e45a3b4d8 | ||
|
|
7c66e21356 | ||
|
|
c477a49777 | ||
|
|
5a38c09f8d | ||
|
|
fe4657b122 | ||
|
|
c1dcd2e57d | ||
|
|
26d993674e | ||
|
|
9d475001ee | ||
|
|
34eb25b0ba | ||
|
|
716b935177 | ||
|
|
92528af600 | ||
|
|
2606e6b82d | ||
|
|
b965ce7376 | ||
|
|
048f1b53c0 | ||
|
|
43340c4aa8 | ||
|
|
9f073fcbcf | ||
|
|
c0c60a4875 | ||
|
|
94f682e461 | ||
|
|
1086bfe1ba | ||
|
|
d441d5763f | ||
|
|
c0a2daa3a3 | ||
|
|
3de51b6a65 | ||
|
|
a741388447 | ||
|
|
1ea9b87498 | ||
|
|
0cab007c37 | ||
|
|
4a331db5fc | ||
|
|
904b0bf2da | ||
|
|
90425542f8 | ||
|
|
eae0141d50 | ||
|
|
9594c82005 | ||
|
|
657aacceb5 | ||
|
|
a35dbf99a6 | ||
|
|
0d80f5d752 | ||
|
|
b36f4dfd08 | ||
|
|
fddee69f92 | ||
|
|
ec270a3b54 | ||
|
|
c97d1e3363 | ||
|
|
554c1ed1f7 | ||
|
|
a90b286482 | ||
|
|
cc78ea7222 | ||
|
|
7f2cc3b232 | ||
|
|
00b10f17c1 | ||
|
|
cab6305462 | ||
|
|
7218403ad7 | ||
|
|
811dfecf98 | ||
|
|
acbbf30a0e | ||
|
|
4d29f8f679 | ||
|
|
13fcbe3e74 | ||
|
|
850b79f459 | ||
|
|
9e6f970bc4 | ||
|
|
cbcb717aee | ||
|
|
5aea46c214 | ||
|
|
6394720c5a | ||
|
|
6af627ea97 | ||
|
|
85277f2b4f | ||
|
|
7b0876204e | ||
|
|
cf65942504 | ||
|
|
7369b02bf4 | ||
|
|
1438fea76b | ||
|
|
e0912f0cf0 | ||
|
|
838525b452 | ||
|
|
774cbbf47a | ||
|
|
d15bc6d32c | ||
|
|
99e0766f53 | ||
|
|
51225b18b2 | ||
|
|
96ab01b0c1 | ||
|
|
a4eb4ea66d | ||
|
|
54819e288a | ||
|
|
ec5fbded4f | ||
|
|
f939576311 | ||
|
|
628784da35 | ||
|
|
9ea3231060 | ||
|
|
0b7858494f | ||
|
|
8f98c8a3c9 | ||
|
|
67f9b3a6e0 | ||
|
|
5defc0a87b | ||
|
|
b4bcb09707 | ||
|
|
b2d74f66b3 | ||
|
|
75223e18ee | ||
|
|
4aea9c727d | ||
|
|
7d779afcd4 | ||
|
|
5cb7a69a46 | ||
|
|
0e88bfc570 | ||
|
|
48cf56557b | ||
|
|
9c9354cf38 | ||
|
|
e730ae66ae | ||
|
|
58d6b71808 | ||
|
|
4b9c1c4863 | ||
|
|
e1cdacaebf | ||
|
|
af120248d7 | ||
|
|
3749b327f9 | ||
|
|
017ccd6351 | ||
|
|
cdc860933e | ||
|
|
7b408f338a | ||
|
|
b326c0c9ae | ||
|
|
f06f409f2d | ||
|
|
a0e8b70e6d | ||
|
|
5294178bb7 | ||
|
|
9050d48bc3 | ||
|
|
9d0b54f461 | ||
|
|
4ba848a483 | ||
|
|
0b26e6232a | ||
|
|
88ad827a87 | ||
|
|
0b0f0a959a | ||
|
|
25ee749724 | ||
|
|
204b871fa2 | ||
|
|
f45db6014d | ||
|
|
475850ef94 | ||
|
|
602fe086b9 | ||
|
|
5ad76cf2af | ||
|
|
03e8c56f05 | ||
|
|
d1981967b2 | ||
|
|
c6094ad575 | ||
|
|
93e376ad2f | ||
|
|
6bba3d164a | ||
|
|
b5decffaa2 | ||
|
|
c068ac48d1 | ||
|
|
d4b89803b2 | ||
|
|
d5b73236de | ||
|
|
1e011879b1 | ||
|
|
9c30ff3024 | ||
|
|
035f41b12c | ||
|
|
0bbf1db434 | ||
|
|
639e267392 | ||
|
|
bd5504461e | ||
|
|
c46aa23fdd | ||
|
|
d654e79be3 | ||
|
|
c41877920a | ||
|
|
3f11e3e6a6 | ||
|
|
225e73c8cf | ||
|
|
95ec541a38 | ||
|
|
1941bd36bb | ||
|
|
e1b6d61558 | ||
|
|
c873e4ef42 | ||
|
|
90eb261da6 | ||
|
|
fb46cc9fdf | ||
|
|
2d5a2eb52b | ||
|
|
fa108126bb | ||
|
|
b9540ba2bc | ||
|
|
1992acaf61 | ||
|
|
8c586a34e7 | ||
|
|
44399a03c1 | ||
|
|
3e70af9a57 | ||
|
|
475d20b627 | ||
|
|
69c5c6d6b8 | ||
|
|
2480dc83b2 | ||
|
|
7c8b617f62 | ||
|
|
7377fee8ca | ||
|
|
bdd78b664f | ||
|
|
9272d4725a | ||
|
|
4ae6a8e25d | ||
|
|
6e660140ae | ||
|
|
5315429195 | ||
|
|
abf898e032 | ||
|
|
eef112d83d | ||
|
|
e1784abbeb | ||
|
|
0031ca3159 | ||
|
|
411115523e | ||
|
|
8b206b087c | ||
|
|
0d126106c0 | ||
|
|
0751debff7 | ||
|
|
33a28a64ec | ||
|
|
28e37d8ad2 | ||
|
|
190f571718 | ||
|
|
c7d7dfbd50 | ||
|
|
efb018d2b0 | ||
|
|
cae9a45832 | ||
|
|
3daeab5186 | ||
|
|
83914d5a56 | ||
|
|
0f611eb87b | ||
|
|
f70b2d0839 | ||
|
|
2f33a46e89 | ||
|
|
598c7a5d76 | ||
|
|
8724c12c11 | ||
|
|
22d9020331 | ||
|
|
b4d77080e8 | ||
|
|
e42fc97d03 | ||
|
|
e45648b389 | ||
|
|
085c4ddf09 | ||
|
|
5ddf9bd7ec | ||
|
|
2420af3b6d | ||
|
|
b8fade251b | ||
|
|
8935dc4e31 | ||
|
|
ae61d89494 | ||
|
|
753832d701 | ||
|
|
8926cf777c | ||
|
|
868ea1a1e2 | ||
|
|
1e1707ec0b | ||
|
|
636ac2a56c | ||
|
|
45076b05f7 | ||
|
|
ba9e2101bb | ||
|
|
7301b61cb8 | ||
|
|
ee3f657751 | ||
|
|
e30291966a | ||
|
|
2536bd0988 | ||
|
|
5234350bde | ||
|
|
36e4398bcb | ||
|
|
4b040280c3 | ||
|
|
fdd2300517 | ||
|
|
49913b2258 | ||
|
|
4927b64d27 | ||
|
|
fb2df05e3f | ||
|
|
ab90a93eec | ||
|
|
48c17169b5 | ||
|
|
41cd83f20e | ||
|
|
52dd3f798a | ||
|
|
070efd6951 | ||
|
|
502d82e1c9 | ||
|
|
7760e779ae | ||
|
|
474298c969 | ||
|
|
b2a013c027 | ||
|
|
cca5ef098b | ||
|
|
41b4c28430 | ||
|
|
90962ee056 | ||
|
|
953cff09a0 | ||
|
|
b41a989051 | ||
|
|
4fcd45c1ae | ||
|
|
1f75f0c082 | ||
|
|
c2a95b5bec | ||
|
|
0a246d3de7 | ||
|
|
2d6238d431 | ||
|
|
c4f3dc4434 | ||
|
|
2aea24afdd | ||
|
|
666240f21e | ||
|
|
fb4ab220d6 | ||
|
|
5a882fe37f | ||
|
|
132326136a | ||
|
|
6fc4723d61 | ||
|
|
8564198321 | ||
|
|
4c3f990d4b | ||
|
|
b19c14787e | ||
|
|
f67b79f007 | ||
|
|
daa332aa20 | ||
|
|
c3f538c2f6 | ||
|
|
a0e677ea00 | ||
|
|
343569ba19 | ||
|
|
9096013e13 | ||
|
|
89a2f249c1 | ||
|
|
4b0e094272 | ||
|
|
97713e872a | ||
|
|
f9a7db11eb | ||
|
|
1448d7e6eb | ||
|
|
8e7d5340d7 | ||
|
|
47ecf98e2a | ||
|
|
f8e4e42a36 | ||
|
|
38753c4395 | ||
|
|
b473e13b83 | ||
|
|
9092575186 | ||
|
|
ffe5ac2aad | ||
|
|
0ab6f75410 | ||
|
|
099245f27e | ||
|
|
0a0fe20fa0 | ||
|
|
c2aa5cc994 | ||
|
|
f84e59a7fb | ||
|
|
613c032994 | ||
|
|
7829db97bf | ||
|
|
acdfde6752 | ||
|
|
c673c0b245 | ||
|
|
4bf4e11cee | ||
|
|
770175456f | ||
|
|
0abbf71f15 | ||
|
|
46b0de367a | ||
|
|
30309659d3 | ||
|
|
acadd6bddc | ||
|
|
96c57260cb | ||
|
|
f29f58b2ac | ||
|
|
124a04738c | ||
|
|
3a60c31df9 | ||
|
|
501cf3973c | ||
|
|
c73251e998 | ||
|
|
201fb61bd4 | ||
|
|
f87ae429f4 | ||
|
|
35e8e2df44 | ||
|
|
7c3f80f13d | ||
|
|
17a176ad4e | ||
|
|
ca5eb06de9 | ||
|
|
2378548cf1 | ||
|
|
fdd265f47f | ||
|
|
3e2e1ecddf | ||
|
|
863950963f | ||
|
|
defa1b28a8 | ||
|
|
1f649274d1 | ||
|
|
3ce04de161 | ||
|
|
e798d18e70 | ||
|
|
ed2609d3b3 | ||
|
|
6d2a2632c5 | ||
|
|
dbf95a95a4 | ||
|
|
0e4bd06795 | ||
|
|
4d38280cfa | ||
|
|
75173473ae | ||
|
|
b314b27260 | ||
|
|
cc7e223082 | ||
|
|
79f87d4c20 | ||
|
|
8adbd6720a | ||
|
|
c3973571a7 | ||
|
|
bf63509a6e | ||
|
|
6552fe831b | ||
|
|
05fdf6b93a | ||
|
|
6953c3dbe4 | ||
|
|
55ecda902d | ||
|
|
0495610257 | ||
|
|
301bb2dcfe | ||
|
|
598b8f9980 | ||
|
|
9528f34a25 | ||
|
|
625aed151d | ||
|
|
4ffdf3f9a2 | ||
|
|
0a97e5b7be | ||
|
|
bfeae3a95b | ||
|
|
4ab12663be | ||
|
|
0584c29781 | ||
|
|
a8231d375a | ||
|
|
a86b342ba5 | ||
|
|
0a7a313e5d | ||
|
|
9d4aee5de2 | ||
|
|
faf031ce80 | ||
|
|
e9a2b8f03a | ||
|
|
d89bd0941d | ||
|
|
8d8423b6e0 | ||
|
|
e22669f91d | ||
|
|
b5e5fb7f10 | ||
|
|
2709994ede | ||
|
|
e5bd194b6c | ||
|
|
f01f76dba7 | ||
|
|
289bd41570 | ||
|
|
6a0d6a8faf | ||
|
|
dcc39d954e | ||
|
|
8a67f18cd9 | ||
|
|
2e02304c71 | ||
|
|
ce975c5d93 | ||
|
|
fb4bb54aca | ||
|
|
dae0942d03 | ||
|
|
25b1173db7 | ||
|
|
92d90866ca | ||
|
|
1595e0210a | ||
|
|
ea4ef40a12 | ||
|
|
9986fce8bf | ||
|
|
628f83172a | ||
|
|
c855896221 | ||
|
|
94b5241e70 | ||
|
|
0600f095f5 | ||
|
|
a0a05b676f | ||
|
|
a818975823 | ||
|
|
8e9f31cc32 | ||
|
|
0d4bc4ec2c | ||
|
|
7a0118b31c | ||
|
|
e9a8161811 | ||
|
|
a6bface632 | ||
|
|
48f47351ee | ||
|
|
9247f16add | ||
|
|
d3eab30d74 | ||
|
|
f65ecb9a0f | ||
|
|
312cb9ae70 | ||
|
|
cce60ce101 | ||
|
|
e0a3b8004c | ||
|
|
91239820e3 | ||
|
|
8641a91182 | ||
|
|
84bffd24f2 | ||
|
|
9fb37b1179 | ||
|
|
4eee10b5d5 | ||
|
|
c53456876c | ||
|
|
1a9f31174d | ||
|
|
0493352292 | ||
|
|
13b91193cc | ||
|
|
9a367c76a0 | ||
|
|
f58e7cc154 | ||
|
|
5ee0f15d94 | ||
|
|
250edf26a5 | ||
|
|
7a01376828 | ||
|
|
63b547ea13 | ||
|
|
626689cbe0 | ||
|
|
a44319d815 | ||
|
|
2c8a2945f0 | ||
|
|
ba59042e5c | ||
|
|
3273af7f40 | ||
|
|
5971ff884e | ||
|
|
cbf33e698b | ||
|
|
868e59bca0 | ||
|
|
04959df194 | ||
|
|
47d687b151 | ||
|
|
2ad6b4fa4e | ||
|
|
8e94688b77 | ||
|
|
fab367f041 | ||
|
|
94617c5ef7 | ||
|
|
4443bc77fd | ||
|
|
d33246612d | ||
|
|
144ab61e07 | ||
|
|
a4c95fd62b | ||
|
|
2245f4690e | ||
|
|
8eaeaa91f9 | ||
|
|
7bd0351ee9 | ||
|
|
811a20f080 | ||
|
|
1decff2114 | ||
|
|
c97968f6c0 | ||
|
|
9deb5adcbf | ||
|
|
91e7c16d90 | ||
|
|
edc81d8e6e | ||
|
|
ed8d553491 | ||
|
|
a64a5e89db | ||
|
|
bd636d59dd | ||
|
|
2d15492190 | ||
|
|
d696f0d081 | ||
|
|
9409e4498f | ||
|
|
541a6417b7 | ||
|
|
f6e9f9011d | ||
|
|
2fe3cb2b22 | ||
|
|
6b9519b56f | ||
|
|
9bbe7564a9 | ||
|
|
58af393968 | ||
|
|
bed4939652 | ||
|
|
ebf6109219 | ||
|
|
0ef232f731 | ||
|
|
6f83bd8961 | ||
|
|
ad602f22c8 | ||
|
|
70f44fcaca | ||
|
|
1f32e7cf82 | ||
|
|
00390200ec | ||
|
|
0a11a3afee | ||
|
|
9f77b03643 | ||
|
|
c6dc1675d8 | ||
|
|
e475a4cc7c | ||
|
|
dfc3cdd5d4 | ||
|
|
6974db5fd8 | ||
|
|
32c67c2a02 | ||
|
|
6c585de6d3 | ||
|
|
1056c943d3 | ||
|
|
839f0a3b95 | ||
|
|
b19e9cae23 | ||
|
|
84a15ef54d | ||
|
|
d4b409e166 | ||
|
|
ba1c0ab6fb | ||
|
|
eddafcfdfb | ||
|
|
8a225e279f | ||
|
|
d5cce88108 | ||
|
|
a7aae3ff7e | ||
|
|
25feab9a29 | ||
|
|
97916bf925 | ||
|
|
42e2c784c4 | ||
|
|
e00c6f2c14 | ||
|
|
0837295bd3 | ||
|
|
f3a005a667 | ||
|
|
d59ffaf0bd | ||
|
|
e133c29b2c | ||
|
|
f64bf7daa0 | ||
|
|
ef24318c17 | ||
|
|
33fe0ffc93 | ||
|
|
243b036ae7 | ||
|
|
06518c209a | ||
|
|
3482474265 | ||
|
|
5debb48265 | ||
|
|
1a8f89573d | ||
|
|
84377eed07 | ||
|
|
dd9589b37a | ||
|
|
7c00099919 | ||
|
|
bc840900a3 | ||
|
|
4429755c09 | ||
|
|
a2967afb55 | ||
|
|
3d03826db5 | ||
|
|
7ff86a2aee | ||
|
|
2a68cc9989 | ||
|
|
855365fba6 | ||
|
|
928303f27b | ||
|
|
df2f69e85f | ||
|
|
ec3407df7e | ||
|
|
5dae074c95 | ||
|
|
a35be6ae57 | ||
|
|
101935ae46 | ||
|
|
d68e731ffd | ||
|
|
bf0dd6946e | ||
|
|
41cbcbc07f | ||
|
|
73f93946b0 | ||
|
|
181d2504e5 | ||
|
|
b0423d987e | ||
|
|
9157fe7323 | ||
|
|
e02718947a | ||
|
|
ebbd47c9cb | ||
|
|
ad810b3740 | ||
|
|
c3e85d747a | ||
|
|
8e092cbe1c | ||
|
|
9a35609bc7 | ||
|
|
7fed92d6b3 | ||
|
|
1f52461cd9 | ||
|
|
fe16743d16 | ||
|
|
7fd8e57bdc | ||
|
|
ed6cd9890a | ||
|
|
f876f9e20e | ||
|
|
3e87d83ae8 | ||
|
|
62b15f2d6f | ||
|
|
89529f4df5 | ||
|
|
fe18d6e638 | ||
|
|
042519005c | ||
|
|
7e24995afe | ||
|
|
877b165a9a | ||
|
|
0784823e21 | ||
|
|
1a9f47b1bc | ||
|
|
64f72ada28 | ||
|
|
171916e1a4 | ||
|
|
dbfc1bb68f | ||
|
|
5d4c067d80 | ||
|
|
3f10a775ba | ||
|
|
2f05a47de3 | ||
|
|
9ca079c95a | ||
|
|
2d37083719 | ||
|
|
0b890e1d70 | ||
|
|
0bb014c965 | ||
|
|
0684449c2a | ||
|
|
a23806d16a | ||
|
|
0b7be94d13 | ||
|
|
4ac996cfe6 | ||
|
|
78c819f976 | ||
|
|
365537f74e | ||
|
|
5c756348a5 | ||
|
|
ed12c2d527 | ||
|
|
82189b0a3c | ||
|
|
23889f7f16 | ||
|
|
45e14bc2f5 | ||
|
|
0746f30645 | ||
|
|
daedfc0a57 | ||
|
|
2cc3372b86 | ||
|
|
e024452610 | ||
|
|
06e4a05e41 | ||
|
|
256514fefc | ||
|
|
3f64ff8194 | ||
|
|
af2cef1bfc | ||
|
|
3be74b1fdd | ||
|
|
e2a705806a | ||
|
|
5c99615edf | ||
|
|
605f168c7e | ||
|
|
b223cf05d9 | ||
|
|
419b98b50f | ||
|
|
b99b3b844a | ||
|
|
d9787e849e | ||
|
|
631e77ce65 | ||
|
|
7ff3a31e72 | ||
|
|
331dfdbab4 | ||
|
|
c83ff2237c | ||
|
|
9972435525 | ||
|
|
409d0e4084 | ||
|
|
991a38df28 | ||
|
|
656f4da8f9 | ||
|
|
f8d65b84db | ||
|
|
18ed255f5a | ||
|
|
4a8e9bf04e | ||
|
|
0b1a302995 | ||
|
|
6978e7439f | ||
|
|
fcb6bec474 | ||
|
|
91690ff99a | ||
|
|
45f930a9e2 | ||
|
|
09a1879f3e | ||
|
|
4bc14dbdd0 | ||
|
|
1627d424e7 | ||
|
|
0aa9da39a9 | ||
|
|
8564c2ba72 | ||
|
|
1c791f240a | ||
|
|
bea0ccaa6c | ||
|
|
05f756963c | ||
|
|
54ad6ad1c7 | ||
|
|
c44ff77e09 | ||
|
|
c77d415893 | ||
|
|
92c9612dee | ||
|
|
b40417fcfe | ||
|
|
13c890b212 | ||
|
|
741ab6e43c | ||
|
|
3db46ecd68 | ||
|
|
a972bb8827 | ||
|
|
2e4e080329 | ||
|
|
5fe4c40ec1 | ||
|
|
d18cb373fc | ||
|
|
1b1771e4eb | ||
|
|
51e450cc4b | ||
|
|
67a97a7e51 | ||
|
|
1e88c86378 | ||
|
|
fcb5f946dd | ||
|
|
2e69e9bef3 | ||
|
|
88623754cf | ||
|
|
886bcef7b0 | ||
|
|
cc414da744 | ||
|
|
6e88ecc2da | ||
|
|
88d2420163 | ||
|
|
7e71ee1aae | ||
|
|
c6d78e27c6 | ||
|
|
b6d06dcfc3 | ||
|
|
756c46c026 | ||
|
|
5afba6e30f | ||
|
|
02a23a65e7 | ||
|
|
e7de16833d | ||
|
|
990c6d1c64 | ||
|
|
f78a8c6fea | ||
|
|
5580d19b75 | ||
|
|
c035e3c7c6 | ||
|
|
1502adfb85 | ||
|
|
3b76b3ddce | ||
|
|
e4a1730a5b | ||
|
|
4aa66170c5 | ||
|
|
24352b56af | ||
|
|
cbea17b4d5 | ||
|
|
4dd0d65db4 | ||
|
|
cdfdcc7d03 | ||
|
|
b4082b2cfa | ||
|
|
fae3cfc58f | ||
|
|
ad038784cc | ||
|
|
7e69dd5ff0 | ||
|
|
8720ec65ab | ||
|
|
3098f99eba | ||
|
|
98b90b82c4 | ||
|
|
01268a37e3 | ||
|
|
0c46f3c205 | ||
|
|
b442cc186a | ||
|
|
2353276aa7 | ||
|
|
8c034d3e78 | ||
|
|
2c25f4a4c0 | ||
|
|
d2a988a715 | ||
|
|
bd12eac145 | ||
|
|
ebc44273c9 | ||
|
|
b781d78cc6 | ||
|
|
3f7c8bdba5 | ||
|
|
fd8e277530 | ||
|
|
6a024b0ced | ||
|
|
a185b2a12a | ||
|
|
2366c2cd94 | ||
|
|
567c01e302 | ||
|
|
b3d2c1a5d1 | ||
|
|
7b3ecb5c2f | ||
|
|
f4abed43ba | ||
|
|
5854202f22 | ||
|
|
d3238de8ab | ||
|
|
09a2705311 | ||
|
|
a4c0861cf4 | ||
|
|
5ba917c5e4 | ||
|
|
83f2fb1e62 | ||
|
|
7bf79675c1 | ||
|
|
f33aa9c71b | ||
|
|
ac1c21c784 | ||
|
|
1757ce23af | ||
|
|
3f1bae3044 | ||
|
|
20bd7ca2cc | ||
|
|
a2b0204a95 | ||
|
|
f7063d03f1 | ||
|
|
9be3043e0f | ||
|
|
4aeeaf185c | ||
|
|
a25d5d98a4 | ||
|
|
973304e0d7 | ||
|
|
590a735f99 | ||
|
|
42185a011b | ||
|
|
e7b872a5df | ||
|
|
2bf75c36e4 | ||
|
|
bcd9005b53 | ||
|
|
cca6297a64 | ||
|
|
39e94d4a5e | ||
|
|
204b1b1963 | ||
|
|
b2e45e8af3 | ||
|
|
27797581ba | ||
|
|
a797801d4c | ||
|
|
d7c09e3493 | ||
|
|
085542c861 | ||
|
|
7ccd74b022 | ||
|
|
047afeebb6 | ||
|
|
a6eab324b8 | ||
|
|
74a11da9bd | ||
|
|
473cd79af1 | ||
|
|
b3c075714c | ||
|
|
f5661b3b1e | ||
|
|
8ce1fd561d | ||
|
|
adb2cf35d4 | ||
|
|
3e32724729 | ||
|
|
b59aa2f3e7 | ||
|
|
44405b250c | ||
|
|
2b547f71f4 | ||
|
|
bd66d0a987 | ||
|
|
56e3a1c3b2 | ||
|
|
62802eb138 | ||
|
|
848beb11df | ||
|
|
0481e766ae | ||
|
|
644a66c983 | ||
|
|
5f62c0d57a | ||
|
|
c440e6f8fa | ||
|
|
706bd6126a | ||
|
|
f8544cf14b | ||
|
|
a8bb992569 | ||
|
|
ddafb96eba | ||
|
|
1dfb5d29c3 | ||
|
|
3c64abceb8 | ||
|
|
3c43b87e9f | ||
|
|
d7fe1150dc | ||
|
|
3e55be910b | ||
|
|
1ae0af56cc | ||
|
|
b329ede52a | ||
|
|
a9f6a06446 | ||
|
|
24383997ef | ||
|
|
48ec0ae44c | ||
|
|
ea49095b0a | ||
|
|
f3ff3920d9 | ||
|
|
3db07f3a26 | ||
|
|
a2ef45e13f | ||
|
|
3e20934e20 | ||
|
|
ec5db5d2c7 | ||
|
|
cc25b5e856 | ||
|
|
c06f232589 | ||
|
|
aa57984bde | ||
|
|
e1a8184c2e | ||
|
|
fc24eb08cb | ||
|
|
345ab8ea9e | ||
|
|
65547bad87 | ||
|
|
4402442e54 | ||
|
|
5bfd7d5e6c | ||
|
|
09537ec0dd | ||
|
|
5ad4ba6abd | ||
|
|
4decb34f99 | ||
|
|
947014945f | ||
|
|
b6710beadc | ||
|
|
c2c5f07ffa | ||
|
|
95a60adfcc | ||
|
|
59eeb73b60 | ||
|
|
0173a4d7fa | ||
|
|
e2e7ee1893 | ||
|
|
58dfc58622 | ||
|
|
c8bfc52fab | ||
|
|
3e50fe1aa9 | ||
|
|
e7c760e68b | ||
|
|
fa9ca221b4 | ||
|
|
cff76c672c | ||
|
|
2395785337 | ||
|
|
862ea5fa36 | ||
|
|
aa0964d99f | ||
|
|
5b5281e50c | ||
|
|
8bcf7bdade | ||
|
|
57cfafeb34 | ||
|
|
5ea8706ba9 | ||
|
|
68f497517d | ||
|
|
8308bd0039 | ||
|
|
b7f00324bc | ||
|
|
77f5328ab9 | ||
|
|
bb82b9a9d3 | ||
|
|
355fd2b5d7 | ||
|
|
1e6cc95f09 | ||
|
|
bb8b4cae79 | ||
|
|
faef061d74 | ||
|
|
62af7549c7 | ||
|
|
04b4db763c | ||
|
|
c95b4d1305 | ||
|
|
40b8aa42cc | ||
|
|
e66535c572 | ||
|
|
81c8511316 | ||
|
|
b59170078d | ||
|
|
453c653975 | ||
|
|
976e7b6765 | ||
|
|
94a8c42311 | ||
|
|
534b650c10 | ||
|
|
f01c04bb24 | ||
|
|
fcc0449076 | ||
|
|
565b0c5a9c | ||
|
|
c204d37ac7 | ||
|
|
c5adea6993 | ||
|
|
c8648101a7 | ||
|
|
1f9c167bd2 | ||
|
|
cbae7bd500 | ||
|
|
27f97bc55d | ||
|
|
601ba40cb0 | ||
|
|
4632531f2d | ||
|
|
c9e95a9146 | ||
|
|
c48d4ae7df | ||
|
|
4895d389e4 | ||
|
|
92916e42c1 | ||
|
|
1a8499cf26 | ||
|
|
81a912c93f | ||
|
|
989d6eee0f | ||
|
|
a8d371045b | ||
|
|
b80726e942 | ||
|
|
0fc48ea03f | ||
|
|
afc86efe28 | ||
|
|
ab1ebeb7e0 | ||
|
|
6932c7e3e9 | ||
|
|
c04687fdd1 | ||
|
|
7717242112 | ||
|
|
1ad82c22d9 | ||
|
|
8fa88175c1 | ||
|
|
b05fec93bb | ||
|
|
802311a06a | ||
|
|
dc0f26d3d8 | ||
|
|
36c32fd968 | ||
|
|
0001963a04 | ||
|
|
6a264a45e2 | ||
|
|
d897315355 | ||
|
|
d536b9d8c6 | ||
|
|
6af7d4e6e8 | ||
|
|
67dddcb224 | ||
|
|
aad12aa227 | ||
|
|
d71675f3d2 | ||
|
|
88fb35552a | ||
|
|
8a084a05c9 | ||
|
|
90ef7ddacb | ||
|
|
034dfffb85 | ||
|
|
09a15e2e59 | ||
|
|
7859a0d001 | ||
|
|
085e1b6c41 | ||
|
|
5b01664d53 | ||
|
|
03adfd4898 | ||
|
|
1616124fa2 | ||
|
|
2611550ffd | ||
|
|
2989be47cc | ||
|
|
9b7a346380 | ||
|
|
8c8bf8702f | ||
|
|
eb8c2a9277 | ||
|
|
350c91889e | ||
|
|
43018840d1 | ||
|
|
4daf443db8 | ||
|
|
a85f214fdb | ||
|
|
ab77c4e616 | ||
|
|
19315f72a0 | ||
|
|
3683d4a759 | ||
|
|
e9a7722915 | ||
|
|
7794129929 | ||
|
|
2ee2067634 | ||
|
|
ef6ec3fcb8 | ||
|
|
c57ff3ae51 | ||
|
|
aa7f59f88c | ||
|
|
3b4a95ef33 | ||
|
|
09ba14b8ca | ||
|
|
b5dc7281f9 | ||
|
|
deca7726f4 | ||
|
|
058e0279b4 | ||
|
|
5e604950c5 | ||
|
|
af1b81097f | ||
|
|
08e713d381 | ||
|
|
a2c228c09f | ||
|
|
4e6afebf9e | ||
|
|
13d274202e | ||
|
|
f56acac656 | ||
|
|
68d325b5b5 | ||
|
|
40907a2584 | ||
|
|
f4f5d99ec9 | ||
|
|
0475e55518 | ||
|
|
872b390cfa | ||
|
|
dc4c539607 | ||
|
|
69f8c76ce2 | ||
|
|
cbdcca7fd8 | ||
|
|
1e7fa988da | ||
|
|
3cfe771b8b | ||
|
|
8b4d6d0868 | ||
|
|
ba35e1422b | ||
|
|
8db9b09c96 | ||
|
|
78e99d1171 | ||
|
|
6a12f96fdc | ||
|
|
b79a20151c | ||
|
|
c9976020dd | ||
|
|
e8988e82d0 | ||
|
|
ff95570da6 | ||
|
|
6698a000e6 | ||
|
|
b084e3074d | ||
|
|
bc4f9c3442 | ||
|
|
ecf88e7ea1 | ||
|
|
d6cb66cd2f | ||
|
|
bc2241f67a | ||
|
|
3d292aa485 | ||
|
|
21f46a8aea | ||
|
|
ba6d61cc35 | ||
|
|
27b43c11bd | ||
|
|
a5292c4473 | ||
|
|
f3923488a5 | ||
|
|
d964ebfff8 | ||
|
|
e420eeece4 | ||
|
|
f34713545e | ||
|
|
2b513c7d87 | ||
|
|
7e28893613 | ||
|
|
822674ca78 | ||
|
|
35ebf97fdf | ||
|
|
b456c0ca9f | ||
|
|
bae49891be | ||
|
|
dfb4a67d87 | ||
|
|
8e727a3253 | ||
|
|
d6f03d7a07 | ||
|
|
5ada5bf1a0 | ||
|
|
ae9c935c5c | ||
|
|
95618001aa | ||
|
|
40c361968e | ||
|
|
757abda654 | ||
|
|
862807e863 | ||
|
|
d188db887c | ||
|
|
59b6c56262 | ||
|
|
f92658de82 | ||
|
|
f2b3402c17 | ||
|
|
24badf65a4 | ||
|
|
86442f212f | ||
|
|
9b6d6ecc32 | ||
|
|
1f51277004 | ||
|
|
68cc646a3e | ||
|
|
420ca1b3b4 | ||
|
|
a83e68815a | ||
|
|
d87aebb718 | ||
|
|
a9a4d14e8a | ||
|
|
9ed2fc7359 | ||
|
|
caad382a95 | ||
|
|
ea39fc9c48 | ||
|
|
bf7cce52db | ||
|
|
63a15a3359 | ||
|
|
db34392210 | ||
|
|
cc4e0ba6c1 | ||
|
|
38989f9c68 | ||
|
|
c78c92e539 | ||
|
|
31e694f50d | ||
|
|
5368199517 | ||
|
|
6bbb9176fc | ||
|
|
4209eee2f8 | ||
|
|
f65ebb6b71 | ||
|
|
ef8107e56a | ||
|
|
2293a30f19 | ||
|
|
d7678fd355 | ||
|
|
27d8a5cf99 | ||
|
|
03f6c58ac6 | ||
|
|
4fb52dc6fc | ||
|
|
0232ba3f25 | ||
|
|
5987e4c8e1 | ||
|
|
18ce7c8f2f | ||
|
|
177c9da8b5 | ||
|
|
b5f1a8e90f | ||
|
|
494c3dd1bd | ||
|
|
ad8f78d51e | ||
|
|
5112801c37 | ||
|
|
226adfdba2 | ||
|
|
22c0375dca | ||
|
|
66ebfef619 | ||
|
|
7e75513151 | ||
|
|
e77f6c9f6f | ||
|
|
5bd7c0ab8b | ||
|
|
97f7f6f7d2 | ||
|
|
d65fc70f07 | ||
|
|
dcae85eae8 | ||
|
|
686a48298b | ||
|
|
8ca590559d | ||
|
|
70251222cc | ||
|
|
e55c68d27e | ||
|
|
da4f2ef6b3 | ||
|
|
dbf2cabd38 | ||
|
|
72e68a163c | ||
|
|
919a87bf47 | ||
|
|
bea0bbfcdb | ||
|
|
c12d7a8d82 | ||
|
|
711ad1750a | ||
|
|
825567d449 | ||
|
|
800439d29e | ||
|
|
e3517dde13 | ||
|
|
f2da8473a4 | ||
|
|
9cc9a6e9b4 | ||
|
|
873406732f | ||
|
|
14ab950a6c | ||
|
|
6cd8e71f4f | ||
|
|
4aeaec9dc7 | ||
|
|
e318228a08 | ||
|
|
d22efbf745 | ||
|
|
90309d5552 | ||
|
|
72842ecd7a | ||
|
|
a1b32ffca9 | ||
|
|
44d225e6ca | ||
|
|
37ab5f9d7a | ||
|
|
61fdcec511 | ||
|
|
45cc4fd97a | ||
|
|
3228b88312 | ||
|
|
a1d3592d08 | ||
|
|
c686d950d0 | ||
|
|
ca779bb0af | ||
|
|
90f64e2527 | ||
|
|
444d50f751 | ||
|
|
2f9c72c1cf | ||
|
|
1bb81614a5 | ||
|
|
888e13e198 | ||
|
|
8166642ff9 | ||
|
|
51c42790b7 | ||
|
|
f105fd1b2c | ||
|
|
fe78e9a336 | ||
|
|
2fce25b0c8 | ||
|
|
6c0da2ea94 | ||
|
|
a353e69648 | ||
|
|
ac930d5504 | ||
|
|
d4cf8037b7 | ||
|
|
fb1fd851b0 | ||
|
|
2ff8c0b128 | ||
|
|
d232229abf | ||
|
|
490e58fb52 | ||
|
|
a8582be54d | ||
|
|
30bb8449e9 | ||
|
|
adb7132e02 | ||
|
|
4a1e488bd7 | ||
|
|
d200db0eeb | ||
|
|
28e06fa684 | ||
|
|
c4cb9b07cb | ||
|
|
817fc5d4b3 | ||
|
|
2de1e5f71a | ||
|
|
5246d85f11 | ||
|
|
9526ed0258 | ||
|
|
736add031c | ||
|
|
d4042ebaa2 | ||
|
|
54e31be3b2 | ||
|
|
b630be8c8a | ||
|
|
0aca41f9a6 | ||
|
|
a3fed0f84b | ||
|
|
1414ad6d50 | ||
|
|
ed6b4dabf8 | ||
|
|
d9309ebc6e | ||
|
|
c49b7613e0 | ||
|
|
4f88b6dc71 | ||
|
|
5c9e6404cc | ||
|
|
80df494787 | ||
|
|
c0886c2785 | ||
|
|
a83a56eecd | ||
|
|
130eb56d09 | ||
|
|
b60b473e02 | ||
|
|
e0504eb957 | ||
|
|
3886e41e94 | ||
|
|
edc54c7120 | ||
|
|
cef1167ef1 | ||
|
|
f456500f3a | ||
|
|
59328ea44d | ||
|
|
0dc840dc8e | ||
|
|
6700028bd1 | ||
|
|
213b1d1d0d | ||
|
|
feab64b09a | ||
|
|
f9f096cca8 | ||
|
|
535d174c2b | ||
|
|
11d2401970 | ||
|
|
232b36d4ae | ||
|
|
b38b159f4e | ||
|
|
606bf1ff58 | ||
|
|
46cec638dd | ||
|
|
8637397c86 | ||
|
|
7502e1881f | ||
|
|
734d5e50c5 | ||
|
|
052ff6727b | ||
|
|
2962dbd6b8 | ||
|
|
392afd6f33 | ||
|
|
fc3f4dff10 | ||
|
|
24d6889b24 | ||
|
|
27e3e22703 | ||
|
|
7b7f609c47 | ||
|
|
5389c8858a | ||
|
|
c2d2fbba96 | ||
|
|
cfc039dae1 | ||
|
|
edf24dc992 | ||
|
|
97710296ac | ||
|
|
acbfc0bb81 | ||
|
|
e1fe2fb093 | ||
|
|
dd5c1ec9ed | ||
|
|
c7f7614646 | ||
|
|
d604398642 | ||
|
|
d40b1d8937 | ||
|
|
49b4b476dc | ||
|
|
c0ec689be9 | ||
|
|
8e26decb5b | ||
|
|
921efcbf4b | ||
|
|
705ab58bfb | ||
|
|
b42e32a955 | ||
|
|
dec60a0fdd | ||
|
|
fa84d5c502 | ||
|
|
34c763caf5 | ||
|
|
e5709dfabc | ||
|
|
5f4f4a8ab9 | ||
|
|
ca9e71087b | ||
|
|
6da483b3ef | ||
|
|
e300145263 | ||
|
|
eb3f0035fe | ||
|
|
6be3a2a142 | ||
|
|
e23893a419 | ||
|
|
7b4c1dcde0 | ||
|
|
4b7cb2a22a | ||
|
|
344a8a3887 | ||
|
|
0afda5dc27 | ||
|
|
0891ef6d0a | ||
|
|
cdb64ecb19 | ||
|
|
b05ac4f2a4 | ||
|
|
411189a076 | ||
|
|
63cf4d46c9 | ||
|
|
3c683f2192 | ||
|
|
d46e59bcd4 | ||
|
|
c45e76ec31 | ||
|
|
44828707ea | ||
|
|
74c047d249 | ||
|
|
50abfb98fe | ||
|
|
6104657970 | ||
|
|
02116d4c05 | ||
|
|
23f993bb54 | ||
|
|
16aedd61da | ||
|
|
5a2f3ad616 | ||
|
|
54971104f8 | ||
|
|
deeffbf77d | ||
|
|
7e8dd6bba8 | ||
|
|
dc4078d744 | ||
|
|
1eb168be55 | ||
|
|
3c6fd365fb | ||
|
|
53c9184057 | ||
|
|
16c9872571 | ||
|
|
17160bc467 | ||
|
|
3b39e58cf3 | ||
|
|
7db055116c | ||
|
|
0262ff1aac | ||
|
|
14def09ce3 | ||
|
|
96e59da6bc | ||
|
|
8f75b0a0c0 | ||
|
|
70a40cfc45 | ||
|
|
ef6b8b9ebc | ||
|
|
f9dbaa9407 | ||
|
|
ed338668d1 | ||
|
|
1e5d94a958 | ||
|
|
51553c565f | ||
|
|
ae36805aa1 | ||
|
|
22b7445ac4 | ||
|
|
9c278d7d0b | ||
|
|
3a1592692e | ||
|
|
bdec956708 | ||
|
|
d0d8a8a3af | ||
|
|
f787962be8 | ||
|
|
57b9b369b7 | ||
|
|
ccda5bdb7e | ||
|
|
bd5fa83fe0 | ||
|
|
59caf381f7 | ||
|
|
181cb1b1bd | ||
|
|
ba2dd2d872 | ||
|
|
4dc5acd68e | ||
|
|
f57116afbe | ||
|
|
99c41c7e34 | ||
|
|
d7b38d9513 | ||
|
|
2c7aad1dcd | ||
|
|
593dba72a8 | ||
|
|
09f2f2a9e7 | ||
|
|
6c583eedb9 | ||
|
|
1c4d7a6ad1 | ||
|
|
f75ed4bc66 | ||
|
|
f487deb7b9 | ||
|
|
ad30a8476c | ||
|
|
60db807443 | ||
|
|
d1dedff9ca | ||
|
|
9a04506b0d | ||
|
|
a58369fbb1 | ||
|
|
d63b5d71a1 | ||
|
|
360d790282 | ||
|
|
a0dd8f8e0f | ||
|
|
db7c001076 | ||
|
|
c96f905d2b | ||
|
|
4b3f04083b | ||
|
|
8276b6c9a9 | ||
|
|
43d6e788dc | ||
|
|
0c062a8485 | ||
|
|
99b649f24e | ||
|
|
7c6532f145 | ||
|
|
052669a0b0 | ||
|
|
0cf86d3bbc | ||
|
|
56a16b862a | ||
|
|
b2fffb2e23 | ||
|
|
3f7a27cdbb | ||
|
|
58e30b8c88 | ||
|
|
4025e55b95 | ||
|
|
e1e63ebd64 | ||
|
|
8279df48bf | ||
|
|
d86a06fab0 | ||
|
|
90b24dd915 | ||
|
|
bacd2a6893 | ||
|
|
0f059f247d | ||
|
|
e2b76d9c29 | ||
|
|
1107a2f2bc | ||
|
|
efd43013da | ||
|
|
7b8458b47d | ||
|
|
84eed09a17 | ||
|
|
35b1a40d49 | ||
|
|
81d7fe3fdb | ||
|
|
02187fed4e | ||
|
|
019bf013ac | ||
|
|
d6e59a6a0a | ||
|
|
46aa862943 | ||
|
|
0413cab0d9 | ||
|
|
3357ce8f33 | ||
|
|
1776f6e7fd | ||
|
|
edfe5e1156 | ||
|
|
0768992848 | ||
|
|
1224f94879 | ||
|
|
b58c5344b8 | ||
|
|
7175bc0595 | ||
|
|
b7a6f5696d | ||
|
|
abf5b89c28 | ||
|
|
d554444b0e | ||
|
|
16ae0725e6 | ||
|
|
61feced541 | ||
|
|
a1d4db2f1e | ||
|
|
357e9af627 | ||
|
|
a41519be63 | ||
|
|
870e6b07c8 | ||
|
|
6f41759519 | ||
|
|
6727c42f18 | ||
|
|
90c367842f | ||
|
|
a0bb6e370e | ||
|
|
f2910ab9d1 | ||
|
|
b4bfed2ccb | ||
|
|
2fcde61b6d | ||
|
|
ffddf10de5 | ||
|
|
6e3bd5e6f3 | ||
|
|
b21230c4d6 | ||
|
|
0a533b64e1 | ||
|
|
15b0e321bd | ||
|
|
4d749340a2 | ||
|
|
0ef6ffa452 | ||
|
|
d7b1310ba3 | ||
|
|
7408454a75 | ||
|
|
07b71468cc | ||
|
|
522e966194 | ||
|
|
937c60c9cf | ||
|
|
bbb1e22163 | ||
|
|
a16e83200a | ||
|
|
d437521710 | ||
|
|
5cbf4cf352 | ||
|
|
2985e3b75b | ||
|
|
f34a75fc5b | ||
|
|
5aa88714b8 | ||
|
|
ce56a414e0 | ||
|
|
ba4a7dcd45 | ||
|
|
85c648da6c | ||
|
|
483f8eb690 | ||
|
|
93c868d698 | ||
|
|
a14e70e3f4 | ||
|
|
a6ff606cae | ||
|
|
651eb3374c | ||
|
|
68c71adc5a | ||
|
|
0c4ca9c94d | ||
|
|
8c04f5b3f1 | ||
|
|
35b29a0a1e | ||
|
|
d289f432b1 | ||
|
|
e16e269775 | ||
|
|
4e5d0c2e84 | ||
|
|
c9a2034936 | ||
|
|
b70fc1151d | ||
|
|
c11034edcd | ||
|
|
804d9b42a5 | ||
|
|
b1bb4e6758 | ||
|
|
76ed8f0ba2 | ||
|
|
4dde7eaea1 | ||
|
|
2e2149c110 | ||
|
|
70bb9477c5 | ||
|
|
ec5363e9c1 | ||
|
|
dba3b1c559 | ||
|
|
9606e3f80c | ||
|
|
7bc7b500f5 | ||
|
|
c6e804fa10 | ||
|
|
1cbaf9bd9d | ||
|
|
45145685d5 | ||
|
|
2fbec6f21f | ||
|
|
ad29d2765f | ||
|
|
e47e751142 | ||
|
|
c63d4ccf3e | ||
|
|
e5c30cf841 | ||
|
|
c80678aac5 | ||
|
|
1754570057 | ||
|
|
d87b411193 | ||
|
|
8fc6284317 | ||
|
|
eae49d2367 | ||
|
|
69287c5198 | ||
|
|
e6b3984f78 | ||
|
|
547fe888d4 | ||
|
|
3454309cbc | ||
|
|
544c46cd44 | ||
|
|
2c100825cc | ||
|
|
558ecd84a6 | ||
|
|
df24cfff4f | ||
|
|
bd5d93a964 | ||
|
|
ae2ded119f | ||
|
|
abdb80a6be | ||
|
|
2f9cbe2bf1 | ||
|
|
2cca7d60d5 | ||
|
|
3df745d1d2 | ||
|
|
9862083e0b | ||
|
|
7a4976c470 | ||
|
|
8834a19743 | ||
|
|
6e15403f60 | ||
|
|
7e1cf10cb2 | ||
|
|
ee762c3c68 | ||
|
|
32c06414c5 | ||
|
|
e97e1ba4bc | ||
|
|
2f580f7800 | ||
|
|
1ce1459455 | ||
|
|
c26573482e | ||
|
|
414ec08dee | ||
|
|
1cc78191eb | ||
|
|
75c6c6081a | ||
|
|
8d2ebe9718 | ||
|
|
eed974b883 | ||
|
|
ae846dac4d | ||
|
|
0b09c00b50 | ||
|
|
f7a1874cb3 | ||
|
|
28fb04eb7b | ||
|
|
34310cf420 | ||
|
|
e1d61d7190 | ||
|
|
9c14ac84cb | ||
|
|
1d1ea7b6f2 | ||
|
|
92401f5b7c | ||
|
|
38ac9218ec | ||
|
|
48497c749a | ||
|
|
72a1892058 | ||
|
|
f2c328d212 | ||
|
|
e9eafc40a7 | ||
|
|
933ca1bf81 | ||
|
|
b4fc9aa7eb | ||
|
|
dcc475bbef | ||
|
|
1fe35ad0cd | ||
|
|
f1ed1e0f14 | ||
|
|
fcc746fb98 | ||
|
|
95934a5b7a | ||
|
|
d38b101820 | ||
|
|
91d730a7bc | ||
|
|
0cfa77b628 | ||
|
|
ca4881ad51 | ||
|
|
8c2c064fe2 | ||
|
|
10646b9b86 | ||
|
|
967b195946 | ||
|
|
1ae7771290 | ||
|
|
a585fe4d54 | ||
|
|
fa3a9fe70e | ||
|
|
99952a393f | ||
|
|
920a41e3ca | ||
|
|
e5bec957a1 | ||
|
|
41cb765255 | ||
|
|
2d12a3cd7a | ||
|
|
df4fe0176c | ||
|
|
4fcc80719e | ||
|
|
f6c66f6ee4 | ||
|
|
220d137e66 | ||
|
|
425803a1b6 | ||
|
|
c794ea614a | ||
|
|
9000838aab | ||
|
|
2790bda1e9 | ||
|
|
e13d4daa9a | ||
|
|
2f504a4e03 | ||
|
|
598a50a133 | ||
|
|
1b06a5a3e0 | ||
|
|
9f1d3b0269 | ||
|
|
a09543d38b | ||
|
|
2ab3539925 | ||
|
|
23ddf53abe | ||
|
|
d8720d0849 | ||
|
|
6753b55160 | ||
|
|
7f7f48ad56 | ||
|
|
149ca01029 | ||
|
|
5c8133a810 | ||
|
|
2adccdd1b0 | ||
|
|
b91068d658 | ||
|
|
4534cafd3f | ||
|
|
405e79d729 | ||
|
|
4df2349e9d | ||
|
|
a9b61d3e13 | ||
|
|
3767d14e5c | ||
|
|
889a050f25 | ||
|
|
0701fac807 | ||
|
|
9fba91069a | ||
|
|
4f9ce70ff8 | ||
|
|
5e00d4ded7 | ||
|
|
95cd9ee5bb | ||
|
|
40f16f8ef1 | ||
|
|
3d9288f82f | ||
|
|
c51f12f88b | ||
|
|
0618153390 | ||
|
|
a7c066291b | ||
|
|
a69ac372fa | ||
|
|
16b2a54981 | ||
|
|
3f68e0d66f | ||
|
|
12d483fde6 | ||
|
|
96034a9712 | ||
|
|
d2def4479b | ||
|
|
afbbb913e7 | ||
|
|
ad76f239a3 | ||
|
|
e6b096c9e0 | ||
|
|
6e26b4e6c7 | ||
|
|
ea79494b6d | ||
|
|
afb18a3e4d | ||
|
|
f9c9853102 | ||
|
|
b3eb9fb6fa | ||
|
|
d3b97bf51a | ||
|
|
7a2e491199 | ||
|
|
25efaf08b7 | ||
|
|
f893ea6b98 | ||
|
|
500745b62c | ||
|
|
9ebe5bf1a7 | ||
|
|
4aecb86daa | ||
|
|
6924dd6df6 | ||
|
|
431755144e | ||
|
|
d182f81754 | ||
|
|
de0193fffc | ||
|
|
53e86205ad | ||
|
|
aa670efe3a | ||
|
|
e693fe49a7 | ||
|
|
7eaa32d85f | ||
|
|
ab40d2c37a | ||
|
|
784206b39b | ||
|
|
7c8264e221 | ||
|
|
db7195aa30 | ||
|
|
eb7bbc1612 | ||
|
|
ee3792181d | ||
|
|
9804965a20 | ||
|
|
b84842df3d | ||
|
|
fc170d3033 | ||
|
|
8fa4ec7ad8 | ||
|
|
480825ddd7 | ||
|
|
260e328cc1 | ||
|
|
8873428b4b | ||
|
|
ab43c20b8f | ||
|
|
88d9d4f4a3 | ||
|
|
d4840f85c0 | ||
|
|
6f9ddeaed0 | ||
|
|
af5e73c8cb | ||
|
|
333b6e60e1 | ||
|
|
1b61337b75 | ||
|
|
77991896b4 | ||
|
|
60a670ce29 | ||
|
|
c1c69ed22b | ||
|
|
d71c74c6fb | ||
|
|
590aa8b43f | ||
|
|
607e0166f6 | ||
|
|
130c83ee92 | ||
|
|
fd5e418abf | ||
|
|
262d160314 | ||
|
|
9146827590 | ||
|
|
062b108259 | ||
|
|
ba796b6be1 | ||
|
|
3d763235e1 | ||
|
|
c30c6d9f10 | ||
|
|
311716ed18 | ||
|
|
19bb1b4aa4 | ||
|
|
b8749e36b9 | ||
|
|
00b6639155 | ||
|
|
71d7daaef3 | ||
|
|
8654c5d471 | ||
|
|
02124b3d38 | ||
|
|
340dcfb70d | ||
|
|
a37b92223a | ||
|
|
7d2b8cb4fc | ||
|
|
8d7a134cb4 | ||
|
|
4b849d7201 | ||
|
|
e03e185d30 | ||
|
|
7a02df5588 | ||
|
|
19494685ba | ||
|
|
1e26943c3e | ||
|
|
83fa850142 | ||
|
|
968a116d14 | ||
|
|
fb55b494d7 | ||
|
|
59b6a83d7d | ||
|
|
aabc4f0d7b | ||
|
|
391f686173 | ||
|
|
8e6f6d46ec | ||
|
|
2ba7a55439 | ||
|
|
e07df29ab9 | ||
|
|
abf24fe60f | ||
|
|
fad5f5b81f | ||
|
|
6961f49a0c | ||
|
|
6911f8652a | ||
|
|
6658cec6a0 | ||
|
|
14011b9d84 | ||
|
|
bd2d0b6790 | ||
|
|
d36f58230a | ||
|
|
018f950ca3 | ||
|
|
db8db9fae9 | ||
|
|
79ce8d6563 | ||
|
|
13eaa9a35a | ||
|
|
39f0d76b4b | ||
|
|
0a5832ec75 | ||
|
|
6e147b3ed2 | ||
|
|
c162f79daa | ||
|
|
87585be687 | ||
|
|
ea08d6413c | ||
|
|
879905edf6 | ||
|
|
6fd80a5582 | ||
|
|
0dc7333563 | ||
|
|
f61c3168d2 | ||
|
|
9cadd74a96 | ||
|
|
729fa2352b | ||
|
|
b673aaf9f0 | ||
|
|
3132cc6005 | ||
|
|
ac994d3077 | ||
|
|
02d4f7f2da | ||
|
|
d99569f005 | ||
|
|
ec5166249a | ||
|
|
dadd12adb3 | ||
|
|
88b4fb8c2a | ||
|
|
afecae3786 | ||
|
|
d18598bc33 | ||
|
|
794fc05ada | ||
|
|
5daeb7f876 | ||
|
|
53e71c545e | ||
|
|
959a55e36c | ||
|
|
64572b0024 | ||
|
|
9a0c1caa43 | ||
|
|
eed6723147 | ||
|
|
97fabf51b8 | ||
|
|
5e5e2b8aee | ||
|
|
e01071426f | ||
|
|
eed1bfbe50 | ||
|
|
0c3970a266 | ||
|
|
267cfb621e | ||
|
|
0e90febab2 | ||
|
|
31d947837f | ||
|
|
017b11fbba | ||
|
|
3c492062a9 | ||
|
|
b26b49d0ca | ||
|
|
ed08123550 | ||
|
|
add2db5b7a | ||
|
|
f272d7121a | ||
|
|
577556678c | ||
|
|
e146922367 | ||
|
|
6f1548b7f8 | ||
|
|
9e6fe47b44 | ||
|
|
60cfea1126 | ||
|
|
80a4a094af | ||
|
|
70e1560cb3 | ||
|
|
725033659a | ||
|
|
059111fb57 | ||
|
|
d4a5eadf13 | ||
|
|
79cf487ac5 | ||
|
|
52ecbab859 | ||
|
|
adfc79bf92 | ||
|
|
2447bab924 | ||
|
|
1057ca78a6 | ||
|
|
7e7f98fd92 | ||
|
|
64552ce2de | ||
|
|
7506256f42 | ||
|
|
db75230521 | ||
|
|
f8955d5607 | ||
|
|
0bad217b93 | ||
|
|
4da400a136 | ||
|
|
24740bd341 | ||
|
|
3b6a15de84 | ||
|
|
ac1f525a6c | ||
|
|
e3999bdb0c | ||
|
|
ad3d5a30ec | ||
|
|
e4b5847725 | ||
|
|
1a91a245a3 | ||
|
|
229f62d071 | ||
|
|
b96fe16770 | ||
|
|
97750cb5e2 | ||
|
|
e1a2bd11a9 | ||
|
|
229b408252 | ||
|
|
ae929438a5 | ||
|
|
5daaf84e05 | ||
|
|
19b09515a1 | ||
|
|
9ce6078c8b | ||
|
|
51f588f4b1 | ||
|
|
5ee6605703 | ||
|
|
7ef97cfd81 | ||
|
|
f4288f0bd4 | ||
|
|
4a701cb993 | ||
|
|
00dfb07b15 | ||
|
|
5fffa8e9db | ||
|
|
54d187a0ad | ||
|
|
192ce468b7 | ||
|
|
75c0cadb50 | ||
|
|
5d578d4b3b | ||
|
|
325a8889ab | ||
|
|
9cdd78e68c | ||
|
|
3a6770a1ae | ||
|
|
8073924056 | ||
|
|
7b53e1c54b | ||
|
|
c4c0516820 | ||
|
|
8d36f8850e | ||
|
|
abe5f43f3d | ||
|
|
c8d8a8d0b5 | ||
|
|
f60e88573a | ||
|
|
4216671ea2 | ||
|
|
ee3ea7a970 | ||
|
|
2b644dbb01 | ||
|
|
63878e7ffd | ||
|
|
007cd6cff1 | ||
|
|
4375215baa | ||
|
|
8cc5e9db13 | ||
|
|
5685f831a7 | ||
|
|
0cb3d12d94 | ||
|
|
0e38c6751b | ||
|
|
70ad1fb3d8 | ||
|
|
44f27d91a0 | ||
|
|
1bb559c285 | ||
|
|
7a005ef126 | ||
|
|
030c2a740f | ||
|
|
5dcde67ae9 | ||
|
|
ee06fa85f1 | ||
|
|
5b9352a946 | ||
|
|
b7927d8d75 | ||
|
|
c144f30606 | ||
|
|
d2dba3a0db | ||
|
|
2c991583ff | ||
|
|
2e14dec12d | ||
|
|
8826f0ff3c | ||
|
|
9129f7fb33 | ||
|
|
c0ed54406f | ||
|
|
18be257e10 | ||
|
|
615d549494 | ||
|
|
0ce39e7f52 | ||
|
|
3c68cbc955 | ||
|
|
300430e2d5 | ||
|
|
166a07732a | ||
|
|
510b517270 | ||
|
|
dea385384a | ||
|
|
7a1c9101b2 | ||
|
|
2be523cf77 | ||
|
|
c01e334487 | ||
|
|
a2418d1373 | ||
|
|
a697248b26 | ||
|
|
6058939c00 | ||
|
|
318de530e3 | ||
|
|
9e04b7796a | ||
|
|
e8099c4db5 | ||
|
|
bf808811cc | ||
|
|
f0293de1b9 | ||
|
|
810dcb90ce | ||
|
|
a2f2b8fabc | ||
|
|
cbc5f47786 | ||
|
|
3e3886ced7 | ||
|
|
9ce39fd2ba | ||
|
|
5b08cdedf0 | ||
|
|
67e4d40c49 | ||
|
|
537a733157 | ||
|
|
5136e7726d | ||
|
|
6e236ba74d | ||
|
|
374b665089 | ||
|
|
ffecc9a0c7 | ||
|
|
0b997418d3 | ||
|
|
eaad8a4cf5 | ||
|
|
396b4595f4 | ||
|
|
0752aae9ef | ||
|
|
ad2221a677 | ||
|
|
1713d693b1 | ||
|
|
f4f056449f | ||
|
|
6a70e3e45b | ||
|
|
a04cdee33f | ||
|
|
157769eeb4 | ||
|
|
667b66b926 | ||
|
|
c0f7b344d9 | ||
|
|
060c59e97d | ||
|
|
b3461b7134 | ||
|
|
001c450abb | ||
|
|
ceaa5763d4 | ||
|
|
b45fd58944 | ||
|
|
b3149def82 | ||
|
|
378d498402 | ||
|
|
98f52b32a3 | ||
|
|
0ab32a6f84 | ||
|
|
71cc22325d | ||
|
|
e1b2991aa6 | ||
|
|
033bcf80d0 | ||
|
|
103118d558 | ||
|
|
f91b5fa004 | ||
|
|
7179bf7b67 | ||
|
|
a3e6239e6e | ||
|
|
1fa12e56c6 | ||
|
|
4ff834de76 | ||
|
|
6db38ad769 | ||
|
|
293b7b09a9 | ||
|
|
d5945f9ee7 | ||
|
|
d1f5a6fc31 | ||
|
|
e7b9f5e4c3 | ||
|
|
7870749077 | ||
|
|
c5352f443a | ||
|
|
fd8b7aa0f2 | ||
|
|
458ea266ec | ||
|
|
9748eaba25 | ||
|
|
887a3740b2 | ||
|
|
2e7cfe9cd7 | ||
|
|
6dbe156a02 | ||
|
|
2a9ef6d48e | ||
|
|
6717ddbd0b | ||
|
|
47c1aab064 | ||
|
|
eda41658b9 | ||
|
|
7f79363944 | ||
|
|
25967f2a09 | ||
|
|
4d3963ad67 | ||
|
|
f78c5257dc | ||
|
|
ccc6234ac8 | ||
|
|
c81b0200eb | ||
|
|
f039d37c8a | ||
|
|
237975bfef | ||
|
|
015bc7c8c3 | ||
|
|
3da2a00ee9 | ||
|
|
16eca5bebf | ||
|
|
a4483cf255 | ||
|
|
0bf020a1b4 | ||
|
|
d43927a167 | ||
|
|
a62566e8fb | ||
|
|
23a1730106 | ||
|
|
f8ac5e0af3 | ||
|
|
eb48a153d9 | ||
|
|
1a78a6f786 | ||
|
|
f8f60c62fe | ||
|
|
453e507b89 | ||
|
|
022c32f9d5 | ||
|
|
af1a0c3520 | ||
|
|
0a6d9dfcf4 | ||
|
|
5bdedacab1 | ||
|
|
d7a1be2f3c | ||
|
|
d6dcbb63d4 | ||
|
|
b2770f67a1 | ||
|
|
aa2691b153 | ||
|
|
e9a9cbbd07 | ||
|
|
17e2222802 | ||
|
|
58b2970b19 | ||
|
|
fd69961185 | ||
|
|
e5cd813958 | ||
|
|
5b12423d98 | ||
|
|
4141f633a3 | ||
|
|
67854b3ebd | ||
|
|
0c21dbc7c8 | ||
|
|
5925aa50d8 | ||
|
|
852b016111 | ||
|
|
ba77a67ba7 | ||
|
|
c14a9a55d7 | ||
|
|
5203db6c9c | ||
|
|
30eb8dda1d | ||
|
|
69d40b5fe8 | ||
|
|
706e87659e | ||
|
|
5c785e49af | ||
|
|
0974085c6f | ||
|
|
e67ced8848 | ||
|
|
c2dea6b881 | ||
|
|
ee62b2cf31 | ||
|
|
252e06bee6 | ||
|
|
1f0ce88e08 | ||
|
|
7e8fb388a3 | ||
|
|
a3de360878 | ||
|
|
6ee556e386 | ||
|
|
623ed89100 | ||
|
|
8c114cae95 | ||
|
|
93dd58ec59 | ||
|
|
f0bc93ad8e | ||
|
|
27e8aad479 | ||
|
|
6298578db9 | ||
|
|
f079e5dadd | ||
|
|
e6fdead89f | ||
|
|
cfa6e3982c | ||
|
|
e4bc4d9071 | ||
|
|
d372e10f1a | ||
|
|
d1c93754db | ||
|
|
3d54a1abf3 | ||
|
|
06eef5779d | ||
|
|
b4d78376fb | ||
|
|
87a59a6de3 | ||
|
|
1e7741e341 | ||
|
|
ae5e484506 | ||
|
|
c9dd219565 | ||
|
|
55eb662dc9 | ||
|
|
2d202088c7 | ||
|
|
9f7c9180d9 | ||
|
|
fdc5e0a92d | ||
|
|
7f6fef1373 | ||
|
|
e4973f572f | ||
|
|
eb768f2076 | ||
|
|
df9723a011 | ||
|
|
51ad3fdb0b | ||
|
|
8e553e7a93 | ||
|
|
4c70e92293 | ||
|
|
3e983b121e | ||
|
|
9f3c962ea4 | ||
|
|
2a1a3fb1b5 | ||
|
|
bb28cc5c65 | ||
|
|
23b6a38e18 | ||
|
|
715cd9daf5 | ||
|
|
cbfdaec394 | ||
|
|
bb527ac981 | ||
|
|
961c26894d | ||
|
|
693bdebb30 | ||
|
|
353e24f1c5 | ||
|
|
59d1773057 | ||
|
|
93a1368b60 | ||
|
|
3bc0fe5a70 | ||
|
|
973c11a048 | ||
|
|
5094386516 | ||
|
|
64477c6573 | ||
|
|
f052c707e7 | ||
|
|
de0e1d3e10 | ||
|
|
e273da1b5b | ||
|
|
761f6963ab | ||
|
|
e5aff1316a | ||
|
|
8ee0fbe6a3 | ||
|
|
4c6b8b4173 | ||
|
|
6940a75591 | ||
|
|
7ba939b008 | ||
|
|
6918a36bee | ||
|
|
ba132fc411 | ||
|
|
b4a940a8d6 | ||
|
|
5e0dd5c63b | ||
|
|
f19114e530 | ||
|
|
0db40ecf0f | ||
|
|
8289067a4e | ||
|
|
9c5e3d094b | ||
|
|
cb12b19c1e | ||
|
|
5d0b8588f9 | ||
|
|
0c05e1036d | ||
|
|
266087c5f1 | ||
|
|
147b94d936 | ||
|
|
872511ebb9 | ||
|
|
ce8ed5bfeb | ||
|
|
d81838dfc4 | ||
|
|
79ec3594fe | ||
|
|
cdb246697e | ||
|
|
6476e688e5 | ||
|
|
5d1ec6a9c8 | ||
|
|
be8a7e981a | ||
|
|
d59731a678 | ||
|
|
0254510d53 | ||
|
|
9327955891 | ||
|
|
4daf08e20f | ||
|
|
6fc31ddedb | ||
|
|
fac8c9ee4e | ||
|
|
d05f7e2084 | ||
|
|
0a0a6bae0f | ||
|
|
560c063db4 | ||
|
|
54ac2d33e2 | ||
|
|
fb3be8a6a0 | ||
|
|
5a33953b78 | ||
|
|
ba7a8fc796 | ||
|
|
0bdee8219a | ||
|
|
f82951f412 | ||
|
|
35e188b851 | ||
|
|
8990e4666a | ||
|
|
ceff618e5d | ||
|
|
cf3aab9d38 | ||
|
|
a74c70e8a1 | ||
|
|
46817c7664 | ||
|
|
c0c9cab14c | ||
|
|
478a034740 | ||
|
|
01693cb155 | ||
|
|
7a44c9e650 | ||
|
|
70a6a275f4 | ||
|
|
e627ebc127 | ||
|
|
56b81b78c3 | ||
|
|
c304485079 | ||
|
|
df51797c29 | ||
|
|
754339214c | ||
|
|
057ecc3ed9 | ||
|
|
c14f79ebf7 | ||
|
|
71fdff17de | ||
|
|
04b4001277 | ||
|
|
fbfb8a3b41 | ||
|
|
1bee088fe6 | ||
|
|
d2e4d6ecf0 | ||
|
|
5f03f90582 | ||
|
|
e54d46aae1 | ||
|
|
54a3b9900e | ||
|
|
1dc16e900a | ||
|
|
08a7e666b2 | ||
|
|
678fd28f1d | ||
|
|
ff89c3b274 | ||
|
|
cff7aebe55 | ||
|
|
ed3a3d0876 | ||
|
|
425cd9eb26 | ||
|
|
ebe84dd8a4 | ||
|
|
217c4144b5 | ||
|
|
aaeed64621 | ||
|
|
9133a56d2a | ||
|
|
12bd7dc44f | ||
|
|
32fa86adaa | ||
|
|
1811cff1f9 | ||
|
|
2e6a5c0525 | ||
|
|
b68b214d08 | ||
|
|
153d12d93f | ||
|
|
1a62a773ae | ||
|
|
5f0cccb81e | ||
|
|
5c7f4b3df7 | ||
|
|
1248b76b41 | ||
|
|
c4176af1ea | ||
|
|
799c306138 | ||
|
|
c65e3fdf62 | ||
|
|
08712ef4f8 | ||
|
|
d0119f5bf1 | ||
|
|
f3c626c800 | ||
|
|
f1891478d5 | ||
|
|
7a2e6e640d | ||
|
|
890d418639 | ||
|
|
f38c934a6d | ||
|
|
f3540aac0f | ||
|
|
889ce984a9 | ||
|
|
89a437149c | ||
|
|
43f65651ac | ||
|
|
d74d69c1c8 | ||
|
|
fefc85683c | ||
|
|
6f97158c0e | ||
|
|
1320101112 | ||
|
|
031a267394 | ||
|
|
9119030959 | ||
|
|
9b10a8028d | ||
|
|
4be38fcb0e | ||
|
|
9090f4485a | ||
|
|
5749d66ac9 | ||
|
|
4bb4b4eb1d | ||
|
|
103d062f74 | ||
|
|
9893480089 | ||
|
|
5dbd240017 | ||
|
|
e0dce8fd01 | ||
|
|
492139942c | ||
|
|
8ebff1a908 | ||
|
|
44def1f6bc | ||
|
|
8934b9ab5c | ||
|
|
130a6b67bd | ||
|
|
2df32cd9a7 | ||
|
|
d413d58b47 | ||
|
|
9e632aa0bd | ||
|
|
964020ee12 | ||
|
|
672e14d6ea | ||
|
|
54baf04a86 | ||
|
|
64c83460b9 | ||
|
|
74ec3fa7d4 | ||
|
|
0821d7a803 | ||
|
|
55beb3978c | ||
|
|
9b044815de | ||
|
|
0668fea3b7 | ||
|
|
a6677b2e45 | ||
|
|
6f544f56d8 | ||
|
|
5556be9cab | ||
|
|
465c4afe8d | ||
|
|
78dd1e1d81 | ||
|
|
eebfc78ad3 | ||
|
|
4783685fdb | ||
|
|
bfd0363fad | ||
|
|
e9323ba2ec | ||
|
|
dac774c9d2 | ||
|
|
664ee2b433 | ||
|
|
85f283fe2b | ||
|
|
7bd7d66afc | ||
|
|
81b16aa900 | ||
|
|
e07fb34ace | ||
|
|
9303746d80 | ||
|
|
e3e8e67cb4 | ||
|
|
6490027e57 | ||
|
|
6cbe4f2ea7 | ||
|
|
960365a063 | ||
|
|
839d614c9c | ||
|
|
ae13e557a7 | ||
|
|
a245383f8c | ||
|
|
78b8d3e41d | ||
|
|
fcfaa04cc6 | ||
|
|
fe866b2d66 | ||
|
|
e7bbc4ac0c | ||
|
|
3b746c91df | ||
|
|
06f0129b59 | ||
|
|
641e75b8a8 | ||
|
|
35f9fda457 | ||
|
|
de29d69efe | ||
|
|
f587af1005 | ||
|
|
4ed6580e1d | ||
|
|
2f6213c944 | ||
|
|
f365b76cfc | ||
|
|
55921b262f | ||
|
|
3039c97989 | ||
|
|
a1af4f19c5 | ||
|
|
131e4087fd | ||
|
|
ee6471351d | ||
|
|
d93266fee2 | ||
|
|
dbbf39db6d | ||
|
|
d40ea44ae6 | ||
|
|
f0d4847946 | ||
|
|
98a9c766ef | ||
|
|
91393b650b | ||
|
|
49a4b119e1 | ||
|
|
e69fab822b | ||
|
|
45c58cc766 | ||
|
|
ca48f000bd | ||
|
|
21ba1e3958 | ||
|
|
062f3256a7 | ||
|
|
186f565b99 | ||
|
|
5c2b4398d9 | ||
|
|
a9fb61bbd6 | ||
|
|
a51e25dbde | ||
|
|
0a717ae82e | ||
|
|
f9e6751279 | ||
|
|
0306f8ec65 | ||
|
|
66f2e549ce | ||
|
|
9ab413643a | ||
|
|
3a4eeb01b0 | ||
|
|
57a8dcc155 | ||
|
|
2f21476b2a | ||
|
|
9f9e2f3b24 | ||
|
|
f886dfb60c | ||
|
|
c22b014056 | ||
|
|
d899b6a7e1 | ||
|
|
450dde3739 | ||
|
|
2ac40903f3 | ||
|
|
f328b39f57 | ||
|
|
06cc4b07ab | ||
|
|
1c0b68f0e3 | ||
|
|
efcce6a826 | ||
|
|
fa8177d0e5 | ||
|
|
d51cd8df89 | ||
|
|
5530d611b9 | ||
|
|
e73636bef3 | ||
|
|
74ff994281 | ||
|
|
7b28d353ee | ||
|
|
e2a8ca143a | ||
|
|
4e81f98927 | ||
|
|
ab4c994266 | ||
|
|
aea6a434f1 | ||
|
|
6c95d5a2de | ||
|
|
fcaabb2c1e | ||
|
|
66b2722cad | ||
|
|
2e95666939 | ||
|
|
cdfcd99695 | ||
|
|
e71d21fc27 | ||
|
|
e95ebfd6a0 | ||
|
|
e5a875856a | ||
|
|
930218c067 | ||
|
|
ff1362e462 | ||
|
|
01457bbe79 | ||
|
|
8c7da0bdb6 | ||
|
|
6f634c3f13 | ||
|
|
a7f5303eaf | ||
|
|
7159e4fbe2 | ||
|
|
36b243e9d2 | ||
|
|
bd70e00f08 | ||
|
|
0ca96130c8 | ||
|
|
09aa56b63d | ||
|
|
60cd6a455a | ||
|
|
4752ce5250 | ||
|
|
832569a79c | ||
|
|
ecd8cebbef | ||
|
|
3c37efa650 | ||
|
|
21b6ce204d | ||
|
|
337d2970a0 | ||
|
|
3e5bd25c6e | ||
|
|
7f0f68b707 | ||
|
|
ea85482736 | ||
|
|
01160a5361 | ||
|
|
f4b5a02197 | ||
|
|
f724f10a35 | ||
|
|
0c221ba3d7 | ||
|
|
1907aaf32f | ||
|
|
d6f26b3133 | ||
|
|
c97a55e65f | ||
|
|
4a6e38f7da | ||
|
|
845ef42338 | ||
|
|
fde8de8b9e | ||
|
|
88123261ac | ||
|
|
c04b76528b | ||
|
|
04a13c2ebb | ||
|
|
6b3cc62cbe | ||
|
|
8627be07e7 | ||
|
|
5509a5bca3 | ||
|
|
dd52949a2a | ||
|
|
a310ae6566 | ||
|
|
1f8643c538 | ||
|
|
6ea313970d | ||
|
|
13e6b15308 | ||
|
|
0efc2277dd | ||
|
|
381a2740ee | ||
|
|
9e6aecd707 | ||
|
|
f5510ef1b5 | ||
|
|
8b3b16bce4 | ||
|
|
024674eef3 | ||
|
|
c8e6224946 | ||
|
|
bf11300ab3 | ||
|
|
7361a35c94 | ||
|
|
02b2cebb85 | ||
|
|
9b6ae46e92 | ||
|
|
e5e5a42736 | ||
|
|
308d8afe4e | ||
|
|
b7d88b4c0f | ||
|
|
719ca63ec1 | ||
|
|
2100cd77ce | ||
|
|
58b13ae69a | ||
|
|
2cfb416fd0 | ||
|
|
993c9b31bd | ||
|
|
b5d6f0ad36 | ||
|
|
03c05a82e4 | ||
|
|
cc887d25e4 | ||
|
|
80e2d0651b | ||
|
|
50f07f9ef5 | ||
|
|
c517bdd2e1 | ||
|
|
ca3e549dd4 | ||
|
|
51f2ca72b9 | ||
|
|
771950f1de | ||
|
|
c969e9c014 | ||
|
|
658867cb46 | ||
|
|
344692f9f6 | ||
|
|
fd083078fc | ||
|
|
9bacae4b2e | ||
|
|
8f2ad38503 | ||
|
|
76baa6c5f8 | ||
|
|
84c822a0ca | ||
|
|
ddd938fd64 | ||
|
|
e91b30f4c7 | ||
|
|
31fb1801d2 | ||
|
|
117d0f2e38 | ||
|
|
79bb79debc | ||
|
|
11cd022965 | ||
|
|
d7b28a3586 | ||
|
|
dc14245105 | ||
|
|
e772dfaa12 | ||
|
|
4d29cae936 | ||
|
|
71ed0ffe13 | ||
|
|
56d0981cee | ||
|
|
ad43d10ce4 | ||
|
|
fb6618181a | ||
|
|
43a9bc0d7b | ||
|
|
f835b14902 | ||
|
|
c1c591d1eb | ||
|
|
4348549f2d | ||
|
|
e48df87e06 | ||
|
|
e718feb1f7 | ||
|
|
3b6f3f13d4 | ||
|
|
13fabaf6aa | ||
|
|
9cfcdb1c23 | ||
|
|
2800d0dcd3 | ||
|
|
3e2055255e | ||
|
|
64a8857884 | ||
|
|
808b291c2c | ||
|
|
cae3e7136e | ||
|
|
c069a187f8 | ||
|
|
91fa932168 | ||
|
|
188158a29b | ||
|
|
a3d5cb5851 | ||
|
|
0788582528 | ||
|
|
da81abc12e | ||
|
|
81b92111ca | ||
|
|
a809e72704 | ||
|
|
cb0e4b6e87 | ||
|
|
16df8d803c | ||
|
|
ce7ac78b42 | ||
|
|
c21e0755b3 | ||
|
|
e1dc0a576d | ||
|
|
a998db0570 | ||
|
|
c79ec45adb | ||
|
|
72481e8453 | ||
|
|
3753f7d138 | ||
|
|
4d92606562 | ||
|
|
2d0b6bcfcc | ||
|
|
57fb29b600 | ||
|
|
340647cb22 | ||
|
|
a06369dd7b | ||
|
|
95fe103718 | ||
|
|
036297ef36 | ||
|
|
129c055fee | ||
|
|
c688656607 | ||
|
|
b49e8deb3e | ||
|
|
17264e7872 | ||
|
|
022c0c3a89 | ||
|
|
b8539122ed | ||
|
|
4ca906a518 | ||
|
|
7bf67869b0 | ||
|
|
a032164a99 | ||
|
|
f588e7783e | ||
|
|
f8ca6c019f | ||
|
|
f88806fc3c | ||
|
|
ee0880fab7 | ||
|
|
261c674832 | ||
|
|
e95bc82b8e | ||
|
|
6d0cc49ecd | ||
|
|
e108833db2 | ||
|
|
151fdb9bad | ||
|
|
59ca8665fe | ||
|
|
71c101b82e | ||
|
|
860030824e | ||
|
|
46c4bf6e94 | ||
|
|
53ed6e54b5 | ||
|
|
3197c356e9 | ||
|
|
cdad083d7f | ||
|
|
2e076ef3f4 | ||
|
|
46e3a27626 | ||
|
|
1247867187 | ||
|
|
b1f863cc4d | ||
|
|
823b41b7ec | ||
|
|
16a2b3b19b | ||
|
|
0a2e899363 | ||
|
|
65d431c7a0 | ||
|
|
6b617955b7 | ||
|
|
10cf0470cb | ||
|
|
f91ca796de | ||
|
|
7f1fb41d48 | ||
|
|
ceb9c70fba | ||
|
|
5c9d11861e | ||
|
|
706e6c01aa | ||
|
|
64cecb4931 | ||
|
|
31e0dfef76 | ||
|
|
dc85f93423 | ||
|
|
4d5d407655 | ||
|
|
d2424ce540 | ||
|
|
4d5de8176a | ||
|
|
c451d00eb4 | ||
|
|
a8180bddad | ||
|
|
e988364766 | ||
|
|
396697ead2 | ||
|
|
2993bd8c05 | ||
|
|
fc50bb6e57 | ||
|
|
a064066e42 | ||
|
|
a6783e537b | ||
|
|
4b1dad96cd | ||
|
|
6758b51617 | ||
|
|
54fdd2da57 | ||
|
|
3132a4965e | ||
|
|
7ee3f10a81 | ||
|
|
accd65a26a | ||
|
|
e0ada7fc48 | ||
|
|
ad1401854c | ||
|
|
e18189caae | ||
|
|
d601d35a21 | ||
|
|
66fd402f00 | ||
|
|
835a04358c | ||
|
|
af9b4e448d | ||
|
|
39e8ba42ff | ||
|
|
1e52c956a8 | ||
|
|
d261ed074e | ||
|
|
47f9be32ce | ||
|
|
a17390c157 | ||
|
|
5ca5e0d00f | ||
|
|
b0085f2741 | ||
|
|
b983095e13 | ||
|
|
dd6e018e46 | ||
|
|
6f8394a086 | ||
|
|
a0739a18e8 | ||
|
|
27d33f015f | ||
|
|
ffb7ad1417 | ||
|
|
97e6bab6e3 | ||
|
|
b311b7620c | ||
|
|
25ec8fb2ab | ||
|
|
75100cd182 | ||
|
|
50d48ee3ec | ||
|
|
0e330f983b | ||
|
|
9523a929af | ||
|
|
3fcec069ed | ||
|
|
7c2e72aebb | ||
|
|
8d6fbddd67 | ||
|
|
66b5ac8ff1 | ||
|
|
e034fc12eb | ||
|
|
a8317ccacd | ||
|
|
74376586a8 | ||
|
|
ea49296cfe | ||
|
|
992f817fef | ||
|
|
36528fceab | ||
|
|
8323b8af4d | ||
|
|
0e496181a1 | ||
|
|
f47fc7a484 | ||
|
|
0585fb4c80 | ||
|
|
bdfcf6591e | ||
|
|
cad54f0f07 | ||
|
|
a52ab1685e | ||
|
|
3182816965 | ||
|
|
a8da4b0162 | ||
|
|
ab7f6e8300 | ||
|
|
943bf477a0 | ||
|
|
168f4c0056 | ||
|
|
35fef11d2a | ||
|
|
425a8a6412 | ||
|
|
b64495f7a9 | ||
|
|
014861a7f2 | ||
|
|
17edaa0e1f | ||
|
|
bbd0325c10 | ||
|
|
316c276545 | ||
|
|
32ea0213f7 | ||
|
|
86c2f0716e | ||
|
|
68b8d7d7f2 | ||
|
|
43a22f84d9 | ||
|
|
b3a0368b95 | ||
|
|
cd79330c4c | ||
|
|
245e09c723 | ||
|
|
495728593f | ||
|
|
e9c4b0dc01 | ||
|
|
9942bf2124 | ||
|
|
0a8ba068c4 | ||
|
|
a2bb70aaec | ||
|
|
5ed25d8bcb | ||
|
|
cafc068c39 | ||
|
|
b8dde0767b | ||
|
|
f8e5e3b3c0 | ||
|
|
edc19e99a9 | ||
|
|
2b0b3827ab | ||
|
|
8afe5a0087 | ||
|
|
0ecc53f3b6 | ||
|
|
b3f2827961 | ||
|
|
9c96a4d81b | ||
|
|
4f5e363452 | ||
|
|
92572ff919 | ||
|
|
39ddaf49be | ||
|
|
627dc2d4a0 | ||
|
|
42739bbb61 | ||
|
|
261c9eefe1 | ||
|
|
e21e4d2b16 | ||
|
|
8b6b8f0c53 | ||
|
|
5a9feb4411 | ||
|
|
f16128da09 | ||
|
|
48f9997ea9 | ||
|
|
f0e87094d6 | ||
|
|
e0882e9e04 | ||
|
|
d37885ea88 | ||
|
|
d13e5e7e3f | ||
|
|
aa9a024ee1 | ||
|
|
5bbf6d2ae9 | ||
|
|
30299a9f04 | ||
|
|
23d7fe936d | ||
|
|
b50c052222 | ||
|
|
ef9e9809e2 | ||
|
|
f139c3268b | ||
|
|
e869bfd991 | ||
|
|
d5309fcaf5 | ||
|
|
c4fc49553c | ||
|
|
75704899a7 | ||
|
|
70aa3b1ff1 | ||
|
|
6154a8169b | ||
|
|
cf0173e079 | ||
|
|
cc62fc6222 | ||
|
|
6cdadf1b37 | ||
|
|
dc90a66a96 | ||
|
|
4b629d20cf | ||
|
|
437bd13fd0 | ||
|
|
7ce1dc9069 | ||
|
|
5b4e517d9d | ||
|
|
6c3ed5e533 | ||
|
|
ec2762c31a | ||
|
|
29f3158b61 | ||
|
|
d83b7276fd | ||
|
|
1336010bb2 | ||
|
|
5cb3df6db1 | ||
|
|
4be0c1c0eb | ||
|
|
33e5e74228 | ||
|
|
2b06989372 | ||
|
|
cd4da2aca3 | ||
|
|
b335951862 | ||
|
|
e95e084956 | ||
|
|
b7d569de98 | ||
|
|
8320cca5cd | ||
|
|
bad5fec0f1 | ||
|
|
200a3b65ee | ||
|
|
d9fc2a93cc | ||
|
|
94f3533c29 | ||
|
|
77f7ad309e | ||
|
|
cefd270837 | ||
|
|
037e68a376 | ||
|
|
4da0785494 | ||
|
|
53171bafec | ||
|
|
16fe77e472 | ||
|
|
9f147e5b6d | ||
|
|
c43106a744 | ||
|
|
a73eb0377d | ||
|
|
e05514b455 | ||
|
|
901c7be9a8 | ||
|
|
e503cc3003 | ||
|
|
2ff777acb7 | ||
|
|
034d73a4eb | ||
|
|
aec55e50f9 | ||
|
|
1f356a67b2 | ||
|
|
0f60cd480d | ||
|
|
316b8c66db | ||
|
|
744d4ebbaf | ||
|
|
005deaccc8 | ||
|
|
b6f78ce1af | ||
|
|
932b504d82 | ||
|
|
1cca46cf7b | ||
|
|
a8f6d2adf0 | ||
|
|
203de18053 | ||
|
|
ee12b4164b | ||
|
|
b38459439d | ||
|
|
a2eddb3580 | ||
|
|
18adbc6bf0 | ||
|
|
c1ccef25a3 | ||
|
|
b1bea73efb | ||
|
|
4175d29056 | ||
|
|
46c78c33cf | ||
|
|
3fe5a41433 | ||
|
|
f2b1f95521 | ||
|
|
94f81caf28 | ||
|
|
669a4a299c | ||
|
|
afff55045f | ||
|
|
4fbcd2ba5d | ||
|
|
016295dfee | ||
|
|
6d2bc2929a | ||
|
|
23a5e566f2 | ||
|
|
4b387961a4 | ||
|
|
1765a8a7f9 | ||
|
|
837a5b52a7 | ||
|
|
180c4e855e | ||
|
|
01d2af9961 | ||
|
|
7f3cc6269b | ||
|
|
dbc0d54491 | ||
|
|
f843f5ae9d | ||
|
|
962cb290e4 | ||
|
|
7138655dd1 | ||
|
|
93acfc2e38 | ||
|
|
91878c4591 | ||
|
|
05ec1216e0 | ||
|
|
af0e6481f8 | ||
|
|
11a745c4d9 | ||
|
|
2393da4425 | ||
|
|
95ab08e02d | ||
|
|
95fdedf12e | ||
|
|
c73dd776db | ||
|
|
891e5fea3f | ||
|
|
bb2f6f23b5 | ||
|
|
cd9b03bdb9 | ||
|
|
a619269502 | ||
|
|
153b5c028b | ||
|
|
9a33bf2210 | ||
|
|
34b4cd2231 | ||
|
|
6045cbbc62 | ||
|
|
9bbf4044e0 | ||
|
|
fcf8a64d91 | ||
|
|
2c6ab18e41 | ||
|
|
2fea294b13 | ||
|
|
b47ecab1a9 | ||
|
|
b86c294250 | ||
|
|
3eacfb91aa | ||
|
|
94164c2a71 | ||
|
|
d85eb83ea2 | ||
|
|
b2002639db | ||
|
|
347cfe253f | ||
|
|
833e1836e1 | ||
|
|
e4be38b9f7 | ||
|
|
783e7f6939 | ||
|
|
c1c54f4848 | ||
|
|
86be6be2d2 | ||
|
|
35a63e867a | ||
|
|
9c12a417ee | ||
|
|
32a019c0d6 | ||
|
|
b7e4a3c99e | ||
|
|
039062d071 | ||
|
|
83ae3e8371 | ||
|
|
852de8bdfc | ||
|
|
b8acb860aa | ||
|
|
e6849b85d1 | ||
|
|
8fa9657ba6 | ||
|
|
04b038960b | ||
|
|
52507a5a95 | ||
|
|
d8505ba2ab | ||
|
|
fa26c0997e | ||
|
|
5a0aadd2ae | ||
|
|
025549ebf8 | ||
|
|
e85a583f0a | ||
|
|
f7244ddb7a | ||
|
|
d983a519e3 | ||
|
|
ae01070b8f | ||
|
|
0ffb40f4c1 | ||
|
|
8bcffb4ad5 | ||
|
|
b2118602d9 | ||
|
|
9303f3b47b | ||
|
|
e5c43cfc4b | ||
|
|
45fc08e221 | ||
|
|
67e8511106 | ||
|
|
4f7fd0a62b | ||
|
|
88fe454962 | ||
|
|
26f7a9be0a | ||
|
|
9256926bb7 | ||
|
|
2a83318739 | ||
|
|
d6e2535a5e | ||
|
|
2bffb7e22c | ||
|
|
24a162cf86 | ||
|
|
f3104f3bc4 | ||
|
|
45f1bf6709 | ||
|
|
40b2590815 | ||
|
|
dd9ab46b5c | ||
|
|
c2aeadae33 | ||
|
|
1bd9759ab7 | ||
|
|
dcdbb05168 | ||
|
|
ae117c47e9 | ||
|
|
7f7856f0e4 | ||
|
|
aa7b7c8619 | ||
|
|
ee0cbff245 | ||
|
|
c2c18b25d2 | ||
|
|
816c7c95ed | ||
|
|
cb5d65d11a | ||
|
|
75f3f43ba0 | ||
|
|
9a521355ed | ||
|
|
47bfdf0710 | ||
|
|
e1b49c3fb4 | ||
|
|
374dffc5fa | ||
|
|
4f735a5d11 | ||
|
|
94738d8fc4 | ||
|
|
adb4bfa10b | ||
|
|
48e6bbdc97 | ||
|
|
b54d6fea44 | ||
|
|
4462e6339d | ||
|
|
c1581b69f4 | ||
|
|
14284e0cc7 | ||
|
|
de40e733ec | ||
|
|
9d91b6f780 | ||
|
|
6a8b49f9c4 | ||
|
|
445a8a5647 | ||
|
|
83ce4a538a | ||
|
|
35a19d2007 | ||
|
|
505e12c5ea | ||
|
|
b2bfd7f23a | ||
|
|
cdb96e715d | ||
|
|
b3e5f09e3b | ||
|
|
db542d668a | ||
|
|
a8a79a55a4 | ||
|
|
47f62a87a7 | ||
|
|
44f353861a | ||
|
|
a2ef84a4a0 | ||
|
|
12ac20ec43 | ||
|
|
ecfbc7b9fd | ||
|
|
ba2fe0fb1f | ||
|
|
890a20edba | ||
|
|
e6f48c9403 | ||
|
|
909f0afa69 | ||
|
|
5ed2b99b8c | ||
|
|
7848751fd8 | ||
|
|
e593241d75 | ||
|
|
fcdc7b7aeb | ||
|
|
c3c7878f28 | ||
|
|
85f9ae5a0a | ||
|
|
98a97f34f5 | ||
|
|
98d647a3fe | ||
|
|
9a393b4f74 | ||
|
|
88d74235e1 | ||
|
|
36fa470348 | ||
|
|
33dce10bc3 | ||
|
|
feed0b288f | ||
|
|
1b7dc8a509 | ||
|
|
87cc3cf168 | ||
|
|
eac7b1e9f2 | ||
|
|
bb1a42df91 | ||
|
|
ac5ac3e9f1 | ||
|
|
bed25b317c | ||
|
|
1687e6682a | ||
|
|
22572c8ed1 | ||
|
|
8187a339f0 | ||
|
|
382c3930a2 | ||
|
|
a64a30c088 | ||
|
|
dac76a867f | ||
|
|
b2e86e105d | ||
|
|
b8e57c9b6f | ||
|
|
486a1bc9de | ||
|
|
b1b610f4b5 | ||
|
|
68447a6009 | ||
|
|
a55280b941 | ||
|
|
830462d525 | ||
|
|
ce8b29e9d0 | ||
|
|
6ab15f8eb1 | ||
|
|
96eb68e042 | ||
|
|
bf78bdd6d4 | ||
|
|
d998815847 | ||
|
|
00ba7b78ca | ||
|
|
0b735d94f1 | ||
|
|
301989540f | ||
|
|
e26b95a26f | ||
|
|
049c1ddb48 | ||
|
|
2f1c3075a2 | ||
|
|
b1a5068fd6 | ||
|
|
01fbd5d702 | ||
|
|
5916f92f1a | ||
|
|
5e45268f68 | ||
|
|
b8e28e0c12 | ||
|
|
04f824ea36 | ||
|
|
c216bea031 | ||
|
|
e72ef478dc | ||
|
|
897b4ef2cd | ||
|
|
2404899e28 | ||
|
|
a2dfc2cbdc | ||
|
|
92373b25a9 | ||
|
|
ce1840a9ae | ||
|
|
c4f4bdd789 | ||
|
|
ec5068e85b | ||
|
|
1d9d0ddf27 | ||
|
|
e393be90dd | ||
|
|
e633df06e4 | ||
|
|
0ff5f408d6 | ||
|
|
5eda42ff31 | ||
|
|
84168e22d0 | ||
|
|
b722845aff | ||
|
|
fd54682c02 | ||
|
|
f5e287ffa6 | ||
|
|
fb10a546d6 | ||
|
|
006897f1c0 | ||
|
|
968849e52b | ||
|
|
8bee47dc50 | ||
|
|
08250120d1 | ||
|
|
8892b70785 | ||
|
|
534e4cb591 | ||
|
|
489abdcb0b | ||
|
|
f6b6c2e9a3 | ||
|
|
43c016f024 | ||
|
|
c0e7d9cd8b | ||
|
|
5f687a31f8 | ||
|
|
f2d2478dee | ||
|
|
8a98789be1 | ||
|
|
87a5c8894a | ||
|
|
7e92ed4501 | ||
|
|
a57cdfff1e | ||
|
|
d4ff6d4d7a | ||
|
|
63d99d6a57 | ||
|
|
fce7d34171 | ||
|
|
e7df7f69b3 | ||
|
|
94cc18bd71 | ||
|
|
39024ce2ac | ||
|
|
7ac4f45e7b | ||
|
|
f209eebaf8 | ||
|
|
4889db78c9 | ||
|
|
bff200fede | ||
|
|
af6f783043 | ||
|
|
610adcbefc | ||
|
|
1d3631fa04 | ||
|
|
0630504664 | ||
|
|
6d5b698c39 | ||
|
|
dd9f1abcea | ||
|
|
b4bd34fb96 | ||
|
|
014971262d | ||
|
|
36ed69b07e | ||
|
|
ec4fc17e3a | ||
|
|
78b85fb664 |
15
.devcontainer/Dockerfile
Normal file
15
.devcontainer/Dockerfile
Normal file
@@ -0,0 +1,15 @@
|
|||||||
|
FROM python:3.12-bookworm
|
||||||
|
|
||||||
|
# Install Node.js 20.x
|
||||||
|
RUN curl -fsSL https://deb.nodesource.com/setup_20.x | bash - \
|
||||||
|
&& apt-get install -y nodejs \
|
||||||
|
&& rm -rf /var/lib/apt/lists/*
|
||||||
|
|
||||||
|
# Install global npm packages
|
||||||
|
RUN npm install -g husky vite
|
||||||
|
|
||||||
|
# Create and activate Python virtual environment
|
||||||
|
RUN python -m venv /opt/venv
|
||||||
|
ENV PATH="/opt/venv/bin:$PATH"
|
||||||
|
|
||||||
|
WORKDIR /workspace
|
||||||
49
.devcontainer/devc-welcome.md
Normal file
49
.devcontainer/devc-welcome.md
Normal file
@@ -0,0 +1,49 @@
|
|||||||
|
# Welcome to DocsGPT Devcontainer
|
||||||
|
|
||||||
|
Welcome to the DocsGPT development environment! This guide will help you get started quickly.
|
||||||
|
|
||||||
|
## Starting Services
|
||||||
|
|
||||||
|
To run DocsGPT, you need to start three main services: Flask (backend), Celery (task queue), and Vite (frontend). Here are the commands to start each service within the devcontainer:
|
||||||
|
|
||||||
|
### Vite (Frontend)
|
||||||
|
|
||||||
|
```bash
|
||||||
|
cd frontend
|
||||||
|
npm run dev -- --host
|
||||||
|
```
|
||||||
|
|
||||||
|
### Flask (Backend)
|
||||||
|
|
||||||
|
```bash
|
||||||
|
flask --app application/app.py run --host=0.0.0.0 --port=7091
|
||||||
|
```
|
||||||
|
|
||||||
|
### Celery (Task Queue)
|
||||||
|
|
||||||
|
```bash
|
||||||
|
celery -A application.app.celery worker -l INFO
|
||||||
|
```
|
||||||
|
|
||||||
|
## Github Codespaces Instructions
|
||||||
|
|
||||||
|
### 1. Make Ports Public:
|
||||||
|
|
||||||
|
Go to the "Ports" panel in Codespaces (usually located at the bottom of the VS Code window).
|
||||||
|
|
||||||
|
For both port 5173 and 7091, right-click on the port and select "Make Public".
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
|
||||||
|
### 2. Update VITE_API_HOST:
|
||||||
|
|
||||||
|
After making port 7091 public, copy the public URL provided by Codespaces for port 7091.
|
||||||
|
|
||||||
|
Open the file frontend/.env.development.
|
||||||
|
|
||||||
|
Find the line VITE_API_HOST=http://localhost:7091.
|
||||||
|
|
||||||
|
Replace http://localhost:7091 with the public URL you copied from Codespaces.
|
||||||
|
|
||||||
|

|
||||||
24
.devcontainer/devcontainer.json
Normal file
24
.devcontainer/devcontainer.json
Normal file
@@ -0,0 +1,24 @@
|
|||||||
|
{
|
||||||
|
"name": "DocsGPT Dev Container",
|
||||||
|
"dockerComposeFile": ["docker-compose-dev.yaml", "docker-compose.override.yaml"],
|
||||||
|
"service": "dev",
|
||||||
|
"workspaceFolder": "/workspace",
|
||||||
|
"postCreateCommand": ".devcontainer/post-create-command.sh",
|
||||||
|
"forwardPorts": [7091, 5173, 6379, 27017],
|
||||||
|
"customizations": {
|
||||||
|
"vscode": {
|
||||||
|
"extensions": [
|
||||||
|
"ms-python.python",
|
||||||
|
"ms-toolsai.jupyter",
|
||||||
|
"esbenp.prettier-vscode",
|
||||||
|
"dbaeumer.vscode-eslint"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"codespaces": {
|
||||||
|
"openFiles": [
|
||||||
|
".devcontainer/devc-welcome.md",
|
||||||
|
"CONTRIBUTING.md"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -1,5 +1,3 @@
|
|||||||
version: "3.9"
|
|
||||||
|
|
||||||
services:
|
services:
|
||||||
|
|
||||||
redis:
|
redis:
|
||||||
40
.devcontainer/docker-compose.override.yaml
Normal file
40
.devcontainer/docker-compose.override.yaml
Normal file
@@ -0,0 +1,40 @@
|
|||||||
|
version: '3.8'
|
||||||
|
|
||||||
|
services:
|
||||||
|
dev:
|
||||||
|
build:
|
||||||
|
context: .
|
||||||
|
dockerfile: Dockerfile
|
||||||
|
volumes:
|
||||||
|
- ../:/workspace:cached
|
||||||
|
command: sleep infinity
|
||||||
|
depends_on:
|
||||||
|
redis:
|
||||||
|
condition: service_healthy
|
||||||
|
mongo:
|
||||||
|
condition: service_healthy
|
||||||
|
environment:
|
||||||
|
- CELERY_BROKER_URL=redis://redis:6379/0
|
||||||
|
- CELERY_RESULT_BACKEND=redis://redis:6379/1
|
||||||
|
- MONGO_URI=mongodb://mongo:27017/docsgpt
|
||||||
|
- CACHE_REDIS_URL=redis://redis:6379/2
|
||||||
|
networks:
|
||||||
|
- default
|
||||||
|
|
||||||
|
redis:
|
||||||
|
healthcheck:
|
||||||
|
test: ["CMD", "redis-cli", "ping"]
|
||||||
|
interval: 5s
|
||||||
|
timeout: 30s
|
||||||
|
retries: 5
|
||||||
|
|
||||||
|
mongo:
|
||||||
|
healthcheck:
|
||||||
|
test: ["CMD", "mongosh", "--eval", "db.adminCommand('ping')"]
|
||||||
|
interval: 5s
|
||||||
|
timeout: 30s
|
||||||
|
retries: 5
|
||||||
|
|
||||||
|
networks:
|
||||||
|
default:
|
||||||
|
name: docsgpt-dev-network
|
||||||
32
.devcontainer/post-create-command.sh
Executable file
32
.devcontainer/post-create-command.sh
Executable file
@@ -0,0 +1,32 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
set -e # Exit immediately if a command exits with a non-zero status
|
||||||
|
|
||||||
|
if [ ! -f frontend/.env.development ]; then
|
||||||
|
cp -n .env-template frontend/.env.development || true # Assuming .env-template is in the root
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Determine VITE_API_HOST based on environment
|
||||||
|
if [ -n "$CODESPACES" ]; then
|
||||||
|
# Running in Codespaces
|
||||||
|
CODESPACE_NAME=$(echo "$CODESPACES" | cut -d'-' -f1) # Extract codespace name
|
||||||
|
PUBLIC_API_HOST="https://${CODESPACE_NAME}-7091.${GITHUB_CODESPACES_PORT_FORWARDING_DOMAIN}"
|
||||||
|
echo "Setting VITE_API_HOST for Codespaces: $PUBLIC_API_HOST in frontend/.env.development"
|
||||||
|
sed -i "s|VITE_API_HOST=.*|VITE_API_HOST=$PUBLIC_API_HOST|" frontend/.env.development
|
||||||
|
else
|
||||||
|
# Not running in Codespaces (local devcontainer)
|
||||||
|
DEFAULT_API_HOST="http://localhost:7091"
|
||||||
|
echo "Setting VITE_API_HOST for local dev: $DEFAULT_API_HOST in frontend/.env.development"
|
||||||
|
sed -i "s|VITE_API_HOST=.*|VITE_API_HOST=$DEFAULT_API_HOST|" frontend/.env.development
|
||||||
|
fi
|
||||||
|
|
||||||
|
|
||||||
|
mkdir -p model
|
||||||
|
if [ ! -d model/all-mpnet-base-v2 ]; then
|
||||||
|
wget -q https://d3dg1063dc54p9.cloudfront.net/models/embeddings/mpnet-base-v2.zip -O model/mpnet-base-v2.zip
|
||||||
|
unzip -q model/mpnet-base-v2.zip -d model
|
||||||
|
rm model/mpnet-base-v2.zip
|
||||||
|
fi
|
||||||
|
pip install -r application/requirements.txt
|
||||||
|
cd frontend
|
||||||
|
npm install --include=dev
|
||||||
@@ -1,2 +1,9 @@
|
|||||||
OPENAI_API_KEY=<LLM api key (for example, open ai key)>
|
API_KEY=<LLM api key (for example, open ai key)>
|
||||||
EMBEDDINGS_KEY=<LLM embeddings api key (for example, open ai key)>
|
LLM_NAME=docsgpt
|
||||||
|
VITE_API_STREAMING=true
|
||||||
|
|
||||||
|
#For Azure (you can delete it if you don't use Azure)
|
||||||
|
OPENAI_API_BASE=
|
||||||
|
OPENAI_API_VERSION=
|
||||||
|
AZURE_DEPLOYMENT_NAME=
|
||||||
|
AZURE_EMBEDDINGS_DEPLOYMENT_NAME=
|
||||||
2
.gitattributes
vendored
Normal file
2
.gitattributes
vendored
Normal file
@@ -0,0 +1,2 @@
|
|||||||
|
# Auto detect text files and perform LF normalization
|
||||||
|
* text=auto
|
||||||
3
.github/FUNDING.yml
vendored
Normal file
3
.github/FUNDING.yml
vendored
Normal file
@@ -0,0 +1,3 @@
|
|||||||
|
# These are supported funding model platforms
|
||||||
|
|
||||||
|
github: arc53
|
||||||
138
.github/ISSUE_TEMPLATE/bug_report.yml
vendored
Normal file
138
.github/ISSUE_TEMPLATE/bug_report.yml
vendored
Normal file
@@ -0,0 +1,138 @@
|
|||||||
|
name: "🐛 Bug Report"
|
||||||
|
description: "Submit a bug report to help us improve"
|
||||||
|
title: "🐛 Bug Report: "
|
||||||
|
labels: ["type: bug"]
|
||||||
|
body:
|
||||||
|
- type: markdown
|
||||||
|
attributes:
|
||||||
|
value: We value your time and your efforts to submit this bug report is appreciated. 🙏
|
||||||
|
|
||||||
|
- type: textarea
|
||||||
|
id: description
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
|
attributes:
|
||||||
|
label: "📜 Description"
|
||||||
|
description: "A clear and concise description of what the bug is."
|
||||||
|
placeholder: "It bugs out when ..."
|
||||||
|
|
||||||
|
- type: textarea
|
||||||
|
id: steps-to-reproduce
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
|
attributes:
|
||||||
|
label: "👟 Reproduction steps"
|
||||||
|
description: "How do you trigger this bug? Please walk us through it step by step."
|
||||||
|
placeholder: "1. Go to '...'
|
||||||
|
2. Click on '....'
|
||||||
|
3. Scroll down to '....'
|
||||||
|
4. See error"
|
||||||
|
|
||||||
|
- type: textarea
|
||||||
|
id: expected-behavior
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
|
attributes:
|
||||||
|
label: "👍 Expected behavior"
|
||||||
|
description: "What did you think should happen?"
|
||||||
|
placeholder: "It should ..."
|
||||||
|
|
||||||
|
- type: textarea
|
||||||
|
id: actual-behavior
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
|
attributes:
|
||||||
|
label: "👎 Actual Behavior with Screenshots"
|
||||||
|
description: "What did actually happen? Add screenshots, if applicable."
|
||||||
|
placeholder: "It actually ..."
|
||||||
|
|
||||||
|
- type: dropdown
|
||||||
|
id: operating-system
|
||||||
|
attributes:
|
||||||
|
label: "💻 Operating system"
|
||||||
|
description: "What OS is your app running on?"
|
||||||
|
options:
|
||||||
|
- Linux
|
||||||
|
- MacOS
|
||||||
|
- Windows
|
||||||
|
- Something else
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
|
|
||||||
|
- type: dropdown
|
||||||
|
id: browsers
|
||||||
|
attributes:
|
||||||
|
label: What browsers are you seeing the problem on?
|
||||||
|
multiple: true
|
||||||
|
options:
|
||||||
|
- Firefox
|
||||||
|
- Chrome
|
||||||
|
- Safari
|
||||||
|
- Microsoft Edge
|
||||||
|
- Something else
|
||||||
|
|
||||||
|
- type: dropdown
|
||||||
|
id: dev-environment
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
|
attributes:
|
||||||
|
label: "🤖 What development environment are you experiencing this bug on?"
|
||||||
|
options:
|
||||||
|
- Docker
|
||||||
|
- Local dev server
|
||||||
|
|
||||||
|
- type: textarea
|
||||||
|
id: env-vars
|
||||||
|
validations:
|
||||||
|
required: false
|
||||||
|
attributes:
|
||||||
|
label: "🔒 Did you set the correct environment variables in the right path? List the environment variable names (not values please!)"
|
||||||
|
description: "Please refer to the [Project setup instructions](https://github.com/arc53/DocsGPT#quickstart) if you are unsure."
|
||||||
|
placeholder: "It actually ..."
|
||||||
|
|
||||||
|
- type: textarea
|
||||||
|
id: additional-context
|
||||||
|
validations:
|
||||||
|
required: false
|
||||||
|
attributes:
|
||||||
|
label: "📃 Provide any additional context for the Bug."
|
||||||
|
description: "Add any other context about the problem here."
|
||||||
|
placeholder: "It actually ..."
|
||||||
|
|
||||||
|
- type: textarea
|
||||||
|
id: logs
|
||||||
|
validations:
|
||||||
|
required: false
|
||||||
|
attributes:
|
||||||
|
label: 📖 Relevant log output
|
||||||
|
description: Please copy and paste any relevant log output. This will be automatically formatted into code, so no need for backticks.
|
||||||
|
render: shell
|
||||||
|
|
||||||
|
- type: checkboxes
|
||||||
|
id: no-duplicate-issues
|
||||||
|
attributes:
|
||||||
|
label: "👀 Have you spent some time to check if this bug has been raised before?"
|
||||||
|
options:
|
||||||
|
- label: "I checked and didn't find similar issue"
|
||||||
|
required: true
|
||||||
|
|
||||||
|
- type: dropdown
|
||||||
|
id: willing-to-submit-pr
|
||||||
|
attributes:
|
||||||
|
label: 🔗 Are you willing to submit PR?
|
||||||
|
description: This is absolutely not required, but we are happy to guide you in the contribution process.
|
||||||
|
options: # Added options key
|
||||||
|
- "Yes, I am willing to submit a PR!"
|
||||||
|
- "No"
|
||||||
|
validations:
|
||||||
|
required: false
|
||||||
|
|
||||||
|
|
||||||
|
- type: checkboxes
|
||||||
|
id: terms
|
||||||
|
attributes:
|
||||||
|
label: 🧑⚖️ Code of Conduct
|
||||||
|
description: By submitting this issue, you agree to follow our [Code of Conduct](https://github.com/arc53/DocsGPT/blob/main/CODE_OF_CONDUCT.md)
|
||||||
|
options:
|
||||||
|
- label: I agree to follow this project's Code of Conduct
|
||||||
|
required: true
|
||||||
54
.github/ISSUE_TEMPLATE/feature_request.yml
vendored
Normal file
54
.github/ISSUE_TEMPLATE/feature_request.yml
vendored
Normal file
@@ -0,0 +1,54 @@
|
|||||||
|
name: 🚀 Feature
|
||||||
|
description: "Submit a proposal for a new feature"
|
||||||
|
title: "🚀 Feature: "
|
||||||
|
labels: [feature]
|
||||||
|
body:
|
||||||
|
- type: markdown
|
||||||
|
attributes:
|
||||||
|
value: We value your time and your efforts to submit this bug report is appreciated. 🙏
|
||||||
|
- type: textarea
|
||||||
|
id: feature-description
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
|
attributes:
|
||||||
|
label: "🔖 Feature description"
|
||||||
|
description: "A clear and concise description of what the feature is."
|
||||||
|
placeholder: "You should add ..."
|
||||||
|
- type: textarea
|
||||||
|
id: pitch
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
|
attributes:
|
||||||
|
label: "🎤 Why is this feature needed ?"
|
||||||
|
description: "Please explain why this feature should be implemented and how it would be used. Add examples, if applicable."
|
||||||
|
placeholder: "In my use-case, ..."
|
||||||
|
- type: textarea
|
||||||
|
id: solution
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
|
attributes:
|
||||||
|
label: "✌️ How do you aim to achieve this?"
|
||||||
|
description: "A clear and concise description of what you want to happen."
|
||||||
|
placeholder: "I want this feature to, ..."
|
||||||
|
- type: textarea
|
||||||
|
id: alternative
|
||||||
|
validations:
|
||||||
|
required: false
|
||||||
|
attributes:
|
||||||
|
label: "🔄️ Additional Information"
|
||||||
|
description: "A clear and concise description of any alternative solutions or additional solutions you've considered."
|
||||||
|
placeholder: "I tried, ..."
|
||||||
|
- type: checkboxes
|
||||||
|
id: no-duplicate-issues
|
||||||
|
attributes:
|
||||||
|
label: "👀 Have you spent some time to check if this feature request has been raised before?"
|
||||||
|
options:
|
||||||
|
- label: "I checked and didn't find similar issue"
|
||||||
|
required: true
|
||||||
|
- type: dropdown
|
||||||
|
id: willing-to-submit-pr
|
||||||
|
attributes:
|
||||||
|
label: Are you willing to submit PR?
|
||||||
|
description: This is absolutely not required, but we are happy to guide you in the contribution process.
|
||||||
|
options:
|
||||||
|
- "Yes I am willing to submit a PR!"
|
||||||
5
.github/PULL_REQUEST_TEMPLATE.md
vendored
Normal file
5
.github/PULL_REQUEST_TEMPLATE.md
vendored
Normal file
@@ -0,0 +1,5 @@
|
|||||||
|
- **What kind of change does this PR introduce?** (Bug fix, feature, docs update, ...)
|
||||||
|
|
||||||
|
- **Why was this change needed?** (You can also link to an open issue here)
|
||||||
|
|
||||||
|
- **Other information**:
|
||||||
19
.github/dependabot.yml
vendored
Normal file
19
.github/dependabot.yml
vendored
Normal file
@@ -0,0 +1,19 @@
|
|||||||
|
# To get started with Dependabot version updates, you'll need to specify which
|
||||||
|
# package ecosystems to update and where the package manifests are located.
|
||||||
|
# Please see the documentation for all configuration options:
|
||||||
|
# https://docs.github.com/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file
|
||||||
|
|
||||||
|
version: 2
|
||||||
|
updates:
|
||||||
|
- package-ecosystem: "pip" # See documentation for possible values
|
||||||
|
directory: "/application" # Location of package manifests
|
||||||
|
schedule:
|
||||||
|
interval: "daily"
|
||||||
|
- package-ecosystem: "npm" # See documentation for possible values
|
||||||
|
directory: "/frontend" # Location of package manifests
|
||||||
|
schedule:
|
||||||
|
interval: "daily"
|
||||||
|
- package-ecosystem: "github-actions"
|
||||||
|
directory: "/"
|
||||||
|
schedule:
|
||||||
|
interval: "daily"
|
||||||
11
.github/holopin.yml
vendored
Normal file
11
.github/holopin.yml
vendored
Normal file
@@ -0,0 +1,11 @@
|
|||||||
|
organization: docsgpt
|
||||||
|
defaultSticker: cm1ulwkkl180570cl82rtzympu
|
||||||
|
stickers:
|
||||||
|
- id: cm1ulwkkl180570cl82rtzympu
|
||||||
|
alias: contributor2024
|
||||||
|
- id: cm1ureg8o130450cl8c1po6mil
|
||||||
|
alias: api
|
||||||
|
- id: cm1urhmag148240cl8yvqxkthx
|
||||||
|
alias: lpc
|
||||||
|
- id: cm1urlcpq622090cl2tvu4w71y
|
||||||
|
alias: lexeu
|
||||||
31
.github/labeler.yml
vendored
Normal file
31
.github/labeler.yml
vendored
Normal file
@@ -0,0 +1,31 @@
|
|||||||
|
repo:
|
||||||
|
- changed-files:
|
||||||
|
- any-glob-to-any-file: '*'
|
||||||
|
|
||||||
|
github:
|
||||||
|
- changed-files:
|
||||||
|
- any-glob-to-any-file: '.github/**/*'
|
||||||
|
|
||||||
|
application:
|
||||||
|
- changed-files:
|
||||||
|
- any-glob-to-any-file: 'application/**/*'
|
||||||
|
|
||||||
|
docs:
|
||||||
|
- changed-files:
|
||||||
|
- any-glob-to-any-file: 'docs/**/*'
|
||||||
|
|
||||||
|
extensions:
|
||||||
|
- changed-files:
|
||||||
|
- any-glob-to-any-file: 'extensions/**/*'
|
||||||
|
|
||||||
|
frontend:
|
||||||
|
- changed-files:
|
||||||
|
- any-glob-to-any-file: 'frontend/**/*'
|
||||||
|
|
||||||
|
scripts:
|
||||||
|
- changed-files:
|
||||||
|
- any-glob-to-any-file: 'scripts/**/*'
|
||||||
|
|
||||||
|
tests:
|
||||||
|
- changed-files:
|
||||||
|
- any-glob-to-any-file: 'tests/**/*'
|
||||||
40
.github/workflows/bandit.yaml
vendored
Normal file
40
.github/workflows/bandit.yaml
vendored
Normal file
@@ -0,0 +1,40 @@
|
|||||||
|
name: Bandit Security Scan
|
||||||
|
|
||||||
|
on:
|
||||||
|
push:
|
||||||
|
branches:
|
||||||
|
- main
|
||||||
|
pull_request:
|
||||||
|
types: [opened, synchronize, reopened]
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
bandit_scan:
|
||||||
|
if: ${{ github.repository == 'arc53/DocsGPT' }}
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
permissions:
|
||||||
|
security-events: write
|
||||||
|
actions: read
|
||||||
|
contents: read
|
||||||
|
|
||||||
|
steps:
|
||||||
|
- name: Checkout code
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
|
||||||
|
- name: Set up Python
|
||||||
|
uses: actions/setup-python@v5
|
||||||
|
with:
|
||||||
|
python-version: '3.12'
|
||||||
|
- name: Install dependencies
|
||||||
|
run: |
|
||||||
|
python -m pip install --upgrade pip
|
||||||
|
pip install bandit # Bandit is needed for this action
|
||||||
|
if [ -f application/requirements.txt ]; then pip install -r application/requirements.txt; fi
|
||||||
|
|
||||||
|
- name: Run Bandit scan
|
||||||
|
uses: PyCQA/bandit-action@v1
|
||||||
|
with:
|
||||||
|
severity: medium
|
||||||
|
confidence: medium
|
||||||
|
targets: application/
|
||||||
|
env:
|
||||||
|
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||||
102
.github/workflows/ci.yml
vendored
102
.github/workflows/ci.yml
vendored
@@ -1,48 +1,112 @@
|
|||||||
name: Build and push DocsGPT Docker image
|
name: Build and push DocsGPT Docker image
|
||||||
|
|
||||||
on:
|
on:
|
||||||
workflow_dispatch:
|
release:
|
||||||
push:
|
types: [published]
|
||||||
branches:
|
|
||||||
- main
|
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
deploy:
|
build:
|
||||||
runs-on: ubuntu-latest
|
if: github.repository == 'arc53/DocsGPT'
|
||||||
|
strategy:
|
||||||
|
matrix:
|
||||||
|
include:
|
||||||
|
- platform: linux/amd64
|
||||||
|
runner: ubuntu-latest
|
||||||
|
suffix: amd64
|
||||||
|
- platform: linux/arm64
|
||||||
|
runner: ubuntu-24.04-arm
|
||||||
|
suffix: arm64
|
||||||
|
runs-on: ${{ matrix.runner }}
|
||||||
permissions:
|
permissions:
|
||||||
contents: read
|
contents: read
|
||||||
packages: write
|
packages: write
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v3
|
- uses: actions/checkout@v4
|
||||||
|
|
||||||
- name: Set up QEMU
|
- name: Set up QEMU # Only needed for emulation, not for native arm64 builds
|
||||||
uses: docker/setup-qemu-action@v1
|
if: matrix.platform == 'linux/arm64'
|
||||||
|
uses: docker/setup-qemu-action@v3
|
||||||
|
|
||||||
- name: Set up Docker Buildx
|
- name: Set up Docker Buildx
|
||||||
uses: docker/setup-buildx-action@v1
|
uses: docker/setup-buildx-action@v3
|
||||||
|
with:
|
||||||
|
driver: docker-container
|
||||||
|
install: true
|
||||||
|
|
||||||
- name: Login to DockerHub
|
- name: Login to DockerHub
|
||||||
uses: docker/login-action@v2
|
uses: docker/login-action@v3
|
||||||
with:
|
with:
|
||||||
username: ${{ secrets.DOCKER_USERNAME }}
|
username: ${{ secrets.DOCKER_USERNAME }}
|
||||||
password: ${{ secrets.DOCKER_PASSWORD }}
|
password: ${{ secrets.DOCKER_PASSWORD }}
|
||||||
|
|
||||||
- name: Login to ghcr.io
|
- name: Login to ghcr.io
|
||||||
uses: docker/login-action@v2
|
uses: docker/login-action@v3
|
||||||
with:
|
with:
|
||||||
registry: ghcr.io
|
registry: ghcr.io
|
||||||
username: ${{ github.repository_owner }}
|
username: ${{ github.repository_owner }}
|
||||||
password: ${{ secrets.GITHUB_TOKEN }}
|
password: ${{ secrets.GITHUB_TOKEN }}
|
||||||
|
|
||||||
# Runs a single command using the runners shell
|
- name: Build and push platform-specific images
|
||||||
- name: Build and push Docker images to docker.io and ghcr.io
|
uses: docker/build-push-action@v6
|
||||||
uses: docker/build-push-action@v4
|
|
||||||
with:
|
with:
|
||||||
file: './application/Dockerfile'
|
file: './application/Dockerfile'
|
||||||
platforms: linux/amd64
|
platforms: ${{ matrix.platform }}
|
||||||
context: ./application
|
context: ./application
|
||||||
push: true
|
push: true
|
||||||
tags: |
|
tags: |
|
||||||
${{ secrets.DOCKER_USERNAME }}/docsgpt:latest
|
${{ secrets.DOCKER_USERNAME }}/docsgpt:${{ github.event.release.tag_name }}-${{ matrix.suffix }}
|
||||||
ghcr.io/${{ github.repository_owner }}/docsgpt:latest
|
ghcr.io/${{ github.repository_owner }}/docsgpt:${{ github.event.release.tag_name }}-${{ matrix.suffix }}
|
||||||
|
provenance: false
|
||||||
|
sbom: false
|
||||||
|
cache-from: type=registry,ref=${{ secrets.DOCKER_USERNAME }}/docsgpt:latest
|
||||||
|
cache-to: type=inline
|
||||||
|
|
||||||
|
manifest:
|
||||||
|
if: github.repository == 'arc53/DocsGPT'
|
||||||
|
needs: build
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
permissions:
|
||||||
|
packages: write
|
||||||
|
steps:
|
||||||
|
- name: Set up Docker Buildx
|
||||||
|
uses: docker/setup-buildx-action@v3
|
||||||
|
with:
|
||||||
|
driver: docker-container
|
||||||
|
install: true
|
||||||
|
|
||||||
|
- name: Login to DockerHub
|
||||||
|
uses: docker/login-action@v3
|
||||||
|
with:
|
||||||
|
username: ${{ secrets.DOCKER_USERNAME }}
|
||||||
|
password: ${{ secrets.DOCKER_PASSWORD }}
|
||||||
|
|
||||||
|
- name: Login to ghcr.io
|
||||||
|
uses: docker/login-action@v3
|
||||||
|
with:
|
||||||
|
registry: ghcr.io
|
||||||
|
username: ${{ github.repository_owner }}
|
||||||
|
password: ${{ secrets.GITHUB_TOKEN }}
|
||||||
|
|
||||||
|
- name: Create and push manifest for DockerHub
|
||||||
|
run: |
|
||||||
|
set -e
|
||||||
|
docker manifest create ${{ secrets.DOCKER_USERNAME }}/docsgpt:${{ github.event.release.tag_name }} \
|
||||||
|
--amend ${{ secrets.DOCKER_USERNAME }}/docsgpt:${{ github.event.release.tag_name }}-amd64 \
|
||||||
|
--amend ${{ secrets.DOCKER_USERNAME }}/docsgpt:${{ github.event.release.tag_name }}-arm64
|
||||||
|
docker manifest push ${{ secrets.DOCKER_USERNAME }}/docsgpt:${{ github.event.release.tag_name }}
|
||||||
|
docker manifest create ${{ secrets.DOCKER_USERNAME }}/docsgpt:latest \
|
||||||
|
--amend ${{ secrets.DOCKER_USERNAME }}/docsgpt:${{ github.event.release.tag_name }}-amd64 \
|
||||||
|
--amend ${{ secrets.DOCKER_USERNAME }}/docsgpt:${{ github.event.release.tag_name }}-arm64
|
||||||
|
docker manifest push ${{ secrets.DOCKER_USERNAME }}/docsgpt:latest
|
||||||
|
|
||||||
|
- name: Create and push manifest for ghcr.io
|
||||||
|
run: |
|
||||||
|
set -e
|
||||||
|
docker manifest create ghcr.io/${{ github.repository_owner }}/docsgpt:${{ github.event.release.tag_name }} \
|
||||||
|
--amend ghcr.io/${{ github.repository_owner }}/docsgpt:${{ github.event.release.tag_name }}-amd64 \
|
||||||
|
--amend ghcr.io/${{ github.repository_owner }}/docsgpt:${{ github.event.release.tag_name }}-arm64
|
||||||
|
docker manifest push ghcr.io/${{ github.repository_owner }}/docsgpt:${{ github.event.release.tag_name }}
|
||||||
|
docker manifest create ghcr.io/${{ github.repository_owner }}/docsgpt:latest \
|
||||||
|
--amend ghcr.io/${{ github.repository_owner }}/docsgpt:${{ github.event.release.tag_name }}-amd64 \
|
||||||
|
--amend ghcr.io/${{ github.repository_owner }}/docsgpt:${{ github.event.release.tag_name }}-arm64
|
||||||
|
docker manifest push ghcr.io/${{ github.repository_owner }}/docsgpt:latest
|
||||||
102
.github/workflows/cife.yml
vendored
102
.github/workflows/cife.yml
vendored
@@ -1,48 +1,112 @@
|
|||||||
name: Build and push DocsGPT-FE Docker image
|
name: Build and push DocsGPT-FE Docker image
|
||||||
|
|
||||||
on:
|
on:
|
||||||
workflow_dispatch:
|
release:
|
||||||
push:
|
types: [published]
|
||||||
branches:
|
|
||||||
- main
|
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
deploy:
|
build:
|
||||||
runs-on: ubuntu-latest
|
if: github.repository == 'arc53/DocsGPT'
|
||||||
|
strategy:
|
||||||
|
matrix:
|
||||||
|
include:
|
||||||
|
- platform: linux/amd64
|
||||||
|
runner: ubuntu-latest
|
||||||
|
suffix: amd64
|
||||||
|
- platform: linux/arm64
|
||||||
|
runner: ubuntu-24.04-arm
|
||||||
|
suffix: arm64
|
||||||
|
runs-on: ${{ matrix.runner }}
|
||||||
permissions:
|
permissions:
|
||||||
contents: read
|
contents: read
|
||||||
packages: write
|
packages: write
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v3
|
- uses: actions/checkout@v4
|
||||||
|
|
||||||
- name: Set up QEMU
|
- name: Set up QEMU # Only needed for emulation, not for native arm64 builds
|
||||||
uses: docker/setup-qemu-action@v1
|
if: matrix.platform == 'linux/arm64'
|
||||||
|
uses: docker/setup-qemu-action@v3
|
||||||
|
|
||||||
- name: Set up Docker Buildx
|
- name: Set up Docker Buildx
|
||||||
uses: docker/setup-buildx-action@v1
|
uses: docker/setup-buildx-action@v3
|
||||||
|
with:
|
||||||
|
driver: docker-container
|
||||||
|
install: true
|
||||||
|
|
||||||
- name: Login to DockerHub
|
- name: Login to DockerHub
|
||||||
uses: docker/login-action@v2
|
uses: docker/login-action@v3
|
||||||
with:
|
with:
|
||||||
username: ${{ secrets.DOCKER_USERNAME }}
|
username: ${{ secrets.DOCKER_USERNAME }}
|
||||||
password: ${{ secrets.DOCKER_PASSWORD }}
|
password: ${{ secrets.DOCKER_PASSWORD }}
|
||||||
|
|
||||||
- name: Login to ghcr.io
|
- name: Login to ghcr.io
|
||||||
uses: docker/login-action@v2
|
uses: docker/login-action@v3
|
||||||
with:
|
with:
|
||||||
registry: ghcr.io
|
registry: ghcr.io
|
||||||
username: ${{ github.repository_owner }}
|
username: ${{ github.repository_owner }}
|
||||||
password: ${{ secrets.GITHUB_TOKEN }}
|
password: ${{ secrets.GITHUB_TOKEN }}
|
||||||
|
|
||||||
# Runs a single command using the runners shell
|
- name: Build and push platform-specific images
|
||||||
- name: Build and push Docker images to docker.io and ghcr.io
|
uses: docker/build-push-action@v6
|
||||||
uses: docker/build-push-action@v4
|
|
||||||
with:
|
with:
|
||||||
file: './frontend/Dockerfile'
|
file: './frontend/Dockerfile'
|
||||||
platforms: linux/amd64
|
platforms: ${{ matrix.platform }}
|
||||||
context: ./frontend
|
context: ./frontend
|
||||||
push: true
|
push: true
|
||||||
tags: |
|
tags: |
|
||||||
${{ secrets.DOCKER_USERNAME }}/docsgpt-fe:latest
|
${{ secrets.DOCKER_USERNAME }}/docsgpt-fe:${{ github.event.release.tag_name }}-${{ matrix.suffix }}
|
||||||
ghcr.io/${{ github.repository_owner }}/docsgpt-fe:latest
|
ghcr.io/${{ github.repository_owner }}/docsgpt-fe:${{ github.event.release.tag_name }}-${{ matrix.suffix }}
|
||||||
|
provenance: false
|
||||||
|
sbom: false
|
||||||
|
cache-from: type=registry,ref=${{ secrets.DOCKER_USERNAME }}/docsgpt-fe:latest
|
||||||
|
cache-to: type=inline
|
||||||
|
|
||||||
|
manifest:
|
||||||
|
if: github.repository == 'arc53/DocsGPT'
|
||||||
|
needs: build
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
permissions:
|
||||||
|
packages: write
|
||||||
|
steps:
|
||||||
|
- name: Set up Docker Buildx
|
||||||
|
uses: docker/setup-buildx-action@v3
|
||||||
|
with:
|
||||||
|
driver: docker-container
|
||||||
|
install: true
|
||||||
|
|
||||||
|
- name: Login to DockerHub
|
||||||
|
uses: docker/login-action@v3
|
||||||
|
with:
|
||||||
|
username: ${{ secrets.DOCKER_USERNAME }}
|
||||||
|
password: ${{ secrets.DOCKER_PASSWORD }}
|
||||||
|
|
||||||
|
- name: Login to ghcr.io
|
||||||
|
uses: docker/login-action@v3
|
||||||
|
with:
|
||||||
|
registry: ghcr.io
|
||||||
|
username: ${{ github.repository_owner }}
|
||||||
|
password: ${{ secrets.GITHUB_TOKEN }}
|
||||||
|
|
||||||
|
- name: Create and push manifest for DockerHub
|
||||||
|
run: |
|
||||||
|
set -e
|
||||||
|
docker manifest create ${{ secrets.DOCKER_USERNAME }}/docsgpt-fe:${{ github.event.release.tag_name }} \
|
||||||
|
--amend ${{ secrets.DOCKER_USERNAME }}/docsgpt-fe:${{ github.event.release.tag_name }}-amd64 \
|
||||||
|
--amend ${{ secrets.DOCKER_USERNAME }}/docsgpt-fe:${{ github.event.release.tag_name }}-arm64
|
||||||
|
docker manifest push ${{ secrets.DOCKER_USERNAME }}/docsgpt-fe:${{ github.event.release.tag_name }}
|
||||||
|
docker manifest create ${{ secrets.DOCKER_USERNAME }}/docsgpt-fe:latest \
|
||||||
|
--amend ${{ secrets.DOCKER_USERNAME }}/docsgpt-fe:${{ github.event.release.tag_name }}-amd64 \
|
||||||
|
--amend ${{ secrets.DOCKER_USERNAME }}/docsgpt-fe:${{ github.event.release.tag_name }}-arm64
|
||||||
|
docker manifest push ${{ secrets.DOCKER_USERNAME }}/docsgpt-fe:latest
|
||||||
|
|
||||||
|
- name: Create and push manifest for ghcr.io
|
||||||
|
run: |
|
||||||
|
set -e
|
||||||
|
docker manifest create ghcr.io/${{ github.repository_owner }}/docsgpt-fe:${{ github.event.release.tag_name }} \
|
||||||
|
--amend ghcr.io/${{ github.repository_owner }}/docsgpt-fe:${{ github.event.release.tag_name }}-amd64 \
|
||||||
|
--amend ghcr.io/${{ github.repository_owner }}/docsgpt-fe:${{ github.event.release.tag_name }}-arm64
|
||||||
|
docker manifest push ghcr.io/${{ github.repository_owner }}/docsgpt-fe:${{ github.event.release.tag_name }}
|
||||||
|
docker manifest create ghcr.io/${{ github.repository_owner }}/docsgpt-fe:latest \
|
||||||
|
--amend ghcr.io/${{ github.repository_owner }}/docsgpt-fe:${{ github.event.release.tag_name }}-amd64 \
|
||||||
|
--amend ghcr.io/${{ github.repository_owner }}/docsgpt-fe:${{ github.event.release.tag_name }}-arm64
|
||||||
|
docker manifest push ghcr.io/${{ github.repository_owner }}/docsgpt-fe:latest
|
||||||
100
.github/workflows/docker-develop-build.yml
vendored
Normal file
100
.github/workflows/docker-develop-build.yml
vendored
Normal file
@@ -0,0 +1,100 @@
|
|||||||
|
name: Build and push multi-arch DocsGPT Docker image
|
||||||
|
|
||||||
|
on:
|
||||||
|
workflow_dispatch:
|
||||||
|
push:
|
||||||
|
branches:
|
||||||
|
- main
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
build:
|
||||||
|
if: github.repository == 'arc53/DocsGPT'
|
||||||
|
strategy:
|
||||||
|
matrix:
|
||||||
|
include:
|
||||||
|
- platform: linux/amd64
|
||||||
|
runner: ubuntu-latest
|
||||||
|
suffix: amd64
|
||||||
|
- platform: linux/arm64
|
||||||
|
runner: ubuntu-24.04-arm
|
||||||
|
suffix: arm64
|
||||||
|
runs-on: ${{ matrix.runner }}
|
||||||
|
permissions:
|
||||||
|
contents: read
|
||||||
|
packages: write
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v4
|
||||||
|
|
||||||
|
- name: Set up Docker Buildx
|
||||||
|
uses: docker/setup-buildx-action@v3
|
||||||
|
with:
|
||||||
|
driver: docker-container
|
||||||
|
install: true
|
||||||
|
|
||||||
|
- name: Login to DockerHub
|
||||||
|
uses: docker/login-action@v3
|
||||||
|
with:
|
||||||
|
username: ${{ secrets.DOCKER_USERNAME }}
|
||||||
|
password: ${{ secrets.DOCKER_PASSWORD }}
|
||||||
|
|
||||||
|
- name: Login to ghcr.io
|
||||||
|
uses: docker/login-action@v3
|
||||||
|
with:
|
||||||
|
registry: ghcr.io
|
||||||
|
username: ${{ github.repository_owner }}
|
||||||
|
password: ${{ secrets.GITHUB_TOKEN }}
|
||||||
|
|
||||||
|
- name: Build and push platform-specific images
|
||||||
|
uses: docker/build-push-action@v6
|
||||||
|
with:
|
||||||
|
file: './application/Dockerfile'
|
||||||
|
platforms: ${{ matrix.platform }}
|
||||||
|
context: ./application
|
||||||
|
push: true
|
||||||
|
tags: |
|
||||||
|
${{ secrets.DOCKER_USERNAME }}/docsgpt:develop-${{ matrix.suffix }}
|
||||||
|
ghcr.io/${{ github.repository_owner }}/docsgpt:develop-${{ matrix.suffix }}
|
||||||
|
provenance: false
|
||||||
|
sbom: false
|
||||||
|
cache-from: type=registry,ref=${{ secrets.DOCKER_USERNAME }}/docsgpt:develop
|
||||||
|
cache-to: type=inline
|
||||||
|
|
||||||
|
manifest:
|
||||||
|
if: github.repository == 'arc53/DocsGPT'
|
||||||
|
needs: build
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
permissions:
|
||||||
|
packages: write
|
||||||
|
steps:
|
||||||
|
- name: Set up Docker Buildx
|
||||||
|
uses: docker/setup-buildx-action@v3
|
||||||
|
with:
|
||||||
|
driver: docker-container
|
||||||
|
install: true
|
||||||
|
|
||||||
|
- name: Login to DockerHub
|
||||||
|
uses: docker/login-action@v3
|
||||||
|
with:
|
||||||
|
username: ${{ secrets.DOCKER_USERNAME }}
|
||||||
|
password: ${{ secrets.DOCKER_PASSWORD }}
|
||||||
|
|
||||||
|
- name: Login to ghcr.io
|
||||||
|
uses: docker/login-action@v3
|
||||||
|
with:
|
||||||
|
registry: ghcr.io
|
||||||
|
username: ${{ github.repository_owner }}
|
||||||
|
password: ${{ secrets.GITHUB_TOKEN }}
|
||||||
|
|
||||||
|
- name: Create and push manifest for DockerHub
|
||||||
|
run: |
|
||||||
|
docker manifest create ${{ secrets.DOCKER_USERNAME }}/docsgpt:develop \
|
||||||
|
--amend ${{ secrets.DOCKER_USERNAME }}/docsgpt:develop-amd64 \
|
||||||
|
--amend ${{ secrets.DOCKER_USERNAME }}/docsgpt:develop-arm64
|
||||||
|
docker manifest push ${{ secrets.DOCKER_USERNAME }}/docsgpt:develop
|
||||||
|
|
||||||
|
- name: Create and push manifest for ghcr.io
|
||||||
|
run: |
|
||||||
|
docker manifest create ghcr.io/${{ github.repository_owner }}/docsgpt:develop \
|
||||||
|
--amend ghcr.io/${{ github.repository_owner }}/docsgpt:develop-amd64 \
|
||||||
|
--amend ghcr.io/${{ github.repository_owner }}/docsgpt:develop-arm64
|
||||||
|
docker manifest push ghcr.io/${{ github.repository_owner }}/docsgpt:develop
|
||||||
104
.github/workflows/docker-develop-fe-build.yml
vendored
Normal file
104
.github/workflows/docker-develop-fe-build.yml
vendored
Normal file
@@ -0,0 +1,104 @@
|
|||||||
|
name: Build and push DocsGPT FE Docker image for development
|
||||||
|
|
||||||
|
on:
|
||||||
|
workflow_dispatch:
|
||||||
|
push:
|
||||||
|
branches:
|
||||||
|
- main
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
build:
|
||||||
|
if: github.repository == 'arc53/DocsGPT'
|
||||||
|
strategy:
|
||||||
|
matrix:
|
||||||
|
include:
|
||||||
|
- platform: linux/amd64
|
||||||
|
runner: ubuntu-latest
|
||||||
|
suffix: amd64
|
||||||
|
- platform: linux/arm64
|
||||||
|
runner: ubuntu-24.04-arm
|
||||||
|
suffix: arm64
|
||||||
|
runs-on: ${{ matrix.runner }}
|
||||||
|
permissions:
|
||||||
|
contents: read
|
||||||
|
packages: write
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v4
|
||||||
|
|
||||||
|
- name: Set up QEMU # Only needed for emulation, not for native arm64 builds
|
||||||
|
if: matrix.platform == 'linux/arm64'
|
||||||
|
uses: docker/setup-qemu-action@v3
|
||||||
|
|
||||||
|
- name: Set up Docker Buildx
|
||||||
|
uses: docker/setup-buildx-action@v3
|
||||||
|
with:
|
||||||
|
driver: docker-container
|
||||||
|
install: true
|
||||||
|
|
||||||
|
- name: Login to DockerHub
|
||||||
|
uses: docker/login-action@v3
|
||||||
|
with:
|
||||||
|
username: ${{ secrets.DOCKER_USERNAME }}
|
||||||
|
password: ${{ secrets.DOCKER_PASSWORD }}
|
||||||
|
|
||||||
|
- name: Login to ghcr.io
|
||||||
|
uses: docker/login-action@v3
|
||||||
|
with:
|
||||||
|
registry: ghcr.io
|
||||||
|
username: ${{ github.repository_owner }}
|
||||||
|
password: ${{ secrets.GITHUB_TOKEN }}
|
||||||
|
|
||||||
|
- name: Build and push platform-specific images
|
||||||
|
uses: docker/build-push-action@v6
|
||||||
|
with:
|
||||||
|
file: './frontend/Dockerfile'
|
||||||
|
platforms: ${{ matrix.platform }}
|
||||||
|
context: ./frontend
|
||||||
|
push: true
|
||||||
|
tags: |
|
||||||
|
${{ secrets.DOCKER_USERNAME }}/docsgpt-fe:develop-${{ matrix.suffix }}
|
||||||
|
ghcr.io/${{ github.repository_owner }}/docsgpt-fe:develop-${{ matrix.suffix }}
|
||||||
|
provenance: false
|
||||||
|
sbom: false
|
||||||
|
cache-from: type=registry,ref=${{ secrets.DOCKER_USERNAME }}/docsgpt-fe:develop
|
||||||
|
cache-to: type=inline
|
||||||
|
|
||||||
|
manifest:
|
||||||
|
if: github.repository == 'arc53/DocsGPT'
|
||||||
|
needs: build
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
permissions:
|
||||||
|
packages: write
|
||||||
|
steps:
|
||||||
|
- name: Set up Docker Buildx
|
||||||
|
uses: docker/setup-buildx-action@v3
|
||||||
|
with:
|
||||||
|
driver: docker-container
|
||||||
|
install: true
|
||||||
|
|
||||||
|
- name: Login to DockerHub
|
||||||
|
uses: docker/login-action@v3
|
||||||
|
with:
|
||||||
|
username: ${{ secrets.DOCKER_USERNAME }}
|
||||||
|
password: ${{ secrets.DOCKER_PASSWORD }}
|
||||||
|
|
||||||
|
- name: Login to ghcr.io
|
||||||
|
uses: docker/login-action@v3
|
||||||
|
with:
|
||||||
|
registry: ghcr.io
|
||||||
|
username: ${{ github.repository_owner }}
|
||||||
|
password: ${{ secrets.GITHUB_TOKEN }}
|
||||||
|
|
||||||
|
- name: Create and push manifest for DockerHub
|
||||||
|
run: |
|
||||||
|
docker manifest create ${{ secrets.DOCKER_USERNAME }}/docsgpt-fe:develop \
|
||||||
|
--amend ${{ secrets.DOCKER_USERNAME }}/docsgpt-fe:develop-amd64 \
|
||||||
|
--amend ${{ secrets.DOCKER_USERNAME }}/docsgpt-fe:develop-arm64
|
||||||
|
docker manifest push ${{ secrets.DOCKER_USERNAME }}/docsgpt-fe:develop
|
||||||
|
|
||||||
|
- name: Create and push manifest for ghcr.io
|
||||||
|
run: |
|
||||||
|
docker manifest create ghcr.io/${{ github.repository_owner }}/docsgpt-fe:develop \
|
||||||
|
--amend ghcr.io/${{ github.repository_owner }}/docsgpt-fe:develop-amd64 \
|
||||||
|
--amend ghcr.io/${{ github.repository_owner }}/docsgpt-fe:develop-arm64
|
||||||
|
docker manifest push ghcr.io/${{ github.repository_owner }}/docsgpt-fe:develop
|
||||||
16
.github/workflows/labeler.yml
vendored
Normal file
16
.github/workflows/labeler.yml
vendored
Normal file
@@ -0,0 +1,16 @@
|
|||||||
|
# https://github.com/actions/labeler
|
||||||
|
name: Pull Request Labeler
|
||||||
|
on:
|
||||||
|
- pull_request_target
|
||||||
|
jobs:
|
||||||
|
triage:
|
||||||
|
if: github.repository == 'arc53/DocsGPT'
|
||||||
|
permissions:
|
||||||
|
contents: read
|
||||||
|
pull-requests: write
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- uses: actions/labeler@v5
|
||||||
|
with:
|
||||||
|
repo-token: "${{ secrets.GITHUB_TOKEN }}"
|
||||||
|
sync-labels: true
|
||||||
2
.github/workflows/lint.yml
vendored
2
.github/workflows/lint.yml
vendored
@@ -11,7 +11,7 @@ jobs:
|
|||||||
ruff:
|
ruff:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v3
|
- uses: actions/checkout@v4
|
||||||
|
|
||||||
- name: Lint with Ruff
|
- name: Lint with Ruff
|
||||||
uses: chartboost/ruff-action@v1
|
uses: chartboost/ruff-action@v1
|
||||||
|
|||||||
30
.github/workflows/pytest.yml
vendored
Normal file
30
.github/workflows/pytest.yml
vendored
Normal file
@@ -0,0 +1,30 @@
|
|||||||
|
name: Run python tests with pytest
|
||||||
|
on: [push, pull_request]
|
||||||
|
jobs:
|
||||||
|
pytest_and_coverage:
|
||||||
|
name: Run tests and count coverage
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
strategy:
|
||||||
|
matrix:
|
||||||
|
python-version: ["3.12"]
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v4
|
||||||
|
- name: Set up Python ${{ matrix.python-version }}
|
||||||
|
uses: actions/setup-python@v5
|
||||||
|
with:
|
||||||
|
python-version: ${{ matrix.python-version }}
|
||||||
|
- name: Install dependencies
|
||||||
|
run: |
|
||||||
|
python -m pip install --upgrade pip
|
||||||
|
pip install pytest pytest-cov
|
||||||
|
cd application
|
||||||
|
if [ -f requirements.txt ]; then pip install -r requirements.txt; fi
|
||||||
|
- name: Test with pytest and generate coverage report
|
||||||
|
run: |
|
||||||
|
python -m pytest --cov=application --cov-report=xml
|
||||||
|
- name: Upload coverage reports to Codecov
|
||||||
|
if: github.event_name == 'pull_request' && matrix.python-version == '3.12'
|
||||||
|
uses: codecov/codecov-action@v5
|
||||||
|
env:
|
||||||
|
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
|
||||||
|
|
||||||
4
.github/workflows/sync_fork.yaml
vendored
4
.github/workflows/sync_fork.yaml
vendored
@@ -5,7 +5,7 @@ permissions:
|
|||||||
|
|
||||||
on:
|
on:
|
||||||
schedule:
|
schedule:
|
||||||
- cron: "0 * * * *" # every hour
|
- cron: "0 0 * * *" # every hour
|
||||||
workflow_dispatch:
|
workflow_dispatch:
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
@@ -17,7 +17,7 @@ jobs:
|
|||||||
steps:
|
steps:
|
||||||
# Step 1: run a standard checkout action
|
# Step 1: run a standard checkout action
|
||||||
- name: Checkout target repo
|
- name: Checkout target repo
|
||||||
uses: actions/checkout@v3
|
uses: actions/checkout@v4
|
||||||
|
|
||||||
# Step 2: run the sync action
|
# Step 2: run the sync action
|
||||||
- name: Sync upstream changes
|
- name: Sync upstream changes
|
||||||
|
|||||||
7
.gitignore
vendored
7
.gitignore
vendored
@@ -5,7 +5,7 @@ __pycache__/
|
|||||||
|
|
||||||
# C extensions
|
# C extensions
|
||||||
*.so
|
*.so
|
||||||
|
*.next
|
||||||
# Distribution / packaging
|
# Distribution / packaging
|
||||||
.Python
|
.Python
|
||||||
build/
|
build/
|
||||||
@@ -75,6 +75,7 @@ target/
|
|||||||
|
|
||||||
# Jupyter Notebook
|
# Jupyter Notebook
|
||||||
.ipynb_checkpoints
|
.ipynb_checkpoints
|
||||||
|
**/*.ipynb
|
||||||
|
|
||||||
# IPython
|
# IPython
|
||||||
profile_default/
|
profile_default/
|
||||||
@@ -112,6 +113,7 @@ venv.bak/
|
|||||||
# Spyder project settings
|
# Spyder project settings
|
||||||
.spyderproject
|
.spyderproject
|
||||||
.spyproject
|
.spyproject
|
||||||
|
.jwt_secret_key
|
||||||
|
|
||||||
# Rope project settings
|
# Rope project settings
|
||||||
.ropeproject
|
.ropeproject
|
||||||
@@ -170,3 +172,6 @@ application/vectors/
|
|||||||
**/yarn.lock
|
**/yarn.lock
|
||||||
|
|
||||||
node_modules/
|
node_modules/
|
||||||
|
.vscode/settings.json
|
||||||
|
/models/
|
||||||
|
model/
|
||||||
|
|||||||
71
.vscode/launch.json
vendored
Normal file
71
.vscode/launch.json
vendored
Normal file
@@ -0,0 +1,71 @@
|
|||||||
|
{
|
||||||
|
"version": "0.2.0",
|
||||||
|
"configurations": [
|
||||||
|
{
|
||||||
|
"name": "Frontend Debug (npm)",
|
||||||
|
"type": "node-terminal",
|
||||||
|
"request": "launch",
|
||||||
|
"command": "npm run dev",
|
||||||
|
"cwd": "${workspaceFolder}/frontend"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "Flask Debugger",
|
||||||
|
"type": "debugpy",
|
||||||
|
"request": "launch",
|
||||||
|
"module": "flask",
|
||||||
|
"env": {
|
||||||
|
"FLASK_APP": "application/app.py",
|
||||||
|
"PYTHONPATH": "${workspaceFolder}",
|
||||||
|
"FLASK_ENV": "development",
|
||||||
|
"FLASK_DEBUG": "1",
|
||||||
|
"FLASK_RUN_PORT": "7091",
|
||||||
|
"FLASK_RUN_HOST": "0.0.0.0"
|
||||||
|
|
||||||
|
},
|
||||||
|
"args": [
|
||||||
|
"run",
|
||||||
|
"--no-debugger"
|
||||||
|
],
|
||||||
|
"cwd": "${workspaceFolder}",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "Celery Debugger",
|
||||||
|
"type": "debugpy",
|
||||||
|
"request": "launch",
|
||||||
|
"module": "celery",
|
||||||
|
"env": {
|
||||||
|
"PYTHONPATH": "${workspaceFolder}",
|
||||||
|
},
|
||||||
|
"args": [
|
||||||
|
"-A",
|
||||||
|
"application.app.celery",
|
||||||
|
"worker",
|
||||||
|
"-l",
|
||||||
|
"INFO",
|
||||||
|
"--pool=solo"
|
||||||
|
],
|
||||||
|
"cwd": "${workspaceFolder}"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "Dev Containers (Mongo + Redis)",
|
||||||
|
"type": "node-terminal",
|
||||||
|
"request": "launch",
|
||||||
|
"command": "docker compose -f deployment/docker-compose-dev.yaml up --build",
|
||||||
|
"cwd": "${workspaceFolder}"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"compounds": [
|
||||||
|
{
|
||||||
|
"name": "DocsGPT: Full Stack",
|
||||||
|
"configurations": [
|
||||||
|
"Frontend Debug (npm)",
|
||||||
|
"Flask Debugger",
|
||||||
|
"Celery Debugger"
|
||||||
|
],
|
||||||
|
"presentation": {
|
||||||
|
"group": "DocsGPT",
|
||||||
|
"order": 1
|
||||||
|
}
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
@@ -2,58 +2,58 @@
|
|||||||
|
|
||||||
## Our Pledge
|
## Our Pledge
|
||||||
|
|
||||||
We as members, contributors, and leaders pledge to make participation in our
|
We as members, contributors and leaders pledge to make participation in our
|
||||||
community a harassment-free experience for everyone, regardless of age, body
|
community, a harassment-free experience for everyone, regardless of age, body
|
||||||
size, visible or invisible disability, ethnicity, sex characteristics, gender
|
size, visible or invisible disability, ethnicity, sex characteristics, gender
|
||||||
identity and expression, level of experience, education, socio-economic status,
|
identity and expression, level of experience, education, socio-economic status,
|
||||||
nationality, personal appearance, race, religion, or sexual identity
|
nationality, personal appearance, race, religion or sexual identity
|
||||||
and orientation.
|
and orientation.
|
||||||
|
|
||||||
We pledge to act and interact in ways that contribute to an open, welcoming,
|
We pledge to act and interact in ways that contribute to an open, welcoming,
|
||||||
diverse, inclusive, and healthy community.
|
diverse, inclusive and a healthy community.
|
||||||
|
|
||||||
## Our Standards
|
## Our Standards
|
||||||
|
|
||||||
Examples of behavior that contributes to a positive environment for our
|
Examples of behavior that contribute to a positive environment for our
|
||||||
community include:
|
community include:
|
||||||
|
|
||||||
* Demonstrating empathy and kindness toward other people
|
## Demonstrating empathy and kindness towards other people
|
||||||
* Being respectful of differing opinions, viewpoints, and experiences
|
1. Being respectful and open to differing opinions, viewpoints, and experiences
|
||||||
* Giving and gracefully accepting constructive feedback
|
2. Giving and gracefully accepting constructive feedback
|
||||||
* Accepting responsibility and apologizing to those affected by our mistakes,
|
3. Taking accountability and offering apologies to those who have been impacted by our errors,
|
||||||
and learning from the experience
|
while also gaining insights from the situation
|
||||||
* Focusing on what is best not just for us as individuals, but for the
|
4. Focusing on what is best not just for us as individuals but for the
|
||||||
overall community
|
community as a whole
|
||||||
|
|
||||||
Examples of unacceptable behavior include:
|
Examples of unacceptable behavior include:
|
||||||
|
|
||||||
* The use of sexualized language or imagery, and sexual attention or
|
1. The use of sexualized language or imagery, and sexual attention or
|
||||||
advances of any kind
|
advances of any kind
|
||||||
* Trolling, insulting or derogatory comments, and personal or political attacks
|
2. Trolling, insulting or derogatory comments, and personal or political attacks
|
||||||
* Public or private harassment
|
3. Public or private harassment
|
||||||
* Publishing others' private information, such as a physical or email
|
4. Publishing other's private information, such as a physical or email
|
||||||
address, without their explicit permission
|
address, without their explicit permission
|
||||||
* Other conduct which could reasonably be considered inappropriate in a
|
5. Other conduct which could reasonably be considered inappropriate in a
|
||||||
professional setting
|
professional setting
|
||||||
|
|
||||||
## Enforcement Responsibilities
|
## Enforcement Responsibilities
|
||||||
|
|
||||||
Community leaders are responsible for clarifying and enforcing our standards of
|
Community leaders are responsible for clarifying and enforcing our standards of
|
||||||
acceptable behavior and will take appropriate and fair corrective action in
|
acceptable behavior and will take appropriate and fair corrective action in
|
||||||
response to any behavior that they deem inappropriate, threatening, offensive,
|
response to any behavior that they deem inappropriate, threatening, offensive
|
||||||
or harmful.
|
or harmful.
|
||||||
|
|
||||||
Community leaders have the right and responsibility to remove, edit, or reject
|
Community leaders have the right and responsibility to remove, edit, or reject
|
||||||
comments, commits, code, wiki edits, issues, and other contributions that are
|
comments, commits, code, wiki edits, issues, and other contributions that are
|
||||||
not aligned to this Code of Conduct, and will communicate reasons for moderation
|
not aligned to this Code of Conduct and will communicate reasons for moderation
|
||||||
decisions when appropriate.
|
decisions when appropriate.
|
||||||
|
|
||||||
## Scope
|
## Scope
|
||||||
|
|
||||||
This Code of Conduct applies within all community spaces, and also applies when
|
This Code of Conduct applies within all community spaces and also applies when
|
||||||
an individual is officially representing the community in public spaces.
|
an individual is officially representing the community in public spaces.
|
||||||
Examples of representing our community include using an official e-mail address,
|
Examples of representing our community include using an official e-mail address,
|
||||||
posting via an official social media account, or acting as an appointed
|
posting via an official social media account or acting as an appointed
|
||||||
representative at an online or offline event.
|
representative at an online or offline event.
|
||||||
|
|
||||||
## Enforcement
|
## Enforcement
|
||||||
@@ -63,29 +63,27 @@ reported to the community leaders responsible for enforcement at
|
|||||||
contact@arc53.com.
|
contact@arc53.com.
|
||||||
All complaints will be reviewed and investigated promptly and fairly.
|
All complaints will be reviewed and investigated promptly and fairly.
|
||||||
|
|
||||||
All community leaders are obligated to respect the privacy and security of the
|
All community leaders are obligated to be respectful towards the privacy and security of the
|
||||||
reporter of any incident.
|
reporter of any incident.
|
||||||
|
|
||||||
## Enforcement Guidelines
|
## Enforcement Guidelines
|
||||||
|
|
||||||
Community leaders will follow these Community Impact Guidelines in determining
|
Community leaders will follow these Community Impact Guidelines in determining
|
||||||
the consequences for any action they deem in violation of this Code of Conduct:
|
the consequences for any action that they deem in violation of this Code of Conduct:
|
||||||
|
|
||||||
### 1. Correction
|
### 1. Correction
|
||||||
|
* **Community Impact**: Use of inappropriate language or other behavior deemed
|
||||||
|
unprofessional or unwelcome in the community space.
|
||||||
|
|
||||||
**Community Impact**: Use of inappropriate language or other behavior deemed
|
* **Consequence**: A private, written warning from community leaders, providing
|
||||||
unprofessional or unwelcome in the community.
|
|
||||||
|
|
||||||
**Consequence**: A private, written warning from community leaders, providing
|
|
||||||
clarity around the nature of the violation and an explanation of why the
|
clarity around the nature of the violation and an explanation of why the
|
||||||
behavior was inappropriate. A public apology may be requested.
|
behavior was inappropriate. A public apology may be requested.
|
||||||
|
|
||||||
### 2. Warning
|
### 2. Warning
|
||||||
|
* **Community Impact**: A violation through a single incident or series
|
||||||
**Community Impact**: A violation through a single incident or series
|
|
||||||
of actions.
|
of actions.
|
||||||
|
|
||||||
**Consequence**: A warning with consequences for continued behavior. No
|
* **Consequence**: A warning with consequences for continued behavior. No
|
||||||
interaction with the people involved, including unsolicited interaction with
|
interaction with the people involved, including unsolicited interaction with
|
||||||
those enforcing the Code of Conduct, for a specified period of time. This
|
those enforcing the Code of Conduct, for a specified period of time. This
|
||||||
includes avoiding interactions in community spaces as well as external channels
|
includes avoiding interactions in community spaces as well as external channels
|
||||||
@@ -93,23 +91,21 @@ like social media. Violating these terms may lead to a temporary or
|
|||||||
permanent ban.
|
permanent ban.
|
||||||
|
|
||||||
### 3. Temporary Ban
|
### 3. Temporary Ban
|
||||||
|
* **Community Impact**: A serious violation of community standards, including
|
||||||
**Community Impact**: A serious violation of community standards, including
|
|
||||||
sustained inappropriate behavior.
|
sustained inappropriate behavior.
|
||||||
|
|
||||||
**Consequence**: A temporary ban from any sort of interaction or public
|
* **Consequence**: A temporary ban from any sort of interaction or public
|
||||||
communication with the community for a specified period of time. No public or
|
communication with the community for a specified period of time. No public or
|
||||||
private interaction with the people involved, including unsolicited interaction
|
private interaction with the people involved, including unsolicited interaction
|
||||||
with those enforcing the Code of Conduct, is allowed during this period.
|
with those enforcing the Code of Conduct, is allowed during this period.
|
||||||
Violating these terms may lead to a permanent ban.
|
Violating these terms may lead to a permanent ban.
|
||||||
|
|
||||||
### 4. Permanent Ban
|
### 4. Permanent Ban
|
||||||
|
* **Community Impact**: Demonstrating a pattern of violation of community
|
||||||
|
standards, including sustained inappropriate behavior,harassment of an
|
||||||
|
individual or aggression towards or disparagement of classes of individuals.
|
||||||
|
|
||||||
**Community Impact**: Demonstrating a pattern of violation of community
|
* **Consequence**: A permanent ban from any sort of public interaction within
|
||||||
standards, including sustained inappropriate behavior, harassment of an
|
|
||||||
individual, or aggression toward or disparagement of classes of individuals.
|
|
||||||
|
|
||||||
**Consequence**: A permanent ban from any sort of public interaction within
|
|
||||||
the community.
|
the community.
|
||||||
|
|
||||||
## Attribution
|
## Attribution
|
||||||
|
|||||||
157
CONTRIBUTING.md
157
CONTRIBUTING.md
@@ -1,38 +1,151 @@
|
|||||||
# Welcome to DocsGPT Contributing guideline
|
# Welcome to DocsGPT Contributing Guidelines
|
||||||
|
|
||||||
Thank you for choosing this project to contribute to, we are all very grateful!
|
Thank you for choosing to contribute to DocsGPT! We are all very grateful!
|
||||||
|
|
||||||
# We accept different types of contributions
|
# We accept different types of contributions
|
||||||
|
|
||||||
📣 Discussions - where you can start a new topic or answer some questions
|
📣 **Discussions** - Engage in conversations, start new topics, or help answer questions.
|
||||||
|
|
||||||
🐞 Issues - Is how we track tasks, sometimes its bugs that need fixing, sometimes its new features
|
🐞 **Issues** - This is where we keep track of tasks. It could be bugs, fixes or suggestions for new features.
|
||||||
|
|
||||||
🛠️ Pull requests - Is how you can suggest changes to our repository, to work on existing issue or to add new features
|
🛠️ **Pull requests** - Suggest changes to our repository, either by working on existing issues or adding new features.
|
||||||
|
|
||||||
📚 Wiki - where we have our documentation
|
📚 **Wiki** - This is where our documentation resides.
|
||||||
|
|
||||||
|
|
||||||
## 🐞 Issues and Pull requests
|
## 🐞 Issues and Pull requests
|
||||||
|
|
||||||
We value contributions to our issues in form of discussion or suggestion, we recommend that you check out existing issues and our [Roadmap](https://github.com/orgs/arc53/projects/2)
|
- We value contributions in the form of discussions or suggestions. We recommend taking a look at existing issues and our [roadmap](https://github.com/orgs/arc53/projects/2).
|
||||||
|
|
||||||
If you want to contribute by writing code there are few things that you should know before doing it:
|
|
||||||
We have frontend (React, Vite) and Backend (python)
|
|
||||||
|
|
||||||
### If you are looking to contribute to Frontend (⚛️React, Vite):
|
|
||||||
Current frontend is being migrated from /application to /frontend with a new design, so please contribute to the new on. Check out this [Milestone](https://github.com/arc53/DocsGPT/milestone/1) and its issues also [Figma](https://www.figma.com/file/OXLtrl1EAy885to6S69554/DocsGPT?node-id=0%3A1&t=hjWVuxRg9yi5YkJ9-1)
|
|
||||||
Please try to follow guidelines
|
|
||||||
|
|
||||||
|
|
||||||
### If you are looking to contribute to Backend (🐍Python):
|
- If you're interested in contributing code, here are some important things to know:
|
||||||
Check out our issues, and contribute to /application or /scripts (ignore old ingest_rst.py ingest_rst_sphinx.py files, they will be deprecated soon)
|
|
||||||
Currently we don't have any tests(which would be useful😉) but before submitting you PR make sure that after you ingested some test data its queryable
|
|
||||||
|
|
||||||
### Workflow:
|
- We have a frontend built on React (Vite) and a backend in Python.
|
||||||
Create a fork, make changes on your forked repository, submit changes in a form of pull request
|
|
||||||
|
|
||||||
## Questions / collaboration
|
|
||||||
Please join our [Discord](https://discord.gg/n5BX8dh8rU) don't hesitate, we are very friendly and welcoming to new contributors.
|
|
||||||
|
|
||||||
# Thank you so much for considering to contribute to DocsGPT!🙏
|
Before creating issues, please check out how the latest version of our app looks and works by launching it via [Quickstart](https://github.com/arc53/DocsGPT#quickstart) the version on our live demo is slightly modified with login. Your issues should relate to the version you can launch via [Quickstart](https://github.com/arc53/DocsGPT#quickstart).
|
||||||
|
|
||||||
|
### 👨💻 If you're interested in contributing code, here are some important things to know:
|
||||||
|
|
||||||
|
For instructions on setting up a development environment, please refer to our [Development Deployment Guide](https://docs.docsgpt.cloud/Deploying/Development-Environment).
|
||||||
|
|
||||||
|
Tech Stack Overview:
|
||||||
|
|
||||||
|
- 🌐 Frontend: Built with React (Vite) ⚛️,
|
||||||
|
|
||||||
|
- 🖥 Backend: Developed in Python 🐍
|
||||||
|
|
||||||
|
### 🌐 Frontend Contributions (⚛️ React, Vite)
|
||||||
|
|
||||||
|
* The updated Figma design can be found [here](https://www.figma.com/file/OXLtrl1EAy885to6S69554/DocsGPT?node-id=0%3A1&t=hjWVuxRg9yi5YkJ9-1). Please try to follow the guidelines.
|
||||||
|
* **Coding Style:** We follow a strict coding style enforced by ESLint and Prettier. Please ensure your code adheres to the configuration provided in our repository's `fronetend/.eslintrc.js` file. We recommend configuring your editor with ESLint and Prettier to help with this.
|
||||||
|
* **Component Structure:** Strive for small, reusable components. Favor functional components and hooks over class components where possible.
|
||||||
|
* **State Management** If you need to add stores, please use Redux.
|
||||||
|
|
||||||
|
### 🖥 Backend Contributions (🐍 Python)
|
||||||
|
|
||||||
|
- Review our issues and contribute to [`/application`](https://github.com/arc53/DocsGPT/tree/main/application)
|
||||||
|
- All new code should be covered with unit tests ([pytest](https://github.com/pytest-dev/pytest)). Please find tests under [`/tests`](https://github.com/arc53/DocsGPT/tree/main/tests) folder.
|
||||||
|
- Before submitting your Pull Request, ensure it can be queried after ingesting some test data.
|
||||||
|
- **Coding Style:** We adhere to the [PEP 8](https://www.python.org/dev/peps/pep-0008/) style guide for Python code. We use `ruff` as our linter and code formatter. Please ensure your code is formatted correctly and passes `ruff` checks before submitting.
|
||||||
|
- **Type Hinting:** Please use type hints for all function arguments and return values. This improves code readability and helps catch errors early. Example:
|
||||||
|
|
||||||
|
```python
|
||||||
|
def my_function(name: str, count: int) -> list[str]:
|
||||||
|
...
|
||||||
|
```
|
||||||
|
- **Docstrings:** All functions and classes should have docstrings explaining their purpose, parameters, and return values. We prefer the [Google style docstrings](https://sphinxcontrib-napoleon.readthedocs.io/en/latest/example_google.html). Example:
|
||||||
|
|
||||||
|
```python
|
||||||
|
def my_function(name: str, count: int) -> list[str]:
|
||||||
|
"""Does something with a name and a count.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
name: The name to use.
|
||||||
|
count: The number of times to do it.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
A list of strings.
|
||||||
|
"""
|
||||||
|
...
|
||||||
|
```
|
||||||
|
|
||||||
|
### Testing
|
||||||
|
|
||||||
|
To run unit tests from the root of the repository, execute:
|
||||||
|
```
|
||||||
|
python -m pytest
|
||||||
|
```
|
||||||
|
|
||||||
|
## Workflow 📈
|
||||||
|
|
||||||
|
Here's a step-by-step guide on how to contribute to DocsGPT:
|
||||||
|
|
||||||
|
1. **Fork the Repository:**
|
||||||
|
- Click the "Fork" button at the top-right of this repository to create your fork.
|
||||||
|
|
||||||
|
2. **Clone the Forked Repository:**
|
||||||
|
- Clone the repository using:
|
||||||
|
``` shell
|
||||||
|
git clone https://github.com/<your-github-username>/DocsGPT.git
|
||||||
|
```
|
||||||
|
|
||||||
|
3. **Keep your Fork in Sync:**
|
||||||
|
- Before you make any changes, make sure that your fork is in sync to avoid merge conflicts using:
|
||||||
|
```shell
|
||||||
|
git remote add upstream https://github.com/arc53/DocsGPT.git
|
||||||
|
git pull upstream main
|
||||||
|
```
|
||||||
|
|
||||||
|
4. **Create and Switch to a New Branch:**
|
||||||
|
- Create a new branch for your contribution using:
|
||||||
|
```shell
|
||||||
|
git checkout -b your-branch-name
|
||||||
|
```
|
||||||
|
|
||||||
|
5. **Make Changes:**
|
||||||
|
- Make the required changes in your branch.
|
||||||
|
|
||||||
|
6. **Add Changes to the Staging Area:**
|
||||||
|
- Add your changes to the staging area using:
|
||||||
|
```shell
|
||||||
|
git add .
|
||||||
|
```
|
||||||
|
|
||||||
|
7. **Commit Your Changes:**
|
||||||
|
- Commit your changes with a descriptive commit message using:
|
||||||
|
```shell
|
||||||
|
git commit -m "Your descriptive commit message"
|
||||||
|
```
|
||||||
|
|
||||||
|
8. **Push Your Changes to the Remote Repository:**
|
||||||
|
- Push your branch with changes to your fork on GitHub using:
|
||||||
|
```shell
|
||||||
|
git push origin your-branch-name
|
||||||
|
```
|
||||||
|
|
||||||
|
9. **Submit a Pull Request (PR):**
|
||||||
|
- Create a Pull Request from your branch to the main repository. Make sure to include a detailed description of your changes and reference any related issues.
|
||||||
|
|
||||||
|
10. **Collaborate:**
|
||||||
|
- Be responsive to comments and feedback on your PR.
|
||||||
|
- Make necessary updates as suggested.
|
||||||
|
- Once your PR is approved, it will be merged into the main repository.
|
||||||
|
|
||||||
|
11. **Testing:**
|
||||||
|
- Before submitting a Pull Request, ensure your code passes all unit tests.
|
||||||
|
- To run unit tests from the root of the repository, execute:
|
||||||
|
```shell
|
||||||
|
python -m pytest
|
||||||
|
```
|
||||||
|
|
||||||
|
*Note: You should run the unit test only after making the changes to the backend code.*
|
||||||
|
|
||||||
|
12. **Questions and Collaboration:**
|
||||||
|
- Feel free to join our Discord. We're very friendly and welcoming to new contributors, so don't hesitate to reach out.
|
||||||
|
|
||||||
|
Thank you for considering contributing to DocsGPT! 🙏
|
||||||
|
|
||||||
|
## Questions/collaboration
|
||||||
|
Feel free to join our [Discord](https://discord.gg/n5BX8dh8rU). We're very friendly and welcoming to new contributors, so don't hesitate to reach out.
|
||||||
|
# Thank you so much for considering to contributing DocsGPT!🙏
|
||||||
|
|||||||
179
README.md
179
README.md
@@ -3,102 +3,161 @@
|
|||||||
</h1>
|
</h1>
|
||||||
|
|
||||||
<p align="center">
|
<p align="center">
|
||||||
<strong>Open-Source Documentation Assistant</strong>
|
<strong>Private AI for agents, assistants and enterprise search</strong>
|
||||||
</p>
|
</p>
|
||||||
|
|
||||||
<p align="left">
|
<p align="left">
|
||||||
<strong>DocsGPT</strong> is a cutting-edge open-source solution that streamlines the process of finding information in project documentation. With its integration of the powerful <strong>GPT</strong> models, developers can easily ask questions about a project and receive accurate answers.
|
<strong><a href="https://www.docsgpt.cloud/">DocsGPT</a></strong> is an open-source AI platform for building intelligent agents and assistants. Features Agent Builder, deep research tools, document analysis (PDF, Office, web content), Multi-model support (choose your provider or run locally), and rich API connectivity for agents with actionable tools and integrations. Deploy anywhere with complete privacy control.
|
||||||
|
|
||||||
Say goodbye to time-consuming manual searches, and let <strong>DocsGPT</strong> help you quickly find the information you need. Try it out and see how it revolutionizes your project documentation experience. Contribute to its development and be a part of the future of AI-powered assistance.
|
|
||||||
</p>
|
</p>
|
||||||
|
|
||||||
<div align="center">
|
<div align="center">
|
||||||
|
|
||||||
<a href="https://discord.gg/n5BX8dh8rU"></a>
|
<a href="https://github.com/arc53/DocsGPT"></a>
|
||||||
<a href="https://discord.gg/n5BX8dh8rU"></a>
|
<a href="https://github.com/arc53/DocsGPT"></a>
|
||||||
<a href="https://discord.gg/n5BX8dh8rU"></a>
|
<a href="https://github.com/arc53/DocsGPT/blob/main/LICENSE"></a>
|
||||||
<a href="https://discord.gg/n5BX8dh8rU"></a>
|
<a href="https://www.bestpractices.dev/projects/9907"><img src="https://www.bestpractices.dev/projects/9907/badge"></a>
|
||||||
|
<a href="https://discord.gg/n5BX8dh8rU"></a>
|
||||||
|
<a href="https://twitter.com/docsgptai"></a>
|
||||||
|
|
||||||
|
<a href="https://docs.docsgpt.cloud/quickstart">⚡️ Quickstart</a> • <a href="https://app.docsgpt.cloud/">☁️ Cloud Version</a> • <a href="https://discord.gg/n5BX8dh8rU">💬 Discord</a>
|
||||||
|
<br>
|
||||||
|
<a href="https://docs.docsgpt.cloud/">📖 Documentation</a> • <a href="https://github.com/arc53/DocsGPT/blob/main/CONTRIBUTING.md">👫 Contribute</a> • <a href="https://blog.docsgpt.cloud/">🗞 Blog</a>
|
||||||
|
<br>
|
||||||
|
|
||||||
</div>
|
</div>
|
||||||
|
<div align="center">
|
||||||

|
<img src="https://d3dg1063dc54p9.cloudfront.net/videos/demov7.gif" alt="video-example-of-docs-gpt" width="800" height="450">
|
||||||
|
</div>
|
||||||
|
<h3 align="left">
|
||||||
## Features
|
<strong>Key Features:</strong>
|
||||||
|
</h3>
|
||||||

|
<ul align="left">
|
||||||
|
<li><strong>🗂️ Wide Format Support:</strong> Reads PDF, DOCX, CSV, XLSX, EPUB, MD, RST, HTML, MDX, JSON, PPTX, and images.</li>
|
||||||
|
<li><strong>🌐 Web & Data Integration:</strong> Ingests from URLs, sitemaps, Reddit, GitHub and web crawlers.</li>
|
||||||
|
<li><strong>✅ Reliable Answers:</strong> Get accurate, hallucination-free responses with source citations viewable in a clean UI.</li>
|
||||||
|
<li><strong>🔑 Streamlined API Keys:</strong> Generate keys linked to your settings, documents, and models, simplifying chatbot and integration setup.</li>
|
||||||
|
<li><strong>🔗 Actionable Tooling:</strong> Connect to APIs, tools, and other services to enable LLM actions.</li>
|
||||||
|
<li><strong>🧩 Pre-built Integrations:</strong> Use readily available HTML/React chat widgets, search tools, Discord/Telegram bots, and more.</li>
|
||||||
|
<li><strong>🔌 Flexible Deployment:</strong> Works with major LLMs (OpenAI, Google, Anthropic) and local models (Ollama, llama_cpp).</li>
|
||||||
|
<li><strong>🏢 Secure & Scalable:</strong> Run privately and securely with Kubernetes support, designed for enterprise-grade reliability.</li>
|
||||||
|
</ul>
|
||||||
|
|
||||||
## Roadmap
|
## Roadmap
|
||||||
|
|
||||||
You can find our [Roadmap](https://github.com/orgs/arc53/projects/2) here, please don't hesitate contributing or creating issues, it helps us make DocsGPT better!
|
- [x] Full GoogleAI compatibility (Jan 2025)
|
||||||
|
- [x] Add tools (Jan 2025)
|
||||||
|
- [x] Manually updating chunks in the app UI (Feb 2025)
|
||||||
|
- [x] Devcontainer for easy development (Feb 2025)
|
||||||
|
- [x] ReACT agent (March 2025)
|
||||||
|
- [x] Chatbots menu re-design to handle tools, agent types, and more (April 2025)
|
||||||
|
- [x] New input box in the conversation menu (April 2025)
|
||||||
|
- [x] Add triggerable actions / tools (webhook) (April 2025)
|
||||||
|
- [x] Agent optimisations (May 2025)
|
||||||
|
- [x] Filesystem sources update (July 2025)
|
||||||
|
- [x] Json Responses (August 2025)
|
||||||
|
- [x] MCP support (August 2025)
|
||||||
|
- [x] Google Drive integration (September 2025)
|
||||||
|
- [ ] Add OAuth 2.0 authentication for MCP (September 2025)
|
||||||
|
- [ ] Sharepoint integration (October 2025)
|
||||||
|
- [ ] Deep Agents (October 2025)
|
||||||
|
- [ ] Agent scheduling
|
||||||
|
|
||||||
|
You can find our full roadmap [here](https://github.com/orgs/arc53/projects/2). Please don't hesitate to contribute or create issues, it helps us improve DocsGPT!
|
||||||
|
|
||||||
|
### Production Support / Help for Companies:
|
||||||
|
|
||||||
## [Live preview](https://docsgpt.arc53.com/)
|
We're eager to provide personalized assistance when deploying your DocsGPT to a live environment.
|
||||||
|
|
||||||
## [Join Our Discord](https://discord.gg/n5BX8dh8rU)
|
[Get a Demo :wave:](https://www.docsgpt.cloud/contact)
|
||||||
|
|
||||||
|
[Send Email :email:](mailto:support@docsgpt.cloud?subject=DocsGPT%20support%2Fsolutions)
|
||||||
|
|
||||||
## Project structure
|
## Join the Lighthouse Program 🌟
|
||||||
- Application - flask app (main application)
|
|
||||||
|
|
||||||
- Extensions - chrome extension
|
Calling all developers and GenAI innovators! The **DocsGPT Lighthouse Program** connects technical leaders actively deploying or extending DocsGPT in real-world scenarios. Collaborate directly with our team to shape the roadmap, access priority support, and build enterprise-ready solutions with exclusive community insights.
|
||||||
|
|
||||||
- Scripts - script that creates similarity search index and store for other libraries.
|
[Learn More & Apply →](https://docs.google.com/forms/d/1KAADiJinUJ8EMQyfTXUIGyFbqINNClNR3jBNWq7DgTE)
|
||||||
|
|
||||||
- frontend - frontend in vite and
|
|
||||||
|
|
||||||
## QuickStart
|
## QuickStart
|
||||||
|
|
||||||
Note: Make sure you have docker installed
|
> [!Note]
|
||||||
|
> Make sure you have [Docker](https://docs.docker.com/engine/install/) installed
|
||||||
|
|
||||||
1. Open dowload this repository with `git clone https://github.com/arc53/DocsGPT.git`
|
A more detailed [Quickstart](https://docs.docsgpt.cloud/quickstart) is available in our documentation
|
||||||
2. Create .env file in your root directory and set your OPENAI_API_KEY with your openai api key and VITE_API_STREAMING to true or false if you dont want streaming answers
|
|
||||||
3. Run `docker-compose build && docker-compose up`
|
|
||||||
4. Navigate to http://localhost:5173/
|
|
||||||
|
|
||||||
To stop just run Ctrl + C
|
1. **Clone the repository:**
|
||||||
|
|
||||||
## Development environments
|
```bash
|
||||||
|
git clone https://github.com/arc53/DocsGPT.git
|
||||||
|
cd DocsGPT
|
||||||
|
```
|
||||||
|
|
||||||
Spin up only 2 containers from docker-compose.yaml (by deleting all services except for redis and mongo)
|
**For macOS and Linux:**
|
||||||
|
|
||||||
Make sure you have python 3.10 or 3.11 installed
|
2. **Run the setup script:**
|
||||||
|
|
||||||
1. Navigate to `/application` folder
|
```bash
|
||||||
2. Run `docker-compose -f docker-compose-dev.yaml build && docker-compose -f docker-compose-dev.yaml up -d`
|
./setup.sh
|
||||||
3. Export required variables
|
```
|
||||||
`export CELERY_BROKER_URL=redis://localhost:6379/0`
|
|
||||||
`export CELERY_RESULT_BACKEND=redis://localhost:6379/1`
|
|
||||||
`export MONGO_URI=mongodb://localhost:27017/docsgpt`
|
|
||||||
4. Install dependencies
|
|
||||||
`pip install -r requirements.txt`
|
|
||||||
5. Prepare .env file
|
|
||||||
Copy .env_sample and create .env with your openai api token
|
|
||||||
6. Run the app
|
|
||||||
`python wsgi.py`
|
|
||||||
7. Start worker with `celery -A app.celery worker -l INFO`
|
|
||||||
|
|
||||||
To start frontend
|
**For Windows:**
|
||||||
1. Navigate to `/frontend` folder
|
|
||||||
2. Install dependencies
|
|
||||||
`npm install`
|
|
||||||
3. Run the app
|
|
||||||
4. `npm run dev`
|
|
||||||
|
|
||||||
|
2. **Run the PowerShell setup script:**
|
||||||
|
|
||||||
[How to install the Chrome extension](https://github.com/arc53/docsgpt/wiki#launch-chrome-extension)
|
```powershell
|
||||||
|
PowerShell -ExecutionPolicy Bypass -File .\setup.ps1
|
||||||
|
```
|
||||||
|
|
||||||
|
Either script will guide you through setting up DocsGPT. Four options available: using the public API, running locally, connecting to a local inference engine, or using a cloud API provider. Scripts will automatically configure your `.env` file and handle necessary downloads and installations based on your chosen option.
|
||||||
|
|
||||||
## [Guides](https://github.com/arc53/docsgpt/wiki)
|
**Navigate to http://localhost:5173/**
|
||||||
|
|
||||||
## [Interested in contributing?](https://github.com/arc53/DocsGPT/blob/main/CONTRIBUTING.md)
|
To stop DocsGPT, open a terminal in the `DocsGPT` directory and run:
|
||||||
|
|
||||||
## [How to use any other documentation](https://github.com/arc53/docsgpt/wiki/How-to-train-on-other-documentation)
|
```bash
|
||||||
|
docker compose -f deployment/docker-compose.yaml down
|
||||||
|
```
|
||||||
|
|
||||||
## [How to host it locally (so all data will stay on-premises)](https://github.com/arc53/DocsGPT/wiki/How-to-use-different-LLM's#hosting-everything-locally)
|
(or use the specific `docker compose down` command shown after running the setup script).
|
||||||
|
|
||||||
Built with [🦜️🔗 LangChain](https://github.com/hwchase17/langchain)
|
> [!Note]
|
||||||
|
> For development environment setup instructions, please refer to the [Development Environment Guide](https://docs.docsgpt.cloud/Deploying/Development-Environment).
|
||||||
|
|
||||||
|
## Contributing
|
||||||
|
|
||||||
|
Please refer to the [CONTRIBUTING.md](CONTRIBUTING.md) file for information about how to get involved. We welcome issues, questions, and pull requests.
|
||||||
|
|
||||||
|
## Architecture
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
## Project Structure
|
||||||
|
|
||||||
|
- Application - Flask app (main application).
|
||||||
|
|
||||||
|
- Extensions - Extensions, like react widget or discord bot.
|
||||||
|
|
||||||
|
- Frontend - Frontend uses <a href="https://vitejs.dev/">Vite</a> and <a href="https://react.dev/">React</a>.
|
||||||
|
|
||||||
|
- Scripts - Miscellaneous scripts.
|
||||||
|
|
||||||
|
## Code Of Conduct
|
||||||
|
|
||||||
|
We as members, contributors, and leaders, pledge to make participation in our community a harassment-free experience for everyone, regardless of age, body size, visible or invisible disability, ethnicity, sex characteristics, gender identity and expression, level of experience, education, socio-economic status, nationality, personal appearance, race, religion, or sexual identity and orientation. Please refer to the [CODE_OF_CONDUCT.md](CODE_OF_CONDUCT.md) file for more information about contributing.
|
||||||
|
|
||||||
|
## Many Thanks To Our Contributors⚡
|
||||||
|
|
||||||
|
<a href="https://github.com/arc53/DocsGPT/graphs/contributors" alt="View Contributors">
|
||||||
|
<img src="https://contrib.rocks/image?repo=arc53/DocsGPT" alt="Contributors" />
|
||||||
|
</a>
|
||||||
|
|
||||||
|
## License
|
||||||
|
|
||||||
|
The source code license is [MIT](https://opensource.org/license/mit/), as described in the [LICENSE](LICENSE) file.
|
||||||
|
|
||||||
|
<p>This project is supported by:</p>
|
||||||
|
<p>
|
||||||
|
<a href="https://www.digitalocean.com/?utm_medium=opensource&utm_source=DocsGPT">
|
||||||
|
<img src="https://opensource.nyc3.cdn.digitaloceanspaces.com/attribution/assets/SVG/DO_Logo_horizontal_blue.svg" width="201px">
|
||||||
|
</a>
|
||||||
|
</p>
|
||||||
|
|||||||
BIN
Readme Logo.png
BIN
Readme Logo.png
Binary file not shown.
|
Before Width: | Height: | Size: 23 KiB |
14
SECURITY.md
Normal file
14
SECURITY.md
Normal file
@@ -0,0 +1,14 @@
|
|||||||
|
# Security Policy
|
||||||
|
|
||||||
|
## Supported Versions
|
||||||
|
|
||||||
|
Supported Versions:
|
||||||
|
|
||||||
|
Currently, we support security patches by committing changes and bumping the version published on Github.
|
||||||
|
|
||||||
|
## Reporting a Vulnerability
|
||||||
|
|
||||||
|
Found a vulnerability? Please email us:
|
||||||
|
|
||||||
|
security@arc53.com
|
||||||
|
|
||||||
@@ -1,6 +1,11 @@
|
|||||||
API_KEY=your_api_key
|
API_KEY=your_api_key
|
||||||
EMBEDDINGS_KEY=your_api_key
|
EMBEDDINGS_KEY=your_api_key
|
||||||
CELERY_BROKER_URL=redis://localhost:6379/0
|
API_URL=http://localhost:7091
|
||||||
CELERY_RESULT_BACKEND=redis://localhost:6379/1
|
FLASK_APP=application/app.py
|
||||||
MONGO_URI=mongodb://localhost:27017/docsgpt
|
FLASK_DEBUG=true
|
||||||
API_URL=http://localhost:5001
|
|
||||||
|
#For OPENAI on Azure
|
||||||
|
OPENAI_API_BASE=
|
||||||
|
OPENAI_API_VERSION=
|
||||||
|
AZURE_DEPLOYMENT_NAME=
|
||||||
|
AZURE_EMBEDDINGS_DEPLOYMENT_NAME=
|
||||||
@@ -1,25 +1,87 @@
|
|||||||
FROM python:3.10-slim-bullseye as builder
|
# Builder Stage
|
||||||
|
FROM ubuntu:24.04 as builder
|
||||||
|
|
||||||
# Tiktoken requires Rust toolchain, so build it in a separate stage
|
ENV DEBIAN_FRONTEND=noninteractive
|
||||||
RUN apt-get update && apt-get install -y gcc curl
|
|
||||||
RUN curl https://sh.rustup.rs -sSf | sh -s -- -y && apt-get install --reinstall libc6-dev -y
|
RUN apt-get update && \
|
||||||
ENV PATH="/root/.cargo/bin:${PATH}"
|
apt-get install -y software-properties-common && \
|
||||||
RUN pip install --upgrade pip && pip install tiktoken==0.3.3
|
add-apt-repository ppa:deadsnakes/ppa && \
|
||||||
|
apt-get update && \
|
||||||
|
apt-get install -y --no-install-recommends gcc wget unzip libc6-dev python3.12 python3.12-venv && \
|
||||||
|
rm -rf /var/lib/apt/lists/*
|
||||||
|
|
||||||
|
# Verify Python installation and setup symlink
|
||||||
|
RUN if [ -f /usr/bin/python3.12 ]; then \
|
||||||
|
ln -s /usr/bin/python3.12 /usr/bin/python; \
|
||||||
|
else \
|
||||||
|
echo "Python 3.12 not found"; exit 1; \
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Download and unzip the model
|
||||||
|
RUN wget https://d3dg1063dc54p9.cloudfront.net/models/embeddings/mpnet-base-v2.zip && \
|
||||||
|
unzip mpnet-base-v2.zip -d models && \
|
||||||
|
rm mpnet-base-v2.zip
|
||||||
|
|
||||||
|
# Install Rust
|
||||||
|
RUN wget -q -O - https://sh.rustup.rs | sh -s -- -y
|
||||||
|
|
||||||
|
# Clean up to reduce container size
|
||||||
|
RUN apt-get remove --purge -y wget unzip && apt-get autoremove -y && rm -rf /var/lib/apt/lists/*
|
||||||
|
|
||||||
|
# Copy requirements.txt
|
||||||
COPY requirements.txt .
|
COPY requirements.txt .
|
||||||
RUN pip install -r requirements.txt
|
|
||||||
|
|
||||||
|
# Setup Python virtual environment
|
||||||
|
RUN python3.12 -m venv /venv
|
||||||
|
|
||||||
FROM python:3.10-slim-bullseye
|
# Activate virtual environment and install Python packages
|
||||||
# Copy pre-built packages from builder stage
|
ENV PATH="/venv/bin:$PATH"
|
||||||
COPY --from=builder /usr/local/lib/python3.10/site-packages/ /usr/local/lib/python3.10/site-packages/
|
|
||||||
RUN pip install gunicorn==20.1.0
|
# Install Python packages
|
||||||
RUN pip install celery==5.2.7
|
RUN pip install --no-cache-dir --upgrade pip && \
|
||||||
|
pip install --no-cache-dir tiktoken && \
|
||||||
|
pip install --no-cache-dir -r requirements.txt
|
||||||
|
|
||||||
|
# Final Stage
|
||||||
|
FROM ubuntu:24.04 as final
|
||||||
|
|
||||||
|
RUN apt-get update && \
|
||||||
|
apt-get install -y software-properties-common && \
|
||||||
|
add-apt-repository ppa:deadsnakes/ppa && \
|
||||||
|
apt-get update && apt-get install -y --no-install-recommends python3.12 && \
|
||||||
|
ln -s /usr/bin/python3.12 /usr/bin/python && \
|
||||||
|
rm -rf /var/lib/apt/lists/*
|
||||||
|
|
||||||
|
# Set working directory
|
||||||
WORKDIR /app
|
WORKDIR /app
|
||||||
COPY . /app
|
|
||||||
ENV FLASK_APP=app.py
|
|
||||||
ENV FLASK_DEBUG=true
|
|
||||||
|
|
||||||
|
# Create a non-root user: `appuser` (Feel free to choose a name)
|
||||||
|
RUN groupadd -r appuser && \
|
||||||
|
useradd -r -g appuser -d /app -s /sbin/nologin -c "Docker image user" appuser
|
||||||
|
|
||||||
EXPOSE 5001
|
# Copy the virtual environment and model from the builder stage
|
||||||
|
COPY --from=builder /venv /venv
|
||||||
|
|
||||||
CMD ["gunicorn", "-w", "2", "--timeout", "120", "--bind", "0.0.0.0:5001", "wsgi:app"]
|
COPY --from=builder /models /app/models
|
||||||
|
|
||||||
|
# Copy your application code
|
||||||
|
COPY . /app/application
|
||||||
|
|
||||||
|
# Change the ownership of the /app directory to the appuser
|
||||||
|
|
||||||
|
RUN mkdir -p /app/application/inputs/local
|
||||||
|
RUN chown -R appuser:appuser /app
|
||||||
|
|
||||||
|
# Set environment variables
|
||||||
|
ENV FLASK_APP=app.py \
|
||||||
|
FLASK_DEBUG=true \
|
||||||
|
PATH="/venv/bin:$PATH"
|
||||||
|
|
||||||
|
# Expose the port the app runs on
|
||||||
|
EXPOSE 7091
|
||||||
|
|
||||||
|
# Switch to non-root user
|
||||||
|
USER appuser
|
||||||
|
|
||||||
|
# Start Gunicorn
|
||||||
|
CMD ["gunicorn", "-w", "1", "--timeout", "120", "--bind", "0.0.0.0:7091", "--preload", "application.wsgi:app"]
|
||||||
|
|||||||
0
application/__init__.py
Normal file
0
application/__init__.py
Normal file
0
application/agents/__init__.py
Normal file
0
application/agents/__init__.py
Normal file
16
application/agents/agent_creator.py
Normal file
16
application/agents/agent_creator.py
Normal file
@@ -0,0 +1,16 @@
|
|||||||
|
from application.agents.classic_agent import ClassicAgent
|
||||||
|
from application.agents.react_agent import ReActAgent
|
||||||
|
|
||||||
|
|
||||||
|
class AgentCreator:
|
||||||
|
agents = {
|
||||||
|
"classic": ClassicAgent,
|
||||||
|
"react": ReActAgent,
|
||||||
|
}
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def create_agent(cls, type, *args, **kwargs):
|
||||||
|
agent_class = cls.agents.get(type.lower())
|
||||||
|
if not agent_class:
|
||||||
|
raise ValueError(f"No agent class found for type {type}")
|
||||||
|
return agent_class(*args, **kwargs)
|
||||||
409
application/agents/base.py
Normal file
409
application/agents/base.py
Normal file
@@ -0,0 +1,409 @@
|
|||||||
|
import logging
|
||||||
|
import uuid
|
||||||
|
from abc import ABC, abstractmethod
|
||||||
|
from typing import Dict, Generator, List, Optional
|
||||||
|
|
||||||
|
from bson.objectid import ObjectId
|
||||||
|
|
||||||
|
from application.agents.tools.tool_action_parser import ToolActionParser
|
||||||
|
from application.agents.tools.tool_manager import ToolManager
|
||||||
|
from application.core.mongo_db import MongoDB
|
||||||
|
from application.core.settings import settings
|
||||||
|
from application.llm.handlers.handler_creator import LLMHandlerCreator
|
||||||
|
from application.llm.llm_creator import LLMCreator
|
||||||
|
from application.logging import build_stack_data, log_activity, LogContext
|
||||||
|
from application.retriever.base import BaseRetriever
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
class BaseAgent(ABC):
    """Abstract base for DocsGPT agents.

    Owns the shared machinery: LLM/handler construction, tool discovery and
    execution, message building from chat history and retrieved documents,
    and response post-processing. Subclasses implement ``_gen_inner`` to
    define the actual generation flow.
    """

    def __init__(
        self,
        endpoint: str,
        llm_name: str,
        gpt_model: str,
        api_key: str,
        user_api_key: Optional[str] = None,
        prompt: str = "",
        chat_history: Optional[List[Dict]] = None,
        decoded_token: Optional[Dict] = None,
        attachments: Optional[List[Dict]] = None,
        json_schema: Optional[Dict] = None,
    ):
        self.endpoint = endpoint
        self.llm_name = llm_name
        self.gpt_model = gpt_model
        self.api_key = api_key
        self.user_api_key = user_api_key
        self.prompt = prompt
        self.decoded_token = decoded_token or {}
        # FIX: read from self.decoded_token (already normalized to a dict);
        # reading decoded_token directly raised AttributeError when the
        # caller passed None, which the signature explicitly allows.
        self.user: str = self.decoded_token.get("sub")
        self.tool_config: Dict = {}
        # Tool specs in LLM function-calling format (built by _prepare_tools).
        self.tools: List[Dict] = []
        # Record of executed tool calls for this generation (appended by
        # _execute_tool_action, surfaced via _get_truncated_tool_calls).
        self.tool_calls: List[Dict] = []
        self.chat_history: List[Dict] = chat_history if chat_history is not None else []
        self.llm = LLMCreator.create_llm(
            llm_name,
            api_key=api_key,
            user_api_key=user_api_key,
            decoded_token=decoded_token,
        )
        self.llm_handler = LLMHandlerCreator.create_handler(
            llm_name if llm_name else "default"
        )
        self.attachments = attachments or []
        self.json_schema = json_schema

    @log_activity()
    def gen(
        self, query: str, retriever: BaseRetriever, log_context: LogContext = None
    ) -> Generator[Dict, None, None]:
        """Public entry point: delegate to the subclass's ``_gen_inner``."""
        yield from self._gen_inner(query, retriever, log_context)

    @abstractmethod
    def _gen_inner(
        self, query: str, retriever: BaseRetriever, log_context: LogContext
    ) -> Generator[Dict, None, None]:
        """Subclass hook implementing the agent's generation flow."""
        pass

    def _get_tools(self, api_key: str = None) -> Dict[str, Dict]:
        """Load the tools attached to the agent identified by *api_key*.

        Returns a mapping of tool ObjectId (as str) -> tool document.
        Returns an empty dict when the agent has no tools configured.
        """
        mongo = MongoDB.get_client()
        db = mongo[settings.MONGO_DB_NAME]
        agents_collection = db["agents"]
        tools_collection = db["user_tools"]

        agent_data = agents_collection.find_one({"key": api_key or self.user_api_key})
        tool_ids = agent_data.get("tools", []) if agent_data else []

        tools = (
            tools_collection.find(
                {"_id": {"$in": [ObjectId(tool_id) for tool_id in tool_ids]}}
            )
            if tool_ids
            else []
        )
        tools = list(tools)
        tools_by_id = {str(tool["_id"]): tool for tool in tools} if tools else {}

        return tools_by_id

    def _get_user_tools(self, user="local"):
        """Load all active tools owned by *user*.

        Keys are synthetic enumeration indices (as str), not ObjectIds —
        note the asymmetry with _get_tools.
        """
        mongo = MongoDB.get_client()
        db = mongo[settings.MONGO_DB_NAME]
        user_tools_collection = db["user_tools"]
        user_tools = user_tools_collection.find({"user": user, "status": True})
        user_tools = list(user_tools)

        return {str(i): tool for i, tool in enumerate(user_tools)}

    def _build_tool_parameters(self, action):
        """Build a JSON-schema ``parameters`` object for one tool action.

        Only properties flagged ``filled_by_llm`` (default True) are exposed
        to the model; the internal ``filled_by_llm``/``value`` keys are
        stripped from each property spec.
        """
        params = {"type": "object", "properties": {}, "required": []}
        for param_type in ["query_params", "headers", "body", "parameters"]:
            if param_type in action and action[param_type].get("properties"):
                for k, v in action[param_type]["properties"].items():
                    if v.get("filled_by_llm", True):
                        params["properties"][k] = {
                            key: value
                            for key, value in v.items()
                            if key != "filled_by_llm" and key != "value"
                        }
                        # Every LLM-filled property is marked required.
                        params["required"].append(k)
        return params

    def _prepare_tools(self, tools_dict):
        """Populate self.tools with function-calling specs for all active actions.

        api_tool stores its actions in config["actions"] (a dict); other
        tools store a list under "actions". The function name encodes the
        tool id as "{action}_{tool_id}" so _execute_tool_action can route it.
        """
        self.tools = [
            {
                "type": "function",
                "function": {
                    "name": f"{action['name']}_{tool_id}",
                    "description": action["description"],
                    "parameters": self._build_tool_parameters(action),
                },
            }
            for tool_id, tool in tools_dict.items()
            if (
                (tool["name"] == "api_tool" and "actions" in tool.get("config", {}))
                or (tool["name"] != "api_tool" and "actions" in tool)
            )
            for action in (
                tool["config"]["actions"].values()
                if tool["name"] == "api_tool"
                else tool["actions"]
            )
            if action.get("active", True)
        ]

    def _execute_tool_action(self, tools_dict, call):
        """Execute one LLM tool call; a generator that also *returns* a value.

        Yields progress events ({"type": "tool_call", ...}); the final
        (result, call_id) pair is delivered via the generator's return value
        (StopIteration.value), so callers must drive it accordingly.
        """
        parser = ToolActionParser(self.llm.__class__.__name__)
        tool_id, action_name, call_args = parser.parse_args(call)

        call_id = getattr(call, "id", None) or str(uuid.uuid4())

        # Check if parsing failed
        if tool_id is None or action_name is None:
            error_message = f"Error: Failed to parse LLM tool call. Tool name: {getattr(call, 'name', 'unknown')}"
            logger.error(error_message)

            tool_call_data = {
                "tool_name": "unknown",
                "call_id": call_id,
                "action_name": getattr(call, "name", "unknown"),
                "arguments": call_args or {},
                "result": f"Failed to parse tool call. Invalid tool name format: {getattr(call, 'name', 'unknown')}",
            }
            yield {"type": "tool_call", "data": {**tool_call_data, "status": "error"}}
            self.tool_calls.append(tool_call_data)
            return "Failed to parse tool call.", call_id

        # Check if tool_id exists in available tools
        if tool_id not in tools_dict:
            error_message = f"Error: Tool ID '{tool_id}' extracted from LLM call not found in available tools_dict. Available IDs: {list(tools_dict.keys())}"
            logger.error(error_message)

            # Return error result
            tool_call_data = {
                "tool_name": "unknown",
                "call_id": call_id,
                "action_name": f"{action_name}_{tool_id}",
                "arguments": call_args,
                "result": f"Tool with ID {tool_id} not found. Available tools: {list(tools_dict.keys())}",
            }
            yield {"type": "tool_call", "data": {**tool_call_data, "status": "error"}}
            self.tool_calls.append(tool_call_data)
            return f"Tool with ID {tool_id} not found.", call_id

        tool_call_data = {
            "tool_name": tools_dict[tool_id]["name"],
            "call_id": call_id,
            "action_name": f"{action_name}_{tool_id}",
            "arguments": call_args,
        }
        yield {"type": "tool_call", "data": {**tool_call_data, "status": "pending"}}

        tool_data = tools_dict[tool_id]
        action_data = (
            tool_data["config"]["actions"][action_name]
            if tool_data["name"] == "api_tool"
            else next(
                action
                for action in tool_data["actions"]
                if action["name"] == action_name
            )
        )

        # Merge fixed ("value") parameters with LLM-supplied arguments into
        # the four routing buckets the tool layer understands.
        query_params, headers, body, parameters = {}, {}, {}, {}
        param_types = {
            "query_params": query_params,
            "headers": headers,
            "body": body,
            "parameters": parameters,
        }

        for param_type, target_dict in param_types.items():
            if param_type in action_data and action_data[param_type].get("properties"):
                for param, details in action_data[param_type]["properties"].items():
                    if param not in call_args and "value" in details:
                        target_dict[param] = details["value"]
        for param, value in call_args.items():
            for param_type, target_dict in param_types.items():
                if param_type in action_data and param in action_data[param_type].get(
                    "properties", {}
                ):
                    target_dict[param] = value
        tm = ToolManager(config={})
        tool = tm.load_tool(
            tool_data["name"],
            tool_config=(
                {
                    "url": tool_data["config"]["actions"][action_name]["url"],
                    "method": tool_data["config"]["actions"][action_name]["method"],
                    "headers": headers,
                    "query_params": query_params,
                }
                if tool_data["name"] == "api_tool"
                else tool_data["config"]
            ),
            user_id=self.user,  # Pass user ID for MCP tools credential decryption
        )
        if tool_data["name"] == "api_tool":
            print(
                f"Executing api: {action_name} with query_params: {query_params}, headers: {headers}, body: {body}"
            )
            result = tool.execute_action(action_name, **body)
        else:
            print(f"Executing tool: {action_name} with args: {call_args}")
            result = tool.execute_action(action_name, **parameters)
        # Truncate long results for the recorded/streamed copy.
        tool_call_data["result"] = (
            f"{str(result)[:50]}..." if len(str(result)) > 50 else result
        )

        yield {"type": "tool_call", "data": {**tool_call_data, "status": "completed"}}
        self.tool_calls.append(tool_call_data)

        return result, call_id

    def _get_truncated_tool_calls(self):
        """Return this run's tool calls with results capped at 50 chars."""
        return [
            {
                **tool_call,
                "result": (
                    f"{str(tool_call['result'])[:50]}..."
                    if len(str(tool_call["result"])) > 50
                    else tool_call["result"]
                ),
                "status": "completed",
            }
            for tool_call in self.tool_calls
        ]

    def _build_messages(
        self,
        system_prompt: str,
        query: str,
        retrieved_data: List[Dict],
    ) -> List[Dict]:
        """Assemble the full message list for the LLM.

        Retrieved chunks (prefixed with their filename when available) are
        substituted into the system prompt's ``{summaries}`` placeholder;
        chat history turns — including recorded tool calls — precede the
        current user query.
        """
        docs_with_filenames = []
        for doc in retrieved_data:
            filename = doc.get("filename") or doc.get("title") or doc.get("source")
            if filename:
                chunk_header = str(filename)
                docs_with_filenames.append(f"{chunk_header}\n{doc['text']}")
            else:
                docs_with_filenames.append(doc["text"])
        docs_together = "\n\n".join(docs_with_filenames)
        p_chat_combine = system_prompt.replace("{summaries}", docs_together)
        messages_combine = [{"role": "system", "content": p_chat_combine}]

        for i in self.chat_history:
            if "prompt" in i and "response" in i:
                messages_combine.append({"role": "user", "content": i["prompt"]})
                messages_combine.append({"role": "assistant", "content": i["response"]})
            if "tool_calls" in i:
                for tool_call in i["tool_calls"]:
                    call_id = tool_call.get("call_id") or str(uuid.uuid4())

                    function_call_dict = {
                        "function_call": {
                            "name": tool_call.get("action_name"),
                            "args": tool_call.get("arguments"),
                            "call_id": call_id,
                        }
                    }
                    function_response_dict = {
                        "function_response": {
                            "name": tool_call.get("action_name"),
                            "response": {"result": tool_call.get("result")},
                            "call_id": call_id,
                        }
                    }

                    messages_combine.append(
                        {"role": "assistant", "content": [function_call_dict]}
                    )
                    messages_combine.append(
                        {"role": "tool", "content": [function_response_dict]}
                    )
        messages_combine.append({"role": "user", "content": query})
        return messages_combine

    def _retriever_search(
        self,
        retriever: BaseRetriever,
        query: str,
        log_context: Optional[LogContext] = None,
    ) -> List[Dict]:
        """Run the retriever for *query*, recording it to the log stack."""
        retrieved_data = retriever.search(query)
        if log_context:
            data = build_stack_data(retriever, exclude_attributes=["llm"])
            log_context.stacks.append({"component": "retriever", "data": data})
        return retrieved_data

    def _llm_gen(self, messages: List[Dict], log_context: Optional[LogContext] = None):
        """Start a streaming LLM generation with tools / structured output.

        Tools are attached only when the LLM advertises support; structured
        output keyword differs per provider (OpenAI vs Google).
        """
        gen_kwargs = {"model": self.gpt_model, "messages": messages}

        if (
            hasattr(self.llm, "_supports_tools")
            and self.llm._supports_tools
            and self.tools
        ):
            gen_kwargs["tools"] = self.tools

        if (
            self.json_schema
            and hasattr(self.llm, "_supports_structured_output")
            and self.llm._supports_structured_output()
        ):
            structured_format = self.llm.prepare_structured_output_format(
                self.json_schema
            )
            if structured_format:
                if self.llm_name == "openai":
                    gen_kwargs["response_format"] = structured_format
                elif self.llm_name == "google":
                    gen_kwargs["response_schema"] = structured_format

        resp = self.llm.gen_stream(**gen_kwargs)

        if log_context:
            data = build_stack_data(self.llm, exclude_attributes=["client"])
            log_context.stacks.append({"component": "llm", "data": data})
        return resp

    def _llm_handler(
        self,
        resp,
        tools_dict: Dict,
        messages: List[Dict],
        log_context: Optional[LogContext] = None,
        attachments: Optional[List[Dict]] = None,
    ):
        """Run the provider-specific handler over a raw LLM response."""
        resp = self.llm_handler.process_message_flow(
            self, resp, tools_dict, messages, attachments, True
        )
        if log_context:
            data = build_stack_data(self.llm_handler, exclude_attributes=["tool_calls"])
            log_context.stacks.append({"component": "llm_handler", "data": data})
        return resp

    def _handle_response(self, response, tools_dict, messages, log_context):
        """Normalize an LLM response into streamed answer/tool-call events.

        Plain strings and message objects with content are yielded directly;
        anything else (e.g. a stream or tool-calling response) is routed
        through _llm_handler and its events are forwarded.
        """
        is_structured_output = (
            self.json_schema is not None
            and hasattr(self.llm, "_supports_structured_output")
            and self.llm._supports_structured_output()
        )

        if isinstance(response, str):
            answer_data = {"answer": response}
            if is_structured_output:
                answer_data["structured"] = True
                answer_data["schema"] = self.json_schema
            yield answer_data
            return
        if hasattr(response, "message") and getattr(response.message, "content", None):
            answer_data = {"answer": response.message.content}
            if is_structured_output:
                answer_data["structured"] = True
                answer_data["schema"] = self.json_schema
            yield answer_data
            return
        processed_response_gen = self._llm_handler(
            response, tools_dict, messages, log_context, self.attachments
        )

        for event in processed_response_gen:
            if isinstance(event, str):
                answer_data = {"answer": event}
                if is_structured_output:
                    answer_data["structured"] = True
                    answer_data["schema"] = self.json_schema
                yield answer_data
            elif hasattr(event, "message") and getattr(event.message, "content", None):
                answer_data = {"answer": event.message.content}
                if is_structured_output:
                    answer_data["structured"] = True
                    answer_data["schema"] = self.json_schema
                yield answer_data
            elif isinstance(event, dict) and "type" in event:
                yield event
|
||||||
53
application/agents/classic_agent.py
Normal file
53
application/agents/classic_agent.py
Normal file
@@ -0,0 +1,53 @@
|
|||||||
|
from typing import Dict, Generator
|
||||||
|
from application.agents.base import BaseAgent
|
||||||
|
from application.logging import LogContext
|
||||||
|
from application.retriever.base import BaseRetriever
|
||||||
|
import logging
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
class ClassicAgent(BaseAgent):
    """A simplified agent with clear execution flow.

    Usage:
    1. Processes a query through retrieval
    2. Sets up available tools
    3. Generates responses using LLM
    4. Handles tool interactions if needed
    5. Returns standardized outputs

    Easy to extend by overriding specific steps.
    """

    def _gen_inner(
        self, query: str, retriever: BaseRetriever, log_context: LogContext
    ) -> Generator[Dict, None, None]:
        # Step 1: Retrieve relevant data
        retrieved_data = self._retriever_search(retriever, query, log_context)

        # Step 2: Prepare tools — agent-scoped tools when invoked via an
        # agent API key, otherwise the user's own active tools.
        tools_dict = (
            self._get_user_tools(self.user)
            if not self.user_api_key
            else self._get_tools(self.user_api_key)
        )
        self._prepare_tools(tools_dict)

        # Step 3: Build and process messages
        messages = self._build_messages(self.prompt, query, retrieved_data)
        llm_response = self._llm_gen(messages, log_context)

        # Step 4: Handle the response
        yield from self._handle_response(
            llm_response, tools_dict, messages, log_context
        )

        # Step 5: Return metadata
        yield {"sources": retrieved_data}
        yield {"tool_calls": self._get_truncated_tool_calls()}

        # Log tool calls for debugging.
        # FIX: guard against log_context being None — BaseAgent.gen defaults
        # it to None, and the unconditional attribute access crashed here.
        if log_context:
            log_context.stacks.append(
                {"component": "agent", "data": {"tool_calls": self.tool_calls.copy()}}
            )
|
||||||
229
application/agents/react_agent.py
Normal file
229
application/agents/react_agent.py
Normal file
@@ -0,0 +1,229 @@
|
|||||||
|
import os
|
||||||
|
from typing import Dict, Generator, List, Any
|
||||||
|
import logging
|
||||||
|
|
||||||
|
from application.agents.base import BaseAgent
|
||||||
|
from application.logging import build_stack_data, LogContext
|
||||||
|
from application.retriever.base import BaseRetriever
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
# Resolve the project root (three directory levels above this file) so the
# prompt templates can be located relative to the repository layout.
current_dir = os.path.dirname(
    os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
)
# Load the ReAct prompt templates once at import time.
# FIX: read with an explicit encoding — the previous code used the platform
# default, which breaks on non-UTF-8 locales (e.g. Windows cp1252).
with open(
    os.path.join(current_dir, "application/prompts", "react_planning_prompt.txt"),
    "r",
    encoding="utf-8",
) as f:
    planning_prompt_template = f.read()
with open(
    os.path.join(current_dir, "application/prompts", "react_final_prompt.txt"),
    "r",
    encoding="utf-8",
) as f:
    final_prompt_template = f.read()

# Hard cap on plan/act/observe iterations to prevent unbounded reasoning loops.
MAX_ITERATIONS_REASONING = 10
|
||||||
|
|
||||||
|
class ReActAgent(BaseAgent):
    """ReAct-style agent: iteratively plans, acts (via tools), and observes.

    Each iteration streams a plan as "thought" events, lets the LLM execute
    tool calls, and accumulates observations until the LLM signals
    'SATISFIED' or MAX_ITERATIONS_REASONING is reached, then synthesizes a
    final answer from all observations.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Latest plan text produced by _create_plan (reset per generation).
        self.plan: str = ""
        # Running log of plans, LLM responses and tool results fed back into
        # subsequent prompts (reset per generation).
        self.observations: List[str] = []

    def _extract_content_from_llm_response(self, resp: Any) -> str:
        """
        Helper to extract string content from various LLM response types.
        Handles strings, message objects (OpenAI-like), and streams.
        Adapt stream handling for your specific LLM client if not OpenAI.
        """
        collected_content = []
        if isinstance(resp, str):
            collected_content.append(resp)
        elif (  # OpenAI non-streaming or Anthropic non-streaming (older SDK style)
            hasattr(resp, "message")
            and hasattr(resp.message, "content")
            and resp.message.content is not None
        ):
            collected_content.append(resp.message.content)
        elif (  # OpenAI non-streaming (Pydantic model), Anthropic new SDK non-streaming
            hasattr(resp, "choices") and resp.choices and
            hasattr(resp.choices[0], "message") and
            hasattr(resp.choices[0].message, "content") and
            resp.choices[0].message.content is not None
        ):
            collected_content.append(resp.choices[0].message.content)  # OpenAI
        elif (  # Anthropic new SDK non-streaming content block
            hasattr(resp, "content") and isinstance(resp.content, list) and resp.content and
            hasattr(resp.content[0], "text")
        ):
            collected_content.append(resp.content[0].text)  # Anthropic
        else:
            # Assume resp is a stream if not a recognized object
            try:
                for chunk in resp:  # This will fail if resp is not iterable (e.g. a non-streaming response object)
                    content_piece = ""
                    # OpenAI-like stream
                    if hasattr(chunk, 'choices') and len(chunk.choices) > 0 and \
                       hasattr(chunk.choices[0], 'delta') and \
                       hasattr(chunk.choices[0].delta, 'content') and \
                       chunk.choices[0].delta.content is not None:
                        content_piece = chunk.choices[0].delta.content
                    # Anthropic-like stream (ContentBlockDelta)
                    elif hasattr(chunk, 'type') and chunk.type == 'content_block_delta' and \
                         hasattr(chunk, 'delta') and hasattr(chunk.delta, 'text'):
                        content_piece = chunk.delta.text
                    elif isinstance(chunk, str):  # Simplest case: stream of strings
                        content_piece = chunk

                    if content_piece:
                        collected_content.append(content_piece)
            except TypeError:  # If resp is not iterable (e.g. a final response object that wasn't caught above)
                logger.debug(f"Response type {type(resp)} could not be iterated as a stream. It might be a non-streaming object not handled by specific checks.")
            except Exception as e:
                # NOTE(review): `chunk` may be unbound here if iteration failed
                # before the first item — the getattr fallback would then raise
                # NameError; confirm whether that path is reachable.
                logger.error(f"Error processing potential stream chunk: {e}, chunk was: {getattr(chunk, '__dict__', chunk)}")

        return "".join(collected_content)

    def _gen_inner(
        self, query: str, retriever: BaseRetriever, log_context: LogContext
    ) -> Generator[Dict, None, None]:
        """Run the plan/act/observe loop and stream thoughts, sources,
        tool calls, and finally the synthesized answer."""
        # Reset state for this generation call
        self.plan = ""
        self.observations = []
        retrieved_data = self._retriever_search(retriever, query, log_context)

        # Agent-scoped tools when invoked via an agent API key, otherwise
        # the user's own active tools (same split as ClassicAgent).
        if self.user_api_key:
            tools_dict = self._get_tools(self.user_api_key)
        else:
            tools_dict = self._get_user_tools(self.user)
        self._prepare_tools(tools_dict)

        docs_together = "\n".join([doc["text"] for doc in retrieved_data])
        iterating_reasoning = 0
        while iterating_reasoning < MAX_ITERATIONS_REASONING:
            iterating_reasoning += 1
            # 1. Create Plan
            logger.info("ReActAgent: Creating plan...")
            plan_stream = self._create_plan(query, docs_together, log_context)
            current_plan_parts = []
            yield {"thought": f"Reasoning... (iteration {iterating_reasoning})\n\n"}
            for line_chunk in plan_stream:
                current_plan_parts.append(line_chunk)
                yield {"thought": line_chunk}
            self.plan = "".join(current_plan_parts)
            if self.plan:
                self.observations.append(f"Plan: {self.plan} Iteration: {iterating_reasoning}")

            # Cap the observations injected into the execution prompt to
            # keep the context within model limits.
            max_obs_len = 20000
            obs_str = "\n".join(self.observations)
            if len(obs_str) > max_obs_len:
                obs_str = obs_str[:max_obs_len] + "\n...[observations truncated]"
            execution_prompt_str = (
                (self.prompt or "")
                + f"\n\nFollow this plan:\n{self.plan}"
                + f"\n\nObservations:\n{obs_str}"
                + f"\n\nIf there is enough data to complete user query '{query}', Respond with 'SATISFIED' only. Otherwise, continue. Dont Menstion 'SATISFIED' in your response if you are not ready. "
            )

            messages = self._build_messages(execution_prompt_str, query, retrieved_data)

            resp_from_llm_gen = self._llm_gen(messages, log_context)

            initial_llm_thought_content = self._extract_content_from_llm_response(resp_from_llm_gen)
            if initial_llm_thought_content:
                self.observations.append(f"Initial thought/response: {initial_llm_thought_content}")
            else:
                logger.info("ReActAgent: Initial LLM response (before handler) had no textual content (might be only tool calls).")
            # The handler executes any tool calls, which populates self.tool_calls.
            resp_after_handler = self._llm_handler(resp_from_llm_gen, tools_dict, messages, log_context)

            for tool_call_info in self.tool_calls:  # Iterate over self.tool_calls populated by _llm_handler
                observation_string = (
                    f"Executed Action: Tool '{tool_call_info.get('tool_name', 'N/A')}' "
                    f"with arguments '{tool_call_info.get('arguments', '{}')}'. Result: '{str(tool_call_info.get('result', ''))[:200]}...'"
                )
                self.observations.append(observation_string)

            content_after_handler = self._extract_content_from_llm_response(resp_after_handler)
            if content_after_handler:
                self.observations.append(f"Response after tool execution: {content_after_handler}")
            else:
                logger.info("ReActAgent: LLM response after handler had no textual content.")

            if log_context:
                log_context.stacks.append(
                    {"component": "agent_tool_calls", "data": {"tool_calls": self.tool_calls.copy()}}
                )

            yield {"sources": retrieved_data}

            # Stream a display copy of tool calls with results truncated.
            display_tool_calls = []
            for tc in self.tool_calls:
                cleaned_tc = tc.copy()
                if len(str(cleaned_tc.get("result", ""))) > 50:
                    cleaned_tc["result"] = str(cleaned_tc["result"])[:50] + "..."
                display_tool_calls.append(cleaned_tc)
            if display_tool_calls:
                yield {"tool_calls": display_tool_calls}

            # Stop iterating once the LLM declares it has enough data.
            if "SATISFIED" in content_after_handler:
                logger.info("ReActAgent: LLM satisfied with the plan and data. Stopping reasoning.")
                break

        # 3. Create Final Answer based on all observations
        final_answer_stream = self._create_final_answer(query, self.observations, log_context)
        for answer_chunk in final_answer_stream:
            yield {"answer": answer_chunk}
        logger.info("ReActAgent: Finished generating final answer.")

    def _create_plan(
        self, query: str, docs_data: str, log_context: LogContext = None
    ) -> Generator[str, None, None]:
        """Stream a plan for *query* from the planning prompt template,
        yielding text chunks as they arrive."""
        plan_prompt_filled = planning_prompt_template.replace("{query}", query)
        if "{summaries}" in plan_prompt_filled:
            summaries = docs_data if docs_data else "No documents retrieved."
            plan_prompt_filled = plan_prompt_filled.replace("{summaries}", summaries)
        plan_prompt_filled = plan_prompt_filled.replace("{prompt}", self.prompt or "")
        plan_prompt_filled = plan_prompt_filled.replace("{observations}", "\n".join(self.observations))

        messages = [{"role": "user", "content": plan_prompt_filled}]

        plan_stream_from_llm = self.llm.gen_stream(
            model=self.gpt_model, messages=messages, tools=getattr(self, 'tools', None)  # Use self.tools
        )
        if log_context:
            data = build_stack_data(self.llm)
            log_context.stacks.append({"component": "planning_llm", "data": data})

        for chunk in plan_stream_from_llm:
            content_piece = self._extract_content_from_llm_response(chunk)
            if content_piece:
                yield content_piece

    def _create_final_answer(
        self, query: str, observations: List[str], log_context: LogContext = None
    ) -> Generator[str, None, None]:
        """Stream the final answer synthesized from all observations.

        Observations are truncated to keep the prompt bounded; tool use is
        deliberately disabled for this synthesis pass.
        """
        observation_string = "\n".join(observations)
        max_obs_len = 10000
        if len(observation_string) > max_obs_len:
            observation_string = observation_string[:max_obs_len] + "\n...[observations truncated]"
            logger.warning("ReActAgent: Truncated observations for final answer prompt due to length.")

        final_answer_prompt_filled = final_prompt_template.format(
            query=query, observations=observation_string
        )

        messages = [{"role": "user", "content": final_answer_prompt_filled}]

        # Final answer should synthesize, not call tools.
        final_answer_stream_from_llm = self.llm.gen_stream(
            model=self.gpt_model, messages=messages, tools=None
        )
        if log_context:
            data = build_stack_data(self.llm)
            log_context.stacks.append({"component": "final_answer_llm", "data": data})

        for chunk in final_answer_stream_from_llm:
            content_piece = self._extract_content_from_llm_response(chunk)
            if content_piece:
                yield content_piece
|
||||||
72
application/agents/tools/api_tool.py
Normal file
72
application/agents/tools/api_tool.py
Normal file
@@ -0,0 +1,72 @@
|
|||||||
|
import json
|
||||||
|
|
||||||
|
import requests
|
||||||
|
from application.agents.tools.base import Tool
|
||||||
|
|
||||||
|
|
||||||
|
class APITool(Tool):
    """
    API Tool
    A flexible tool for performing various API actions (e.g., sending messages, retrieving data) via custom user-specified APIs
    """

    def __init__(self, config):
        self.config = config
        self.url = config.get("url", "")
        self.method = config.get("method", "GET")
        self.headers = config.get("headers", {"Content-Type": "application/json"})
        self.query_params = config.get("query_params", {})

    def execute_action(self, action_name, **kwargs):
        """Execute the configured HTTP call; kwargs become the request body."""
        return self._make_api_call(
            self.url, self.method, self.headers, self.query_params, kwargs
        )

    def _make_api_call(self, url, method, headers, query_params, body):
        """Perform the HTTP request and normalize the result to a dict with
        ``status_code``, ``data`` (on success) and ``message`` keys."""
        if query_params:
            url = f"{url}?{requests.compat.urlencode(query_params)}"
        # FIX: pre-bind response so the except block below cannot hit a
        # NameError when requests.request itself raises (e.g. connection
        # error before any response exists).
        response = None
        try:
            print(f"Making API call: {method} {url} with body: {body}")
            if body == "{}":
                body = None
            response = requests.request(method, url, headers=headers, data=body)
            response.raise_for_status()
            content_type = response.headers.get(
                "Content-Type", "application/json"
            ).lower()
            if "application/json" in content_type:
                try:
                    data = response.json()
                except json.JSONDecodeError as e:
                    print(f"Error decoding JSON: {e}. Raw response: {response.text}")
                    return {
                        "status_code": response.status_code,
                        "message": f"API call returned invalid JSON. Error: {e}",
                        "data": response.text,
                    }
            elif "text/" in content_type or "application/xml" in content_type:
                data = response.text
            elif not response.content:
                data = None
            else:
                print(f"Unsupported content type: {content_type}")
                data = response.content

            return {
                "status_code": response.status_code,
                "data": data,
                "message": "API call successful.",
            }
        except requests.exceptions.RequestException as e:
            # FIX: compare against None explicitly — a requests.Response is
            # falsy for 4xx/5xx statuses (__bool__ mirrors ok), which are
            # exactly the responses that reach this handler via
            # raise_for_status(), so `if response` wrongly dropped the code.
            return {
                "status_code": response.status_code if response is not None else None,
                "message": f"API call failed: {str(e)}",
            }

    def get_actions_metadata(self):
        """api_tool actions are user-defined per agent, so none are advertised."""
        return []

    def get_config_requirements(self):
        """Configuration is supplied per-action by the agent layer."""
        return {}
|
||||||
21
application/agents/tools/base.py
Normal file
21
application/agents/tools/base.py
Normal file
@@ -0,0 +1,21 @@
|
|||||||
|
from abc import ABC, abstractmethod
|
||||||
|
|
||||||
|
|
||||||
|
class Tool(ABC):
    """Abstract interface that every agent tool must implement."""

    @abstractmethod
    def execute_action(self, action_name: str, **kwargs):
        """Run the action identified by *action_name* with the given keyword arguments."""
        ...

    @abstractmethod
    def get_actions_metadata(self):
        """
        Returns a list of JSON objects describing the actions supported by the tool.
        """
        ...

    @abstractmethod
    def get_config_requirements(self):
        """
        Returns a dictionary describing the configuration requirements for the tool.
        """
        ...
|
||||||
182
application/agents/tools/brave.py
Normal file
182
application/agents/tools/brave.py
Normal file
@@ -0,0 +1,182 @@
|
|||||||
|
import requests
|
||||||
|
from application.agents.tools.base import Tool
|
||||||
|
|
||||||
|
|
||||||
|
class BraveSearchTool(Tool):
    """
    Brave Search

    A tool for performing web and image searches using the Brave Search API.
    Requires an API key for authentication.
    """

    # requests has no default timeout; without one a stalled Brave endpoint
    # would block the calling agent indefinitely.
    REQUEST_TIMEOUT = 30

    def __init__(self, config):
        """
        Args:
            config: Tool configuration; ``token`` is the Brave Search API
                subscription key, sent as the ``X-Subscription-Token`` header.
        """
        self.config = config
        self.token = config.get("token", "")
        self.base_url = "https://api.search.brave.com/res/v1"

    def execute_action(self, action_name, **kwargs):
        """
        Dispatch ``action_name`` to the matching search handler.

        Raises:
            ValueError: If the action name is not supported.
        """
        actions = {
            "brave_web_search": self._web_search,
            "brave_image_search": self._image_search,
        }

        if action_name in actions:
            return actions[action_name](**kwargs)
        else:
            raise ValueError(f"Unknown action: {action_name}")

    def _web_search(
        self,
        query,
        country="ALL",
        search_lang="en",
        count=10,
        offset=0,
        safesearch="off",
        freshness=None,
        result_filter=None,
        extra_snippets=False,
        summary=False,
    ):
        """
        Performs a web search using the Brave Search API.

        Returns:
            Dict with ``status_code`` plus either the JSON ``results`` and a
            success ``message``, or a failure ``message``.
        """
        print(f"Performing Brave web search for: {query}")

        url = f"{self.base_url}/web/search"

        params = {
            "q": query,
            "country": country,
            "search_lang": search_lang,
            "count": min(count, 20),  # API caps web results at 20 per page
            "offset": min(offset, 9),  # API caps the pagination offset at 9
            "safesearch": safesearch,
        }

        # Optional parameters are only sent when explicitly requested.
        if freshness:
            params["freshness"] = freshness
        if result_filter:
            params["result_filter"] = result_filter
        if extra_snippets:
            params["extra_snippets"] = 1
        if summary:
            params["summary"] = 1
        headers = {
            "Accept": "application/json",
            "Accept-Encoding": "gzip",
            "X-Subscription-Token": self.token,
        }

        response = requests.get(
            url, params=params, headers=headers, timeout=self.REQUEST_TIMEOUT
        )

        if response.status_code == 200:
            return {
                "status_code": response.status_code,
                "results": response.json(),
                "message": "Search completed successfully.",
            }
        else:
            return {
                "status_code": response.status_code,
                "message": f"Search failed with status code: {response.status_code}.",
            }

    def _image_search(
        self,
        query,
        country="ALL",
        search_lang="en",
        count=5,
        safesearch="off",
        spellcheck=False,
    ):
        """
        Performs an image search using the Brave Search API.

        Returns:
            Dict with ``status_code`` plus either the JSON ``results`` and a
            success ``message``, or a failure ``message``.
        """
        print(f"Performing Brave image search for: {query}")

        url = f"{self.base_url}/images/search"

        params = {
            "q": query,
            "country": country,
            "search_lang": search_lang,
            "count": min(count, 100),  # API max is 100
            "safesearch": safesearch,
            "spellcheck": 1 if spellcheck else 0,
        }

        headers = {
            "Accept": "application/json",
            "Accept-Encoding": "gzip",
            "X-Subscription-Token": self.token,
        }

        response = requests.get(
            url, params=params, headers=headers, timeout=self.REQUEST_TIMEOUT
        )

        if response.status_code == 200:
            return {
                "status_code": response.status_code,
                "results": response.json(),
                "message": "Image search completed successfully.",
            }
        else:
            return {
                "status_code": response.status_code,
                "message": f"Image search failed with status code: {response.status_code}.",
            }

    def get_actions_metadata(self):
        """Describe the two search actions in JSON-schema form for the LLM."""
        return [
            {
                "name": "brave_web_search",
                "description": "Perform a web search using Brave Search",
                "parameters": {
                    "type": "object",
                    "properties": {
                        "query": {
                            "type": "string",
                            "description": "The search query (max 400 characters, 50 words)",
                        },
                        "search_lang": {
                            "type": "string",
                            "description": "The search language preference (default: en)",
                        },
                        "freshness": {
                            "type": "string",
                            "description": "Time filter for results (pd: last 24h, pw: last week, pm: last month, py: last year)",
                        },
                    },
                    "required": ["query"],
                    "additionalProperties": False,
                },
            },
            {
                "name": "brave_image_search",
                "description": "Perform an image search using Brave Search",
                "parameters": {
                    "type": "object",
                    "properties": {
                        "query": {
                            "type": "string",
                            "description": "The search query (max 400 characters, 50 words)",
                        },
                        "count": {
                            "type": "integer",
                            "description": "Number of results to return (max 100, default: 5)",
                        },
                    },
                    "required": ["query"],
                    "additionalProperties": False,
                },
            },
        ]

    def get_config_requirements(self):
        """Declare that a Brave API ``token`` is the only required setting."""
        return {
            "token": {
                "type": "string",
                "description": "Brave Search API key for authentication",
            },
        }
|
||||||
76
application/agents/tools/cryptoprice.py
Normal file
76
application/agents/tools/cryptoprice.py
Normal file
@@ -0,0 +1,76 @@
|
|||||||
|
import requests
|
||||||
|
from application.agents.tools.base import Tool
|
||||||
|
|
||||||
|
|
||||||
|
class CryptoPriceTool(Tool):
    """
    CryptoPrice

    A tool for retrieving cryptocurrency prices using the CryptoCompare public API
    """

    # requests has no default timeout; cap the wait so a slow upstream cannot
    # stall the agent indefinitely.
    REQUEST_TIMEOUT = 30

    def __init__(self, config):
        # The public CryptoCompare endpoint needs no credentials; config is
        # kept only for interface parity with other tools.
        self.config = config

    def execute_action(self, action_name, **kwargs):
        """
        Dispatch ``action_name`` to its handler.

        Raises:
            ValueError: If the action name is not supported.
        """
        actions = {"cryptoprice_get": self._get_price}

        if action_name in actions:
            return actions[action_name](**kwargs)
        else:
            raise ValueError(f"Unknown action: {action_name}")

    def _get_price(self, symbol, currency):
        """
        Fetches the current price of a given cryptocurrency symbol in the specified currency.
        Example:
        symbol = "BTC"
        currency = "USD"
        returns price in USD.
        """
        url = f"https://min-api.cryptocompare.com/data/price?fsym={symbol.upper()}&tsyms={currency.upper()}"
        response = requests.get(url, timeout=self.REQUEST_TIMEOUT)
        if response.status_code == 200:
            data = response.json()
            # A 200 response without the requested currency key means the
            # pair is unknown to CryptoCompare.
            if currency.upper() in data:
                return {
                    "status_code": response.status_code,
                    "price": data[currency.upper()],
                    "message": f"Price of {symbol.upper()} in {currency.upper()} retrieved successfully.",
                }
            else:
                return {
                    "status_code": response.status_code,
                    "message": f"Couldn't find price for {symbol.upper()} in {currency.upper()}.",
                }
        else:
            return {
                "status_code": response.status_code,
                "message": "Failed to retrieve price.",
            }

    def get_actions_metadata(self):
        """Describe the single price-lookup action in JSON-schema form."""
        return [
            {
                "name": "cryptoprice_get",
                "description": "Retrieve the price of a specified cryptocurrency in a given currency",
                "parameters": {
                    "type": "object",
                    "properties": {
                        "symbol": {
                            "type": "string",
                            "description": "The cryptocurrency symbol (e.g. BTC)",
                        },
                        "currency": {
                            "type": "string",
                            "description": "The currency in which you want the price (e.g. USD)",
                        },
                    },
                    "required": ["symbol", "currency"],
                    "additionalProperties": False,
                },
            }
        ]

    def get_config_requirements(self):
        # No specific configuration needed for this tool as it just queries a public endpoint
        return {}
|
||||||
114
application/agents/tools/duckduckgo.py
Normal file
114
application/agents/tools/duckduckgo.py
Normal file
@@ -0,0 +1,114 @@
|
|||||||
|
from application.agents.tools.base import Tool
|
||||||
|
from duckduckgo_search import DDGS
|
||||||
|
|
||||||
|
|
||||||
|
class DuckDuckGoSearchTool(Tool):
    """
    DuckDuckGo Search

    A tool for performing web and image searches using DuckDuckGo.
    """

    def __init__(self, config):
        # DuckDuckGo needs no credentials; the config is kept for interface
        # parity with other tools.
        self.config = config

    def execute_action(self, action_name, **kwargs):
        """Dispatch ``action_name`` to its handler; raise on unknown names."""
        handlers = {
            "ddg_web_search": self._web_search,
            "ddg_image_search": self._image_search,
        }
        handler = handlers.get(action_name)
        if handler is None:
            raise ValueError(f"Unknown action: {action_name}")
        return handler(**kwargs)

    def _web_search(self, query, max_results=5):
        """Run a DuckDuckGo text search, wrapping success or failure in a dict."""
        print(f"Performing DuckDuckGo web search for: {query}")

        try:
            hits = DDGS().text(
                query,
                max_results=max_results,
            )
        except Exception as e:
            # Best-effort: surface the failure as a result instead of raising.
            return {
                "status_code": 500,
                "message": f"Web search failed: {str(e)}",
            }

        return {
            "status_code": 200,
            "results": hits,
            "message": "Web search completed successfully.",
        }

    def _image_search(self, query, max_results=5):
        """Run a DuckDuckGo image search, wrapping success or failure in a dict."""
        print(f"Performing DuckDuckGo image search for: {query}")

        try:
            hits = DDGS().images(
                keywords=query,
                max_results=max_results,
            )
        except Exception as e:
            # Best-effort: surface the failure as a result instead of raising.
            return {
                "status_code": 500,
                "message": f"Image search failed: {str(e)}",
            }

        return {
            "status_code": 200,
            "results": hits,
            "message": "Image search completed successfully.",
        }

    def get_actions_metadata(self):
        """Describe both search actions in JSON-schema form for the LLM."""
        web_action = {
            "name": "ddg_web_search",
            "description": "Perform a web search using DuckDuckGo.",
            "parameters": {
                "type": "object",
                "properties": {
                    "query": {
                        "type": "string",
                        "description": "Search query",
                    },
                    "max_results": {
                        "type": "integer",
                        "description": "Number of results to return (default: 5)",
                    },
                },
                "required": ["query"],
            },
        }
        image_action = {
            "name": "ddg_image_search",
            "description": "Perform an image search using DuckDuckGo.",
            "parameters": {
                "type": "object",
                "properties": {
                    "query": {
                        "type": "string",
                        "description": "Search query",
                    },
                    "max_results": {
                        "type": "integer",
                        "description": "Number of results to return (default: 5, max: 50)",
                    },
                },
                "required": ["query"],
            },
        }
        return [web_action, image_action]

    def get_config_requirements(self):
        """No configuration is required for DuckDuckGo."""
        return {}
|
||||||
861
application/agents/tools/mcp_tool.py
Normal file
861
application/agents/tools/mcp_tool.py
Normal file
@@ -0,0 +1,861 @@
|
|||||||
|
import asyncio
|
||||||
|
import base64
|
||||||
|
import json
|
||||||
|
import logging
|
||||||
|
import time
|
||||||
|
from typing import Any, Dict, List, Optional
|
||||||
|
from urllib.parse import parse_qs, urlparse
|
||||||
|
|
||||||
|
from application.agents.tools.base import Tool
|
||||||
|
from application.api.user.tasks import mcp_oauth_status_task, mcp_oauth_task
|
||||||
|
from application.cache import get_redis_instance
|
||||||
|
|
||||||
|
from application.core.mongo_db import MongoDB
|
||||||
|
|
||||||
|
from application.core.settings import settings
|
||||||
|
|
||||||
|
from application.security.encryption import decrypt_credentials
|
||||||
|
from fastmcp import Client
|
||||||
|
from fastmcp.client.auth import BearerAuth
|
||||||
|
from fastmcp.client.transports import (
|
||||||
|
SSETransport,
|
||||||
|
StdioTransport,
|
||||||
|
StreamableHttpTransport,
|
||||||
|
)
|
||||||
|
from mcp.client.auth import OAuthClientProvider, TokenStorage
|
||||||
|
from mcp.shared.auth import OAuthClientInformationFull, OAuthClientMetadata, OAuthToken
|
||||||
|
|
||||||
|
from pydantic import AnyHttpUrl, ValidationError
|
||||||
|
from redis import Redis
|
||||||
|
|
||||||
|
mongo = MongoDB.get_client()
|
||||||
|
db = mongo[settings.MONGO_DB_NAME]
|
||||||
|
|
||||||
|
_mcp_clients_cache = {}
|
||||||
|
|
||||||
|
|
||||||
|
class MCPTool(Tool):
    """
    MCP Tool
    Connect to remote Model Context Protocol (MCP) servers to access dynamic tools and resources. Supports various authentication methods and provides secure access to external services through the MCP protocol.
    """

    def __init__(self, config: Dict[str, Any], user_id: Optional[str] = None):
        """
        Initialize the MCP Tool with configuration.

        Args:
            config: Dictionary containing MCP server configuration:
                - server_url: URL of the remote MCP server
                - transport_type: Transport type (auto, sse, http, stdio)
                - auth_type: Type of authentication (bearer, oauth, api_key, basic, none)
                - encrypted_credentials: Encrypted credentials (if available)
                - timeout: Request timeout in seconds (default: 30)
                - headers: Custom headers for requests
                - command: Command for STDIO transport
                - args: Arguments for STDIO transport
                - oauth_scopes: OAuth scopes for oauth auth type
                - oauth_client_name: OAuth client name for oauth auth type
            user_id: User ID for decrypting credentials (required if encrypted_credentials exist)
        """
        self.config = config
        self.user_id = user_id
        self.server_url = config.get("server_url", "")
        self.transport_type = config.get("transport_type", "auto")
        self.auth_type = config.get("auth_type", "none")
        self.timeout = config.get("timeout", 30)
        self.custom_headers = config.get("headers", {})

        # Prefer encrypted credentials when both they and a user_id (needed
        # for decryption) are available; otherwise fall back to plaintext.
        self.auth_credentials = {}
        if config.get("encrypted_credentials") and user_id:
            self.auth_credentials = decrypt_credentials(
                config["encrypted_credentials"], user_id
            )
        else:
            self.auth_credentials = config.get("auth_credentials", {})
        self.oauth_scopes = config.get("oauth_scopes", [])
        self.oauth_task_id = config.get("oauth_task_id", None)
        self.oauth_client_name = config.get("oauth_client_name", "DocsGPT-MCP")
        self.redirect_uri = f"{settings.API_URL}/api/mcp_server/callback"

        self.available_tools = []
        self._cache_key = self._generate_cache_key()
        self._client = None

        # Only validate and setup if server_url is provided and not OAuth
        # (OAuth client construction is deferred until the flow is driven
        # explicitly, e.g. via test_connection).
        if self.server_url and self.auth_type != "oauth":
            self._setup_client()

    def _generate_cache_key(self) -> str:
        """Generate a unique cache key for this MCP server configuration."""
        auth_key = ""
        if self.auth_type == "oauth":
            scopes_str = ",".join(self.oauth_scopes) if self.oauth_scopes else "none"
            auth_key = f"oauth:{self.oauth_client_name}:{scopes_str}"
        elif self.auth_type in ["bearer"]:
            token = self.auth_credentials.get(
                "bearer_token", ""
            ) or self.auth_credentials.get("access_token", "")
            # Only a 10-char prefix of the secret goes into the key, so the
            # full token never lives in the cache-key string.
            auth_key = f"bearer:{token[:10]}..." if token else "bearer:none"
        elif self.auth_type == "api_key":
            api_key = self.auth_credentials.get("api_key", "")
            auth_key = f"apikey:{api_key[:10]}..." if api_key else "apikey:none"
        elif self.auth_type == "basic":
            username = self.auth_credentials.get("username", "")
            auth_key = f"basic:{username}"
        else:
            auth_key = "none"
        return f"{self.server_url}#{self.transport_type}#{auth_key}"

    def _setup_client(self):
        """Setup FastMCP client with proper transport and authentication."""
        global _mcp_clients_cache
        # Reuse a process-wide client for identical configurations; entries
        # expire after 1800 s (30 min) and are rebuilt on next use.
        if self._cache_key in _mcp_clients_cache:
            cached_data = _mcp_clients_cache[self._cache_key]
            if time.time() - cached_data["created_at"] < 1800:
                self._client = cached_data["client"]
                return
            else:
                del _mcp_clients_cache[self._cache_key]
        transport = self._create_transport()
        auth = None

        if self.auth_type == "oauth":
            redis_client = get_redis_instance()
            auth = DocsGPTOAuth(
                mcp_url=self.server_url,
                scopes=self.oauth_scopes,
                redis_client=redis_client,
                redirect_uri=self.redirect_uri,
                task_id=self.oauth_task_id,
                db=db,
                user_id=self.user_id,
            )
        elif self.auth_type == "bearer":
            token = self.auth_credentials.get(
                "bearer_token", ""
            ) or self.auth_credentials.get("access_token", "")
            if token:
                auth = BearerAuth(token)
        # api_key/basic auth are applied as headers in _create_transport,
        # so no auth object is needed for them here.
        self._client = Client(transport, auth=auth)
        _mcp_clients_cache[self._cache_key] = {
            "client": self._client,
            "created_at": time.time(),
        }

    def _create_transport(self):
        """Create appropriate transport based on configuration."""
        headers = {"Content-Type": "application/json", "User-Agent": "DocsGPT-MCP/1.0"}
        headers.update(self.custom_headers)

        if self.auth_type == "api_key":
            api_key = self.auth_credentials.get("api_key", "")
            header_name = self.auth_credentials.get("api_key_header", "X-API-Key")
            if api_key:
                headers[header_name] = api_key
        elif self.auth_type == "basic":
            username = self.auth_credentials.get("username", "")
            password = self.auth_credentials.get("password", "")
            if username and password:
                credentials = base64.b64encode(
                    f"{username}:{password}".encode()
                ).decode()
                headers["Authorization"] = f"Basic {credentials}"
        # "auto" picks SSE when the URL looks like an SSE endpoint, otherwise
        # HTTP streaming.
        if self.transport_type == "auto":
            if "sse" in self.server_url.lower() or self.server_url.endswith("/sse"):
                transport_type = "sse"
            else:
                transport_type = "http"
        else:
            transport_type = self.transport_type
        if transport_type == "sse":
            headers.update({"Accept": "text/event-stream", "Cache-Control": "no-cache"})
            return SSETransport(url=self.server_url, headers=headers)
        elif transport_type == "http":
            return StreamableHttpTransport(url=self.server_url, headers=headers)
        elif transport_type == "stdio":
            command = self.config.get("command", "python")
            args = self.config.get("args", [])
            # For local subprocess servers, credentials are passed as
            # environment variables.
            env = self.auth_credentials if self.auth_credentials else None
            return StdioTransport(command=command, args=args, env=env)
        else:
            # Unknown transport names fall back to HTTP streaming.
            return StreamableHttpTransport(url=self.server_url, headers=headers)

    def _format_tools(self, tools_response) -> List[Dict]:
        """Format tools response to match expected format."""
        # Accept either a response object with a .tools attribute or a bare list.
        if hasattr(tools_response, "tools"):
            tools = tools_response.tools
        elif isinstance(tools_response, list):
            tools = tools_response
        else:
            tools = []
        tools_dict = []
        for tool in tools:
            if hasattr(tool, "name"):
                tool_dict = {
                    "name": tool.name,
                    "description": tool.description,
                }
                if hasattr(tool, "inputSchema"):
                    tool_dict["inputSchema"] = tool.inputSchema
                tools_dict.append(tool_dict)
            elif isinstance(tool, dict):
                tools_dict.append(tool)
            else:
                # Last resort: serialize pydantic models, else stringify.
                if hasattr(tool, "model_dump"):
                    tools_dict.append(tool.model_dump())
                else:
                    tools_dict.append({"name": str(tool), "description": ""})
        return tools_dict

    async def _execute_with_client(self, operation: str, *args, **kwargs):
        """Execute operation with FastMCP client."""
        if not self._client:
            raise Exception("FastMCP client not initialized")
        # The client is used as an async context manager so the connection
        # is opened and closed around each operation.
        async with self._client:
            if operation == "ping":
                return await self._client.ping()
            elif operation == "list_tools":
                tools_response = await self._client.list_tools()
                self.available_tools = self._format_tools(tools_response)
                return self.available_tools
            elif operation == "call_tool":
                tool_name = args[0]
                tool_args = kwargs
                return await self._client.call_tool(tool_name, tool_args)
            elif operation == "list_resources":
                return await self._client.list_resources()
            elif operation == "list_prompts":
                return await self._client.list_prompts()
            else:
                raise Exception(f"Unknown operation: {operation}")

    def _run_async_operation(self, operation: str, *args, **kwargs):
        """Run async operation in sync context."""
        try:
            try:
                # If we're already inside a running event loop, we cannot call
                # run_until_complete here — run the coroutine on a fresh loop
                # in a worker thread instead.
                loop = asyncio.get_running_loop()
                import concurrent.futures

                def run_in_thread():
                    new_loop = asyncio.new_event_loop()
                    asyncio.set_event_loop(new_loop)
                    try:
                        return new_loop.run_until_complete(
                            self._execute_with_client(operation, *args, **kwargs)
                        )
                    finally:
                        new_loop.close()

                with concurrent.futures.ThreadPoolExecutor() as executor:
                    future = executor.submit(run_in_thread)
                    # Bounded by the configured timeout (seconds).
                    return future.result(timeout=self.timeout)
            except RuntimeError:
                # No running loop: create one synchronously on this thread.
                loop = asyncio.new_event_loop()
                asyncio.set_event_loop(loop)
                try:
                    return loop.run_until_complete(
                        self._execute_with_client(operation, *args, **kwargs)
                    )
                finally:
                    loop.close()
        except Exception as e:
            print(f"Error occurred while running async operation: {e}")
            raise

    def discover_tools(self) -> List[Dict]:
        """
        Discover available tools from the MCP server using FastMCP.

        Returns:
            List of tool definitions from the server
        """
        if not self.server_url:
            return []
        if not self._client:
            self._setup_client()
        try:
            tools = self._run_async_operation("list_tools")
            self.available_tools = tools
            return self.available_tools
        except Exception as e:
            raise Exception(f"Failed to discover tools from MCP server: {str(e)}")

    def execute_action(self, action_name: str, **kwargs) -> Any:
        """
        Execute an action on the remote MCP server using FastMCP.

        Args:
            action_name: Name of the action to execute
            **kwargs: Parameters for the action

        Returns:
            Result from the MCP server
        """
        if not self.server_url:
            raise Exception("No MCP server configured")
        if not self._client:
            self._setup_client()
        # Strip empty-string/None parameters so optional arguments the LLM
        # left blank are not forwarded to the server.
        cleaned_kwargs = {}
        for key, value in kwargs.items():
            if value == "" or value is None:
                continue
            cleaned_kwargs[key] = value
        try:
            result = self._run_async_operation(
                "call_tool", action_name, **cleaned_kwargs
            )
            return self._format_result(result)
        except Exception as e:
            raise Exception(f"Failed to execute action '{action_name}': {str(e)}")

    def _format_result(self, result) -> Dict:
        """Format FastMCP result to match expected format."""
        if hasattr(result, "content"):
            # Normalize each content item to a {"type": ..., ...} dict.
            content_list = []
            for content_item in result.content:
                if hasattr(content_item, "text"):
                    content_list.append({"type": "text", "text": content_item.text})
                elif hasattr(content_item, "data"):
                    content_list.append({"type": "data", "data": content_item.data})
                else:
                    content_list.append(
                        {"type": "unknown", "content": str(content_item)}
                    )
            return {
                "content": content_list,
                "isError": getattr(result, "isError", False),
            }
        else:
            # Result is already a plain value; pass it through unchanged.
            return result

    def test_connection(self) -> Dict:
        """
        Test the connection to the MCP server and validate functionality.

        Returns:
            Dictionary with connection test results including tool count
        """
        if not self.server_url:
            return {
                "success": False,
                "message": "No MCP server URL configured",
                "tools_count": 0,
                "transport_type": self.transport_type,
                "auth_type": self.auth_type,
                "error_type": "ConfigurationError",
            }
        if not self._client:
            self._setup_client()
        try:
            # OAuth requires an asynchronous browser-driven flow, handled via
            # a background task rather than a direct connection test.
            if self.auth_type == "oauth":
                return self._test_oauth_connection()
            else:
                return self._test_regular_connection()
        except Exception as e:
            return {
                "success": False,
                "message": f"Connection failed: {str(e)}",
                "tools_count": 0,
                "transport_type": self.transport_type,
                "auth_type": self.auth_type,
                "error_type": type(e).__name__,
            }

    def _test_regular_connection(self) -> Dict:
        """Test connection for non-OAuth auth types."""
        # Ping is optional in MCP servers: a ping failure alone is not
        # treated as a failed connection as long as tool discovery works.
        try:
            self._run_async_operation("ping")
            ping_success = True
        except Exception:
            ping_success = False
        tools = self.discover_tools()

        message = f"Successfully connected to MCP server. Found {len(tools)} tools."
        if not ping_success:
            message += " (Ping not supported, but tool discovery worked)"
        return {
            "success": True,
            "message": message,
            "tools_count": len(tools),
            "transport_type": self.transport_type,
            "auth_type": self.auth_type,
            "ping_supported": ping_success,
            "tools": [tool.get("name", "unknown") for tool in tools],
        }

    def _test_oauth_connection(self) -> Dict:
        """Test connection for OAuth auth type with proper async handling."""
        try:
            # Kick off the OAuth flow as a Celery task; the caller polls the
            # returned task_id for completion.
            task = mcp_oauth_task.delay(config=self.config, user=self.user_id)
            if not task:
                raise Exception("Failed to start OAuth authentication")
            return {
                "success": True,
                "requires_oauth": True,
                "task_id": task.id,
                "status": "pending",
                "message": "OAuth flow started",
            }
        except Exception as e:
            return {
                "success": False,
                "message": f"OAuth connection failed: {str(e)}",
                "tools_count": 0,
                "transport_type": self.transport_type,
                "auth_type": self.auth_type,
                "error_type": type(e).__name__,
            }

    def get_actions_metadata(self) -> List[Dict]:
        """
        Get metadata for all available actions.

        Returns:
            List of action metadata dictionaries
        """
        actions = []
        for tool in self.available_tools:
            # Servers vary in where they put the schema; accept any of the
            # common key spellings.
            input_schema = (
                tool.get("inputSchema")
                or tool.get("input_schema")
                or tool.get("schema")
                or tool.get("parameters")
            )

            parameters_schema = {
                "type": "object",
                "properties": {},
                "required": [],
            }

            if input_schema:
                if isinstance(input_schema, dict):
                    if "properties" in input_schema:
                        parameters_schema = {
                            "type": input_schema.get("type", "object"),
                            "properties": input_schema.get("properties", {}),
                            "required": input_schema.get("required", []),
                        }

                        for key in ["additionalProperties", "description"]:
                            if key in input_schema:
                                parameters_schema[key] = input_schema[key]
                    else:
                        # Schema dict without a "properties" wrapper: treat the
                        # whole dict as the properties map.
                        parameters_schema["properties"] = input_schema
            action = {
                "name": tool.get("name", ""),
                "description": tool.get("description", ""),
                "parameters": parameters_schema,
            }
            actions.append(action)
        return actions

    def get_config_requirements(self) -> Dict:
        """Get configuration requirements for the MCP tool."""
        return {
            "server_url": {
                "type": "string",
                "description": "URL of the remote MCP server (e.g., https://api.example.com/mcp or https://docs.mcp.cloudflare.com/sse)",
                "required": True,
            },
            "transport_type": {
                "type": "string",
                "description": "Transport type for connection",
                "enum": ["auto", "sse", "http", "stdio"],
                "default": "auto",
                "required": False,
                "help": {
                    "auto": "Automatically detect best transport",
                    "sse": "Server-Sent Events (for real-time streaming)",
                    "http": "HTTP streaming (recommended for production)",
                    "stdio": "Standard I/O (for local servers)",
                },
            },
            "auth_type": {
                "type": "string",
                "description": "Authentication type",
                "enum": ["none", "bearer", "oauth", "api_key", "basic"],
                "default": "none",
                "required": True,
                "help": {
                    "none": "No authentication",
                    "bearer": "Bearer token authentication",
                    "oauth": "OAuth 2.1 authentication (with frontend integration)",
                    "api_key": "API key authentication",
                    "basic": "Basic authentication",
                },
            },
            "auth_credentials": {
                "type": "object",
                "description": "Authentication credentials (varies by auth_type)",
                "required": False,
                "properties": {
                    "bearer_token": {
                        "type": "string",
                        "description": "Bearer token for bearer auth",
                    },
                    "access_token": {
                        "type": "string",
                        "description": "Access token for OAuth (if pre-obtained)",
                    },
                    "api_key": {
                        "type": "string",
                        "description": "API key for api_key auth",
                    },
                    "api_key_header": {
                        "type": "string",
                        "description": "Header name for API key (default: X-API-Key)",
                    },
                    "username": {
                        "type": "string",
                        "description": "Username for basic auth",
                    },
                    "password": {
                        "type": "string",
                        "description": "Password for basic auth",
                    },
                },
            },
            "oauth_scopes": {
                "type": "array",
                "description": "OAuth scopes to request (for oauth auth_type)",
                "items": {"type": "string"},
                "required": False,
                "default": [],
            },
            "oauth_client_name": {
                "type": "string",
                "description": "Client name for OAuth registration (for oauth auth_type)",
                "default": "DocsGPT-MCP",
                "required": False,
            },
            "headers": {
                "type": "object",
                "description": "Custom headers to send with requests",
                "required": False,
            },
            "timeout": {
                "type": "integer",
                "description": "Request timeout in seconds",
                "default": 30,
                "minimum": 1,
                "maximum": 300,
                "required": False,
            },
            "command": {
                "type": "string",
                "description": "Command to run for STDIO transport (e.g., 'python')",
                "required": False,
            },
            "args": {
                "type": "array",
                "description": "Arguments for STDIO command",
                "items": {"type": "string"},
                "required": False,
            },
        }
|
||||||
|
|
||||||
|
|
||||||
|
class DocsGPTOAuth(OAuthClientProvider):
    """
    Custom OAuth handler for DocsGPT that uses frontend redirect instead of browser.

    Instead of opening a local browser, the authorization URL and state are
    published to Redis so the DocsGPT frontend can redirect the user; the
    authorization code is then read back from Redis by ``callback_handler``.
    """

    def __init__(
        self,
        mcp_url: str,
        redirect_uri: str,
        redis_client: Redis | None = None,
        redis_prefix: str = "mcp_oauth:",
        task_id: str | None = None,
        scopes: str | list[str] | None = None,
        client_name: str = "DocsGPT-MCP",
        user_id=None,
        db=None,
        additional_client_metadata: dict[str, Any] | None = None,
    ):
        """
        Initialize custom OAuth client provider for DocsGPT.

        Args:
            mcp_url: Full URL to the MCP endpoint
            redirect_uri: Custom redirect URI for DocsGPT frontend
            redis_client: Redis client for storing auth state
            redis_prefix: Prefix for Redis keys
            task_id: Task ID for tracking auth status
            scopes: OAuth scopes to request
            client_name: Name for this client during registration
            user_id: User ID for token storage
            db: Database instance for token storage
            additional_client_metadata: Extra fields for OAuthClientMetadata
        """
        self.redirect_uri = redirect_uri
        self.redis_client = redis_client
        self.redis_prefix = redis_prefix
        self.task_id = task_id
        self.user_id = user_id
        self.db = db

        # Only scheme + host identify the server; path/query are dropped.
        parsed_url = urlparse(mcp_url)
        self.server_base_url = f"{parsed_url.scheme}://{parsed_url.netloc}"

        # OAuth "scope" is a single space-separated string on the wire.
        if isinstance(scopes, list):
            scopes = " ".join(scopes)
        client_metadata = OAuthClientMetadata(
            client_name=client_name,
            redirect_uris=[AnyHttpUrl(redirect_uri)],
            grant_types=["authorization_code", "refresh_token"],
            response_types=["code"],
            scope=scopes,
            **(additional_client_metadata or {}),
        )

        storage = DBTokenStorage(
            server_url=self.server_base_url, user_id=self.user_id, db_client=self.db
        )

        super().__init__(
            server_url=self.server_base_url,
            client_metadata=client_metadata,
            storage=storage,
            redirect_handler=self.redirect_handler,
            callback_handler=self.callback_handler,
        )

        # Populated by redirect_handler once the provider hands us a URL.
        self.auth_url = None
        self.extracted_state = None

    def _process_auth_url(self, authorization_url: str) -> tuple[str, str]:
        """Extract the ``state`` query parameter from the authorization URL.

        Returns:
            Tuple of (authorization_url, state).

        Raises:
            Exception: If the URL cannot be parsed or carries no state.
        """
        try:
            parsed_url = urlparse(authorization_url)
            query_params = parse_qs(parsed_url.query)

            state_params = query_params.get("state", [])
            if state_params:
                state = state_params[0]
            else:
                raise ValueError("No state in auth URL")
            return authorization_url, state
        except Exception as e:
            # Chain the original exception so the root cause is preserved.
            raise Exception(f"Failed to process auth URL: {e}") from e

    async def redirect_handler(self, authorization_url: str) -> None:
        """Store auth URL and state in Redis for frontend to use."""
        auth_url, state = self._process_auth_url(authorization_url)
        logging.info(
            "[DocsGPTOAuth] Processed auth_url: %s, state: %s", auth_url, state
        )
        self.auth_url = auth_url
        self.extracted_state = state

        if self.redis_client and self.extracted_state:
            key = f"{self.redis_prefix}auth_url:{self.extracted_state}"
            self.redis_client.setex(key, 600, auth_url)
            logging.info("[DocsGPTOAuth] Stored auth_url in Redis: %s", key)

        # BUGFIX: also require redis_client here — task_id can be set while
        # redis_client is None, and the original crashed on setex below.
        if self.task_id and self.redis_client:
            status_key = f"mcp_oauth_status:{self.task_id}"
            status_data = {
                "status": "requires_redirect",
                "message": "OAuth authorization required",
                "authorization_url": self.auth_url,
                "state": self.extracted_state,
                "requires_oauth": True,
                "task_id": self.task_id,
            }
            self.redis_client.setex(status_key, 600, json.dumps(status_data))

    async def callback_handler(self) -> tuple[str, str | None]:
        """Wait for auth code from Redis using the state value.

        Polls Redis once per second for up to five minutes, looking for the
        authorization code (or an error) published by the OAuth callback
        endpoint under this flow's state.

        Returns:
            Tuple of (authorization code, state).

        Raises:
            Exception: If Redis/state are not configured, the provider reported
                an error, or no code arrived before the timeout.
        """
        if not self.redis_client or not self.extracted_state:
            raise Exception("Redis client or state not configured for OAuth")
        poll_interval = 1
        max_wait_time = 300
        code_key = f"{self.redis_prefix}code:{self.extracted_state}"

        if self.task_id:
            status_key = f"mcp_oauth_status:{self.task_id}"
            status_data = {
                "status": "awaiting_callback",
                "message": "Waiting for OAuth callback...",
                "authorization_url": self.auth_url,
                "state": self.extracted_state,
                "requires_oauth": True,
                "task_id": self.task_id,
            }
            self.redis_client.setex(status_key, 600, json.dumps(status_data))
        start_time = time.time()
        while time.time() - start_time < max_wait_time:
            code_data = self.redis_client.get(code_key)
            if code_data:
                code = code_data.decode()
                returned_state = self.extracted_state

                # Flow finished: drop the transient keys for this state.
                self.redis_client.delete(code_key)
                self.redis_client.delete(
                    f"{self.redis_prefix}auth_url:{self.extracted_state}"
                )
                self.redis_client.delete(
                    f"{self.redis_prefix}state:{self.extracted_state}"
                )

                if self.task_id:
                    status_data = {
                        "status": "callback_received",
                        "message": "OAuth callback received, completing authentication...",
                        "task_id": self.task_id,
                    }
                    self.redis_client.setex(status_key, 600, json.dumps(status_data))
                return code, returned_state
            error_key = f"{self.redis_prefix}error:{self.extracted_state}"
            error_data = self.redis_client.get(error_key)
            if error_data:
                error_msg = error_data.decode()
                self.redis_client.delete(error_key)
                self.redis_client.delete(
                    f"{self.redis_prefix}auth_url:{self.extracted_state}"
                )
                self.redis_client.delete(
                    f"{self.redis_prefix}state:{self.extracted_state}"
                )
                raise Exception(f"OAuth error: {error_msg}")
            await asyncio.sleep(poll_interval)
        # Timed out: clean up so a stale auth URL cannot be reused.
        self.redis_client.delete(f"{self.redis_prefix}auth_url:{self.extracted_state}")
        self.redis_client.delete(f"{self.redis_prefix}state:{self.extracted_state}")
        raise Exception("OAuth callback timeout: no code received within 5 minutes")
||||||
|
class DBTokenStorage(TokenStorage):
    """MongoDB-backed TokenStorage keyed by (server base URL, user id)."""

    def __init__(self, server_url: str, user_id: str, db_client):
        self.server_url = server_url
        self.user_id = user_id
        self.db_client = db_client
        # Tokens and client registration share one document per key.
        self.collection = db_client["connector_sessions"]

    @staticmethod
    def get_base_url(url: str) -> str:
        """Reduce a URL to scheme://host so keys ignore path and query."""
        parts = urlparse(url)
        return f"{parts.scheme}://{parts.netloc}"

    def get_db_key(self) -> dict:
        """Filter document identifying this (server, user) pair."""
        return {
            "server_url": self.get_base_url(self.server_url),
            "user_id": self.user_id,
        }

    async def get_tokens(self) -> OAuthToken | None:
        """Load stored OAuth tokens; None when absent or unparsable."""
        record = await asyncio.to_thread(self.collection.find_one, self.get_db_key())
        if not record or "tokens" not in record:
            return None
        try:
            return OAuthToken.model_validate(record["tokens"])
        except ValidationError as e:
            logging.error(f"Could not load tokens: {e}")
            return None

    async def set_tokens(self, tokens: OAuthToken) -> None:
        """Upsert the serialized tokens into the session document."""
        await asyncio.to_thread(
            self.collection.update_one,
            self.get_db_key(),
            {"$set": {"tokens": tokens.model_dump()}},
            True,
        )
        logging.info(f"Saved tokens for {self.get_base_url(self.server_url)}")

    async def get_client_info(self) -> OAuthClientInformationFull | None:
        """Load the registered OAuth client info.

        When no tokens remain, the stored registration is removed so the next
        flow performs a fresh dynamic client registration.
        """
        record = await asyncio.to_thread(self.collection.find_one, self.get_db_key())
        if not record or "client_info" not in record:
            return None
        try:
            info = OAuthClientInformationFull.model_validate(record["client_info"])
            if await self.get_tokens() is None:
                logging.debug(
                    "No tokens found, clearing client info to force fresh registration."
                )
                await asyncio.to_thread(
                    self.collection.update_one,
                    self.get_db_key(),
                    {"$unset": {"client_info": ""}},
                )
                return None
            return info
        except ValidationError as e:
            logging.error(f"Could not load client info: {e}")
            return None

    def _serialize_client_info(self, info: dict) -> dict:
        # AnyHttpUrl values are not BSON-serializable; stringify them first.
        uris = info.get("redirect_uris")
        if isinstance(uris, list):
            info["redirect_uris"] = [str(uri) for uri in uris]
        return info

    async def set_client_info(self, client_info: OAuthClientInformationFull) -> None:
        """Upsert the dynamic client registration into the session document."""
        payload = self._serialize_client_info(client_info.model_dump())
        await asyncio.to_thread(
            self.collection.update_one,
            self.get_db_key(),
            {"$set": {"client_info": payload}},
            True,
        )
        logging.info(f"Saved client info for {self.get_base_url(self.server_url)}")

    async def clear(self) -> None:
        """Drop this (server, user) session document entirely."""
        await asyncio.to_thread(self.collection.delete_one, self.get_db_key())
        logging.info(f"Cleared OAuth cache for {self.get_base_url(self.server_url)}")

    @classmethod
    async def clear_all(cls, db_client) -> None:
        """Drop every stored OAuth session (all servers, all users)."""
        collection = db_client["connector_sessions"]
        await asyncio.to_thread(collection.delete_many, {})
        logging.info("Cleared all OAuth client cache data.")
|
||||||
|
class MCPOAuthManager:
    """Manager for handling MCP OAuth callbacks."""

    def __init__(self, redis_client: Redis | None, redis_prefix: str = "mcp_oauth:"):
        self.redis_client = redis_client
        self.redis_prefix = redis_prefix

    def handle_oauth_callback(
        self, state: str, code: str, error: Optional[str] = None
    ) -> bool:
        """
        Handle OAuth callback from provider.

        Args:
            state: The state parameter from OAuth callback
            code: The authorization code from OAuth callback
            error: Error message if OAuth failed

        Returns:
            True if successful, False otherwise
        """
        try:
            if not self.redis_client or not state:
                raise Exception("Redis client or state not provided")
            if error:
                # Surface the provider error to the waiting callback_handler.
                self.redis_client.setex(f"{self.redis_prefix}error:{state}", 300, error)
                raise Exception(f"OAuth error received: {error}")
            # Publish the code and mark the flow completed; both expire in 5 min.
            self.redis_client.setex(f"{self.redis_prefix}code:{state}", 300, code)
            self.redis_client.setex(f"{self.redis_prefix}state:{state}", 300, "completed")
            return True
        except Exception as e:
            logging.error(f"Error handling OAuth callback: {e}")
            return False

    def get_oauth_status(self, task_id: str) -> Dict[str, Any]:
        """Get current status of OAuth flow using provided task_id."""
        if not task_id:
            return {"status": "not_started", "message": "OAuth flow not started"}
        return mcp_oauth_status_task(task_id)
127
application/agents/tools/ntfy.py
Normal file
127
application/agents/tools/ntfy.py
Normal file
@@ -0,0 +1,127 @@
|
|||||||
|
import requests
|
||||||
|
from application.agents.tools.base import Tool
|
||||||
|
|
||||||
|
class NtfyTool(Tool):
    """
    Ntfy Tool
    A tool for sending notifications to ntfy topics on a specified server.
    """

    def __init__(self, config):
        """
        Initialize the NtfyTool with configuration.

        Args:
            config (dict): Configuration dictionary containing the access token.
        """
        self.config = config
        self.token = config.get("token", "")

    def execute_action(self, action_name, **kwargs):
        """
        Execute the specified action with given parameters.

        Args:
            action_name (str): Name of the action to execute.
            **kwargs: Parameters for the action, including server_url.

        Returns:
            dict: Result of the action with status code and message.

        Raises:
            ValueError: If the action name is unknown.
        """
        actions = {
            "ntfy_send_message": self._send_message,
        }
        if action_name in actions:
            return actions[action_name](**kwargs)
        else:
            raise ValueError(f"Unknown action: {action_name}")

    def _send_message(self, server_url, message, topic, title=None, priority=None):
        """
        Send a message to an ntfy topic on the specified server.

        Args:
            server_url (str): Base URL of the ntfy server (e.g., https://ntfy.sh).
            message (str): The message text to send.
            topic (str): The topic to send the message to.
            title (str, optional): Title of the notification.
            priority (int, optional): Priority of the notification (1-5).

        Returns:
            dict: Response with status code and a confirmation message.

        Raises:
            ValueError: If priority is not an integer between 1 and 5.
        """
        url = f"{server_url.rstrip('/')}/{topic}"
        headers = {}
        if title:
            headers["X-Title"] = title
        if priority:
            try:
                priority = int(priority)
            except (ValueError, TypeError):
                raise ValueError("Priority must be convertible to an integer")
            if priority < 1 or priority > 5:
                raise ValueError("Priority must be an integer between 1 and 5")
            headers["X-Priority"] = str(priority)
        if self.token:
            headers["Authorization"] = f"Basic {self.token}"
        data = message.encode("utf-8")
        # BUGFIX: a timeout so a slow/unreachable ntfy server cannot hang the
        # agent indefinitely (requests has no default timeout).
        response = requests.post(url, headers=headers, data=data, timeout=30)
        return {"status_code": response.status_code, "message": "Message sent"}

    def get_actions_metadata(self):
        """
        Provide metadata about available actions.

        Returns:
            list: List of dictionaries describing each action.
        """
        return [
            {
                "name": "ntfy_send_message",
                "description": "Send a notification to an ntfy topic",
                "parameters": {
                    "type": "object",
                    "properties": {
                        "server_url": {
                            "type": "string",
                            "description": "Base URL of the ntfy server",
                        },
                        "message": {
                            "type": "string",
                            "description": "Text to send in the notification",
                        },
                        "topic": {
                            "type": "string",
                            "description": "Topic to send the notification to",
                        },
                        "title": {
                            "type": "string",
                            "description": "Title of the notification (optional)",
                        },
                        "priority": {
                            "type": "integer",
                            "description": "Priority of the notification (1-5, optional)",
                        },
                    },
                    "required": ["server_url", "message", "topic"],
                    "additionalProperties": False,
                },
            },
        ]

    def get_config_requirements(self):
        """
        Specify the configuration requirements.

        Returns:
            dict: Dictionary describing required config parameters.
        """
        return {
            "token": {"type": "string", "description": "Access token for authentication"},
        }
||||||
163
application/agents/tools/postgres.py
Normal file
163
application/agents/tools/postgres.py
Normal file
@@ -0,0 +1,163 @@
|
|||||||
|
import psycopg2
|
||||||
|
from application.agents.tools.base import Tool
|
||||||
|
|
||||||
|
class PostgresTool(Tool):
    """
    PostgreSQL Database Tool
    A tool for connecting to a PostgreSQL database using a connection string,
    executing SQL queries, and retrieving schema information.
    """

    def __init__(self, config):
        """Store config; the connection string lives in the 'token' key."""
        self.config = config
        self.connection_string = config.get("token", "")

    def execute_action(self, action_name, **kwargs):
        """Dispatch an action by name.

        Raises:
            ValueError: If the action name is unknown.
        """
        actions = {
            "postgres_execute_sql": self._execute_sql,
            "postgres_get_schema": self._get_schema,
        }

        if action_name in actions:
            return actions[action_name](**kwargs)
        else:
            raise ValueError(f"Unknown action: {action_name}")

    def _execute_sql(self, sql_query):
        """
        Executes an SQL query against the PostgreSQL database using a connection string.

        SELECT queries return rows as dicts keyed by column name; any other
        statement returns the affected row count.

        NOTE(review): sql_query originates from the LLM and is executed
        verbatim — access control must come from the DB credentials used in
        the connection string.
        """
        conn = None  # Initialize conn to None for error handling
        try:
            conn = psycopg2.connect(self.connection_string)
            # BUGFIX: context-managed cursor — the original only closed the
            # cursor on the success path, leaking it when execute() raised.
            with conn.cursor() as cur:
                cur.execute(sql_query)
                conn.commit()

                if sql_query.strip().lower().startswith("select"):
                    column_names = (
                        [desc[0] for desc in cur.description] if cur.description else []
                    )
                    results = [dict(zip(column_names, row)) for row in cur.fetchall()]
                    response_data = {"data": results, "column_names": column_names}
                else:
                    row_count = cur.rowcount
                    response_data = {
                        "message": f"Query executed successfully, {row_count} rows affected."
                    }

            return {
                "status_code": 200,
                "message": "SQL query executed successfully.",
                "response_data": response_data,
            }

        except psycopg2.Error as e:
            error_message = f"Database error: {e}"
            print(f"Database error: {e}")
            return {
                "status_code": 500,
                "message": "Failed to execute SQL query.",
                "error": error_message,
            }
        finally:
            if conn:  # Ensure connection is closed even if errors occur
                conn.close()

    def _get_schema(self, db_name):
        """
        Retrieves the schema of the PostgreSQL database using a connection string.

        Args:
            db_name: Accepted for API compatibility; the actual database is
                determined by the connection string, not this argument.
        """
        conn = None  # Initialize conn to None for error handling
        try:
            conn = psycopg2.connect(self.connection_string)
            # Context-managed cursor (see _execute_sql) so it is always closed.
            with conn.cursor() as cur:
                cur.execute("""
                    SELECT
                        table_name,
                        column_name,
                        data_type,
                        column_default,
                        is_nullable
                    FROM
                        information_schema.columns
                    WHERE
                        table_schema = 'public'
                    ORDER BY
                        table_name,
                        ordinal_position;
                """)

                schema_data = {}
                for row in cur.fetchall():
                    table_name, column_name, data_type, column_default, is_nullable = row
                    if table_name not in schema_data:
                        schema_data[table_name] = []
                    schema_data[table_name].append({
                        "column_name": column_name,
                        "data_type": data_type,
                        "column_default": column_default,
                        "is_nullable": is_nullable
                    })

            return {
                "status_code": 200,
                "message": "Database schema retrieved successfully.",
                "schema": schema_data,
            }

        except psycopg2.Error as e:
            error_message = f"Database error: {e}"
            print(f"Database error: {e}")
            return {
                "status_code": 500,
                "message": "Failed to retrieve database schema.",
                "error": error_message,
            }
        finally:
            if conn:  # Ensure connection is closed even if errors occur
                conn.close()

    def get_actions_metadata(self):
        """Describe the SQL-execution and schema-inspection actions."""
        return [
            {
                "name": "postgres_execute_sql",
                "description": "Execute an SQL query against the PostgreSQL database and return the results. Use this tool to interact with the database, e.g., retrieve specific data or perform updates. Only SELECT queries will return data, other queries will return execution status.",
                "parameters": {
                    "type": "object",
                    "properties": {
                        "sql_query": {
                            "type": "string",
                            "description": "The SQL query to execute.",
                        },
                    },
                    "required": ["sql_query"],
                    "additionalProperties": False,
                },
            },
            {
                "name": "postgres_get_schema",
                "description": "Retrieve the schema of the PostgreSQL database, including tables and their columns. Use this to understand the database structure before executing queries. db_name is 'default' if not provided.",
                "parameters": {
                    "type": "object",
                    "properties": {
                        "db_name": {
                            "type": "string",
                            "description": "The name of the database to retrieve the schema for.",
                        },
                    },
                    "required": ["db_name"],
                    "additionalProperties": False,
                },
            },
        ]

    def get_config_requirements(self):
        """Describe required configuration (the connection string)."""
        return {
            "token": {
                "type": "string",
                "description": "PostgreSQL database connection string (e.g., 'postgresql://user:password@host:port/dbname')",
            },
        }
|
||||||
83
application/agents/tools/read_webpage.py
Normal file
83
application/agents/tools/read_webpage.py
Normal file
@@ -0,0 +1,83 @@
|
|||||||
|
import requests
|
||||||
|
from markdownify import markdownify
|
||||||
|
from application.agents.tools.base import Tool
|
||||||
|
from urllib.parse import urlparse
|
||||||
|
|
||||||
|
class ReadWebpageTool(Tool):
    """
    Read Webpage (browser)
    A tool to fetch the HTML content of a URL and convert it to Markdown.
    """

    def __init__(self, config=None):
        """Keep the (unused) config for interface parity with other tools."""
        self.config = config

    def execute_action(self, action_name: str, **kwargs) -> str:
        """Fetch a webpage and return its content as Markdown.

        :param action_name: The name of the action to execute. Should be 'read_webpage'.
        :param kwargs: Keyword arguments, must include 'url'.
        :return: The Markdown content of the webpage or an error message.
        """
        if action_name != "read_webpage":
            return f"Error: Unknown action '{action_name}'. This tool only supports 'read_webpage'."

        url = kwargs.get("url")
        if not url:
            return "Error: URL parameter is missing."

        # Default to http:// when the caller supplied a bare host/path.
        if not urlparse(url).scheme:
            url = "http://" + url

        try:
            response = requests.get(url, timeout=10, headers={'User-Agent': 'DocsGPT-Agent/1.0'})
            response.raise_for_status()  # Raise an exception for HTTP errors (4xx or 5xx)
            return markdownify(
                response.text, heading_style="ATX", newline_style="BACKSLASH"
            )
        except requests.exceptions.RequestException as e:
            return f"Error fetching URL {url}: {e}"
        except Exception as e:
            return f"Error processing URL {url}: {e}"

    def get_actions_metadata(self):
        """
        Returns metadata for the actions supported by this tool.
        """
        return [
            {
                "name": "read_webpage",
                "description": "Fetches the HTML content of a given URL and returns it as clean Markdown text. Input must be a valid URL.",
                "parameters": {
                    "type": "object",
                    "properties": {
                        "url": {
                            "type": "string",
                            "description": "The fully qualified URL of the webpage to read (e.g., 'https://www.example.com').",
                        }
                    },
                    "required": ["url"],
                    "additionalProperties": False,
                },
            }
        ]

    def get_config_requirements(self):
        """
        Returns a dictionary describing the configuration requirements for the tool.
        This tool does not require any specific configuration.
        """
        return {}
||||||
86
application/agents/tools/telegram.py
Normal file
86
application/agents/tools/telegram.py
Normal file
@@ -0,0 +1,86 @@
|
|||||||
|
import requests
|
||||||
|
from application.agents.tools.base import Tool
|
||||||
|
|
||||||
|
|
||||||
|
class TelegramTool(Tool):
    """
    Telegram Bot
    A flexible Telegram tool for performing various actions (e.g., sending messages, images).
    Requires a bot token and chat ID for configuration
    """

    def __init__(self, config):
        """Store config; the bot token lives in the 'token' key."""
        self.config = config
        self.token = config.get("token", "")

    def execute_action(self, action_name, **kwargs):
        """Dispatch an action by name.

        Raises:
            ValueError: If the action name is unknown.
        """
        actions = {
            "telegram_send_message": self._send_message,
            "telegram_send_image": self._send_image,
        }

        if action_name in actions:
            return actions[action_name](**kwargs)
        else:
            raise ValueError(f"Unknown action: {action_name}")

    def _send_message(self, text, chat_id):
        """Send a text message to the given Telegram chat via the Bot API."""
        print(f"Sending message: {text}")
        url = f"https://api.telegram.org/bot{self.token}/sendMessage"
        payload = {"chat_id": chat_id, "text": text}
        # Timeout so an unreachable Telegram API cannot hang the agent.
        response = requests.post(url, data=payload, timeout=30)
        return {"status_code": response.status_code, "message": "Message sent"}

    def _send_image(self, image_url, chat_id):
        """Send an image (by URL) to the given Telegram chat via the Bot API."""
        print(f"Sending image: {image_url}")
        url = f"https://api.telegram.org/bot{self.token}/sendPhoto"
        payload = {"chat_id": chat_id, "photo": image_url}
        response = requests.post(url, data=payload, timeout=30)
        return {"status_code": response.status_code, "message": "Image sent"}

    def get_actions_metadata(self):
        """Describe both actions.

        BUGFIX: 'chat_id' is now listed as required — the handler methods take
        it as a mandatory parameter, so omitting it from the schema made the
        model produce calls that raised TypeError.
        """
        return [
            {
                "name": "telegram_send_message",
                "description": "Send a notification to Telegram chat",
                "parameters": {
                    "type": "object",
                    "properties": {
                        "text": {
                            "type": "string",
                            "description": "Text to send in the notification",
                        },
                        "chat_id": {
                            "type": "string",
                            "description": "Chat ID to send the notification to",
                        },
                    },
                    "required": ["text", "chat_id"],
                    "additionalProperties": False,
                },
            },
            {
                "name": "telegram_send_image",
                "description": "Send an image to the Telegram chat",
                "parameters": {
                    "type": "object",
                    "properties": {
                        "image_url": {
                            "type": "string",
                            "description": "URL of the image to send",
                        },
                        "chat_id": {
                            "type": "string",
                            "description": "Chat ID to send the image to",
                        },
                    },
                    "required": ["image_url", "chat_id"],
                    "additionalProperties": False,
                },
            },
        ]

    def get_config_requirements(self):
        """Describe required configuration (the bot token)."""
        return {
            "token": {"type": "string", "description": "Bot token for authentication"},
        }
||||||
61
application/agents/tools/tool_action_parser.py
Normal file
61
application/agents/tools/tool_action_parser.py
Normal file
@@ -0,0 +1,61 @@
|
|||||||
|
import json
|
||||||
|
import logging
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
class ToolActionParser:
    """Parse an LLM tool/function call into (tool_id, action_name, arguments).

    Tool names are expected to follow the '<action_name>_<tool_id>' format,
    where tool_id is the numerical suffix after the last underscore.
    """

    def __init__(self, llm_type):
        self.llm_type = llm_type
        # Dispatch table; unknown LLM types fall back to the OpenAI format.
        self.parsers = {
            "OpenAILLM": self._parse_openai_llm,
            "GoogleLLM": self._parse_google_llm,
        }

    def parse_args(self, call):
        """Return (tool_id, action_name, call_args), or (None, None, None) on failure."""
        parser = self.parsers.get(self.llm_type, self._parse_openai_llm)
        return parser(call)

    def _split_tool_name(self, name):
        """Split '<action_name>_<tool_id>' into (tool_id, action_name).

        Returns (None, None) when the name has no underscore — a strong sign
        of a hallucinated tool call. Shared by both LLM-specific parsers.
        """
        tool_parts = name.split("_")

        # If the tool name doesn't contain an underscore, it's likely a hallucinated tool
        if len(tool_parts) < 2:
            logger.warning(f"Invalid tool name format: {name}. Expected format: action_name_tool_id")
            return None, None

        tool_id = tool_parts[-1]
        action_name = "_".join(tool_parts[:-1])

        # Validate that tool_id looks like a numerical ID
        if not tool_id.isdigit():
            logger.warning(f"Tool ID '{tool_id}' is not numerical. This might be a hallucinated tool call.")

        return tool_id, action_name

    def _parse_openai_llm(self, call):
        """Parse an OpenAI-style call whose arguments are a JSON string."""
        try:
            # BUGFIX: json.JSONDecodeError was not caught, so malformed
            # (hallucinated) argument strings crashed the agent.
            call_args = json.loads(call.arguments)
            tool_id, action_name = self._split_tool_name(call.name)
            if tool_id is None:
                return None, None, None
        except (AttributeError, TypeError, json.JSONDecodeError) as e:
            logger.error(f"Error parsing OpenAI LLM call: {e}")
            return None, None, None
        return tool_id, action_name, call_args

    def _parse_google_llm(self, call):
        """Parse a Google-style call whose arguments are already a mapping."""
        try:
            call_args = call.arguments
            tool_id, action_name = self._split_tool_name(call.name)
            if tool_id is None:
                return None, None, None
        except (AttributeError, TypeError) as e:
            logger.error(f"Error parsing Google LLM call: {e}")
            return None, None, None
        return tool_id, action_name, call_args
||||||
49
application/agents/tools/tool_manager.py
Normal file
49
application/agents/tools/tool_manager.py
Normal file
@@ -0,0 +1,49 @@
|
|||||||
|
import importlib
|
||||||
|
import inspect
|
||||||
|
import os
|
||||||
|
import pkgutil
|
||||||
|
|
||||||
|
from application.agents.tools.base import Tool
|
||||||
|
|
||||||
|
|
||||||
|
class ToolManager:
    """Discovers, instantiates, and dispatches Tool implementations.

    On construction, every tool module in this package is imported and its
    Tool subclass instantiated with the matching entry from ``config``
    (keyed by module name).
    """

    def __init__(self, config):
        self.config = config
        self.tools = {}
        self.load_tools()

    def load_tools(self):
        """Import each sibling tool module and instantiate its Tool subclass."""
        package_dir = os.path.join(os.path.dirname(__file__))
        for _finder, module_name, _is_pkg in pkgutil.iter_modules([package_dir]):
            # Skip the abstract base module and dunder modules (__init__ etc.).
            if module_name == "base" or module_name.startswith("__"):
                continue
            module = importlib.import_module(f"application.agents.tools.{module_name}")
            for _cls_name, candidate in inspect.getmembers(module, inspect.isclass):
                if issubclass(candidate, Tool) and candidate is not Tool:
                    self.tools[module_name] = candidate(self.config.get(module_name, {}))

    def load_tool(self, tool_name, tool_config, user_id=None):
        """Instantiate a single tool by module name, recording its config.

        For "mcp_tool", a truthy ``user_id`` is forwarded to the constructor.
        Returns None when the module defines no concrete Tool subclass.
        """
        self.config[tool_name] = tool_config
        module = importlib.import_module(f"application.agents.tools.{tool_name}")
        for _cls_name, candidate in inspect.getmembers(module, inspect.isclass):
            if issubclass(candidate, Tool) and candidate is not Tool:
                if tool_name == "mcp_tool" and user_id:
                    return candidate(tool_config, user_id)
                return candidate(tool_config)

    def execute_action(self, tool_name, action_name, user_id=None, **kwargs):
        """Run ``action_name`` on the named loaded tool.

        Raises ValueError when the tool was never loaded. "mcp_tool" is
        re-instantiated per call so the user_id can be bound to it.
        """
        if tool_name not in self.tools:
            raise ValueError(f"Tool '{tool_name}' not loaded")
        if tool_name == "mcp_tool" and user_id:
            per_user_tool = self.load_tool(
                tool_name, self.config.get(tool_name, {}), user_id
            )
            return per_user_tool.execute_action(action_name, **kwargs)
        return self.tools[tool_name].execute_action(action_name, **kwargs)

    def get_all_actions_metadata(self):
        """Collect action metadata from every loaded tool into one flat list."""
        collected = []
        for tool_instance in self.tools.values():
            collected.extend(tool_instance.get_actions_metadata())
        return collected
|
||||||
7
application/api/__init__.py
Normal file
7
application/api/__init__.py
Normal file
@@ -0,0 +1,7 @@
|
|||||||
|
from flask_restx import Api

# Single shared Flask-RESTX Api instance for the whole application;
# route modules register their namespaces/resources against this object.
api = Api(
    version="1.0",
    title="DocsGPT API",
    description="API for DocsGPT",
)
|
||||||
19
application/api/answer/__init__.py
Normal file
19
application/api/answer/__init__.py
Normal file
@@ -0,0 +1,19 @@
|
|||||||
|
from flask import Blueprint

from application.api import api
from application.api.answer.routes.answer import AnswerResource
from application.api.answer.routes.base import answer_ns
from application.api.answer.routes.stream import StreamResource


# Blueprint for the answer-related endpoints; the shared Api instance
# carries the actual routes via the namespace registered below.
answer = Blueprint("answer", __name__)

api.add_namespace(answer_ns)


def init_answer_routes():
    """Register the streaming and non-streaming answer resources on the shared Api."""
    api.add_resource(StreamResource, "/stream")
    api.add_resource(AnswerResource, "/api/answer")


# Routes are registered eagerly at import time.
init_answer_routes()
|
||||||
0
application/api/answer/routes/__init__.py
Normal file
0
application/api/answer/routes/__init__.py
Normal file
122
application/api/answer/routes/answer.py
Normal file
122
application/api/answer/routes/answer.py
Normal file
@@ -0,0 +1,122 @@
|
|||||||
|
import logging
|
||||||
|
import traceback
|
||||||
|
|
||||||
|
from flask import make_response, request
|
||||||
|
from flask_restx import fields, Resource
|
||||||
|
|
||||||
|
from application.api import api
|
||||||
|
|
||||||
|
from application.api.answer.routes.base import answer_ns, BaseAnswerResource
|
||||||
|
|
||||||
|
from application.api.answer.services.stream_processor import StreamProcessor
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
@answer_ns.route("/api/answer")
|
||||||
|
class AnswerResource(Resource, BaseAnswerResource):
|
||||||
|
def __init__(self, *args, **kwargs):
|
||||||
|
Resource.__init__(self, *args, **kwargs)
|
||||||
|
BaseAnswerResource.__init__(self)
|
||||||
|
|
||||||
|
answer_model = answer_ns.model(
|
||||||
|
"AnswerModel",
|
||||||
|
{
|
||||||
|
"question": fields.String(
|
||||||
|
required=True, description="Question to be asked"
|
||||||
|
),
|
||||||
|
"history": fields.List(
|
||||||
|
fields.String,
|
||||||
|
required=False,
|
||||||
|
description="Conversation history (only for new conversations)",
|
||||||
|
),
|
||||||
|
"conversation_id": fields.String(
|
||||||
|
required=False,
|
||||||
|
description="Existing conversation ID (loads history)",
|
||||||
|
),
|
||||||
|
"prompt_id": fields.String(
|
||||||
|
required=False, default="default", description="Prompt ID"
|
||||||
|
),
|
||||||
|
"chunks": fields.Integer(
|
||||||
|
required=False, default=2, description="Number of chunks"
|
||||||
|
),
|
||||||
|
"token_limit": fields.Integer(required=False, description="Token limit"),
|
||||||
|
"retriever": fields.String(required=False, description="Retriever type"),
|
||||||
|
"api_key": fields.String(required=False, description="API key"),
|
||||||
|
"active_docs": fields.String(
|
||||||
|
required=False, description="Active documents"
|
||||||
|
),
|
||||||
|
"isNoneDoc": fields.Boolean(
|
||||||
|
required=False, description="Flag indicating if no document is used"
|
||||||
|
),
|
||||||
|
"save_conversation": fields.Boolean(
|
||||||
|
required=False,
|
||||||
|
default=True,
|
||||||
|
description="Whether to save the conversation",
|
||||||
|
),
|
||||||
|
},
|
||||||
|
)
|
||||||
|
|
||||||
|
@api.expect(answer_model)
|
||||||
|
@api.doc(description="Provide a response based on the question and retriever")
|
||||||
|
def post(self):
|
||||||
|
data = request.get_json()
|
||||||
|
if error := self.validate_request(data):
|
||||||
|
return error
|
||||||
|
decoded_token = getattr(request, "decoded_token", None)
|
||||||
|
processor = StreamProcessor(data, decoded_token)
|
||||||
|
try:
|
||||||
|
processor.initialize()
|
||||||
|
if not processor.decoded_token:
|
||||||
|
return make_response({"error": "Unauthorized"}, 401)
|
||||||
|
agent = processor.create_agent()
|
||||||
|
retriever = processor.create_retriever()
|
||||||
|
|
||||||
|
stream = self.complete_stream(
|
||||||
|
question=data["question"],
|
||||||
|
agent=agent,
|
||||||
|
retriever=retriever,
|
||||||
|
conversation_id=processor.conversation_id,
|
||||||
|
user_api_key=processor.agent_config.get("user_api_key"),
|
||||||
|
decoded_token=processor.decoded_token,
|
||||||
|
isNoneDoc=data.get("isNoneDoc"),
|
||||||
|
index=None,
|
||||||
|
should_save_conversation=data.get("save_conversation", True),
|
||||||
|
)
|
||||||
|
stream_result = self.process_response_stream(stream)
|
||||||
|
|
||||||
|
if len(stream_result) == 7:
|
||||||
|
(
|
||||||
|
conversation_id,
|
||||||
|
response,
|
||||||
|
sources,
|
||||||
|
tool_calls,
|
||||||
|
thought,
|
||||||
|
error,
|
||||||
|
structured_info,
|
||||||
|
) = stream_result
|
||||||
|
else:
|
||||||
|
conversation_id, response, sources, tool_calls, thought, error = (
|
||||||
|
stream_result
|
||||||
|
)
|
||||||
|
structured_info = None
|
||||||
|
|
||||||
|
if error:
|
||||||
|
return make_response({"error": error}, 400)
|
||||||
|
result = {
|
||||||
|
"conversation_id": conversation_id,
|
||||||
|
"answer": response,
|
||||||
|
"sources": sources,
|
||||||
|
"tool_calls": tool_calls,
|
||||||
|
"thought": thought,
|
||||||
|
}
|
||||||
|
|
||||||
|
if structured_info:
|
||||||
|
result.update(structured_info)
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(
|
||||||
|
f"/api/answer - error: {str(e)} - traceback: {traceback.format_exc()}",
|
||||||
|
extra={"error": str(e), "traceback": traceback.format_exc()},
|
||||||
|
)
|
||||||
|
return make_response({"error": str(e)}, 500)
|
||||||
|
return make_response(result, 200)
|
||||||
263
application/api/answer/routes/base.py
Normal file
263
application/api/answer/routes/base.py
Normal file
@@ -0,0 +1,263 @@
|
|||||||
|
import datetime
|
||||||
|
import json
|
||||||
|
import logging
|
||||||
|
from typing import Any, Dict, Generator, List, Optional
|
||||||
|
|
||||||
|
from flask import Response
|
||||||
|
from flask_restx import Namespace
|
||||||
|
|
||||||
|
from application.api.answer.services.conversation_service import ConversationService
|
||||||
|
|
||||||
|
from application.core.mongo_db import MongoDB
|
||||||
|
from application.core.settings import settings
|
||||||
|
from application.llm.llm_creator import LLMCreator
|
||||||
|
from application.utils import check_required_fields, get_gpt_model
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
answer_ns = Namespace("answer", description="Answer related operations", path="/")
|
||||||
|
|
||||||
|
|
||||||
|
class BaseAnswerResource:
    """Shared base class for answer endpoints.

    Provides request validation, the SSE generator used by both the
    streaming and non-streaming endpoints, and a collector that drains
    that stream into a single result tuple.
    """

    def __init__(self):
        mongo = MongoDB.get_client()
        db = mongo[settings.MONGO_DB_NAME]
        self.user_logs_collection = db["user_logs"]
        self.gpt_model = get_gpt_model()
        self.conversation_service = ConversationService()

    def validate_request(
        self, data: Dict[str, Any], require_conversation_id: bool = False
    ) -> Optional[Response]:
        """Common request validation.

        Returns an error Response when required fields are missing,
        otherwise None.
        """
        required_fields = ["question"]
        if require_conversation_id:
            required_fields.append("conversation_id")
        if missing_fields := check_required_fields(data, required_fields):
            return missing_fields
        return None

    def complete_stream(
        self,
        question: str,
        agent: Any,
        retriever: Any,
        conversation_id: Optional[str],
        user_api_key: Optional[str],
        decoded_token: Dict[str, Any],
        isNoneDoc: bool = False,
        index: Optional[int] = None,
        should_save_conversation: bool = True,
        attachment_ids: Optional[List[str]] = None,
        agent_id: Optional[str] = None,
        is_shared_usage: bool = False,
        shared_token: Optional[str] = None,
    ) -> Generator[str, None, None]:
        """
        Generator function that streams the complete conversation response.

        Args:
            question: The user's question
            agent: The agent instance
            retriever: The retriever instance
            conversation_id: Existing conversation ID
            user_api_key: User's API key if any
            decoded_token: Decoded JWT token
            isNoneDoc: Flag for document-less responses
            index: Index of message to update
            should_save_conversation: Whether to persist the conversation
            attachment_ids: List of attachment IDs
            agent_id: ID of agent used
            is_shared_usage: Flag for shared agent usage
            shared_token: Token for shared agent

        Yields:
            Server-sent event strings ("data: {...}\\n\\n")
        """
        try:
            response_full, thought, source_log_docs, tool_calls = "", "", [], []
            is_structured = False
            schema_info = None
            structured_chunks = []

            for line in agent.gen(query=question, retriever=retriever):
                if "answer" in line:
                    response_full += str(line["answer"])
                    if line.get("structured"):
                        # Structured chunks are buffered and emitted as one
                        # "structured_answer" event after the loop.
                        is_structured = True
                        schema_info = line.get("schema")
                        structured_chunks.append(line["answer"])
                    else:
                        data = json.dumps({"type": "answer", "answer": line["answer"]})
                        yield f"data: {data}\n\n"
                elif "sources" in line:
                    # Full sources are kept for logging/persistence; the
                    # streamed copies are truncated to 100 chars of text.
                    truncated_sources = []
                    source_log_docs = line["sources"]
                    for source in line["sources"]:
                        truncated_source = source.copy()
                        if "text" in truncated_source:
                            truncated_source["text"] = (
                                truncated_source["text"][:100].strip() + "..."
                            )
                        truncated_sources.append(truncated_source)
                    if truncated_sources:
                        data = json.dumps(
                            {"type": "source", "source": truncated_sources}
                        )
                        yield f"data: {data}\n\n"
                elif "tool_calls" in line:
                    tool_calls = line["tool_calls"]
                elif "thought" in line:
                    thought += line["thought"]
                    data = json.dumps({"type": "thought", "thought": line["thought"]})
                    yield f"data: {data}\n\n"
                elif "type" in line:
                    # Pass through any already-typed event from the agent.
                    data = json.dumps(line)
                    yield f"data: {data}\n\n"

            if is_structured and structured_chunks:
                structured_data = {
                    "type": "structured_answer",
                    "answer": response_full,
                    "structured": True,
                    "schema": schema_info,
                }
                data = json.dumps(structured_data)
                yield f"data: {data}\n\n"

            if isNoneDoc:
                for doc in source_log_docs:
                    doc["source"] = "None"
            # The LLM is needed by save_conversation to summarize new
            # conversations into a short title.
            llm = LLMCreator.create_llm(
                settings.LLM_PROVIDER,
                api_key=settings.API_KEY,
                user_api_key=user_api_key,
                decoded_token=decoded_token,
            )

            if should_save_conversation:
                conversation_id = self.conversation_service.save_conversation(
                    conversation_id,
                    question,
                    response_full,
                    thought,
                    source_log_docs,
                    tool_calls,
                    llm,
                    self.gpt_model,
                    decoded_token,
                    index=index,
                    api_key=user_api_key,
                    agent_id=agent_id,
                    is_shared_usage=is_shared_usage,
                    shared_token=shared_token,
                    attachment_ids=attachment_ids,
                )
            else:
                conversation_id = None
            id_data = {"type": "id", "id": str(conversation_id)}
            data = json.dumps(id_data)
            yield f"data: {data}\n\n"

            retriever_params = retriever.get_params()
            log_data = {
                "action": "stream_answer",
                "level": "info",
                "user": decoded_token.get("sub"),
                "api_key": user_api_key,
                "question": question,
                "response": response_full,
                "sources": source_log_docs,
                "retriever_params": retriever_params,
                "attachments": attachment_ids,
                "timestamp": datetime.datetime.now(datetime.timezone.utc),
            }
            if is_structured:
                log_data["structured_output"] = True
                if schema_info:
                    log_data["schema"] = schema_info

            # clean up text fields to be no longer than 10000 characters
            for key, value in log_data.items():
                if isinstance(value, str) and len(value) > 10000:
                    log_data[key] = value[:10000]

            self.user_logs_collection.insert_one(log_data)

            # End of stream
            data = json.dumps({"type": "end"})
            yield f"data: {data}\n\n"
        except Exception as e:
            logger.error(f"Error in stream: {str(e)}", exc_info=True)
            data = json.dumps(
                {
                    "type": "error",
                    "error": "Please try again later. We apologize for any inconvenience.",
                }
            )
            yield f"data: {data}\n\n"
            return

    def process_response_stream(self, stream):
        """Process the stream response for non-streaming endpoint.

        Drains the SSE stream and returns a 6-tuple
        (conversation_id, response, sources, tool_calls, thought, error);
        when the stream carried a structured answer, a 7th element
        {"structured": True, "schema": ...} is appended.
        """
        conversation_id = ""
        response_full = ""
        source_log_docs = []
        tool_calls = []
        thought = ""
        stream_ended = False
        is_structured = False
        schema_info = None

        for line in stream:
            try:
                event_data = line.replace("data: ", "").strip()
                event = json.loads(event_data)

                if event["type"] == "id":
                    conversation_id = event["id"]
                elif event["type"] == "answer":
                    response_full += event["answer"]
                elif event["type"] == "structured_answer":
                    response_full = event["answer"]
                    is_structured = True
                    schema_info = event.get("schema")
                elif event["type"] == "source":
                    source_log_docs = event["source"]
                elif event["type"] == "tool_calls":
                    tool_calls = event["tool_calls"]
                elif event["type"] == "thought":
                    thought = event["thought"]
                elif event["type"] == "error":
                    logger.error(f"Error from stream: {event['error']}")
                    # FIX: error results must have the same 6-tuple arity as
                    # the success path — callers unpack six values, and the
                    # previous 5-tuple raised ValueError instead of
                    # surfacing the error.
                    return None, None, None, None, None, event["error"]
                elif event["type"] == "end":
                    stream_ended = True
            except (json.JSONDecodeError, KeyError) as e:
                logger.warning(f"Error parsing stream event: {e}, line: {line}")
                continue
        if not stream_ended:
            logger.error("Stream ended unexpectedly without an 'end' event.")
            # FIX: same arity fix as above for the missing-'end' path.
            return None, None, None, None, None, "Stream ended unexpectedly"

        result = (
            conversation_id,
            response_full,
            source_log_docs,
            tool_calls,
            thought,
            None,
        )

        if is_structured:
            result = result + ({"structured": True, "schema": schema_info},)

        return result

    def error_stream_generate(self, err_response):
        """Yield a single SSE error event carrying ``err_response``."""
        data = json.dumps({"type": "error", "error": err_response})
        yield f"data: {data}\n\n"
|
||||||
117
application/api/answer/routes/stream.py
Normal file
117
application/api/answer/routes/stream.py
Normal file
@@ -0,0 +1,117 @@
|
|||||||
|
import logging
|
||||||
|
import traceback
|
||||||
|
|
||||||
|
from flask import request, Response
|
||||||
|
from flask_restx import fields, Resource
|
||||||
|
|
||||||
|
from application.api import api
|
||||||
|
|
||||||
|
from application.api.answer.routes.base import answer_ns, BaseAnswerResource
|
||||||
|
|
||||||
|
from application.api.answer.services.stream_processor import StreamProcessor
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
@answer_ns.route("/stream")
|
||||||
|
class StreamResource(Resource, BaseAnswerResource):
|
||||||
|
def __init__(self, *args, **kwargs):
|
||||||
|
Resource.__init__(self, *args, **kwargs)
|
||||||
|
BaseAnswerResource.__init__(self)
|
||||||
|
|
||||||
|
stream_model = answer_ns.model(
|
||||||
|
"StreamModel",
|
||||||
|
{
|
||||||
|
"question": fields.String(
|
||||||
|
required=True, description="Question to be asked"
|
||||||
|
),
|
||||||
|
"history": fields.List(
|
||||||
|
fields.String,
|
||||||
|
required=False,
|
||||||
|
description="Conversation history (only for new conversations)",
|
||||||
|
),
|
||||||
|
"conversation_id": fields.String(
|
||||||
|
required=False,
|
||||||
|
description="Existing conversation ID (loads history)",
|
||||||
|
),
|
||||||
|
"prompt_id": fields.String(
|
||||||
|
required=False, default="default", description="Prompt ID"
|
||||||
|
),
|
||||||
|
"chunks": fields.Integer(
|
||||||
|
required=False, default=2, description="Number of chunks"
|
||||||
|
),
|
||||||
|
"token_limit": fields.Integer(required=False, description="Token limit"),
|
||||||
|
"retriever": fields.String(required=False, description="Retriever type"),
|
||||||
|
"api_key": fields.String(required=False, description="API key"),
|
||||||
|
"active_docs": fields.String(
|
||||||
|
required=False, description="Active documents"
|
||||||
|
),
|
||||||
|
"isNoneDoc": fields.Boolean(
|
||||||
|
required=False, description="Flag indicating if no document is used"
|
||||||
|
),
|
||||||
|
"index": fields.Integer(
|
||||||
|
required=False, description="Index of the query to update"
|
||||||
|
),
|
||||||
|
"save_conversation": fields.Boolean(
|
||||||
|
required=False,
|
||||||
|
default=True,
|
||||||
|
description="Whether to save the conversation",
|
||||||
|
),
|
||||||
|
"attachments": fields.List(
|
||||||
|
fields.String, required=False, description="List of attachment IDs"
|
||||||
|
),
|
||||||
|
},
|
||||||
|
)
|
||||||
|
|
||||||
|
@api.expect(stream_model)
|
||||||
|
@api.doc(description="Stream a response based on the question and retriever")
|
||||||
|
def post(self):
|
||||||
|
data = request.get_json()
|
||||||
|
if error := self.validate_request(data, "index" in data):
|
||||||
|
return error
|
||||||
|
decoded_token = getattr(request, "decoded_token", None)
|
||||||
|
processor = StreamProcessor(data, decoded_token)
|
||||||
|
try:
|
||||||
|
processor.initialize()
|
||||||
|
agent = processor.create_agent()
|
||||||
|
retriever = processor.create_retriever()
|
||||||
|
|
||||||
|
return Response(
|
||||||
|
self.complete_stream(
|
||||||
|
question=data["question"],
|
||||||
|
agent=agent,
|
||||||
|
retriever=retriever,
|
||||||
|
conversation_id=processor.conversation_id,
|
||||||
|
user_api_key=processor.agent_config.get("user_api_key"),
|
||||||
|
decoded_token=processor.decoded_token,
|
||||||
|
isNoneDoc=data.get("isNoneDoc"),
|
||||||
|
index=data.get("index"),
|
||||||
|
should_save_conversation=data.get("save_conversation", True),
|
||||||
|
attachment_ids=data.get("attachments", []),
|
||||||
|
agent_id=data.get("agent_id"),
|
||||||
|
is_shared_usage=processor.is_shared_usage,
|
||||||
|
shared_token=processor.shared_token,
|
||||||
|
),
|
||||||
|
mimetype="text/event-stream",
|
||||||
|
)
|
||||||
|
except ValueError as e:
|
||||||
|
message = "Malformed request body"
|
||||||
|
logger.error(
|
||||||
|
f"/stream - error: {message} - specific error: {str(e)} - traceback: {traceback.format_exc()}",
|
||||||
|
extra={"error": str(e), "traceback": traceback.format_exc()},
|
||||||
|
)
|
||||||
|
return Response(
|
||||||
|
self.error_stream_generate(message),
|
||||||
|
status=400,
|
||||||
|
mimetype="text/event-stream",
|
||||||
|
)
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(
|
||||||
|
f"/stream - error: {str(e)} - traceback: {traceback.format_exc()}",
|
||||||
|
extra={"error": str(e), "traceback": traceback.format_exc()},
|
||||||
|
)
|
||||||
|
return Response(
|
||||||
|
self.error_stream_generate("Unknown error occurred"),
|
||||||
|
status=400,
|
||||||
|
mimetype="text/event-stream",
|
||||||
|
)
|
||||||
0
application/api/answer/services/__init__.py
Normal file
0
application/api/answer/services/__init__.py
Normal file
180
application/api/answer/services/conversation_service.py
Normal file
180
application/api/answer/services/conversation_service.py
Normal file
@@ -0,0 +1,180 @@
|
|||||||
|
import logging
|
||||||
|
from datetime import datetime, timezone
|
||||||
|
from typing import Any, Dict, List, Optional
|
||||||
|
|
||||||
|
from application.core.mongo_db import MongoDB
|
||||||
|
|
||||||
|
from application.core.settings import settings
|
||||||
|
from bson import ObjectId
|
||||||
|
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
class ConversationService:
    """MongoDB-backed persistence layer for chat conversations."""

    def __init__(self):
        # Collections are resolved once per instance from the shared client.
        mongo = MongoDB.get_client()
        db = mongo[settings.MONGO_DB_NAME]
        self.conversations_collection = db["conversations"]
        self.agents_collection = db["agents"]

    def get_conversation(
        self, conversation_id: str, user_id: str
    ) -> Optional[Dict[str, Any]]:
        """Retrieve a conversation with proper access control.

        The caller must be the owner or listed in "shared_with". Returns
        the document with "_id" stringified, or None on missing input,
        no match, or any lookup error (errors are logged, not raised).
        """
        if not conversation_id or not user_id:
            return None
        try:
            conversation = self.conversations_collection.find_one(
                {
                    "_id": ObjectId(conversation_id),
                    "$or": [{"user": user_id}, {"shared_with": user_id}],
                }
            )

            if not conversation:
                logger.warning(
                    f"Conversation not found or unauthorized - ID: {conversation_id}, User: {user_id}"
                )
                return None
            conversation["_id"] = str(conversation["_id"])
            return conversation
        except Exception as e:
            logger.error(f"Error fetching conversation: {str(e)}", exc_info=True)
            return None

    def save_conversation(
        self,
        conversation_id: Optional[str],
        question: str,
        response: str,
        thought: str,
        sources: List[Dict[str, Any]],
        tool_calls: List[Dict[str, Any]],
        llm: Any,
        gpt_model: str,
        decoded_token: Dict[str, Any],
        index: Optional[int] = None,
        api_key: Optional[str] = None,
        agent_id: Optional[str] = None,
        is_shared_usage: bool = False,
        shared_token: Optional[str] = None,
        attachment_ids: Optional[List[str]] = None,
    ) -> str:
        """Save or update a conversation in the database.

        Three modes, chosen by the arguments:
        - conversation_id + index: overwrite the query at ``index`` and drop
          any later queries (regeneration of an earlier answer).
        - conversation_id only: append a new query to the conversation.
        - neither: create a new conversation, using ``llm`` to generate a
          short (max 3 word) title.

        Returns the conversation id as a string. Raises ValueError when
        the token has no user or the conversation is not found/owned.
        """
        user_id = decoded_token.get("sub")
        if not user_id:
            raise ValueError("User ID not found in token")
        current_time = datetime.now(timezone.utc)

        # clean up in sources array such that we save max 1k characters for text part
        for source in sources:
            if "text" in source and isinstance(source["text"], str):
                source["text"] = source["text"][:1000]

        if conversation_id is not None and index is not None:
            # Update existing conversation with new query

            # Filter requires ownership AND that the target index exists,
            # so an out-of-range index matches nothing and raises below.
            result = self.conversations_collection.update_one(
                {
                    "_id": ObjectId(conversation_id),
                    "user": user_id,
                    f"queries.{index}": {"$exists": True},
                },
                {
                    "$set": {
                        f"queries.{index}.prompt": question,
                        f"queries.{index}.response": response,
                        f"queries.{index}.thought": thought,
                        f"queries.{index}.sources": sources,
                        f"queries.{index}.tool_calls": tool_calls,
                        f"queries.{index}.timestamp": current_time,
                        f"queries.{index}.attachments": attachment_ids,
                    }
                },
            )

            if result.matched_count == 0:
                raise ValueError("Conversation not found or unauthorized")
            # $push with an empty $each plus $slice keeps only the first
            # index + 1 queries, discarding everything after the
            # regenerated entry.
            self.conversations_collection.update_one(
                {
                    "_id": ObjectId(conversation_id),
                    "user": user_id,
                    f"queries.{index}": {"$exists": True},
                },
                {"$push": {"queries": {"$each": [], "$slice": index + 1}}},
            )
            return conversation_id
        elif conversation_id:
            # Append new message to existing conversation

            result = self.conversations_collection.update_one(
                {"_id": ObjectId(conversation_id), "user": user_id},
                {
                    "$push": {
                        "queries": {
                            "prompt": question,
                            "response": response,
                            "thought": thought,
                            "sources": sources,
                            "tool_calls": tool_calls,
                            "timestamp": current_time,
                            "attachments": attachment_ids,
                        }
                    }
                },
            )

            if result.matched_count == 0:
                raise ValueError("Conversation not found or unauthorized")
            return conversation_id
        else:
            # Create new conversation

            # Ask the LLM for a <=3-word title in the user's language.
            messages_summary = [
                {
                    "role": "assistant",
                    "content": "Summarise following conversation in no more than 3 "
                    "words, respond ONLY with the summary, use the same "
                    "language as the user query",
                },
                {
                    "role": "user",
                    "content": "Summarise following conversation in no more than 3 words, "
                    "respond ONLY with the summary, use the same language as the "
                    "user query \n\nUser: " + question + "\n\n" + "AI: " + response,
                },
            ]

            completion = llm.gen(
                model=gpt_model, messages=messages_summary, max_tokens=30
            )

            conversation_data = {
                "user": user_id,
                "date": current_time,
                "name": completion,
                "queries": [
                    {
                        "prompt": question,
                        "response": response,
                        "thought": thought,
                        "sources": sources,
                        "tool_calls": tool_calls,
                        "timestamp": current_time,
                        "attachments": attachment_ids,
                    }
                ],
            }

            # Agent/shared-usage metadata is only recorded for API-key calls.
            if api_key:
                if agent_id:
                    conversation_data["agent_id"] = agent_id
                if is_shared_usage:
                    conversation_data["is_shared_usage"] = is_shared_usage
                    conversation_data["shared_token"] = shared_token
                agent = self.agents_collection.find_one({"key": api_key})
                if agent:
                    conversation_data["api_key"] = agent["key"]
            result = self.conversations_collection.insert_one(conversation_data)
            return str(result.inserted_id)
|
||||||
353
application/api/answer/services/stream_processor.py
Normal file
353
application/api/answer/services/stream_processor.py
Normal file
@@ -0,0 +1,353 @@
|
|||||||
|
import datetime
|
||||||
|
import json
|
||||||
|
import logging
|
||||||
|
import os
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import Any, Dict, Optional
|
||||||
|
|
||||||
|
from bson.dbref import DBRef
|
||||||
|
|
||||||
|
from bson.objectid import ObjectId
|
||||||
|
|
||||||
|
from application.agents.agent_creator import AgentCreator
|
||||||
|
from application.api.answer.services.conversation_service import ConversationService
|
||||||
|
from application.core.mongo_db import MongoDB
|
||||||
|
from application.core.settings import settings
|
||||||
|
from application.retriever.retriever_creator import RetrieverCreator
|
||||||
|
from application.utils import get_gpt_model, limit_chat_history
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
def get_prompt(prompt_id: str, prompts_collection=None) -> str:
    """Resolve a prompt body from a preset name or a MongoDB document id.

    Preset names ("default", "creative", "strict", "reduce") map to text
    files under the application "prompts" directory; any other id is
    treated as an ObjectId into the prompts collection. Raises
    FileNotFoundError for a missing preset file and ValueError for an
    unknown or invalid Mongo id.
    """
    app_root = Path(__file__).resolve().parents[3]
    prompt_files_dir = app_root / "prompts"

    presets = {
        "default": "chat_combine_default.txt",
        "creative": "chat_combine_creative.txt",
        "strict": "chat_combine_strict.txt",
        "reduce": "chat_reduce_prompt.txt",
    }

    preset_file = presets.get(prompt_id)
    if preset_file is not None:
        file_path = os.path.join(prompt_files_dir, preset_file)
        try:
            with open(file_path, "r") as handle:
                return handle.read()
        except FileNotFoundError:
            # Re-raise with the resolved path so the failure is actionable.
            raise FileNotFoundError(f"Prompt file not found: {file_path}")

    try:
        if prompts_collection is None:
            client = MongoDB.get_client()
            prompts_collection = client[settings.MONGO_DB_NAME]["prompts"]
        document = prompts_collection.find_one({"_id": ObjectId(prompt_id)})
        if not document:
            raise ValueError(f"Prompt with ID {prompt_id} not found")
        return document["content"]
    except Exception as e:
        # Any failure (bad ObjectId, DB error, not-found) surfaces as an
        # invalid-id ValueError with the original cause chained.
        raise ValueError(f"Invalid prompt ID: {prompt_id}") from e
|
||||||
|
|
||||||
|
|
||||||
|
class StreamProcessor:
    """Prepares everything needed to answer a streamed chat request:
    agent configuration, document sources, retriever settings,
    conversation history, and attachments.

    Typical usage: construct with the request payload and decoded auth
    token, call ``initialize()``, then ``create_agent()`` /
    ``create_retriever()``.
    """

    def __init__(
        self, request_data: Dict[str, Any], decoded_token: Optional[Dict[str, Any]]
    ):
        # Shared MongoDB handles used across the configuration steps.
        mongo = MongoDB.get_client()
        self.db = mongo[settings.MONGO_DB_NAME]
        self.agents_collection = self.db["agents"]
        self.attachments_collection = self.db["attachments"]
        self.prompts_collection = self.db["prompts"]

        self.data = request_data
        self.decoded_token = decoded_token
        # "sub" is the user id carried by the auth token (may be absent for
        # anonymous / API-key driven requests).
        self.initial_user_id = (
            self.decoded_token.get("sub") if self.decoded_token is not None else None
        )
        self.conversation_id = self.data.get("conversation_id")
        # Populated by the _configure_* / _load_* steps in initialize().
        self.source = {}
        self.all_sources = []
        self.attachments = []
        self.history = []
        self.agent_config = {}
        self.retriever_config = {}
        self.is_shared_usage = False
        self.shared_token = None
        self.gpt_model = get_gpt_model()
        self.conversation_service = ConversationService()

    def initialize(self):
        """Initialize all required components for processing"""
        self._configure_agent()
        self._configure_source()
        self._configure_retriever()
        # NOTE(review): _configure_agent is intentionally run a second time:
        # _configure_retriever rebuilds self.retriever_config from the raw
        # request, so the agent/API-key-derived retriever overrides (retriever
        # name, chunks) must be re-applied on top — confirm before "cleaning
        # up" this duplicate call.
        self._configure_agent()
        self._load_conversation_history()
        self._process_attachments()

    def _load_conversation_history(self):
        """Load conversation history either from DB or request"""
        if self.conversation_id and self.initial_user_id:
            # Resume an existing conversation; ownership is enforced by the
            # (conversation_id, user_id) lookup.
            conversation = self.conversation_service.get_conversation(
                self.conversation_id, self.initial_user_id
            )
            if not conversation:
                raise ValueError("Conversation not found or unauthorized")
            self.history = [
                {"prompt": query["prompt"], "response": query["response"]}
                for query in conversation.get("queries", [])
            ]
        else:
            # No stored conversation: take history from the request payload
            # (JSON-encoded list) and trim it to fit the model's budget.
            self.history = limit_chat_history(
                json.loads(self.data.get("history", "[]")), gpt_model=self.gpt_model
            )

    def _process_attachments(self):
        """Process any attachments in the request"""
        attachment_ids = self.data.get("attachments", [])
        self.attachments = self._get_attachments_content(
            attachment_ids, self.initial_user_id
        )

    def _get_attachments_content(self, attachment_ids, user_id):
        """
        Retrieve content from attachment documents based on their IDs.

        Each id is looked up scoped to ``user_id``; failures for individual
        attachments are logged and skipped rather than aborting the request.
        """
        if not attachment_ids:
            return []
        attachments = []
        for attachment_id in attachment_ids:
            try:
                attachment_doc = self.attachments_collection.find_one(
                    {"_id": ObjectId(attachment_id), "user": user_id}
                )

                if attachment_doc:
                    attachments.append(attachment_doc)
            except Exception as e:
                # Best-effort: a bad id must not break the whole request.
                logger.error(
                    f"Error retrieving attachment {attachment_id}: {e}", exc_info=True
                )
        return attachments

    def _get_agent_key(self, agent_id: Optional[str], user_id: Optional[str]) -> tuple:
        """Get API key for agent with access control.

        Returns a tuple ``(key, is_shared_usage, shared_token)`` where
        ``is_shared_usage`` is True when the caller is not the agent's owner.
        Raises if the agent does not exist or the user may not access it.
        """
        if not agent_id:
            return None, False, None
        try:
            agent = self.agents_collection.find_one({"_id": ObjectId(agent_id)})
            if agent is None:
                raise Exception("Agent not found")
            is_owner = agent.get("user") == user_id
            # Access is granted to the owner, to anyone when shared publicly,
            # or to users explicitly listed in "shared_with".
            is_shared_with_user = agent.get(
                "shared_publicly", False
            ) or user_id in agent.get("shared_with", [])

            if not (is_owner or is_shared_with_user):
                raise Exception("Unauthorized access to the agent")
            if is_owner:
                # Track last usage only for the owner's own agents.
                self.agents_collection.update_one(
                    {"_id": ObjectId(agent_id)},
                    {
                        "$set": {
                            "lastUsedAt": datetime.datetime.now(datetime.timezone.utc)
                        }
                    },
                )
            return str(agent["key"]), not is_owner, agent.get("shared_token")
        except Exception as e:
            logger.error(f"Error in get_agent_key: {str(e)}", exc_info=True)
            raise

    def _get_data_from_api_key(self, api_key: str) -> Dict[str, Any]:
        """Load the agent document for an API key and normalize its source
        fields: DBRef references are dereferenced into plain ids, and the
        "sources" list is flattened into dicts of id/retriever/chunks."""
        data = self.agents_collection.find_one({"key": api_key})
        if not data:
            raise Exception("Invalid API Key, please generate a new key", 401)
        source = data.get("source")
        if isinstance(source, DBRef):
            source_doc = self.db.dereference(source)
            if source_doc:
                data["source"] = str(source_doc["_id"])
                # Source-level retriever/chunks settings take precedence over
                # the agent document's own values.
                data["retriever"] = source_doc.get("retriever", data.get("retriever"))
                data["chunks"] = source_doc.get("chunks", data.get("chunks"))
            else:
                # Dangling reference: treat as "no source".
                data["source"] = None
        elif source == "default":
            data["source"] = "default"
        else:
            data["source"] = None
        # Handle multiple sources

        sources = data.get("sources", [])
        if sources and isinstance(sources, list):
            sources_list = []
            for i, source_ref in enumerate(sources):
                if source_ref == "default":
                    processed_source = {
                        "id": "default",
                        "retriever": "classic",
                        "chunks": data.get("chunks", "2"),
                    }
                    sources_list.append(processed_source)
                elif isinstance(source_ref, DBRef):
                    source_doc = self.db.dereference(source_ref)
                    # Dangling refs are silently dropped from the list.
                    if source_doc:
                        processed_source = {
                            "id": str(source_doc["_id"]),
                            "retriever": source_doc.get("retriever", "classic"),
                            "chunks": source_doc.get("chunks", data.get("chunks", "2")),
                        }
                        sources_list.append(processed_source)
            data["sources"] = sources_list
        else:
            data["sources"] = []
        return data

    def _configure_source(self):
        """Configure the source based on agent data.

        Precedence: agent/API-key sources (multi-source list first, then the
        single legacy "source" field), then the request's "active_docs",
        otherwise empty. Relies on self.agent_key set by _configure_agent().
        """
        api_key = self.data.get("api_key") or self.agent_key

        if api_key:
            agent_data = self._get_data_from_api_key(api_key)

            if agent_data.get("sources") and len(agent_data["sources"]) > 0:
                source_ids = [
                    source["id"] for source in agent_data["sources"] if source.get("id")
                ]
                if source_ids:
                    self.source = {"active_docs": source_ids}
                else:
                    self.source = {}
                self.all_sources = agent_data["sources"]
            elif agent_data.get("source"):
                # Legacy single-source agents.
                self.source = {"active_docs": agent_data["source"]}
                self.all_sources = [
                    {
                        "id": agent_data["source"],
                        "retriever": agent_data.get("retriever", "classic"),
                    }
                ]
            else:
                self.source = {}
                self.all_sources = []
            return
        if "active_docs" in self.data:
            self.source = {"active_docs": self.data["active_docs"]}
            return
        self.source = {}
        self.all_sources = []

    def _configure_agent(self):
        """Configure the agent based on request data.

        Three mutually exclusive paths: explicit "api_key" in the request,
        an agent referenced by "agent_id" (resolved to self.agent_key), or
        default settings. The first two also impersonate the key's owner by
        rewriting self.decoded_token, and push the key's source/retriever
        settings into self.source / self.retriever_config.
        """
        agent_id = self.data.get("agent_id")
        self.agent_key, self.is_shared_usage, self.shared_token = self._get_agent_key(
            agent_id, self.initial_user_id
        )

        api_key = self.data.get("api_key")
        if api_key:
            data_key = self._get_data_from_api_key(api_key)
            self.agent_config.update(
                {
                    "prompt_id": data_key.get("prompt_id", "default"),
                    "agent_type": data_key.get("agent_type", settings.AGENT_NAME),
                    "user_api_key": api_key,
                    "json_schema": data_key.get("json_schema"),
                }
            )
            # Requests made with a raw API key run as the key's owner.
            self.initial_user_id = data_key.get("user")
            self.decoded_token = {"sub": data_key.get("user")}
            if data_key.get("source"):
                self.source = {"active_docs": data_key["source"]}
            if data_key.get("retriever"):
                self.retriever_config["retriever_name"] = data_key["retriever"]
            if data_key.get("chunks") is not None:
                try:
                    self.retriever_config["chunks"] = int(data_key["chunks"])
                except (ValueError, TypeError):
                    logger.warning(
                        f"Invalid chunks value: {data_key['chunks']}, using default value 2"
                    )
                    self.retriever_config["chunks"] = 2
        elif self.agent_key:
            data_key = self._get_data_from_api_key(self.agent_key)
            self.agent_config.update(
                {
                    "prompt_id": data_key.get("prompt_id", "default"),
                    "agent_type": data_key.get("agent_type", settings.AGENT_NAME),
                    "user_api_key": self.agent_key,
                    "json_schema": data_key.get("json_schema"),
                }
            )
            # Shared usage keeps the caller's own identity; otherwise run as
            # the agent owner's.
            self.decoded_token = (
                self.decoded_token
                if self.is_shared_usage
                else {"sub": data_key.get("user")}
            )
            if data_key.get("source"):
                self.source = {"active_docs": data_key["source"]}
            if data_key.get("retriever"):
                self.retriever_config["retriever_name"] = data_key["retriever"]
            if data_key.get("chunks") is not None:
                try:
                    self.retriever_config["chunks"] = int(data_key["chunks"])
                except (ValueError, TypeError):
                    logger.warning(
                        f"Invalid chunks value: {data_key['chunks']}, using default value 2"
                    )
                    self.retriever_config["chunks"] = 2
        else:
            # No key at all: plain defaults driven by the request payload.
            self.agent_config.update(
                {
                    "prompt_id": self.data.get("prompt_id", "default"),
                    "agent_type": settings.AGENT_NAME,
                    "user_api_key": None,
                    "json_schema": None,
                }
            )

    def _configure_retriever(self):
        """Configure the retriever based on request data.

        NOTE: this rebuilds self.retriever_config wholesale from the request,
        discarding any values set earlier — _configure_agent is re-run after
        this in initialize() to restore key-derived overrides.
        """
        self.retriever_config = {
            "retriever_name": self.data.get("retriever", "classic"),
            "chunks": int(self.data.get("chunks", 2)),
            "token_limit": self.data.get("token_limit", settings.DEFAULT_MAX_HISTORY),
        }

        api_key = self.data.get("api_key") or self.agent_key
        # "isNoneDoc" means the client explicitly wants no document context.
        if not api_key and "isNoneDoc" in self.data and self.data["isNoneDoc"]:
            self.retriever_config["chunks"] = 0

    def create_agent(self):
        """Create and return the configured agent"""
        return AgentCreator.create_agent(
            self.agent_config["agent_type"],
            endpoint="stream",
            llm_name=settings.LLM_PROVIDER,
            gpt_model=self.gpt_model,
            api_key=settings.API_KEY,
            user_api_key=self.agent_config["user_api_key"],
            prompt=get_prompt(self.agent_config["prompt_id"], self.prompts_collection),
            chat_history=self.history,
            decoded_token=self.decoded_token,
            attachments=self.attachments,
            json_schema=self.agent_config.get("json_schema"),
        )

    def create_retriever(self):
        """Create and return the configured retriever"""
        return RetrieverCreator.create_retriever(
            self.retriever_config["retriever_name"],
            source=self.source,
            chat_history=self.history,
            prompt=get_prompt(self.agent_config["prompt_id"], self.prompts_collection),
            chunks=self.retriever_config["chunks"],
            token_limit=self.retriever_config["token_limit"],
            gpt_model=self.gpt_model,
            user_api_key=self.agent_config["user_api_key"],
            decoded_token=self.decoded_token,
        )
|
||||||
695
application/api/connector/routes.py
Normal file
695
application/api/connector/routes.py
Normal file
@@ -0,0 +1,695 @@
|
|||||||
|
import base64
|
||||||
|
import datetime
|
||||||
|
import json
|
||||||
|
import uuid
|
||||||
|
|
||||||
|
|
||||||
|
from bson.objectid import ObjectId
|
||||||
|
from flask import (
|
||||||
|
Blueprint,
|
||||||
|
current_app,
|
||||||
|
jsonify,
|
||||||
|
make_response,
|
||||||
|
request
|
||||||
|
)
|
||||||
|
from flask_restx import fields, Namespace, Resource
|
||||||
|
|
||||||
|
|
||||||
|
from application.api.user.tasks import (
|
||||||
|
ingest_connector_task,
|
||||||
|
)
|
||||||
|
from application.core.mongo_db import MongoDB
|
||||||
|
from application.core.settings import settings
|
||||||
|
from application.api import api
|
||||||
|
|
||||||
|
|
||||||
|
from application.utils import (
|
||||||
|
check_required_fields
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
from application.parser.connectors.connector_creator import ConnectorCreator
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
# Module-level MongoDB handles shared by all connector endpoints.
mongo = MongoDB.get_client()
db = mongo[settings.MONGO_DB_NAME]
sources_collection = db["sources"]  # ingested connector sources
sessions_collection = db["connector_sessions"]  # OAuth session state per user

# Flask blueprint plus the RESTX namespace that hosts the routes below.
connector = Blueprint("connector", __name__)
connectors_ns = Namespace("connectors", description="Connector operations", path="/")
api.add_namespace(connectors_ns)
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
@connectors_ns.route("/api/connectors/upload")
|
||||||
|
class UploadConnector(Resource):
|
||||||
|
@api.expect(
|
||||||
|
api.model(
|
||||||
|
"ConnectorUploadModel",
|
||||||
|
{
|
||||||
|
"user": fields.String(required=True, description="User ID"),
|
||||||
|
"source": fields.String(
|
||||||
|
required=True, description="Source type (google_drive, github, etc.)"
|
||||||
|
),
|
||||||
|
"name": fields.String(required=True, description="Job name"),
|
||||||
|
"data": fields.String(required=True, description="Configuration data"),
|
||||||
|
"repo_url": fields.String(description="GitHub repository URL"),
|
||||||
|
},
|
||||||
|
)
|
||||||
|
)
|
||||||
|
@api.doc(
|
||||||
|
description="Uploads connector source for vectorization",
|
||||||
|
)
|
||||||
|
def post(self):
|
||||||
|
decoded_token = request.decoded_token
|
||||||
|
if not decoded_token:
|
||||||
|
return make_response(jsonify({"success": False}), 401)
|
||||||
|
data = request.form
|
||||||
|
required_fields = ["user", "source", "name", "data"]
|
||||||
|
missing_fields = check_required_fields(data, required_fields)
|
||||||
|
if missing_fields:
|
||||||
|
return missing_fields
|
||||||
|
try:
|
||||||
|
config = json.loads(data["data"])
|
||||||
|
source_data = None
|
||||||
|
sync_frequency = config.get("sync_frequency", "never")
|
||||||
|
|
||||||
|
if data["source"] == "github":
|
||||||
|
source_data = config.get("repo_url")
|
||||||
|
elif data["source"] in ["crawler", "url"]:
|
||||||
|
source_data = config.get("url")
|
||||||
|
elif data["source"] == "reddit":
|
||||||
|
source_data = config
|
||||||
|
elif data["source"] in ConnectorCreator.get_supported_connectors():
|
||||||
|
session_token = config.get("session_token")
|
||||||
|
if not session_token:
|
||||||
|
return make_response(jsonify({
|
||||||
|
"success": False,
|
||||||
|
"error": f"Missing session_token in {data['source']} configuration"
|
||||||
|
}), 400)
|
||||||
|
|
||||||
|
file_ids = config.get("file_ids", [])
|
||||||
|
if isinstance(file_ids, str):
|
||||||
|
file_ids = [id.strip() for id in file_ids.split(',') if id.strip()]
|
||||||
|
elif not isinstance(file_ids, list):
|
||||||
|
file_ids = []
|
||||||
|
|
||||||
|
folder_ids = config.get("folder_ids", [])
|
||||||
|
if isinstance(folder_ids, str):
|
||||||
|
folder_ids = [id.strip() for id in folder_ids.split(',') if id.strip()]
|
||||||
|
elif not isinstance(folder_ids, list):
|
||||||
|
folder_ids = []
|
||||||
|
|
||||||
|
config["file_ids"] = file_ids
|
||||||
|
config["folder_ids"] = folder_ids
|
||||||
|
|
||||||
|
task = ingest_connector_task.delay(
|
||||||
|
job_name=data["name"],
|
||||||
|
user=decoded_token.get("sub"),
|
||||||
|
source_type=data["source"],
|
||||||
|
session_token=session_token,
|
||||||
|
file_ids=file_ids,
|
||||||
|
folder_ids=folder_ids,
|
||||||
|
recursive=config.get("recursive", False),
|
||||||
|
retriever=config.get("retriever", "classic"),
|
||||||
|
sync_frequency=sync_frequency
|
||||||
|
)
|
||||||
|
return make_response(jsonify({"success": True, "task_id": task.id}), 200)
|
||||||
|
task = ingest_connector_task.delay(
|
||||||
|
source_data=source_data,
|
||||||
|
job_name=data["name"],
|
||||||
|
user=decoded_token.get("sub"),
|
||||||
|
loader=data["source"],
|
||||||
|
sync_frequency=sync_frequency
|
||||||
|
)
|
||||||
|
except Exception as err:
|
||||||
|
current_app.logger.error(
|
||||||
|
f"Error uploading connector source: {err}", exc_info=True
|
||||||
|
)
|
||||||
|
return make_response(jsonify({"success": False}), 400)
|
||||||
|
return make_response(jsonify({"success": True, "task_id": task.id}), 200)
|
||||||
|
|
||||||
|
|
||||||
|
@connectors_ns.route("/api/connectors/task_status")
|
||||||
|
class ConnectorTaskStatus(Resource):
|
||||||
|
task_status_model = api.model(
|
||||||
|
"ConnectorTaskStatusModel",
|
||||||
|
{"task_id": fields.String(required=True, description="Task ID")},
|
||||||
|
)
|
||||||
|
|
||||||
|
@api.expect(task_status_model)
|
||||||
|
@api.doc(description="Get connector task status")
|
||||||
|
def get(self):
|
||||||
|
task_id = request.args.get("task_id")
|
||||||
|
if not task_id:
|
||||||
|
return make_response(
|
||||||
|
jsonify({"success": False, "message": "Task ID is required"}), 400
|
||||||
|
)
|
||||||
|
try:
|
||||||
|
from application.celery_init import celery
|
||||||
|
|
||||||
|
task = celery.AsyncResult(task_id)
|
||||||
|
task_meta = task.info
|
||||||
|
print(f"Task status: {task.status}")
|
||||||
|
if not isinstance(
|
||||||
|
task_meta, (dict, list, str, int, float, bool, type(None))
|
||||||
|
):
|
||||||
|
task_meta = str(task_meta)
|
||||||
|
except Exception as err:
|
||||||
|
current_app.logger.error(f"Error getting task status: {err}", exc_info=True)
|
||||||
|
return make_response(jsonify({"success": False}), 400)
|
||||||
|
return make_response(jsonify({"status": task.status, "result": task_meta}), 200)
|
||||||
|
|
||||||
|
|
||||||
|
@connectors_ns.route("/api/connectors/sources")
|
||||||
|
class ConnectorSources(Resource):
|
||||||
|
@api.doc(description="Get connector sources")
|
||||||
|
def get(self):
|
||||||
|
decoded_token = request.decoded_token
|
||||||
|
if not decoded_token:
|
||||||
|
return make_response(jsonify({"success": False}), 401)
|
||||||
|
user = decoded_token.get("sub")
|
||||||
|
try:
|
||||||
|
sources = sources_collection.find({"user": user, "type": "connector:file"}).sort("date", -1)
|
||||||
|
connector_sources = []
|
||||||
|
for source in sources:
|
||||||
|
connector_sources.append({
|
||||||
|
"id": str(source["_id"]),
|
||||||
|
"name": source.get("name"),
|
||||||
|
"date": source.get("date"),
|
||||||
|
"type": source.get("type"),
|
||||||
|
"source": source.get("source"),
|
||||||
|
"tokens": source.get("tokens", ""),
|
||||||
|
"retriever": source.get("retriever", "classic"),
|
||||||
|
"syncFrequency": source.get("sync_frequency", ""),
|
||||||
|
})
|
||||||
|
except Exception as err:
|
||||||
|
current_app.logger.error(f"Error retrieving connector sources: {err}", exc_info=True)
|
||||||
|
return make_response(jsonify({"success": False}), 400)
|
||||||
|
return make_response(jsonify(connector_sources), 200)
|
||||||
|
|
||||||
|
|
||||||
|
@connectors_ns.route("/api/connectors/delete")
|
||||||
|
class DeleteConnectorSource(Resource):
|
||||||
|
@api.doc(
|
||||||
|
description="Delete a connector source",
|
||||||
|
params={"source_id": "The source ID to delete"},
|
||||||
|
)
|
||||||
|
def delete(self):
|
||||||
|
decoded_token = request.decoded_token
|
||||||
|
if not decoded_token:
|
||||||
|
return make_response(jsonify({"success": False}), 401)
|
||||||
|
source_id = request.args.get("source_id")
|
||||||
|
if not source_id:
|
||||||
|
return make_response(
|
||||||
|
jsonify({"success": False, "message": "source_id is required"}), 400
|
||||||
|
)
|
||||||
|
try:
|
||||||
|
result = sources_collection.delete_one(
|
||||||
|
{"_id": ObjectId(source_id), "user": decoded_token.get("sub")}
|
||||||
|
)
|
||||||
|
if result.deleted_count == 0:
|
||||||
|
return make_response(
|
||||||
|
jsonify({"success": False, "message": "Source not found"}), 404
|
||||||
|
)
|
||||||
|
except Exception as err:
|
||||||
|
current_app.logger.error(
|
||||||
|
f"Error deleting connector source: {err}", exc_info=True
|
||||||
|
)
|
||||||
|
return make_response(jsonify({"success": False}), 400)
|
||||||
|
return make_response(jsonify({"success": True}), 200)
|
||||||
|
|
||||||
|
|
||||||
|
@connectors_ns.route("/api/connectors/auth")
|
||||||
|
class ConnectorAuth(Resource):
|
||||||
|
@api.doc(description="Get connector OAuth authorization URL", params={"provider": "Connector provider (e.g., google_drive)"})
|
||||||
|
def get(self):
|
||||||
|
try:
|
||||||
|
provider = request.args.get('provider') or request.args.get('source')
|
||||||
|
if not provider:
|
||||||
|
return make_response(jsonify({"success": False, "error": "Missing provider"}), 400)
|
||||||
|
|
||||||
|
if not ConnectorCreator.is_supported(provider):
|
||||||
|
return make_response(jsonify({"success": False, "error": f"Unsupported provider: {provider}"}), 400)
|
||||||
|
|
||||||
|
decoded_token = request.decoded_token
|
||||||
|
if not decoded_token:
|
||||||
|
return make_response(jsonify({"success": False, "error": "Unauthorized"}), 401)
|
||||||
|
user_id = decoded_token.get('sub')
|
||||||
|
|
||||||
|
now = datetime.datetime.now(datetime.timezone.utc)
|
||||||
|
result = sessions_collection.insert_one({
|
||||||
|
"provider": provider,
|
||||||
|
"user": user_id,
|
||||||
|
"status": "pending",
|
||||||
|
"created_at": now
|
||||||
|
})
|
||||||
|
state_dict = {
|
||||||
|
"provider": provider,
|
||||||
|
"object_id": str(result.inserted_id)
|
||||||
|
}
|
||||||
|
state = base64.urlsafe_b64encode(json.dumps(state_dict).encode()).decode()
|
||||||
|
|
||||||
|
auth = ConnectorCreator.create_auth(provider)
|
||||||
|
authorization_url = auth.get_authorization_url(state=state)
|
||||||
|
return make_response(jsonify({
|
||||||
|
"success": True,
|
||||||
|
"authorization_url": authorization_url,
|
||||||
|
"state": state
|
||||||
|
}), 200)
|
||||||
|
except Exception as e:
|
||||||
|
current_app.logger.error(f"Error generating connector auth URL: {e}")
|
||||||
|
return make_response(jsonify({"success": False, "error": str(e)}), 500)
|
||||||
|
|
||||||
|
|
||||||
|
@connectors_ns.route("/api/connectors/callback")
|
||||||
|
class ConnectorsCallback(Resource):
|
||||||
|
@api.doc(description="Handle OAuth callback for external connectors")
|
||||||
|
def get(self):
|
||||||
|
"""Handle OAuth callback for external connectors"""
|
||||||
|
try:
|
||||||
|
from application.parser.connectors.connector_creator import ConnectorCreator
|
||||||
|
from flask import request, redirect
|
||||||
|
|
||||||
|
authorization_code = request.args.get('code')
|
||||||
|
state = request.args.get('state')
|
||||||
|
error = request.args.get('error')
|
||||||
|
|
||||||
|
state_dict = json.loads(base64.urlsafe_b64decode(state.encode()).decode())
|
||||||
|
provider = state_dict["provider"]
|
||||||
|
state_object_id = state_dict["object_id"]
|
||||||
|
|
||||||
|
if error:
|
||||||
|
if error == "access_denied":
|
||||||
|
return redirect(f"/api/connectors/callback-status?status=cancelled&message=Authentication+was+cancelled.+You+can+try+again+if+you'd+like+to+connect+your+account.&provider={provider}")
|
||||||
|
else:
|
||||||
|
current_app.logger.warning(f"OAuth error in callback: {error}")
|
||||||
|
return redirect(f"/api/connectors/callback-status?status=error&message=Authentication+failed.+Please+try+again+and+make+sure+to+grant+all+requested+permissions.&provider={provider}")
|
||||||
|
|
||||||
|
if not authorization_code:
|
||||||
|
return redirect(f"/api/connectors/callback-status?status=error&message=Authentication+failed.+Please+try+again+and+make+sure+to+grant+all+requested+permissions.&provider={provider}")
|
||||||
|
|
||||||
|
try:
|
||||||
|
auth = ConnectorCreator.create_auth(provider)
|
||||||
|
token_info = auth.exchange_code_for_tokens(authorization_code)
|
||||||
|
|
||||||
|
session_token = str(uuid.uuid4())
|
||||||
|
|
||||||
|
try:
|
||||||
|
credentials = auth.create_credentials_from_token_info(token_info)
|
||||||
|
service = auth.build_drive_service(credentials)
|
||||||
|
user_info = service.about().get(fields="user").execute()
|
||||||
|
user_email = user_info.get('user', {}).get('emailAddress', 'Connected User')
|
||||||
|
except Exception as e:
|
||||||
|
current_app.logger.warning(f"Could not get user info: {e}")
|
||||||
|
user_email = 'Connected User'
|
||||||
|
|
||||||
|
sanitized_token_info = {
|
||||||
|
"access_token": token_info.get("access_token"),
|
||||||
|
"refresh_token": token_info.get("refresh_token"),
|
||||||
|
"token_uri": token_info.get("token_uri"),
|
||||||
|
"expiry": token_info.get("expiry")
|
||||||
|
}
|
||||||
|
|
||||||
|
sessions_collection.find_one_and_update(
|
||||||
|
{"_id": ObjectId(state_object_id), "provider": provider},
|
||||||
|
{
|
||||||
|
"$set": {
|
||||||
|
"session_token": session_token,
|
||||||
|
"token_info": sanitized_token_info,
|
||||||
|
"user_email": user_email,
|
||||||
|
"status": "authorized"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
# Redirect to success page with session token and user email
|
||||||
|
return redirect(f"/api/connectors/callback-status?status=success&message=Authentication+successful&provider={provider}&session_token={session_token}&user_email={user_email}")
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
current_app.logger.error(f"Error exchanging code for tokens: {str(e)}", exc_info=True)
|
||||||
|
return redirect(f"/api/connectors/callback-status?status=error&message=Authentication+failed.+Please+try+again+and+make+sure+to+grant+all+requested+permissions.&provider={provider}")
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
current_app.logger.error(f"Error handling connector callback: {e}")
|
||||||
|
return redirect("/api/connectors/callback-status?status=error&message=Authentication+failed.+Please+try+again+and+make+sure+to+grant+all+requested+permissions.")
|
||||||
|
|
||||||
|
|
||||||
|
@connectors_ns.route("/api/connectors/refresh")
|
||||||
|
class ConnectorRefresh(Resource):
|
||||||
|
@api.expect(api.model("ConnectorRefreshModel", {"provider": fields.String(required=True), "refresh_token": fields.String(required=True)}))
|
||||||
|
@api.doc(description="Refresh connector access token")
|
||||||
|
def post(self):
|
||||||
|
try:
|
||||||
|
data = request.get_json()
|
||||||
|
provider = data.get('provider')
|
||||||
|
refresh_token = data.get('refresh_token')
|
||||||
|
|
||||||
|
if not provider or not refresh_token:
|
||||||
|
return make_response(jsonify({"success": False, "error": "provider and refresh_token are required"}), 400)
|
||||||
|
|
||||||
|
auth = ConnectorCreator.create_auth(provider)
|
||||||
|
token_info = auth.refresh_access_token(refresh_token)
|
||||||
|
return make_response(jsonify({"success": True, "token_info": token_info}), 200)
|
||||||
|
except Exception as e:
|
||||||
|
current_app.logger.error(f"Error refreshing token for connector: {e}")
|
||||||
|
return make_response(jsonify({"success": False, "error": str(e)}), 500)
|
||||||
|
|
||||||
|
|
||||||
|
@connectors_ns.route("/api/connectors/files")
|
||||||
|
class ConnectorFiles(Resource):
|
||||||
|
@api.expect(api.model("ConnectorFilesModel", {
|
||||||
|
"provider": fields.String(required=True),
|
||||||
|
"session_token": fields.String(required=True),
|
||||||
|
"folder_id": fields.String(required=False),
|
||||||
|
"limit": fields.Integer(required=False),
|
||||||
|
"page_token": fields.String(required=False),
|
||||||
|
"search_query": fields.String(required=False)
|
||||||
|
}))
|
||||||
|
@api.doc(description="List files from a connector provider (supports pagination and search)")
|
||||||
|
def post(self):
|
||||||
|
try:
|
||||||
|
data = request.get_json()
|
||||||
|
provider = data.get('provider')
|
||||||
|
session_token = data.get('session_token')
|
||||||
|
folder_id = data.get('folder_id')
|
||||||
|
limit = data.get('limit', 10)
|
||||||
|
page_token = data.get('page_token')
|
||||||
|
search_query = data.get('search_query')
|
||||||
|
|
||||||
|
if not provider or not session_token:
|
||||||
|
return make_response(jsonify({"success": False, "error": "provider and session_token are required"}), 400)
|
||||||
|
|
||||||
|
decoded_token = request.decoded_token
|
||||||
|
if not decoded_token:
|
||||||
|
return make_response(jsonify({"success": False, "error": "Unauthorized"}), 401)
|
||||||
|
user = decoded_token.get('sub')
|
||||||
|
session = sessions_collection.find_one({"session_token": session_token, "user": user})
|
||||||
|
if not session:
|
||||||
|
return make_response(jsonify({"success": False, "error": "Invalid or unauthorized session"}), 401)
|
||||||
|
|
||||||
|
loader = ConnectorCreator.create_connector(provider, session_token)
|
||||||
|
input_config = {
|
||||||
|
'limit': limit,
|
||||||
|
'list_only': True,
|
||||||
|
'session_token': session_token,
|
||||||
|
'folder_id': folder_id,
|
||||||
|
'page_token': page_token
|
||||||
|
}
|
||||||
|
if search_query:
|
||||||
|
input_config['search_query'] = search_query
|
||||||
|
|
||||||
|
documents = loader.load_data(input_config)
|
||||||
|
|
||||||
|
files = []
|
||||||
|
for doc in documents[:limit]:
|
||||||
|
metadata = doc.extra_info
|
||||||
|
modified_time = metadata.get('modified_time')
|
||||||
|
if modified_time:
|
||||||
|
date_part = modified_time.split('T')[0]
|
||||||
|
time_part = modified_time.split('T')[1].split('.')[0].split('Z')[0]
|
||||||
|
formatted_time = f"{date_part} {time_part}"
|
||||||
|
else:
|
||||||
|
formatted_time = None
|
||||||
|
|
||||||
|
files.append({
|
||||||
|
'id': doc.doc_id,
|
||||||
|
'name': metadata.get('file_name', 'Unknown File'),
|
||||||
|
'type': metadata.get('mime_type', 'unknown'),
|
||||||
|
'size': metadata.get('size', None),
|
||||||
|
'modifiedTime': formatted_time,
|
||||||
|
'isFolder': metadata.get('is_folder', False)
|
||||||
|
})
|
||||||
|
|
||||||
|
next_token = getattr(loader, 'next_page_token', None)
|
||||||
|
has_more = bool(next_token)
|
||||||
|
|
||||||
|
return make_response(jsonify({
|
||||||
|
"success": True,
|
||||||
|
"files": files,
|
||||||
|
"total": len(files),
|
||||||
|
"next_page_token": next_token,
|
||||||
|
"has_more": has_more
|
||||||
|
}), 200)
|
||||||
|
except Exception as e:
|
||||||
|
current_app.logger.error(f"Error loading connector files: {e}")
|
||||||
|
return make_response(jsonify({"success": False, "error": f"Failed to load files: {str(e)}"}), 500)
|
||||||
|
|
||||||
|
|
||||||
|
@connectors_ns.route("/api/connectors/validate-session")
|
||||||
|
class ConnectorValidateSession(Resource):
|
||||||
|
@api.expect(api.model("ConnectorValidateSessionModel", {"provider": fields.String(required=True), "session_token": fields.String(required=True)}))
|
||||||
|
@api.doc(description="Validate connector session token and return user info and access token")
|
||||||
|
def post(self):
|
||||||
|
try:
|
||||||
|
data = request.get_json()
|
||||||
|
provider = data.get('provider')
|
||||||
|
session_token = data.get('session_token')
|
||||||
|
if not provider or not session_token:
|
||||||
|
return make_response(jsonify({"success": False, "error": "provider and session_token are required"}), 400)
|
||||||
|
|
||||||
|
decoded_token = request.decoded_token
|
||||||
|
if not decoded_token:
|
||||||
|
return make_response(jsonify({"success": False, "error": "Unauthorized"}), 401)
|
||||||
|
user = decoded_token.get('sub')
|
||||||
|
|
||||||
|
session = sessions_collection.find_one({"session_token": session_token, "user": user})
|
||||||
|
if not session or "token_info" not in session:
|
||||||
|
return make_response(jsonify({"success": False, "error": "Invalid or expired session"}), 401)
|
||||||
|
|
||||||
|
token_info = session["token_info"]
|
||||||
|
auth = ConnectorCreator.create_auth(provider)
|
||||||
|
is_expired = auth.is_token_expired(token_info)
|
||||||
|
|
||||||
|
if is_expired and token_info.get('refresh_token'):
|
||||||
|
try:
|
||||||
|
refreshed_token_info = auth.refresh_access_token(token_info.get('refresh_token'))
|
||||||
|
sanitized_token_info = {
|
||||||
|
"access_token": refreshed_token_info.get("access_token"),
|
||||||
|
"refresh_token": refreshed_token_info.get("refresh_token"),
|
||||||
|
"token_uri": refreshed_token_info.get("token_uri"),
|
||||||
|
"expiry": refreshed_token_info.get("expiry")
|
||||||
|
}
|
||||||
|
sessions_collection.update_one(
|
||||||
|
{"session_token": session_token},
|
||||||
|
{"$set": {"token_info": sanitized_token_info}}
|
||||||
|
)
|
||||||
|
token_info = sanitized_token_info
|
||||||
|
is_expired = False
|
||||||
|
except Exception as refresh_error:
|
||||||
|
current_app.logger.error(f"Failed to refresh token: {refresh_error}")
|
||||||
|
|
||||||
|
if is_expired:
|
||||||
|
return make_response(jsonify({
|
||||||
|
"success": False,
|
||||||
|
"expired": True,
|
||||||
|
"error": "Session token has expired. Please reconnect."
|
||||||
|
}), 401)
|
||||||
|
|
||||||
|
return make_response(jsonify({
|
||||||
|
"success": True,
|
||||||
|
"expired": False,
|
||||||
|
"user_email": session.get('user_email', 'Connected User'),
|
||||||
|
"access_token": token_info.get('access_token')
|
||||||
|
}), 200)
|
||||||
|
except Exception as e:
|
||||||
|
current_app.logger.error(f"Error validating connector session: {e}")
|
||||||
|
return make_response(jsonify({"success": False, "error": str(e)}), 500)
|
||||||
|
|
||||||
|
|
||||||
|
@connectors_ns.route("/api/connectors/disconnect")
|
||||||
|
class ConnectorDisconnect(Resource):
|
||||||
|
@api.expect(api.model("ConnectorDisconnectModel", {"provider": fields.String(required=True), "session_token": fields.String(required=False)}))
|
||||||
|
@api.doc(description="Disconnect a connector session")
|
||||||
|
def post(self):
|
||||||
|
try:
|
||||||
|
data = request.get_json()
|
||||||
|
provider = data.get('provider')
|
||||||
|
session_token = data.get('session_token')
|
||||||
|
if not provider:
|
||||||
|
return make_response(jsonify({"success": False, "error": "provider is required"}), 400)
|
||||||
|
|
||||||
|
|
||||||
|
if session_token:
|
||||||
|
sessions_collection.delete_one({"session_token": session_token})
|
||||||
|
|
||||||
|
return make_response(jsonify({"success": True}), 200)
|
||||||
|
except Exception as e:
|
||||||
|
current_app.logger.error(f"Error disconnecting connector session: {e}")
|
||||||
|
return make_response(jsonify({"success": False, "error": str(e)}), 500)
|
||||||
|
|
||||||
|
|
||||||
|
@connectors_ns.route("/api/connectors/sync")
|
||||||
|
class ConnectorSync(Resource):
|
||||||
|
@api.expect(
|
||||||
|
api.model(
|
||||||
|
"ConnectorSyncModel",
|
||||||
|
{
|
||||||
|
"source_id": fields.String(required=True, description="Source ID to sync"),
|
||||||
|
"session_token": fields.String(required=True, description="Authentication token")
|
||||||
|
},
|
||||||
|
)
|
||||||
|
)
|
||||||
|
@api.doc(description="Sync connector source to check for modifications")
|
||||||
|
def post(self):
|
||||||
|
decoded_token = request.decoded_token
|
||||||
|
if not decoded_token:
|
||||||
|
return make_response(jsonify({"success": False}), 401)
|
||||||
|
|
||||||
|
try:
|
||||||
|
data = request.get_json()
|
||||||
|
source_id = data.get('source_id')
|
||||||
|
session_token = data.get('session_token')
|
||||||
|
|
||||||
|
if not all([source_id, session_token]):
|
||||||
|
return make_response(
|
||||||
|
jsonify({
|
||||||
|
"success": False,
|
||||||
|
"error": "source_id and session_token are required"
|
||||||
|
}),
|
||||||
|
400
|
||||||
|
)
|
||||||
|
source = sources_collection.find_one({"_id": ObjectId(source_id)})
|
||||||
|
if not source:
|
||||||
|
return make_response(
|
||||||
|
jsonify({
|
||||||
|
"success": False,
|
||||||
|
"error": "Source not found"
|
||||||
|
}),
|
||||||
|
404
|
||||||
|
)
|
||||||
|
|
||||||
|
if source.get('user') != decoded_token.get('sub'):
|
||||||
|
return make_response(
|
||||||
|
jsonify({
|
||||||
|
"success": False,
|
||||||
|
"error": "Unauthorized access to source"
|
||||||
|
}),
|
||||||
|
403
|
||||||
|
)
|
||||||
|
|
||||||
|
remote_data = {}
|
||||||
|
try:
|
||||||
|
if source.get('remote_data'):
|
||||||
|
remote_data = json.loads(source.get('remote_data'))
|
||||||
|
except json.JSONDecodeError:
|
||||||
|
current_app.logger.error(f"Invalid remote_data format for source {source_id}")
|
||||||
|
remote_data = {}
|
||||||
|
|
||||||
|
source_type = remote_data.get('provider')
|
||||||
|
if not source_type:
|
||||||
|
return make_response(
|
||||||
|
jsonify({
|
||||||
|
"success": False,
|
||||||
|
"error": "Source provider not found in remote_data"
|
||||||
|
}),
|
||||||
|
400
|
||||||
|
)
|
||||||
|
|
||||||
|
# Extract configuration from remote_data
|
||||||
|
file_ids = remote_data.get('file_ids', [])
|
||||||
|
folder_ids = remote_data.get('folder_ids', [])
|
||||||
|
recursive = remote_data.get('recursive', True)
|
||||||
|
|
||||||
|
# Start the sync task
|
||||||
|
task = ingest_connector_task.delay(
|
||||||
|
job_name=source.get('name'),
|
||||||
|
user=decoded_token.get('sub'),
|
||||||
|
source_type=source_type,
|
||||||
|
session_token=session_token,
|
||||||
|
file_ids=file_ids,
|
||||||
|
folder_ids=folder_ids,
|
||||||
|
recursive=recursive,
|
||||||
|
retriever=source.get('retriever', 'classic'),
|
||||||
|
operation_mode="sync",
|
||||||
|
doc_id=source_id,
|
||||||
|
sync_frequency=source.get('sync_frequency', 'never')
|
||||||
|
)
|
||||||
|
|
||||||
|
return make_response(
|
||||||
|
jsonify({
|
||||||
|
"success": True,
|
||||||
|
"task_id": task.id
|
||||||
|
}),
|
||||||
|
200
|
||||||
|
)
|
||||||
|
|
||||||
|
except Exception as err:
|
||||||
|
current_app.logger.error(
|
||||||
|
f"Error syncing connector source: {err}",
|
||||||
|
exc_info=True
|
||||||
|
)
|
||||||
|
return make_response(
|
||||||
|
jsonify({
|
||||||
|
"success": False,
|
||||||
|
"error": str(err)
|
||||||
|
}),
|
||||||
|
400
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
@connectors_ns.route("/api/connectors/callback-status")
|
||||||
|
class ConnectorCallbackStatus(Resource):
|
||||||
|
@api.doc(description="Return HTML page with connector authentication status")
|
||||||
|
def get(self):
|
||||||
|
"""Return HTML page with connector authentication status"""
|
||||||
|
try:
|
||||||
|
status = request.args.get('status', 'error')
|
||||||
|
message = request.args.get('message', '')
|
||||||
|
provider = request.args.get('provider', 'connector')
|
||||||
|
session_token = request.args.get('session_token', '')
|
||||||
|
user_email = request.args.get('user_email', '')
|
||||||
|
|
||||||
|
html_content = f"""
|
||||||
|
<!DOCTYPE html>
|
||||||
|
<html>
|
||||||
|
<head>
|
||||||
|
<title>{provider.replace('_', ' ').title()} Authentication</title>
|
||||||
|
<style>
|
||||||
|
body {{ font-family: Arial, sans-serif; text-align: center; padding: 40px; }}
|
||||||
|
.container {{ max-width: 600px; margin: 0 auto; }}
|
||||||
|
.success {{ color: #4CAF50; }}
|
||||||
|
.error {{ color: #F44336; }}
|
||||||
|
.cancelled {{ color: #FF9800; }}
|
||||||
|
</style>
|
||||||
|
<script>
|
||||||
|
window.onload = function() {{
|
||||||
|
const status = "{status}";
|
||||||
|
const sessionToken = "{session_token}";
|
||||||
|
const userEmail = "{user_email}";
|
||||||
|
|
||||||
|
if (status === "success" && window.opener) {{
|
||||||
|
window.opener.postMessage({{
|
||||||
|
type: '{provider}_auth_success',
|
||||||
|
session_token: sessionToken,
|
||||||
|
user_email: userEmail
|
||||||
|
}}, '*');
|
||||||
|
|
||||||
|
setTimeout(() => window.close(), 3000);
|
||||||
|
}} else if (status === "cancelled" || status === "error") {{
|
||||||
|
setTimeout(() => window.close(), 3000);
|
||||||
|
}}
|
||||||
|
}};
|
||||||
|
</script>
|
||||||
|
</head>
|
||||||
|
<body>
|
||||||
|
<div class="container">
|
||||||
|
<h2>{provider.replace('_', ' ').title()} Authentication</h2>
|
||||||
|
<div class="{status}">
|
||||||
|
<p>{message}</p>
|
||||||
|
{f'<p>Connected as: {user_email}</p>' if status == 'success' else ''}
|
||||||
|
</div>
|
||||||
|
<p><small>You can close this window. {f"Your {provider.replace('_', ' ').title()} is now connected and ready to use." if status == 'success' else "Feel free to close this window."}</small></p>
|
||||||
|
</div>
|
||||||
|
</body>
|
||||||
|
</html>
|
||||||
|
"""
|
||||||
|
|
||||||
|
return make_response(html_content, 200, {'Content-Type': 'text/html'})
|
||||||
|
except Exception as e:
|
||||||
|
current_app.logger.error(f"Error rendering callback status page: {e}")
|
||||||
|
return make_response("Authentication error occurred", 500, {'Content-Type': 'text/html'})
|
||||||
|
|
||||||
|
|
||||||
0
application/api/internal/__init__.py
Normal file
0
application/api/internal/__init__.py
Normal file
127
application/api/internal/routes.py
Executable file
127
application/api/internal/routes.py
Executable file
@@ -0,0 +1,127 @@
|
|||||||
|
import os
|
||||||
|
import datetime
|
||||||
|
import json
|
||||||
|
from flask import Blueprint, request, send_from_directory
|
||||||
|
from werkzeug.utils import secure_filename
|
||||||
|
from bson.objectid import ObjectId
|
||||||
|
import logging
|
||||||
|
from application.core.mongo_db import MongoDB
|
||||||
|
from application.core.settings import settings
|
||||||
|
from application.storage.storage_creator import StorageCreator
|
||||||
|
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
mongo = MongoDB.get_client()
|
||||||
|
db = mongo[settings.MONGO_DB_NAME]
|
||||||
|
conversations_collection = db["conversations"]
|
||||||
|
sources_collection = db["sources"]
|
||||||
|
|
||||||
|
current_dir = os.path.dirname(
|
||||||
|
os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
internal = Blueprint("internal", __name__)
|
||||||
|
|
||||||
|
|
||||||
|
@internal.route("/api/download", methods=["get"])
|
||||||
|
def download_file():
|
||||||
|
user = secure_filename(request.args.get("user"))
|
||||||
|
job_name = secure_filename(request.args.get("name"))
|
||||||
|
filename = secure_filename(request.args.get("file"))
|
||||||
|
save_dir = os.path.join(current_dir, settings.UPLOAD_FOLDER, user, job_name)
|
||||||
|
return send_from_directory(save_dir, filename, as_attachment=True)
|
||||||
|
|
||||||
|
|
||||||
|
@internal.route("/api/upload_index", methods=["POST"])
|
||||||
|
def upload_index_files():
|
||||||
|
"""Upload two files(index.faiss, index.pkl) to the user's folder."""
|
||||||
|
if "user" not in request.form:
|
||||||
|
return {"status": "no user"}
|
||||||
|
user = request.form["user"]
|
||||||
|
if "name" not in request.form:
|
||||||
|
return {"status": "no name"}
|
||||||
|
job_name = request.form["name"]
|
||||||
|
tokens = request.form["tokens"]
|
||||||
|
retriever = request.form["retriever"]
|
||||||
|
id = request.form["id"]
|
||||||
|
type = request.form["type"]
|
||||||
|
remote_data = request.form["remote_data"] if "remote_data" in request.form else None
|
||||||
|
sync_frequency = request.form["sync_frequency"] if "sync_frequency" in request.form else None
|
||||||
|
|
||||||
|
file_path = request.form.get("file_path")
|
||||||
|
directory_structure = request.form.get("directory_structure")
|
||||||
|
|
||||||
|
if directory_structure:
|
||||||
|
try:
|
||||||
|
directory_structure = json.loads(directory_structure)
|
||||||
|
except Exception:
|
||||||
|
logger.error("Error parsing directory_structure")
|
||||||
|
directory_structure = {}
|
||||||
|
else:
|
||||||
|
directory_structure = {}
|
||||||
|
|
||||||
|
storage = StorageCreator.get_storage()
|
||||||
|
index_base_path = f"indexes/{id}"
|
||||||
|
|
||||||
|
if settings.VECTOR_STORE == "faiss":
|
||||||
|
if "file_faiss" not in request.files:
|
||||||
|
logger.error("No file_faiss part")
|
||||||
|
return {"status": "no file"}
|
||||||
|
file_faiss = request.files["file_faiss"]
|
||||||
|
if file_faiss.filename == "":
|
||||||
|
return {"status": "no file name"}
|
||||||
|
if "file_pkl" not in request.files:
|
||||||
|
logger.error("No file_pkl part")
|
||||||
|
return {"status": "no file"}
|
||||||
|
file_pkl = request.files["file_pkl"]
|
||||||
|
if file_pkl.filename == "":
|
||||||
|
return {"status": "no file name"}
|
||||||
|
|
||||||
|
# Save index files to storage
|
||||||
|
faiss_storage_path = f"{index_base_path}/index.faiss"
|
||||||
|
pkl_storage_path = f"{index_base_path}/index.pkl"
|
||||||
|
storage.save_file(file_faiss, faiss_storage_path)
|
||||||
|
storage.save_file(file_pkl, pkl_storage_path)
|
||||||
|
|
||||||
|
|
||||||
|
existing_entry = sources_collection.find_one({"_id": ObjectId(id)})
|
||||||
|
if existing_entry:
|
||||||
|
sources_collection.update_one(
|
||||||
|
{"_id": ObjectId(id)},
|
||||||
|
{
|
||||||
|
"$set": {
|
||||||
|
"user": user,
|
||||||
|
"name": job_name,
|
||||||
|
"language": job_name,
|
||||||
|
"date": datetime.datetime.now(),
|
||||||
|
"model": settings.EMBEDDINGS_NAME,
|
||||||
|
"type": type,
|
||||||
|
"tokens": tokens,
|
||||||
|
"retriever": retriever,
|
||||||
|
"remote_data": remote_data,
|
||||||
|
"sync_frequency": sync_frequency,
|
||||||
|
"file_path": file_path,
|
||||||
|
"directory_structure": directory_structure,
|
||||||
|
}
|
||||||
|
},
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
sources_collection.insert_one(
|
||||||
|
{
|
||||||
|
"_id": ObjectId(id),
|
||||||
|
"user": user,
|
||||||
|
"name": job_name,
|
||||||
|
"language": job_name,
|
||||||
|
"date": datetime.datetime.now(),
|
||||||
|
"model": settings.EMBEDDINGS_NAME,
|
||||||
|
"type": type,
|
||||||
|
"tokens": tokens,
|
||||||
|
"retriever": retriever,
|
||||||
|
"remote_data": remote_data,
|
||||||
|
"sync_frequency": sync_frequency,
|
||||||
|
"file_path": file_path,
|
||||||
|
"directory_structure": directory_structure,
|
||||||
|
}
|
||||||
|
)
|
||||||
|
return {"status": "ok"}
|
||||||
0
application/api/user/__init__.py
Normal file
0
application/api/user/__init__.py
Normal file
4566
application/api/user/routes.py
Normal file
4566
application/api/user/routes.py
Normal file
File diff suppressed because it is too large
Load Diff
112
application/api/user/tasks.py
Normal file
112
application/api/user/tasks.py
Normal file
@@ -0,0 +1,112 @@
|
|||||||
|
from datetime import timedelta
|
||||||
|
|
||||||
|
from application.celery_init import celery
|
||||||
|
from application.worker import (
|
||||||
|
agent_webhook_worker,
|
||||||
|
attachment_worker,
|
||||||
|
ingest_worker,
|
||||||
|
mcp_oauth,
|
||||||
|
mcp_oauth_status,
|
||||||
|
remote_worker,
|
||||||
|
sync_worker,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
@celery.task(bind=True)
|
||||||
|
def ingest(self, directory, formats, job_name, user, file_path, filename):
|
||||||
|
resp = ingest_worker(self, directory, formats, job_name, file_path, filename, user)
|
||||||
|
return resp
|
||||||
|
|
||||||
|
|
||||||
|
@celery.task(bind=True)
|
||||||
|
def ingest_remote(self, source_data, job_name, user, loader):
|
||||||
|
resp = remote_worker(self, source_data, job_name, user, loader)
|
||||||
|
return resp
|
||||||
|
|
||||||
|
|
||||||
|
@celery.task(bind=True)
|
||||||
|
def reingest_source_task(self, source_id, user):
|
||||||
|
from application.worker import reingest_source_worker
|
||||||
|
|
||||||
|
resp = reingest_source_worker(self, source_id, user)
|
||||||
|
return resp
|
||||||
|
|
||||||
|
|
||||||
|
@celery.task(bind=True)
|
||||||
|
def schedule_syncs(self, frequency):
|
||||||
|
resp = sync_worker(self, frequency)
|
||||||
|
return resp
|
||||||
|
|
||||||
|
|
||||||
|
@celery.task(bind=True)
|
||||||
|
def store_attachment(self, file_info, user):
|
||||||
|
resp = attachment_worker(self, file_info, user)
|
||||||
|
return resp
|
||||||
|
|
||||||
|
|
||||||
|
@celery.task(bind=True)
|
||||||
|
def process_agent_webhook(self, agent_id, payload):
|
||||||
|
resp = agent_webhook_worker(self, agent_id, payload)
|
||||||
|
return resp
|
||||||
|
|
||||||
|
|
||||||
|
@celery.task(bind=True)
|
||||||
|
def ingest_connector_task(
|
||||||
|
self,
|
||||||
|
job_name,
|
||||||
|
user,
|
||||||
|
source_type,
|
||||||
|
session_token=None,
|
||||||
|
file_ids=None,
|
||||||
|
folder_ids=None,
|
||||||
|
recursive=True,
|
||||||
|
retriever="classic",
|
||||||
|
operation_mode="upload",
|
||||||
|
doc_id=None,
|
||||||
|
sync_frequency="never",
|
||||||
|
):
|
||||||
|
from application.worker import ingest_connector
|
||||||
|
|
||||||
|
resp = ingest_connector(
|
||||||
|
self,
|
||||||
|
job_name,
|
||||||
|
user,
|
||||||
|
source_type,
|
||||||
|
session_token=session_token,
|
||||||
|
file_ids=file_ids,
|
||||||
|
folder_ids=folder_ids,
|
||||||
|
recursive=recursive,
|
||||||
|
retriever=retriever,
|
||||||
|
operation_mode=operation_mode,
|
||||||
|
doc_id=doc_id,
|
||||||
|
sync_frequency=sync_frequency,
|
||||||
|
)
|
||||||
|
return resp
|
||||||
|
|
||||||
|
|
||||||
|
@celery.on_after_configure.connect
|
||||||
|
def setup_periodic_tasks(sender, **kwargs):
|
||||||
|
sender.add_periodic_task(
|
||||||
|
timedelta(days=1),
|
||||||
|
schedule_syncs.s("daily"),
|
||||||
|
)
|
||||||
|
sender.add_periodic_task(
|
||||||
|
timedelta(weeks=1),
|
||||||
|
schedule_syncs.s("weekly"),
|
||||||
|
)
|
||||||
|
sender.add_periodic_task(
|
||||||
|
timedelta(days=30),
|
||||||
|
schedule_syncs.s("monthly"),
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
@celery.task(bind=True)
|
||||||
|
def mcp_oauth_task(self, config, user):
|
||||||
|
resp = mcp_oauth(self, config, user)
|
||||||
|
return resp
|
||||||
|
|
||||||
|
|
||||||
|
@celery.task(bind=True)
|
||||||
|
def mcp_oauth_status_task(self, task_id):
|
||||||
|
resp = mcp_oauth_status(self, task_id)
|
||||||
|
return resp
|
||||||
@@ -1,546 +1,115 @@
|
|||||||
import asyncio
|
|
||||||
import datetime
|
|
||||||
import http.client
|
|
||||||
import json
|
|
||||||
import os
|
import os
|
||||||
import traceback
|
|
||||||
|
|
||||||
import openai
|
|
||||||
import dotenv
|
|
||||||
import requests
|
|
||||||
from celery import Celery
|
|
||||||
from celery.result import AsyncResult
|
|
||||||
from flask import Flask, request, render_template, send_from_directory, jsonify, Response
|
|
||||||
from langchain import FAISS
|
|
||||||
from langchain import VectorDBQA, HuggingFaceHub, Cohere, OpenAI
|
|
||||||
from langchain.chains import LLMChain, ConversationalRetrievalChain
|
|
||||||
from langchain.chains.conversational_retrieval.prompts import CONDENSE_QUESTION_PROMPT
|
|
||||||
from langchain.chains.question_answering import load_qa_chain
|
|
||||||
from langchain.chat_models import ChatOpenAI
|
|
||||||
from langchain.embeddings import OpenAIEmbeddings, HuggingFaceHubEmbeddings, CohereEmbeddings, \
|
|
||||||
HuggingFaceInstructEmbeddings
|
|
||||||
from langchain.prompts import PromptTemplate
|
|
||||||
from langchain.prompts.chat import (
|
|
||||||
ChatPromptTemplate,
|
|
||||||
SystemMessagePromptTemplate,
|
|
||||||
HumanMessagePromptTemplate,
|
|
||||||
AIMessagePromptTemplate,
|
|
||||||
)
|
|
||||||
from pymongo import MongoClient
|
|
||||||
from werkzeug.utils import secure_filename
|
|
||||||
from langchain.llms import GPT4All
|
|
||||||
|
|
||||||
from core.settings import settings
|
|
||||||
from error import bad_request
|
|
||||||
from worker import ingest_worker
|
|
||||||
|
|
||||||
# os.environ["LANGCHAIN_HANDLER"] = "langchain"
|
|
||||||
|
|
||||||
if settings.LLM_NAME == "manifest":
|
|
||||||
from manifest import Manifest
|
|
||||||
from langchain.llms.manifest import ManifestWrapper
|
|
||||||
|
|
||||||
manifest = Manifest(
|
|
||||||
client_name="huggingface",
|
|
||||||
client_connection="http://127.0.0.1:5000"
|
|
||||||
)
|
|
||||||
|
|
||||||
# Redirect PosixPath to WindowsPath on Windows
|
|
||||||
import platform
|
import platform
|
||||||
|
import uuid
|
||||||
|
|
||||||
|
import dotenv
|
||||||
|
from flask import Flask, jsonify, redirect, request
|
||||||
|
from jose import jwt
|
||||||
|
|
||||||
|
from application.auth import handle_auth
|
||||||
|
|
||||||
|
from application.core.logging_config import setup_logging
|
||||||
|
|
||||||
|
setup_logging()
|
||||||
|
|
||||||
|
from application.api import api # noqa: E402
|
||||||
|
from application.api.answer import answer # noqa: E402
|
||||||
|
from application.api.internal.routes import internal # noqa: E402
|
||||||
|
from application.api.user.routes import user # noqa: E402
|
||||||
|
from application.api.connector.routes import connector # noqa: E402
|
||||||
|
from application.celery_init import celery # noqa: E402
|
||||||
|
from application.core.settings import settings # noqa: E402
|
||||||
|
|
||||||
|
|
||||||
if platform.system() == "Windows":
|
if platform.system() == "Windows":
|
||||||
import pathlib
|
import pathlib
|
||||||
|
|
||||||
temp = pathlib.PosixPath
|
|
||||||
pathlib.PosixPath = pathlib.WindowsPath
|
pathlib.PosixPath = pathlib.WindowsPath
|
||||||
|
|
||||||
# loading the .env file
|
|
||||||
dotenv.load_dotenv()
|
dotenv.load_dotenv()
|
||||||
|
|
||||||
# load the prompts
|
|
||||||
with open("prompts/combine_prompt.txt", "r") as f:
|
|
||||||
template = f.read()
|
|
||||||
|
|
||||||
with open("prompts/combine_prompt_hist.txt", "r") as f:
|
|
||||||
template_hist = f.read()
|
|
||||||
|
|
||||||
with open("prompts/question_prompt.txt", "r") as f:
|
|
||||||
template_quest = f.read()
|
|
||||||
|
|
||||||
with open("prompts/chat_combine_prompt.txt", "r") as f:
|
|
||||||
chat_combine_template = f.read()
|
|
||||||
|
|
||||||
with open("prompts/chat_reduce_prompt.txt", "r") as f:
|
|
||||||
chat_reduce_template = f.read()
|
|
||||||
|
|
||||||
if settings.API_KEY is not None:
|
|
||||||
api_key_set = True
|
|
||||||
else:
|
|
||||||
api_key_set = False
|
|
||||||
if settings.EMBEDDINGS_KEY is not None:
|
|
||||||
embeddings_key_set = True
|
|
||||||
else:
|
|
||||||
embeddings_key_set = False
|
|
||||||
|
|
||||||
app = Flask(__name__)
|
app = Flask(__name__)
|
||||||
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER = "inputs"
|
app.register_blueprint(user)
|
||||||
app.config['CELERY_BROKER_URL'] = settings.CELERY_BROKER_URL
|
app.register_blueprint(answer)
|
||||||
app.config['CELERY_RESULT_BACKEND'] = settings.CELERY_RESULT_BACKEND
|
app.register_blueprint(internal)
|
||||||
app.config['MONGO_URI'] = settings.MONGO_URI
|
app.register_blueprint(connector)
|
||||||
celery = Celery()
|
app.config.update(
|
||||||
celery.config_from_object('celeryconfig')
|
UPLOAD_FOLDER="inputs",
|
||||||
mongo = MongoClient(app.config['MONGO_URI'])
|
CELERY_BROKER_URL=settings.CELERY_BROKER_URL,
|
||||||
db = mongo["docsgpt"]
|
CELERY_RESULT_BACKEND=settings.CELERY_RESULT_BACKEND,
|
||||||
vectors_collection = db["vectors"]
|
MONGO_URI=settings.MONGO_URI,
|
||||||
|
)
|
||||||
|
celery.config_from_object("application.celeryconfig")
|
||||||
|
api.init_app(app)
|
||||||
|
|
||||||
|
if settings.AUTH_TYPE in ("simple_jwt", "session_jwt") and not settings.JWT_SECRET_KEY:
|
||||||
async def async_generate(chain, question, chat_history):
|
key_file = ".jwt_secret_key"
|
||||||
result = await chain.arun({"question": question, "chat_history": chat_history})
|
|
||||||
return result
|
|
||||||
|
|
||||||
|
|
||||||
def run_async_chain(chain, question, chat_history):
|
|
||||||
loop = asyncio.new_event_loop()
|
|
||||||
asyncio.set_event_loop(loop)
|
|
||||||
result = {}
|
|
||||||
try:
|
try:
|
||||||
answer = loop.run_until_complete(async_generate(chain, question, chat_history))
|
with open(key_file, "r") as f:
|
||||||
finally:
|
settings.JWT_SECRET_KEY = f.read().strip()
|
||||||
loop.close()
|
except FileNotFoundError:
|
||||||
result["answer"] = answer
|
new_key = os.urandom(32).hex()
|
||||||
return result
|
with open(key_file, "w") as f:
|
||||||
|
f.write(new_key)
|
||||||
|
settings.JWT_SECRET_KEY = new_key
|
||||||
def get_vectorstore(data):
|
except Exception as e:
|
||||||
if "active_docs" in data:
|
raise RuntimeError(f"Failed to setup JWT_SECRET_KEY: {e}")
|
||||||
if data["active_docs"].split("/")[0] == "local":
|
SIMPLE_JWT_TOKEN = None
|
||||||
if data["active_docs"].split("/")[1] == "default":
|
if settings.AUTH_TYPE == "simple_jwt":
|
||||||
vectorstore = ""
|
payload = {"sub": "local"}
|
||||||
else:
|
SIMPLE_JWT_TOKEN = jwt.encode(payload, settings.JWT_SECRET_KEY, algorithm="HS256")
|
||||||
vectorstore = "indexes/" + data["active_docs"]
|
print(f"Generated Simple JWT Token: {SIMPLE_JWT_TOKEN}")
|
||||||
else:
|
|
||||||
vectorstore = "vectors/" + data["active_docs"]
|
|
||||||
if data['active_docs'] == "default":
|
|
||||||
vectorstore = ""
|
|
||||||
else:
|
|
||||||
vectorstore = ""
|
|
||||||
return vectorstore
|
|
||||||
|
|
||||||
def get_docsearch(vectorstore, embeddings_key):
|
|
||||||
if settings.EMBEDDINGS_NAME == "openai_text-embedding-ada-002":
|
|
||||||
docsearch = FAISS.load_local(vectorstore, OpenAIEmbeddings(openai_api_key=embeddings_key))
|
|
||||||
elif settings.EMBEDDINGS_NAME == "huggingface_sentence-transformers/all-mpnet-base-v2":
|
|
||||||
docsearch = FAISS.load_local(vectorstore, HuggingFaceHubEmbeddings())
|
|
||||||
elif settings.EMBEDDINGS_NAME == "huggingface_hkunlp/instructor-large":
|
|
||||||
docsearch = FAISS.load_local(vectorstore, HuggingFaceInstructEmbeddings())
|
|
||||||
elif settings.EMBEDDINGS_NAME == "cohere_medium":
|
|
||||||
docsearch = FAISS.load_local(vectorstore, CohereEmbeddings(cohere_api_key=embeddings_key))
|
|
||||||
return docsearch
|
|
||||||
|
|
||||||
|
|
||||||
@celery.task(bind=True)
|
|
||||||
def ingest(self, directory, formats, name_job, filename, user):
|
|
||||||
resp = ingest_worker(self, directory, formats, name_job, filename, user)
|
|
||||||
return resp
|
|
||||||
|
|
||||||
|
|
||||||
@app.route("/")
|
@app.route("/")
|
||||||
def home():
|
def home():
|
||||||
return render_template("index.html", api_key_set=api_key_set, llm_choice=settings.LLM_NAME,
|
if request.remote_addr in ("0.0.0.0", "127.0.0.1", "localhost", "172.18.0.1"):
|
||||||
embeddings_choice=settings.EMBEDDINGS_NAME)
|
return redirect("http://localhost:5173")
|
||||||
|
|
||||||
def complete_stream(question, docsearch, chat_history, api_key):
|
|
||||||
openai.api_key = api_key
|
|
||||||
llm = ChatOpenAI(openai_api_key=api_key)
|
|
||||||
docs = docsearch.similarity_search(question, k=2)
|
|
||||||
# join all page_content together with a newline
|
|
||||||
docs_together = "\n".join([doc.page_content for doc in docs])
|
|
||||||
p_chat_combine = chat_combine_template.replace("{summaries}", docs_together)
|
|
||||||
messages_combine = [{"role": "system", "content": p_chat_combine}]
|
|
||||||
if len(chat_history) > 1:
|
|
||||||
tokens_current_history = 0
|
|
||||||
# count tokens in history
|
|
||||||
chat_history.reverse()
|
|
||||||
for i in chat_history:
|
|
||||||
if "prompt" in i and "response" in i:
|
|
||||||
tokens_batch = llm.get_num_tokens(i["prompt"]) + llm.get_num_tokens(i["response"])
|
|
||||||
if tokens_current_history + tokens_batch < settings.TOKENS_MAX_HISTORY:
|
|
||||||
tokens_current_history += tokens_batch
|
|
||||||
messages_combine.append({"role": "user", "content": i["prompt"]})
|
|
||||||
messages_combine.append({"role": "system", "content": i["response"]})
|
|
||||||
messages_combine.append({"role": "user", "content": question})
|
|
||||||
completion = openai.ChatCompletion.create(model="gpt-3.5-turbo",
|
|
||||||
messages=messages_combine, stream=True, max_tokens=500, temperature=0)
|
|
||||||
|
|
||||||
for line in completion:
|
|
||||||
if 'content' in line['choices'][0]['delta']:
|
|
||||||
# check if the delta contains content
|
|
||||||
data = json.dumps({"answer": str(line['choices'][0]['delta']['content'])})
|
|
||||||
yield f"data: {data}\n\n"
|
|
||||||
# send data.type = "end" to indicate that the stream has ended as json
|
|
||||||
data = json.dumps({"type": "end"})
|
|
||||||
yield f"data: {data}\n\n"
|
|
||||||
@app.route("/stream", methods=['POST', 'GET'])
|
|
||||||
def stream():
|
|
||||||
# get parameter from url question
|
|
||||||
question = request.args.get('question')
|
|
||||||
history = request.args.get('history')
|
|
||||||
# history to json object from string
|
|
||||||
history = json.loads(history)
|
|
||||||
|
|
||||||
# check if active_docs is set
|
|
||||||
|
|
||||||
if not api_key_set:
|
|
||||||
api_key = request.args.get("api_key")
|
|
||||||
else:
|
else:
|
||||||
api_key = settings.API_KEY
|
return "Welcome to DocsGPT Backend!"
|
||||||
if not embeddings_key_set:
|
|
||||||
embeddings_key = request.args.get("embeddings_key")
|
|
||||||
else:
|
|
||||||
embeddings_key = settings.EMBEDDINGS_KEY
|
|
||||||
if "active_docs" in request.args:
|
|
||||||
vectorstore = get_vectorstore({"active_docs": request.args.get("active_docs")})
|
|
||||||
else:
|
|
||||||
vectorstore = ""
|
|
||||||
docsearch = get_docsearch(vectorstore, embeddings_key)
|
|
||||||
|
|
||||||
|
|
||||||
#question = "Hi"
|
@app.route("/api/config")
|
||||||
return Response(complete_stream(question, docsearch,
|
def get_config():
|
||||||
chat_history= history, api_key=api_key), mimetype='text/event-stream')
|
response = {
|
||||||
|
"auth_type": settings.AUTH_TYPE,
|
||||||
|
"requires_auth": settings.AUTH_TYPE in ["simple_jwt", "session_jwt"],
|
||||||
|
}
|
||||||
|
return jsonify(response)
|
||||||
|
|
||||||
|
|
||||||
@app.route("/api/answer", methods=["POST"])
|
@app.route("/api/generate_token")
|
||||||
def api_answer():
|
def generate_token():
|
||||||
data = request.get_json()
|
if settings.AUTH_TYPE == "session_jwt":
|
||||||
question = data["question"]
|
new_user_id = str(uuid.uuid4())
|
||||||
history = data["history"]
|
token = jwt.encode(
|
||||||
print('-' * 5)
|
{"sub": new_user_id}, settings.JWT_SECRET_KEY, algorithm="HS256"
|
||||||
if not api_key_set:
|
|
||||||
api_key = data["api_key"]
|
|
||||||
else:
|
|
||||||
api_key = settings.API_KEY
|
|
||||||
if not embeddings_key_set:
|
|
||||||
embeddings_key = data["embeddings_key"]
|
|
||||||
else:
|
|
||||||
embeddings_key = settings.EMBEDDINGS_KEY
|
|
||||||
|
|
||||||
# use try and except to check for exception
|
|
||||||
try:
|
|
||||||
# check if the vectorstore is set
|
|
||||||
vectorstore = get_vectorstore(data)
|
|
||||||
# loading the index and the store and the prompt template
|
|
||||||
# Note if you have used other embeddings than OpenAI, you need to change the embeddings
|
|
||||||
docsearch = get_docsearch(vectorstore, embeddings_key)
|
|
||||||
|
|
||||||
q_prompt = PromptTemplate(input_variables=["context", "question"], template=template_quest,
|
|
||||||
template_format="jinja2")
|
|
||||||
if settings.LLM_NAME == "openai_chat":
|
|
||||||
llm = ChatOpenAI(openai_api_key=api_key) # optional parameter: model_name="gpt-4"
|
|
||||||
messages_combine = [SystemMessagePromptTemplate.from_template(chat_combine_template)]
|
|
||||||
if history:
|
|
||||||
tokens_current_history = 0
|
|
||||||
#count tokens in history
|
|
||||||
history.reverse()
|
|
||||||
for i in history:
|
|
||||||
if "prompt" in i and "response" in i:
|
|
||||||
tokens_batch = llm.get_num_tokens(i["prompt"]) + llm.get_num_tokens(i["response"])
|
|
||||||
if tokens_current_history + tokens_batch < settings.TOKENS_MAX_HISTORY:
|
|
||||||
tokens_current_history += tokens_batch
|
|
||||||
messages_combine.append(HumanMessagePromptTemplate.from_template(i["prompt"]))
|
|
||||||
messages_combine.append(AIMessagePromptTemplate.from_template(i["response"]))
|
|
||||||
messages_combine.append(HumanMessagePromptTemplate.from_template("{question}"))
|
|
||||||
import sys
|
|
||||||
print(messages_combine, file=sys.stderr)
|
|
||||||
p_chat_combine = ChatPromptTemplate.from_messages(messages_combine)
|
|
||||||
elif settings.LLM_NAME == "openai":
|
|
||||||
llm = OpenAI(openai_api_key=api_key, temperature=0)
|
|
||||||
elif settings.LLM_NAME == "manifest":
|
|
||||||
llm = ManifestWrapper(client=manifest, llm_kwargs={"temperature": 0.001, "max_tokens": 2048})
|
|
||||||
elif settings.LLM_NAME == "huggingface":
|
|
||||||
llm = HuggingFaceHub(repo_id="bigscience/bloom", huggingfacehub_api_token=api_key)
|
|
||||||
elif settings.LLM_NAME == "cohere":
|
|
||||||
llm = Cohere(model="command-xlarge-nightly", cohere_api_key=api_key)
|
|
||||||
elif settings.LLM_NAME == "gpt4all":
|
|
||||||
llm = GPT4All(model=settings.MODEL_PATH)
|
|
||||||
else:
|
|
||||||
raise ValueError("unknown LLM model")
|
|
||||||
|
|
||||||
if settings.LLM_NAME == "openai_chat":
|
|
||||||
question_generator = LLMChain(llm=llm, prompt=CONDENSE_QUESTION_PROMPT)
|
|
||||||
doc_chain = load_qa_chain(llm, chain_type="map_reduce", combine_prompt=p_chat_combine)
|
|
||||||
chain = ConversationalRetrievalChain(
|
|
||||||
retriever=docsearch.as_retriever(k=2),
|
|
||||||
question_generator=question_generator,
|
|
||||||
combine_docs_chain=doc_chain,
|
|
||||||
)
|
)
|
||||||
chat_history = []
|
return jsonify({"token": token})
|
||||||
# result = chain({"question": question, "chat_history": chat_history})
|
return jsonify({"error": "Token generation not allowed in current auth mode"}), 400
|
||||||
# generate async with async generate method
|
|
||||||
result = run_async_chain(chain, question, chat_history)
|
|
||||||
elif settings.LLM_NAME == "gpt4all":
|
|
||||||
question_generator = LLMChain(llm=llm, prompt=CONDENSE_QUESTION_PROMPT)
|
|
||||||
doc_chain = load_qa_chain(llm, chain_type="map_reduce", combine_prompt=p_chat_combine)
|
|
||||||
chain = ConversationalRetrievalChain(
|
|
||||||
retriever=docsearch.as_retriever(k=2),
|
|
||||||
question_generator=question_generator,
|
|
||||||
combine_docs_chain=doc_chain,
|
|
||||||
)
|
|
||||||
chat_history = []
|
|
||||||
# result = chain({"question": question, "chat_history": chat_history})
|
|
||||||
# generate async with async generate method
|
|
||||||
result = run_async_chain(chain, question, chat_history)
|
|
||||||
|
|
||||||
|
|
||||||
|
@app.before_request
|
||||||
|
def authenticate_request():
|
||||||
|
if request.method == "OPTIONS":
|
||||||
|
return "", 200
|
||||||
|
decoded_token = handle_auth(request)
|
||||||
|
if not decoded_token:
|
||||||
|
request.decoded_token = None
|
||||||
|
elif "error" in decoded_token:
|
||||||
|
return jsonify(decoded_token), 401
|
||||||
else:
|
else:
|
||||||
qa_chain = load_qa_chain(llm=llm, chain_type="map_reduce",
|
request.decoded_token = decoded_token
|
||||||
combine_prompt=chat_combine_template, question_prompt=q_prompt)
|
|
||||||
chain = VectorDBQA(combine_documents_chain=qa_chain, vectorstore=docsearch, k=3)
|
|
||||||
result = chain({"query": question})
|
|
||||||
|
|
||||||
print(result)
|
|
||||||
|
|
||||||
# some formatting for the frontend
|
|
||||||
if "result" in result:
|
|
||||||
result['answer'] = result['result']
|
|
||||||
result['answer'] = result['answer'].replace("\\n", "\n")
|
|
||||||
try:
|
|
||||||
result['answer'] = result['answer'].split("SOURCES:")[0]
|
|
||||||
except Exception:
|
|
||||||
pass
|
|
||||||
|
|
||||||
# mock result
|
|
||||||
# result = {
|
|
||||||
# "answer": "The answer is 42",
|
|
||||||
# "sources": ["https://en.wikipedia.org/wiki/42_(number)", "https://en.wikipedia.org/wiki/42_(number)"]
|
|
||||||
# }
|
|
||||||
return result
|
|
||||||
except Exception as e:
|
|
||||||
# print whole traceback
|
|
||||||
traceback.print_exc()
|
|
||||||
print(str(e))
|
|
||||||
return bad_request(500, str(e))
|
|
||||||
|
|
||||||
|
|
||||||
@app.route("/api/docs_check", methods=["POST"])
|
|
||||||
def check_docs():
|
|
||||||
# check if docs exist in a vectorstore folder
|
|
||||||
data = request.get_json()
|
|
||||||
# split docs on / and take first part
|
|
||||||
if data["docs"].split("/")[0] == "local":
|
|
||||||
return {"status": 'exists'}
|
|
||||||
vectorstore = "vectors/" + data["docs"]
|
|
||||||
base_path = 'https://raw.githubusercontent.com/arc53/DocsHUB/main/'
|
|
||||||
if os.path.exists(vectorstore) or data["docs"] == "default":
|
|
||||||
return {"status": 'exists'}
|
|
||||||
else:
|
|
||||||
r = requests.get(base_path + vectorstore + "index.faiss")
|
|
||||||
|
|
||||||
if r.status_code != 200:
|
|
||||||
return {"status": 'null'}
|
|
||||||
else:
|
|
||||||
if not os.path.exists(vectorstore):
|
|
||||||
os.makedirs(vectorstore)
|
|
||||||
with open(vectorstore + "index.faiss", "wb") as f:
|
|
||||||
f.write(r.content)
|
|
||||||
|
|
||||||
# download the store
|
|
||||||
r = requests.get(base_path + vectorstore + "index.pkl")
|
|
||||||
with open(vectorstore + "index.pkl", "wb") as f:
|
|
||||||
f.write(r.content)
|
|
||||||
|
|
||||||
return {"status": 'loaded'}
|
|
||||||
|
|
||||||
|
|
||||||
@app.route("/api/feedback", methods=["POST"])
|
|
||||||
def api_feedback():
|
|
||||||
data = request.get_json()
|
|
||||||
question = data["question"]
|
|
||||||
answer = data["answer"]
|
|
||||||
feedback = data["feedback"]
|
|
||||||
|
|
||||||
print('-' * 5)
|
|
||||||
print("Question: " + question)
|
|
||||||
print("Answer: " + answer)
|
|
||||||
print("Feedback: " + feedback)
|
|
||||||
print('-' * 5)
|
|
||||||
response = requests.post(
|
|
||||||
url="https://86x89umx77.execute-api.eu-west-2.amazonaws.com/docsgpt-feedback",
|
|
||||||
headers={
|
|
||||||
"Content-Type": "application/json; charset=utf-8",
|
|
||||||
},
|
|
||||||
data=json.dumps({
|
|
||||||
"answer": answer,
|
|
||||||
"question": question,
|
|
||||||
"feedback": feedback
|
|
||||||
})
|
|
||||||
)
|
|
||||||
return {"status": http.client.responses.get(response.status_code, 'ok')}
|
|
||||||
|
|
||||||
|
|
||||||
@app.route('/api/combine', methods=['GET'])
|
|
||||||
def combined_json():
|
|
||||||
user = 'local'
|
|
||||||
"""Provide json file with combined available indexes."""
|
|
||||||
# get json from https://d3dg1063dc54p9.cloudfront.net/combined.json
|
|
||||||
|
|
||||||
data = [{
|
|
||||||
"name": 'default',
|
|
||||||
"language": 'default',
|
|
||||||
"version": '',
|
|
||||||
"description": 'default',
|
|
||||||
"fullName": 'default',
|
|
||||||
"date": 'default',
|
|
||||||
"docLink": 'default',
|
|
||||||
"model": settings.EMBEDDINGS_NAME,
|
|
||||||
"location": "local"
|
|
||||||
}]
|
|
||||||
# structure: name, language, version, description, fullName, date, docLink
|
|
||||||
# append data from vectors_collection
|
|
||||||
for index in vectors_collection.find({'user': user}):
|
|
||||||
data.append({
|
|
||||||
"name": index['name'],
|
|
||||||
"language": index['language'],
|
|
||||||
"version": '',
|
|
||||||
"description": index['name'],
|
|
||||||
"fullName": index['name'],
|
|
||||||
"date": index['date'],
|
|
||||||
"docLink": index['location'],
|
|
||||||
"model": settings.EMBEDDINGS_NAME,
|
|
||||||
"location": "local"
|
|
||||||
})
|
|
||||||
|
|
||||||
data_remote = requests.get("https://d3dg1063dc54p9.cloudfront.net/combined.json").json()
|
|
||||||
for index in data_remote:
|
|
||||||
index['location'] = "remote"
|
|
||||||
data.append(index)
|
|
||||||
|
|
||||||
return jsonify(data)
|
|
||||||
|
|
||||||
|
|
||||||
@app.route('/api/upload', methods=['POST'])
|
|
||||||
def upload_file():
|
|
||||||
"""Upload a file to get vectorized and indexed."""
|
|
||||||
if 'user' not in request.form:
|
|
||||||
return {"status": 'no user'}
|
|
||||||
user = secure_filename(request.form['user'])
|
|
||||||
if 'name' not in request.form:
|
|
||||||
return {"status": 'no name'}
|
|
||||||
job_name = secure_filename(request.form['name'])
|
|
||||||
# check if the post request has the file part
|
|
||||||
if 'file' not in request.files:
|
|
||||||
print('No file part')
|
|
||||||
return {"status": 'no file'}
|
|
||||||
file = request.files['file']
|
|
||||||
if file.filename == '':
|
|
||||||
return {"status": 'no file name'}
|
|
||||||
|
|
||||||
if file:
|
|
||||||
filename = secure_filename(file.filename)
|
|
||||||
# save dir
|
|
||||||
save_dir = os.path.join(app.config['UPLOAD_FOLDER'], user, job_name)
|
|
||||||
# create dir if not exists
|
|
||||||
if not os.path.exists(save_dir):
|
|
||||||
os.makedirs(save_dir)
|
|
||||||
|
|
||||||
file.save(os.path.join(save_dir, filename))
|
|
||||||
task = ingest.delay('temp', [".rst", ".md", ".pdf", ".txt"], job_name, filename, user)
|
|
||||||
# task id
|
|
||||||
task_id = task.id
|
|
||||||
return {"status": 'ok', "task_id": task_id}
|
|
||||||
else:
|
|
||||||
return {"status": 'error'}
|
|
||||||
|
|
||||||
|
|
||||||
@app.route('/api/task_status', methods=['GET'])
|
|
||||||
def task_status():
|
|
||||||
"""Get celery job status."""
|
|
||||||
task_id = request.args.get('task_id')
|
|
||||||
task = AsyncResult(task_id)
|
|
||||||
task_meta = task.info
|
|
||||||
return {"status": task.status, "result": task_meta}
|
|
||||||
|
|
||||||
|
|
||||||
### Backgound task api
|
|
||||||
@app.route('/api/upload_index', methods=['POST'])
|
|
||||||
def upload_index_files():
|
|
||||||
"""Upload two files(index.faiss, index.pkl) to the user's folder."""
|
|
||||||
if 'user' not in request.form:
|
|
||||||
return {"status": 'no user'}
|
|
||||||
user = secure_filename(request.form['user'])
|
|
||||||
if 'name' not in request.form:
|
|
||||||
return {"status": 'no name'}
|
|
||||||
job_name = secure_filename(request.form['name'])
|
|
||||||
if 'file_faiss' not in request.files:
|
|
||||||
print('No file part')
|
|
||||||
return {"status": 'no file'}
|
|
||||||
file_faiss = request.files['file_faiss']
|
|
||||||
if file_faiss.filename == '':
|
|
||||||
return {"status": 'no file name'}
|
|
||||||
if 'file_pkl' not in request.files:
|
|
||||||
print('No file part')
|
|
||||||
return {"status": 'no file'}
|
|
||||||
file_pkl = request.files['file_pkl']
|
|
||||||
if file_pkl.filename == '':
|
|
||||||
return {"status": 'no file name'}
|
|
||||||
|
|
||||||
# saves index files
|
|
||||||
save_dir = os.path.join('indexes', user, job_name)
|
|
||||||
if not os.path.exists(save_dir):
|
|
||||||
os.makedirs(save_dir)
|
|
||||||
file_faiss.save(os.path.join(save_dir, 'index.faiss'))
|
|
||||||
file_pkl.save(os.path.join(save_dir, 'index.pkl'))
|
|
||||||
# create entry in vectors_collection
|
|
||||||
vectors_collection.insert_one({
|
|
||||||
"user": user,
|
|
||||||
"name": job_name,
|
|
||||||
"language": job_name,
|
|
||||||
"location": save_dir,
|
|
||||||
"date": datetime.datetime.now().strftime("%d/%m/%Y %H:%M:%S"),
|
|
||||||
"model": settings.EMBEDDINGS_NAME,
|
|
||||||
"type": "local"
|
|
||||||
})
|
|
||||||
return {"status": 'ok'}
|
|
||||||
|
|
||||||
|
|
||||||
@app.route('/api/download', methods=['get'])
|
|
||||||
def download_file():
|
|
||||||
user = secure_filename(request.args.get('user'))
|
|
||||||
job_name = secure_filename(request.args.get('name'))
|
|
||||||
filename = secure_filename(request.args.get('file'))
|
|
||||||
save_dir = os.path.join(app.config['UPLOAD_FOLDER'], user, job_name)
|
|
||||||
return send_from_directory(save_dir, filename, as_attachment=True)
|
|
||||||
|
|
||||||
|
|
||||||
@app.route('/api/delete_old', methods=['get'])
|
|
||||||
def delete_old():
|
|
||||||
"""Delete old indexes."""
|
|
||||||
import shutil
|
|
||||||
path = request.args.get('path')
|
|
||||||
dirs = path.split('/')
|
|
||||||
dirs_clean = []
|
|
||||||
for i in range(1, len(dirs)):
|
|
||||||
dirs_clean.append(secure_filename(dirs[i]))
|
|
||||||
# check that path strats with indexes or vectors
|
|
||||||
if dirs[0] not in ['indexes', 'vectors']:
|
|
||||||
return {"status": 'error'}
|
|
||||||
path_clean = '/'.join(dirs)
|
|
||||||
vectors_collection.delete_one({'location': path})
|
|
||||||
try:
|
|
||||||
shutil.rmtree(path_clean)
|
|
||||||
except FileNotFoundError:
|
|
||||||
pass
|
|
||||||
return {"status": 'ok'}
|
|
||||||
|
|
||||||
|
|
||||||
# handling CORS
|
|
||||||
@app.after_request
|
@app.after_request
|
||||||
def after_request(response):
|
def after_request(response):
|
||||||
response.headers.add('Access-Control-Allow-Origin', '*')
|
response.headers.add("Access-Control-Allow-Origin", "*")
|
||||||
response.headers.add('Access-Control-Allow-Headers', 'Content-Type,Authorization')
|
response.headers.add("Access-Control-Allow-Headers", "Content-Type, Authorization")
|
||||||
response.headers.add('Access-Control-Allow-Methods', 'GET,PUT,POST,DELETE,OPTIONS')
|
response.headers.add(
|
||||||
response.headers.add('Access-Control-Allow-Credentials', 'true')
|
"Access-Control-Allow-Methods", "GET, POST, PUT, DELETE, OPTIONS"
|
||||||
|
)
|
||||||
return response
|
return response
|
||||||
|
|
||||||
|
|
||||||
if __name__ == "__main__":
|
if __name__ == "__main__":
|
||||||
app.run(debug=True, port=5001)
|
app.run(debug=settings.FLASK_DEBUG_MODE, port=7091)
|
||||||
|
|||||||
28
application/auth.py
Normal file
28
application/auth.py
Normal file
@@ -0,0 +1,28 @@
|
|||||||
|
from jose import jwt
|
||||||
|
|
||||||
|
from application.core.settings import settings
|
||||||
|
|
||||||
|
|
||||||
|
def handle_auth(request, data={}):
|
||||||
|
if settings.AUTH_TYPE in ["simple_jwt", "session_jwt"]:
|
||||||
|
jwt_token = request.headers.get("Authorization")
|
||||||
|
if not jwt_token:
|
||||||
|
return None
|
||||||
|
|
||||||
|
jwt_token = jwt_token.replace("Bearer ", "")
|
||||||
|
|
||||||
|
try:
|
||||||
|
decoded_token = jwt.decode(
|
||||||
|
jwt_token,
|
||||||
|
settings.JWT_SECRET_KEY,
|
||||||
|
algorithms=["HS256"],
|
||||||
|
options={"verify_exp": False},
|
||||||
|
)
|
||||||
|
return decoded_token
|
||||||
|
except Exception as e:
|
||||||
|
return {
|
||||||
|
"message": f"Authentication error: {str(e)}",
|
||||||
|
"error": "invalid_token",
|
||||||
|
}
|
||||||
|
else:
|
||||||
|
return {"sub": "local"}
|
||||||
117
application/cache.py
Normal file
117
application/cache.py
Normal file
@@ -0,0 +1,117 @@
|
|||||||
|
import json
|
||||||
|
import logging
|
||||||
|
import time
|
||||||
|
from threading import Lock
|
||||||
|
|
||||||
|
import redis
|
||||||
|
|
||||||
|
from application.core.settings import settings
|
||||||
|
from application.utils import get_hash
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
_redis_instance = None
|
||||||
|
_redis_creation_failed = False
|
||||||
|
_instance_lock = Lock()
|
||||||
|
|
||||||
|
def get_redis_instance():
|
||||||
|
global _redis_instance, _redis_creation_failed
|
||||||
|
if _redis_instance is None and not _redis_creation_failed:
|
||||||
|
with _instance_lock:
|
||||||
|
if _redis_instance is None and not _redis_creation_failed:
|
||||||
|
try:
|
||||||
|
_redis_instance = redis.Redis.from_url(
|
||||||
|
settings.CACHE_REDIS_URL, socket_connect_timeout=2
|
||||||
|
)
|
||||||
|
except ValueError as e:
|
||||||
|
logger.error(f"Invalid Redis URL: {e}")
|
||||||
|
_redis_creation_failed = True # Stop future attempts
|
||||||
|
_redis_instance = None
|
||||||
|
except redis.ConnectionError as e:
|
||||||
|
logger.error(f"Redis connection error: {e}")
|
||||||
|
_redis_instance = None # Keep trying for connection errors
|
||||||
|
return _redis_instance
|
||||||
|
|
||||||
|
|
||||||
|
def gen_cache_key(messages, model="docgpt", tools=None):
|
||||||
|
if not all(isinstance(msg, dict) for msg in messages):
|
||||||
|
raise ValueError("All messages must be dictionaries.")
|
||||||
|
messages_str = json.dumps(messages)
|
||||||
|
tools_str = json.dumps(str(tools)) if tools else ""
|
||||||
|
combined = f"{model}_{messages_str}_{tools_str}"
|
||||||
|
cache_key = get_hash(combined)
|
||||||
|
return cache_key
|
||||||
|
|
||||||
|
|
||||||
|
def gen_cache(func):
|
||||||
|
def wrapper(self, model, messages, stream, tools=None, *args, **kwargs):
|
||||||
|
if tools is not None:
|
||||||
|
return func(self, model, messages, stream, tools, *args, **kwargs)
|
||||||
|
|
||||||
|
try:
|
||||||
|
cache_key = gen_cache_key(messages, model, tools)
|
||||||
|
except ValueError as e:
|
||||||
|
logger.error(f"Cache key generation failed: {e}")
|
||||||
|
return func(self, model, messages, stream, tools, *args, **kwargs)
|
||||||
|
|
||||||
|
redis_client = get_redis_instance()
|
||||||
|
if redis_client:
|
||||||
|
try:
|
||||||
|
cached_response = redis_client.get(cache_key)
|
||||||
|
if cached_response:
|
||||||
|
return cached_response.decode("utf-8")
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error getting cached response: {e}", exc_info=True)
|
||||||
|
|
||||||
|
result = func(self, model, messages, stream, tools, *args, **kwargs)
|
||||||
|
if redis_client and isinstance(result, str):
|
||||||
|
try:
|
||||||
|
redis_client.set(cache_key, result, ex=1800)
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error setting cache: {e}", exc_info=True)
|
||||||
|
|
||||||
|
return result
|
||||||
|
|
||||||
|
return wrapper
|
||||||
|
|
||||||
|
|
||||||
|
def stream_cache(func):
|
||||||
|
def wrapper(self, model, messages, stream, tools=None, *args, **kwargs):
|
||||||
|
if tools is not None:
|
||||||
|
yield from func(self, model, messages, stream, tools, *args, **kwargs)
|
||||||
|
return
|
||||||
|
|
||||||
|
try:
|
||||||
|
cache_key = gen_cache_key(messages, model, tools)
|
||||||
|
except ValueError as e:
|
||||||
|
logger.error(f"Cache key generation failed: {e}")
|
||||||
|
yield from func(self, model, messages, stream, tools, *args, **kwargs)
|
||||||
|
return
|
||||||
|
|
||||||
|
redis_client = get_redis_instance()
|
||||||
|
if redis_client:
|
||||||
|
try:
|
||||||
|
cached_response = redis_client.get(cache_key)
|
||||||
|
if cached_response:
|
||||||
|
logger.info(f"Cache hit for stream key: {cache_key}")
|
||||||
|
cached_response = json.loads(cached_response.decode("utf-8"))
|
||||||
|
for chunk in cached_response:
|
||||||
|
yield chunk
|
||||||
|
time.sleep(0.03) # Simulate streaming delay
|
||||||
|
return
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error getting cached stream: {e}", exc_info=True)
|
||||||
|
|
||||||
|
stream_cache_data = []
|
||||||
|
for chunk in func(self, model, messages, stream, tools, *args, **kwargs):
|
||||||
|
yield chunk
|
||||||
|
stream_cache_data.append(str(chunk))
|
||||||
|
|
||||||
|
if redis_client:
|
||||||
|
try:
|
||||||
|
redis_client.set(cache_key, json.dumps(stream_cache_data), ex=1800)
|
||||||
|
logger.info(f"Stream cache saved for key: {cache_key}")
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error setting stream cache: {e}", exc_info=True)
|
||||||
|
|
||||||
|
return wrapper
|
||||||
23
application/celery_init.py
Normal file
23
application/celery_init.py
Normal file
@@ -0,0 +1,23 @@
|
|||||||
|
from celery import Celery
|
||||||
|
from application.core.settings import settings
|
||||||
|
from celery.signals import setup_logging
|
||||||
|
|
||||||
|
|
||||||
|
def make_celery(app_name=__name__):
|
||||||
|
celery = Celery(
|
||||||
|
app_name,
|
||||||
|
broker=settings.CELERY_BROKER_URL,
|
||||||
|
backend=settings.CELERY_RESULT_BACKEND,
|
||||||
|
)
|
||||||
|
celery.conf.update(settings)
|
||||||
|
return celery
|
||||||
|
|
||||||
|
|
||||||
|
@setup_logging.connect
|
||||||
|
def config_loggers(*args, **kwargs):
|
||||||
|
from application.core.logging_config import setup_logging
|
||||||
|
|
||||||
|
setup_logging()
|
||||||
|
|
||||||
|
|
||||||
|
celery = make_celery()
|
||||||
22
application/core/logging_config.py
Normal file
22
application/core/logging_config.py
Normal file
@@ -0,0 +1,22 @@
|
|||||||
|
from logging.config import dictConfig
|
||||||
|
|
||||||
|
def setup_logging():
|
||||||
|
dictConfig({
|
||||||
|
'version': 1,
|
||||||
|
'formatters': {
|
||||||
|
'default': {
|
||||||
|
'format': '[%(asctime)s] %(levelname)s in %(module)s: %(message)s',
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"handlers": {
|
||||||
|
"console": {
|
||||||
|
"class": "logging.StreamHandler",
|
||||||
|
"stream": "ext://sys.stdout",
|
||||||
|
"formatter": "default",
|
||||||
|
}
|
||||||
|
},
|
||||||
|
'root': {
|
||||||
|
'level': 'INFO',
|
||||||
|
'handlers': ['console'],
|
||||||
|
},
|
||||||
|
})
|
||||||
24
application/core/mongo_db.py
Normal file
24
application/core/mongo_db.py
Normal file
@@ -0,0 +1,24 @@
|
|||||||
|
from application.core.settings import settings
|
||||||
|
from pymongo import MongoClient
|
||||||
|
|
||||||
|
|
||||||
|
class MongoDB:
|
||||||
|
_client = None
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def get_client(cls):
|
||||||
|
"""
|
||||||
|
Get the MongoDB client instance, creating it if necessary.
|
||||||
|
"""
|
||||||
|
if cls._client is None:
|
||||||
|
cls._client = MongoClient(settings.MONGO_URI)
|
||||||
|
return cls._client
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def close_client(cls):
|
||||||
|
"""
|
||||||
|
Close the MongoDB client connection.
|
||||||
|
"""
|
||||||
|
if cls._client is not None:
|
||||||
|
cls._client.close()
|
||||||
|
cls._client = None
|
||||||
@@ -1,21 +1,122 @@
|
|||||||
|
import os
|
||||||
from pathlib import Path
|
from pathlib import Path
|
||||||
|
from typing import Optional
|
||||||
|
|
||||||
from pydantic import BaseSettings
|
from pydantic_settings import BaseSettings
|
||||||
|
|
||||||
|
current_dir = os.path.dirname(
|
||||||
|
os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
class Settings(BaseSettings):
|
class Settings(BaseSettings):
|
||||||
LLM_NAME: str = "openai_chat"
|
AUTH_TYPE: Optional[str] = None # simple_jwt, session_jwt, or None
|
||||||
EMBEDDINGS_NAME: str = "openai_text-embedding-ada-002"
|
LLM_PROVIDER: str = "docsgpt"
|
||||||
|
LLM_NAME: Optional[str] = (
|
||||||
|
None # if LLM_PROVIDER is openai, LLM_NAME can be gpt-4 or gpt-3.5-turbo
|
||||||
|
)
|
||||||
|
EMBEDDINGS_NAME: str = "huggingface_sentence-transformers/all-mpnet-base-v2"
|
||||||
CELERY_BROKER_URL: str = "redis://localhost:6379/0"
|
CELERY_BROKER_URL: str = "redis://localhost:6379/0"
|
||||||
CELERY_RESULT_BACKEND: str = "redis://localhost:6379/1"
|
CELERY_RESULT_BACKEND: str = "redis://localhost:6379/1"
|
||||||
MONGO_URI: str = "mongodb://localhost:27017/docsgpt"
|
MONGO_URI: str = "mongodb://localhost:27017/docsgpt"
|
||||||
MODEL_PATH: str = "./models/gpt4all-model.bin"
|
MONGO_DB_NAME: str = "docsgpt"
|
||||||
TOKENS_MAX_HISTORY: int = 150
|
LLM_PATH: str = os.path.join(current_dir, "models/docsgpt-7b-f16.gguf")
|
||||||
|
DEFAULT_MAX_HISTORY: int = 150
|
||||||
|
LLM_TOKEN_LIMITS: dict = {
|
||||||
|
"gpt-4o-mini": 128000,
|
||||||
|
"gpt-3.5-turbo": 4096,
|
||||||
|
"claude-2": 1e5,
|
||||||
|
"gemini-2.5-flash": 1e6,
|
||||||
|
}
|
||||||
|
UPLOAD_FOLDER: str = "inputs"
|
||||||
|
PARSE_PDF_AS_IMAGE: bool = False
|
||||||
|
PARSE_IMAGE_REMOTE: bool = False
|
||||||
|
VECTOR_STORE: str = (
|
||||||
|
"faiss" # "faiss" or "elasticsearch" or "qdrant" or "milvus" or "lancedb"
|
||||||
|
)
|
||||||
|
RETRIEVERS_ENABLED: list = ["classic_rag"]
|
||||||
|
AGENT_NAME: str = "classic"
|
||||||
|
FALLBACK_LLM_PROVIDER: Optional[str] = None # provider for fallback llm
|
||||||
|
FALLBACK_LLM_NAME: Optional[str] = None # model name for fallback llm
|
||||||
|
FALLBACK_LLM_API_KEY: Optional[str] = None # api key for fallback llm
|
||||||
|
|
||||||
API_URL: str = "http://localhost:5001" # backend url for celery worker
|
# Google Drive integration
|
||||||
|
GOOGLE_CLIENT_ID: Optional[str] = None # Replace with your actual Google OAuth client ID
|
||||||
|
GOOGLE_CLIENT_SECRET: Optional[str] = None# Replace with your actual Google OAuth client secret
|
||||||
|
CONNECTOR_REDIRECT_BASE_URI: Optional[str] = "http://127.0.0.1:7091/api/connectors/callback" ##add redirect url as it is to your provider's console(gcp)
|
||||||
|
|
||||||
API_KEY: str = None # LLM api key
|
|
||||||
EMBEDDINGS_KEY: str = None # api key for embeddings (if using openai, just copy API_KEY
|
# LLM Cache
|
||||||
|
CACHE_REDIS_URL: str = "redis://localhost:6379/2"
|
||||||
|
|
||||||
|
API_URL: str = "http://localhost:7091" # backend url for celery worker
|
||||||
|
|
||||||
|
API_KEY: Optional[str] = None # LLM api key
|
||||||
|
EMBEDDINGS_KEY: Optional[str] = (
|
||||||
|
None # api key for embeddings (if using openai, just copy API_KEY)
|
||||||
|
)
|
||||||
|
OPENAI_API_BASE: Optional[str] = None # azure openai api base url
|
||||||
|
OPENAI_API_VERSION: Optional[str] = None # azure openai api version
|
||||||
|
AZURE_DEPLOYMENT_NAME: Optional[str] = None # azure deployment name for answering
|
||||||
|
AZURE_EMBEDDINGS_DEPLOYMENT_NAME: Optional[str] = (
|
||||||
|
None # azure deployment name for embeddings
|
||||||
|
)
|
||||||
|
OPENAI_BASE_URL: Optional[str] = (
|
||||||
|
None # openai base url for open ai compatable models
|
||||||
|
)
|
||||||
|
|
||||||
|
# elasticsearch
|
||||||
|
ELASTIC_CLOUD_ID: Optional[str] = None # cloud id for elasticsearch
|
||||||
|
ELASTIC_USERNAME: Optional[str] = None # username for elasticsearch
|
||||||
|
ELASTIC_PASSWORD: Optional[str] = None # password for elasticsearch
|
||||||
|
ELASTIC_URL: Optional[str] = None # url for elasticsearch
|
||||||
|
ELASTIC_INDEX: Optional[str] = "docsgpt" # index name for elasticsearch
|
||||||
|
|
||||||
|
# SageMaker config
|
||||||
|
SAGEMAKER_ENDPOINT: Optional[str] = None # SageMaker endpoint name
|
||||||
|
SAGEMAKER_REGION: Optional[str] = None # SageMaker region name
|
||||||
|
SAGEMAKER_ACCESS_KEY: Optional[str] = None # SageMaker access key
|
||||||
|
SAGEMAKER_SECRET_KEY: Optional[str] = None # SageMaker secret key
|
||||||
|
|
||||||
|
# prem ai project id
|
||||||
|
PREMAI_PROJECT_ID: Optional[str] = None
|
||||||
|
|
||||||
|
# Qdrant vectorstore config
|
||||||
|
QDRANT_COLLECTION_NAME: Optional[str] = "docsgpt"
|
||||||
|
QDRANT_LOCATION: Optional[str] = None
|
||||||
|
QDRANT_URL: Optional[str] = None
|
||||||
|
QDRANT_PORT: Optional[int] = 6333
|
||||||
|
QDRANT_GRPC_PORT: int = 6334
|
||||||
|
QDRANT_PREFER_GRPC: bool = False
|
||||||
|
QDRANT_HTTPS: Optional[bool] = None
|
||||||
|
QDRANT_API_KEY: Optional[str] = None
|
||||||
|
QDRANT_PREFIX: Optional[str] = None
|
||||||
|
QDRANT_TIMEOUT: Optional[float] = None
|
||||||
|
QDRANT_HOST: Optional[str] = None
|
||||||
|
QDRANT_PATH: Optional[str] = None
|
||||||
|
QDRANT_DISTANCE_FUNC: str = "Cosine"
|
||||||
|
|
||||||
|
# PGVector vectorstore config
|
||||||
|
PGVECTOR_CONNECTION_STRING: Optional[str] = None
|
||||||
|
# Milvus vectorstore config
|
||||||
|
MILVUS_COLLECTION_NAME: Optional[str] = "docsgpt"
|
||||||
|
MILVUS_URI: Optional[str] = "./milvus_local.db" # milvus lite version as default
|
||||||
|
MILVUS_TOKEN: Optional[str] = ""
|
||||||
|
|
||||||
|
# LanceDB vectorstore config
|
||||||
|
LANCEDB_PATH: str = "/tmp/lancedb" # Path where LanceDB stores its local data
|
||||||
|
LANCEDB_TABLE_NAME: Optional[str] = (
|
||||||
|
"docsgpts" # Name of the table to use for storing vectors
|
||||||
|
)
|
||||||
|
|
||||||
|
FLASK_DEBUG_MODE: bool = False
|
||||||
|
STORAGE_TYPE: str = "local" # local or s3
|
||||||
|
URL_STRATEGY: str = "backend" # backend or s3
|
||||||
|
|
||||||
|
JWT_SECRET_KEY: str = ""
|
||||||
|
|
||||||
|
# Encryption settings
|
||||||
|
ENCRYPTION_SECRET_KEY: str = "default-docsgpt-encryption-key"
|
||||||
|
|
||||||
|
|
||||||
path = Path(__file__).parent.parent.absolute()
|
path = Path(__file__).parent.parent.absolute()
|
||||||
|
|||||||
Binary file not shown.
Binary file not shown.
0
application/llm/__init__.py
Normal file
0
application/llm/__init__.py
Normal file
50
application/llm/anthropic.py
Normal file
50
application/llm/anthropic.py
Normal file
@@ -0,0 +1,50 @@
|
|||||||
|
from application.llm.base import BaseLLM
|
||||||
|
from application.core.settings import settings
|
||||||
|
|
||||||
|
|
||||||
|
class AnthropicLLM(BaseLLM):
|
||||||
|
|
||||||
|
def __init__(self, api_key=None, user_api_key=None, *args, **kwargs):
|
||||||
|
from anthropic import Anthropic, HUMAN_PROMPT, AI_PROMPT
|
||||||
|
|
||||||
|
super().__init__(*args, **kwargs)
|
||||||
|
self.api_key = (
|
||||||
|
api_key or settings.ANTHROPIC_API_KEY
|
||||||
|
) # If not provided, use a default from settings
|
||||||
|
self.user_api_key = user_api_key
|
||||||
|
self.anthropic = Anthropic(api_key=self.api_key)
|
||||||
|
self.HUMAN_PROMPT = HUMAN_PROMPT
|
||||||
|
self.AI_PROMPT = AI_PROMPT
|
||||||
|
|
||||||
|
def _raw_gen(
|
||||||
|
self, baseself, model, messages, stream=False, tools=None, max_tokens=300, **kwargs
|
||||||
|
):
|
||||||
|
context = messages[0]["content"]
|
||||||
|
user_question = messages[-1]["content"]
|
||||||
|
prompt = f"### Context \n {context} \n ### Question \n {user_question}"
|
||||||
|
if stream:
|
||||||
|
return self.gen_stream(model, prompt, stream, max_tokens, **kwargs)
|
||||||
|
|
||||||
|
completion = self.anthropic.completions.create(
|
||||||
|
model=model,
|
||||||
|
max_tokens_to_sample=max_tokens,
|
||||||
|
stream=stream,
|
||||||
|
prompt=f"{self.HUMAN_PROMPT} {prompt}{self.AI_PROMPT}",
|
||||||
|
)
|
||||||
|
return completion.completion
|
||||||
|
|
||||||
|
def _raw_gen_stream(
|
||||||
|
self, baseself, model, messages, stream=True, tools=None, max_tokens=300, **kwargs
|
||||||
|
):
|
||||||
|
context = messages[0]["content"]
|
||||||
|
user_question = messages[-1]["content"]
|
||||||
|
prompt = f"### Context \n {context} \n ### Question \n {user_question}"
|
||||||
|
stream_response = self.anthropic.completions.create(
|
||||||
|
model=model,
|
||||||
|
prompt=f"{self.HUMAN_PROMPT} {prompt}{self.AI_PROMPT}",
|
||||||
|
max_tokens_to_sample=max_tokens,
|
||||||
|
stream=True,
|
||||||
|
)
|
||||||
|
|
||||||
|
for completion in stream_response:
|
||||||
|
yield completion.completion
|
||||||
144
application/llm/base.py
Normal file
144
application/llm/base.py
Normal file
@@ -0,0 +1,144 @@
|
|||||||
|
import logging
|
||||||
|
from abc import ABC, abstractmethod
|
||||||
|
|
||||||
|
from application.cache import gen_cache, stream_cache
|
||||||
|
|
||||||
|
from application.core.settings import settings
|
||||||
|
from application.usage import gen_token_usage, stream_token_usage
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
class BaseLLM(ABC):
    """Abstract base for all LLM providers.

    Provides:
    - per-instance token usage accounting,
    - a lazily constructed fallback LLM (configured via settings),
    - decorator-wrapped `gen` / `gen_stream` entry points that dispatch to
      the provider-specific `_raw_gen` / `_raw_gen_stream` and retry once
      on the fallback LLM when the primary provider raises.
    """

    def __init__(
        self,
        decoded_token=None,
    ):
        # decoded_token: decoded auth/JWT payload; forwarded to the fallback
        # LLM so it operates on behalf of the same user.
        self.decoded_token = decoded_token
        # Mutable counters updated by the usage decorators.
        self.token_usage = {"prompt_tokens": 0, "generated_tokens": 0}
        self.fallback_provider = settings.FALLBACK_LLM_PROVIDER
        self.fallback_model_name = settings.FALLBACK_LLM_NAME
        self.fallback_llm_api_key = settings.FALLBACK_LLM_API_KEY
        # Cached fallback instance; created on first access of `fallback_llm`.
        self._fallback_llm = None

    @property
    def fallback_llm(self):
        """Lazy-loaded fallback LLM instance."""
        # Only attempt construction once settings name both a provider and a
        # model; a failed construction is logged and leaves the value None.
        if (
            self._fallback_llm is None
            and self.fallback_provider
            and self.fallback_model_name
        ):
            try:
                # Local import avoids a circular import with llm_creator.
                from application.llm.llm_creator import LLMCreator

                self._fallback_llm = LLMCreator.create_llm(
                    self.fallback_provider,
                    self.fallback_llm_api_key,
                    None,
                    self.decoded_token,
                )
            except Exception as e:
                logger.error(
                    f"Failed to initialize fallback LLM: {str(e)}", exc_info=True
                )
        return self._fallback_llm

    def _execute_with_fallback(
        self, method_name: str, decorators: list, *args, **kwargs
    ):
        """
        Unified method execution with fallback support.

        Args:
            method_name: Name of the raw method ('_raw_gen' or '_raw_gen_stream')
            decorators: List of decorators to apply
            *args: Positional arguments
            **kwargs: Keyword arguments
        """

        def decorated_method():
            # `method` is already bound to self; decorators are applied in
            # list order, so the LAST decorator in the list is outermost.
            method = getattr(self, method_name)
            for decorator in decorators:
                method = decorator(method)
            # Pass self again explicitly: the raw methods take an extra
            # `baseself` parameter and the decorators expect it as the first
            # positional argument. Intentional, not a duplication bug.
            return method(self, *args, **kwargs)

        try:
            return decorated_method()
        except Exception as e:
            # No configured/constructible fallback: surface the original error.
            if not self.fallback_llm:
                logger.error(f"Primary LLM failed and no fallback available: {str(e)}")
                raise
            logger.warning(
                f"Falling back to {self.fallback_provider}/{self.fallback_model_name}. Error: {str(e)}"
            )

            # Map '_raw_gen' -> 'gen' / '_raw_gen_stream' -> 'gen_stream' so
            # the fallback goes through its own decorated public entry point.
            fallback_method = getattr(
                self.fallback_llm, method_name.replace("_raw_", "")
            )
            return fallback_method(*args, **kwargs)

    def gen(self, model, messages, stream=False, tools=None, *args, **kwargs):
        """Non-streaming generation with caching, usage accounting and fallback."""
        decorators = [gen_token_usage, gen_cache]
        return self._execute_with_fallback(
            "_raw_gen",
            decorators,
            model=model,
            messages=messages,
            stream=stream,
            tools=tools,
            *args,
            **kwargs,
        )

    def gen_stream(self, model, messages, stream=True, tools=None, *args, **kwargs):
        """Streaming generation with caching, usage accounting and fallback."""
        decorators = [stream_cache, stream_token_usage]
        return self._execute_with_fallback(
            "_raw_gen_stream",
            decorators,
            model=model,
            messages=messages,
            stream=stream,
            tools=tools,
            *args,
            **kwargs,
        )

    @abstractmethod
    def _raw_gen(self, model, messages, stream, tools, *args, **kwargs):
        """Provider-specific non-streaming call. Implemented by subclasses."""
        pass

    @abstractmethod
    def _raw_gen_stream(self, model, messages, stream, *args, **kwargs):
        """Provider-specific streaming call (generator). Implemented by subclasses."""
        pass

    def supports_tools(self):
        # A provider advertises tool support by defining a callable
        # `_supports_tools`; the base class deliberately leaves it raising.
        return hasattr(self, "_supports_tools") and callable(
            getattr(self, "_supports_tools")
        )

    def _supports_tools(self):
        raise NotImplementedError("Subclass must implement _supports_tools method")

    def supports_structured_output(self):
        """Check if the LLM supports structured output/JSON schema enforcement"""
        return hasattr(self, "_supports_structured_output") and callable(
            getattr(self, "_supports_structured_output")
        )

    def _supports_structured_output(self):
        # Default: no structured output; providers override to return True.
        return False

    def prepare_structured_output_format(self, json_schema):
        """Prepare structured output format specific to the LLM provider"""
        # Base implementation ignores the schema and signals "unsupported".
        _ = json_schema
        return None

    def get_supported_attachment_types(self):
        """
        Return a list of MIME types supported by this LLM for file uploads.

        Returns:
            list: List of supported MIME types
        """
        return []
|
||||||
131
application/llm/docsgpt_provider.py
Normal file
131
application/llm/docsgpt_provider.py
Normal file
@@ -0,0 +1,131 @@
|
|||||||
|
import json
|
||||||
|
|
||||||
|
from application.core.settings import settings
|
||||||
|
from application.llm.base import BaseLLM
|
||||||
|
|
||||||
|
|
||||||
|
class DocsGPTAPILLM(BaseLLM):
    """LLM provider backed by the public DocsGPT OpenAI-compatible endpoint.

    Always talks to https://oai.arc53.com with the shared public key and the
    fixed model name "docsgpt"; the `api_key`/`model` arguments are kept only
    for interface compatibility with other providers.
    """

    def __init__(self, api_key=None, user_api_key=None, *args, **kwargs):
        # Local import so the module can be imported without openai installed.
        from openai import OpenAI

        super().__init__(*args, **kwargs)
        # Public shared credentials are intentional for this hosted endpoint.
        self.client = OpenAI(api_key="sk-docsgpt-public", base_url="https://oai.arc53.com")
        self.user_api_key = user_api_key
        self.api_key = api_key

    def _clean_messages_openai(self, messages):
        """Normalize internal message dicts into OpenAI chat-completions format.

        Handles three list-item shapes: plain text, `function_call` (emitted
        as an assistant message with `tool_calls`), and `function_response`
        (emitted as a `tool` role message). Raises ValueError on anything else.
        """
        cleaned_messages = []
        for message in messages:
            role = message.get("role")
            content = message.get("content")

            # Internal history uses Google-style "model"; OpenAI expects "assistant".
            if role == "model":
                role = "assistant"

            if role and content is not None:
                if isinstance(content, str):
                    cleaned_messages.append({"role": role, "content": content})
                elif isinstance(content, list):
                    for item in content:
                        if "text" in item:
                            cleaned_messages.append(
                                {"role": role, "content": item["text"]}
                            )
                        elif "function_call" in item:
                            tool_call = {
                                "id": item["function_call"]["call_id"],
                                "type": "function",
                                # OpenAI expects arguments as a JSON string.
                                "arguments" and "function": {
                                    "name": item["function_call"]["name"],
                                    "arguments": json.dumps(
                                        item["function_call"]["args"]
                                    ),
                                },
                            }
                            cleaned_messages.append(
                                {
                                    "role": "assistant",
                                    "content": None,
                                    "tool_calls": [tool_call],
                                }
                            )
                        elif "function_response" in item:
                            cleaned_messages.append(
                                {
                                    "role": "tool",
                                    "tool_call_id": item["function_response"][
                                        "call_id"
                                    ],
                                    "content": json.dumps(
                                        item["function_response"]["response"]["result"]
                                    ),
                                }
                            )
                        else:
                            raise ValueError(
                                f"Unexpected content dictionary format: {item}"
                            )
                else:
                    raise ValueError(f"Unexpected content type: {type(content)}")

        return cleaned_messages

    def _raw_gen(
        self,
        baseself,
        model,
        messages,
        stream=False,
        tools=None,
        engine=settings.AZURE_DEPLOYMENT_NAME,
        **kwargs,
    ):
        """Non-streaming chat completion.

        Returns the full first choice when tools are in play (the caller needs
        tool_calls metadata), otherwise just the message text.
        """
        messages = self._clean_messages_openai(messages)
        if tools:
            response = self.client.chat.completions.create(
                model="docsgpt",
                messages=messages,
                stream=stream,
                tools=tools,
                **kwargs,
            )
            return response.choices[0]
        else:
            response = self.client.chat.completions.create(
                model="docsgpt", messages=messages, stream=stream, **kwargs
            )
            return response.choices[0].message.content

    def _raw_gen_stream(
        self,
        baseself,
        model,
        messages,
        stream=True,
        tools=None,
        engine=settings.AZURE_DEPLOYMENT_NAME,
        **kwargs,
    ):
        """Streaming chat completion.

        Yields text deltas for normal content chunks; yields the raw choice
        object for chunks without text (e.g. tool-call deltas) so the caller
        can process them.
        """
        messages = self._clean_messages_openai(messages)
        if tools:
            response = self.client.chat.completions.create(
                model="docsgpt",
                messages=messages,
                stream=stream,
                tools=tools,
                **kwargs,
            )
        else:
            response = self.client.chat.completions.create(
                model="docsgpt", messages=messages, stream=stream, **kwargs
            )

        for line in response:
            # Guard empty `choices` (some stream chunks carry none).
            if len(line.choices) > 0 and line.choices[0].delta.content is not None and len(line.choices[0].delta.content) > 0:
                yield line.choices[0].delta.content
            elif len(line.choices) > 0:
                yield line.choices[0]

    def _supports_tools(self):
        # This provider supports OpenAI-style function calling.
        return True
|
||||||
464
application/llm/google_ai.py
Normal file
464
application/llm/google_ai.py
Normal file
@@ -0,0 +1,464 @@
|
|||||||
|
import json
|
||||||
|
import logging
|
||||||
|
|
||||||
|
from google import genai
|
||||||
|
from google.genai import types
|
||||||
|
|
||||||
|
from application.core.settings import settings
|
||||||
|
|
||||||
|
from application.llm.base import BaseLLM
|
||||||
|
from application.storage.storage_creator import StorageCreator
|
||||||
|
|
||||||
|
|
||||||
|
class GoogleLLM(BaseLLM):
    """LLM provider for Google Gemini via the `google-genai` SDK.

    Converts OpenAI-style message/tool/schema structures into google-genai
    `types` objects, supports file attachments via the Google Files API, and
    implements both streaming and non-streaming generation.
    """

    def __init__(self, api_key=None, user_api_key=None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.api_key = api_key
        self.user_api_key = user_api_key
        self.client = genai.Client(api_key=self.api_key)
        # Storage backend used to resolve attachment paths to local files.
        self.storage = StorageCreator.get_storage()

    def get_supported_attachment_types(self):
        """
        Return a list of MIME types supported by Google Gemini for file uploads.

        Returns:
            list: List of supported MIME types
        """
        return [
            "application/pdf",
            "image/png",
            "image/jpeg",
            "image/jpg",
            "image/webp",
            "image/gif",
        ]

    def prepare_messages_with_attachments(self, messages, attachments=None):
        """
        Process attachments using Google AI's file API for more efficient handling.

        Args:
            messages (list): List of message dictionaries.
            attachments (list): List of attachment dictionaries with content and metadata.

        Returns:
            list: Messages formatted with file references for Google AI API.
        """
        if not attachments:
            return messages

        # Shallow copy: the message dicts themselves are mutated in place.
        prepared_messages = messages.copy()

        # Find the user message to attach files to the last one
        user_message_index = None
        for i in range(len(prepared_messages) - 1, -1, -1):
            if prepared_messages[i].get("role") == "user":
                user_message_index = i
                break

        if user_message_index is None:
            # No user message at all: append an empty one to carry the files.
            user_message = {"role": "user", "content": []}
            prepared_messages.append(user_message)
            user_message_index = len(prepared_messages) - 1

        # Normalize content to a list of part dicts so files can be appended.
        if isinstance(prepared_messages[user_message_index].get("content"), str):
            text_content = prepared_messages[user_message_index]["content"]
            prepared_messages[user_message_index]["content"] = [
                {"type": "text", "text": text_content}
            ]
        elif not isinstance(prepared_messages[user_message_index].get("content"), list):
            prepared_messages[user_message_index]["content"] = []

        files = []
        for attachment in attachments:
            mime_type = attachment.get("mime_type")

            if mime_type in self.get_supported_attachment_types():
                try:
                    file_uri = self._upload_file_to_google(attachment)
                    logging.info(
                        f"GoogleLLM: Successfully uploaded file, got URI: {file_uri}"
                    )
                    files.append({"file_uri": file_uri, "mime_type": mime_type})
                except Exception as e:
                    logging.error(
                        f"GoogleLLM: Error uploading file: {e}", exc_info=True
                    )
                    # Upload failed: degrade gracefully to a text placeholder.
                    if "content" in attachment:
                        prepared_messages[user_message_index]["content"].append(
                            {
                                "type": "text",
                                "text": f"[File could not be processed: {attachment.get('path', 'unknown')}]",
                            }
                        )

        if files:
            logging.info(f"GoogleLLM: Adding {len(files)} files to message")
            prepared_messages[user_message_index]["content"].append({"files": files})

        return prepared_messages

    def _upload_file_to_google(self, attachment):
        """
        Upload a file to Google AI and return the file URI.

        Args:
            attachment (dict): Attachment dictionary with path and metadata.

        Returns:
            str: Google AI file URI for the uploaded file.
        """
        # Reuse a previously uploaded URI when one was persisted.
        if "google_file_uri" in attachment:
            return attachment["google_file_uri"]

        file_path = attachment.get("path")
        if not file_path:
            raise ValueError("No file path provided in attachment")

        if not self.storage.file_exists(file_path):
            raise FileNotFoundError(f"File not found: {file_path}")

        try:
            # process_file materializes the stored file locally and runs the
            # callback on the local path; the callback uploads it to Google.
            file_uri = self.storage.process_file(
                file_path,
                lambda local_path, **kwargs: self.client.files.upload(
                    file=local_path
                ).uri,
            )

            # Local import avoids importing Mongo at module load time.
            from application.core.mongo_db import MongoDB

            mongo = MongoDB.get_client()
            db = mongo[settings.MONGO_DB_NAME]
            attachments_collection = db["attachments"]
            # Cache the URI on the attachment record so re-uploads are skipped.
            if "_id" in attachment:
                attachments_collection.update_one(
                    {"_id": attachment["_id"]}, {"$set": {"google_file_uri": file_uri}}
                )

            return file_uri
        except Exception as e:
            logging.error(f"Error uploading file to Google AI: {e}", exc_info=True)
            raise

    def _clean_messages_google(self, messages):
        """Convert OpenAI format messages to Google AI format."""
        cleaned_messages = []
        for message in messages:
            role = message.get("role")
            content = message.get("content")

            # Google uses "model" for assistant turns; tool results are also
            # folded into "model" turns here.
            if role == "assistant":
                role = "model"
            elif role == "tool":
                role = "model"

            parts = []
            if role and content is not None:
                if isinstance(content, str):
                    parts = [types.Part.from_text(text=content)]
                elif isinstance(content, list):
                    for item in content:
                        if "text" in item:
                            parts.append(types.Part.from_text(text=item["text"]))
                        elif "function_call" in item:
                            parts.append(
                                types.Part.from_function_call(
                                    name=item["function_call"]["name"],
                                    args=item["function_call"]["args"],
                                )
                            )
                        elif "function_response" in item:
                            parts.append(
                                types.Part.from_function_response(
                                    name=item["function_response"]["name"],
                                    response=item["function_response"]["response"],
                                )
                            )
                        elif "files" in item:
                            for file_data in item["files"]:
                                parts.append(
                                    types.Part.from_uri(
                                        file_uri=file_data["file_uri"],
                                        mime_type=file_data["mime_type"],
                                    )
                                )
                        else:
                            raise ValueError(
                                f"Unexpected content dictionary format:{item}"
                            )
                else:
                    raise ValueError(f"Unexpected content type: {type(content)}")

            # Messages that produced no parts (e.g. roleless) are dropped.
            if parts:
                cleaned_messages.append(types.Content(role=role, parts=parts))

        return cleaned_messages

    def _clean_schema(self, schema_obj):
        """
        Recursively remove unsupported fields from schema objects
        and validate required properties.
        """
        if not isinstance(schema_obj, dict):
            return schema_obj
        # Fields the Gemini function-declaration schema accepts.
        allowed_fields = {
            "type",
            "description",
            "items",
            "properties",
            "required",
            "enum",
            "pattern",
            "minimum",
            "maximum",
            "nullable",
            "default",
        }

        cleaned = {}
        for key, value in schema_obj.items():
            if key not in allowed_fields:
                continue
            elif key == "type" and isinstance(value, str):
                # Gemini expects upper-case type names (e.g. "STRING").
                cleaned[key] = value.upper()
            elif isinstance(value, dict):
                cleaned[key] = self._clean_schema(value)
            elif isinstance(value, list):
                cleaned[key] = [self._clean_schema(item) for item in value]
            else:
                cleaned[key] = value

        # Validate that required properties actually exist in properties
        if "required" in cleaned and "properties" in cleaned:
            valid_required = []
            properties_keys = set(cleaned["properties"].keys())
            for required_prop in cleaned["required"]:
                if required_prop in properties_keys:
                    valid_required.append(required_prop)
            if valid_required:
                cleaned["required"] = valid_required
            else:
                cleaned.pop("required", None)
        elif "required" in cleaned and "properties" not in cleaned:
            cleaned.pop("required", None)

        return cleaned

    def _clean_tools_format(self, tools_list):
        """Convert OpenAI format tools to Google AI format."""
        genai_tools = []
        for tool_data in tools_list:
            # Non-"function" tool entries are silently skipped.
            if tool_data["type"] == "function":
                function = tool_data["function"]
                parameters = function["parameters"]
                properties = parameters.get("properties", {})

                if properties:
                    cleaned_properties = {}
                    for k, v in properties.items():
                        cleaned_properties[k] = self._clean_schema(v)

                    genai_function = dict(
                        name=function["name"],
                        description=function["description"],
                        parameters={
                            "type": "OBJECT",
                            "properties": cleaned_properties,
                            "required": (
                                parameters["required"]
                                if "required" in parameters
                                else []
                            ),
                        },
                    )
                else:
                    # Parameterless function: omit the parameters block entirely.
                    genai_function = dict(
                        name=function["name"],
                        description=function["description"],
                    )

                genai_tool = types.Tool(function_declarations=[genai_function])
                genai_tools.append(genai_tool)

        return genai_tools

    def _raw_gen(
        self,
        baseself,
        model,
        messages,
        stream=False,
        tools=None,
        formatting="openai",
        response_schema=None,
        **kwargs,
    ):
        """Generate content using Google AI API without streaming."""
        # NOTE(review): builds a fresh client instead of reusing self.client —
        # presumably intentional; confirm before consolidating.
        client = genai.Client(api_key=self.api_key)
        if formatting == "openai":
            messages = self._clean_messages_google(messages)
        config = types.GenerateContentConfig()
        # A leading system message becomes the system_instruction and is
        # removed from the contents list. Assumes messages is non-empty.
        if messages[0].role == "system":
            config.system_instruction = messages[0].parts[0].text
            messages = messages[1:]

        if tools:
            cleaned_tools = self._clean_tools_format(tools)
            config.tools = cleaned_tools

        # Add response schema for structured output if provided
        if response_schema:
            config.response_schema = response_schema
            config.response_mime_type = "application/json"

        response = client.models.generate_content(
            model=model,
            contents=messages,
            config=config,
        )

        # With tools the caller needs the full response (function calls);
        # otherwise only the text is returned.
        if tools:
            return response
        else:
            return response.text

    def _raw_gen_stream(
        self,
        baseself,
        model,
        messages,
        stream=True,
        tools=None,
        formatting="openai",
        response_schema=None,
        **kwargs,
    ):
        """Generate content using Google AI API with streaming."""
        client = genai.Client(api_key=self.api_key)
        if formatting == "openai":
            messages = self._clean_messages_google(messages)
        config = types.GenerateContentConfig()
        if messages[0].role == "system":
            config.system_instruction = messages[0].parts[0].text
            messages = messages[1:]

        if tools:
            cleaned_tools = self._clean_tools_format(tools)
            config.tools = cleaned_tools

        # Add response schema for structured output if provided
        if response_schema:
            config.response_schema = response_schema
            config.response_mime_type = "application/json"

        # Check if we have both tools and file attachments
        has_attachments = False
        for message in messages:
            for part in message.parts:
                if hasattr(part, "file_data") and part.file_data is not None:
                    has_attachments = True
                    break
            if has_attachments:
                break

        logging.info(
            f"GoogleLLM: Starting stream generation. Model: {model}, Messages: {json.dumps(messages, default=str)}, Has attachments: {has_attachments}"
        )

        response = client.models.generate_content_stream(
            model=model,
            contents=messages,
            config=config,
        )

        for chunk in response:
            if hasattr(chunk, "candidates") and chunk.candidates:
                for candidate in chunk.candidates:
                    if candidate.content and candidate.content.parts:
                        for part in candidate.content.parts:
                            # Function-call parts are yielded whole so the
                            # caller can execute the tool; text parts are
                            # yielded as plain strings.
                            if part.function_call:
                                yield part
                            elif part.text:
                                yield part.text
            elif hasattr(chunk, "text"):
                yield chunk.text

    def _supports_tools(self):
        """Return whether this LLM supports function calling."""
        return True

    def _supports_structured_output(self):
        """Return whether this LLM supports structured JSON output."""
        return True

    def prepare_structured_output_format(self, json_schema):
        """Convert JSON schema to Google AI structured output format."""
        if not json_schema:
            return None

        # JSON-schema primitive names -> Gemini upper-case type names.
        type_map = {
            "object": "OBJECT",
            "array": "ARRAY",
            "string": "STRING",
            "integer": "INTEGER",
            "number": "NUMBER",
            "boolean": "BOOLEAN",
        }

        def convert(schema):
            # Recursively translate one schema node; non-dict values pass through.
            if not isinstance(schema, dict):
                return schema

            result = {}
            schema_type = schema.get("type")
            if schema_type:
                result["type"] = type_map.get(schema_type.lower(), schema_type.upper())

            # Fields copied through unchanged when present.
            for key in [
                "description",
                "nullable",
                "enum",
                "minItems",
                "maxItems",
                "required",
                "propertyOrdering",
            ]:
                if key in schema:
                    result[key] = schema[key]

            if "format" in schema:
                format_value = schema["format"]
                if schema_type == "string":
                    # Gemini string formats: "date" is widened to "date-time";
                    # only "enum"/"date-time" pass through for strings.
                    if format_value == "date":
                        result["format"] = "date-time"
                    elif format_value in ["enum", "date-time"]:
                        result["format"] = format_value
                else:
                    result["format"] = format_value

            if "properties" in schema:
                result["properties"] = {
                    k: convert(v) for k, v in schema["properties"].items()
                }
                # Gemini honors propertyOrdering; default to insertion order.
                if "propertyOrdering" not in result and result.get("type") == "OBJECT":
                    result["propertyOrdering"] = list(result["properties"].keys())

            if "items" in schema:
                result["items"] = convert(schema["items"])

            for field in ["anyOf", "oneOf", "allOf"]:
                if field in schema:
                    result[field] = [convert(s) for s in schema[field]]

            return result

        try:
            return convert(json_schema)
        except Exception as e:
            logging.error(
                f"Error preparing structured output format for Google: {e}",
                exc_info=True,
            )
            return None
||||||
32
application/llm/groq.py
Normal file
32
application/llm/groq.py
Normal file
@@ -0,0 +1,32 @@
|
|||||||
|
from application.llm.base import BaseLLM
|
||||||
|
from openai import OpenAI
|
||||||
|
|
||||||
|
|
||||||
|
class GroqLLM(BaseLLM):
    """LLM provider for Groq's OpenAI-compatible chat-completions endpoint."""

    def __init__(self, api_key=None, user_api_key=None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.client = OpenAI(api_key=api_key, base_url="https://api.groq.com/openai/v1")
        self.api_key = api_key
        self.user_api_key = user_api_key

    def _raw_gen(self, baseself, model, messages, stream=False, tools=None, **kwargs):
        """Non-streaming generation.

        Returns the full first choice when tools are supplied (the caller
        needs tool_calls metadata), otherwise just the message text.
        """
        if tools:
            response = self.client.chat.completions.create(
                model=model, messages=messages, stream=stream, tools=tools, **kwargs
            )
            return response.choices[0]
        else:
            response = self.client.chat.completions.create(
                model=model, messages=messages, stream=stream, **kwargs
            )
            return response.choices[0].message.content

    def _raw_gen_stream(
        self, baseself, model, messages, stream=True, tools=None, **kwargs
    ):
        """Streaming generation yielding text deltas.

        Fixes vs. previous version:
        - `tools` was accepted but silently dropped, inconsistent with
          `_raw_gen` and the other providers in this module family; it is
          now forwarded when provided.
        - `line.choices[0]` was indexed without a guard; streamed chunks can
          arrive with an empty `choices` list, which raised IndexError. The
          loop now skips such chunks, matching the sibling provider's guard.
        """
        request_kwargs = dict(model=model, messages=messages, stream=stream, **kwargs)
        if tools:
            request_kwargs["tools"] = tools
        response = self.client.chat.completions.create(**request_kwargs)
        for line in response:
            # Skip choice-less chunks and chunks with no text delta.
            if line.choices and line.choices[0].delta.content is not None:
                yield line.choices[0].delta.content
|
||||||
0
application/llm/handlers/__init__.py
Normal file
0
application/llm/handlers/__init__.py
Normal file
351
application/llm/handlers/base.py
Normal file
351
application/llm/handlers/base.py
Normal file
@@ -0,0 +1,351 @@
|
|||||||
|
import logging
|
||||||
|
from abc import ABC, abstractmethod
|
||||||
|
from dataclasses import dataclass
|
||||||
|
from typing import Any, Dict, Generator, List, Optional, Union
|
||||||
|
|
||||||
|
from application.logging import build_stack_data
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
class ToolCall:
    """A single tool/function invocation requested by the LLM."""

    id: str
    name: str
    arguments: Union[str, Dict]
    index: Optional[int] = None

    @classmethod
    def from_dict(cls, data: Dict) -> "ToolCall":
        """Build a ToolCall from a plain dict, tolerating missing keys."""
        ident = data.get("id", "")
        fn_name = data.get("name", "")
        fn_args = data.get("arguments", {})
        return cls(id=ident, name=fn_name, arguments=fn_args, index=data.get("index"))
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
class LLMResponse:
    """Normalized LLM result: text content plus any requested tool calls."""

    content: str
    tool_calls: List[ToolCall]
    finish_reason: str
    raw_response: Any

    @property
    def requires_tool_call(self) -> bool:
        """True when the LLM stopped specifically to have tools executed."""
        if self.finish_reason != "tool_calls":
            return False
        return bool(self.tool_calls)
|
||||||
|
|
||||||
|
|
||||||
|
class LLMHandler(ABC):
|
||||||
|
"""Abstract base class for LLM handlers."""
|
||||||
|
|
||||||
|
def __init__(self):
|
||||||
|
self.llm_calls = []
|
||||||
|
self.tool_calls = []
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
def parse_response(self, response: Any) -> LLMResponse:
|
||||||
|
"""Parse raw LLM response into standardized format."""
|
||||||
|
pass
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
def create_tool_message(self, tool_call: ToolCall, result: Any) -> Dict:
|
||||||
|
"""Create a tool result message for the conversation history."""
|
||||||
|
pass
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
def _iterate_stream(self, response: Any) -> Generator:
|
||||||
|
"""Iterate through streaming response chunks."""
|
||||||
|
pass
|
||||||
|
|
||||||
|
def process_message_flow(
|
||||||
|
self,
|
||||||
|
agent,
|
||||||
|
initial_response,
|
||||||
|
tools_dict: Dict,
|
||||||
|
messages: List[Dict],
|
||||||
|
attachments: Optional[List] = None,
|
||||||
|
stream: bool = False,
|
||||||
|
) -> Union[str, Generator]:
|
||||||
|
"""
|
||||||
|
Main orchestration method for processing LLM message flow.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
agent: The agent instance
|
||||||
|
initial_response: Initial LLM response
|
||||||
|
tools_dict: Dictionary of available tools
|
||||||
|
messages: Conversation history
|
||||||
|
attachments: Optional attachments
|
||||||
|
stream: Whether to use streaming
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Final response or generator for streaming
|
||||||
|
"""
|
||||||
|
messages = self.prepare_messages(agent, messages, attachments)
|
||||||
|
|
||||||
|
if stream:
|
||||||
|
return self.handle_streaming(agent, initial_response, tools_dict, messages)
|
||||||
|
else:
|
||||||
|
return self.handle_non_streaming(
|
||||||
|
agent, initial_response, tools_dict, messages
|
||||||
|
)
|
||||||
|
|
||||||
|
def prepare_messages(
|
||||||
|
self, agent, messages: List[Dict], attachments: Optional[List] = None
|
||||||
|
) -> List[Dict]:
|
||||||
|
"""
|
||||||
|
Prepare messages with attachments and provider-specific formatting.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
agent: The agent instance
|
||||||
|
messages: Original messages
|
||||||
|
attachments: List of attachments
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Prepared messages list
|
||||||
|
"""
|
||||||
|
if not attachments:
|
||||||
|
return messages
|
||||||
|
logger.info(f"Preparing messages with {len(attachments)} attachments")
|
||||||
|
supported_types = agent.llm.get_supported_attachment_types()
|
||||||
|
|
||||||
|
supported_attachments = [
|
||||||
|
a for a in attachments if a.get("mime_type") in supported_types
|
||||||
|
]
|
||||||
|
unsupported_attachments = [
|
||||||
|
a for a in attachments if a.get("mime_type") not in supported_types
|
||||||
|
]
|
||||||
|
|
||||||
|
# Process supported attachments with the LLM's custom method
|
||||||
|
|
||||||
|
if supported_attachments:
|
||||||
|
logger.info(
|
||||||
|
f"Processing {len(supported_attachments)} supported attachments"
|
||||||
|
)
|
||||||
|
messages = agent.llm.prepare_messages_with_attachments(
|
||||||
|
messages, supported_attachments
|
||||||
|
)
|
||||||
|
# Process unsupported attachments with default method
|
||||||
|
|
||||||
|
if unsupported_attachments:
|
||||||
|
logger.info(
|
||||||
|
f"Processing {len(unsupported_attachments)} unsupported attachments"
|
||||||
|
)
|
||||||
|
messages = self._append_unsupported_attachments(
|
||||||
|
messages, unsupported_attachments
|
||||||
|
)
|
||||||
|
return messages
|
||||||
|
|
||||||
|
def _append_unsupported_attachments(
|
||||||
|
self, messages: List[Dict], attachments: List[Dict]
|
||||||
|
) -> List[Dict]:
|
||||||
|
"""
|
||||||
|
Default method to append unsupported attachment content to system prompt.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
messages: Current messages
|
||||||
|
attachments: List of unsupported attachments
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Updated messages list
|
||||||
|
"""
|
||||||
|
prepared_messages = messages.copy()
|
||||||
|
attachment_texts = []
|
||||||
|
|
||||||
|
for attachment in attachments:
|
||||||
|
logger.info(f"Adding attachment {attachment.get('id')} to context")
|
||||||
|
if "content" in attachment:
|
||||||
|
attachment_texts.append(
|
||||||
|
f"Attached file content:\n\n{attachment['content']}"
|
||||||
|
)
|
||||||
|
if attachment_texts:
|
||||||
|
combined_text = "\n\n".join(attachment_texts)
|
||||||
|
|
||||||
|
system_msg = next(
|
||||||
|
(msg for msg in prepared_messages if msg.get("role") == "system"),
|
||||||
|
{"role": "system", "content": ""},
|
||||||
|
)
|
||||||
|
|
||||||
|
if system_msg not in prepared_messages:
|
||||||
|
prepared_messages.insert(0, system_msg)
|
||||||
|
system_msg["content"] += f"\n\n{combined_text}"
|
||||||
|
return prepared_messages
|
||||||
|
|
||||||
|
    def handle_tool_calls(
        self, agent, tool_calls: List[ToolCall], tools_dict: Dict, messages: List[Dict]
    ) -> Generator:
        """
        Execute tool calls and update conversation history.

        This is a generator: events yielded by the tool executor are forwarded
        to the caller as they occur, and the updated message list is delivered
        as the generator's *return value* (i.e. via ``StopIteration.value``).

        Args:
            agent: The agent instance
            tool_calls: List of tool calls to execute
            tools_dict: Available tools dictionary
            messages: Current conversation history

        Returns:
            Updated messages list (as the generator return value)
        """
        updated_messages = messages.copy()

        for call in tool_calls:
            try:
                self.tool_calls.append(call)
                tool_executor_gen = agent._execute_tool_action(tools_dict, call)
                # Drain the executor generator by hand so we can both forward
                # its yielded events and capture its return value
                # (tool_response, call_id) from StopIteration.
                while True:
                    try:
                        yield next(tool_executor_gen)
                    except StopIteration as e:
                        tool_response, call_id = e.value
                        break
                # Record the assistant's function call in provider-neutral
                # form; _clean_messages_* converts it per provider later.
                updated_messages.append(
                    {
                        "role": "assistant",
                        "content": [
                            {
                                "function_call": {
                                    "name": call.name,
                                    "args": call.arguments,
                                    "call_id": call_id,
                                }
                            }
                        ],
                    }
                )

                # Pair the call with its result message (provider-specific
                # shape produced by create_tool_message).
                updated_messages.append(self.create_tool_message(call, tool_response))
            except Exception as e:
                logger.error(f"Error executing tool: {str(e)}", exc_info=True)
                # Still append an error tool-message so the conversation
                # history stays consistent for the follow-up LLM call.
                error_call = ToolCall(
                    id=call.id, name=call.name, arguments=call.arguments
                )
                error_response = f"Error executing tool: {str(e)}"
                error_message = self.create_tool_message(error_call, error_response)
                updated_messages.append(error_message)

                # Tool names are encoded as "<action>_<tool_id>"; split off the
                # trailing id to look up the human-readable tool name.
                call_parts = call.name.split("_")
                if len(call_parts) >= 2:
                    tool_id = call_parts[-1]  # Last part is tool ID (e.g., "1")
                    action_name = "_".join(call_parts[:-1])
                    tool_name = tools_dict.get(tool_id, {}).get("name", "unknown_tool")
                    full_action_name = f"{action_name}_{tool_id}"
                else:
                    tool_name = "unknown_tool"
                    action_name = call.name
                    full_action_name = call.name
                # Surface the failure to the stream consumer as a structured
                # tool_call event with status "error".
                yield {
                    "type": "tool_call",
                    "data": {
                        "tool_name": tool_name,
                        "call_id": call.id,
                        "action_name": full_action_name,
                        "arguments": call.arguments,
                        "error": error_response,
                        "status": "error",
                    },
                }
        return updated_messages
|
||||||
|
|
||||||
|
def handle_non_streaming(
|
||||||
|
self, agent, response: Any, tools_dict: Dict, messages: List[Dict]
|
||||||
|
) -> Generator:
|
||||||
|
"""
|
||||||
|
Handle non-streaming response flow.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
agent: The agent instance
|
||||||
|
response: Current LLM response
|
||||||
|
tools_dict: Available tools dictionary
|
||||||
|
messages: Conversation history
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Final response after processing all tool calls
|
||||||
|
"""
|
||||||
|
parsed = self.parse_response(response)
|
||||||
|
self.llm_calls.append(build_stack_data(agent.llm))
|
||||||
|
|
||||||
|
while parsed.requires_tool_call:
|
||||||
|
tool_handler_gen = self.handle_tool_calls(
|
||||||
|
agent, parsed.tool_calls, tools_dict, messages
|
||||||
|
)
|
||||||
|
while True:
|
||||||
|
try:
|
||||||
|
yield next(tool_handler_gen)
|
||||||
|
except StopIteration as e:
|
||||||
|
messages = e.value
|
||||||
|
break
|
||||||
|
response = agent.llm.gen(
|
||||||
|
model=agent.gpt_model, messages=messages, tools=agent.tools
|
||||||
|
)
|
||||||
|
parsed = self.parse_response(response)
|
||||||
|
self.llm_calls.append(build_stack_data(agent.llm))
|
||||||
|
return parsed.content
|
||||||
|
|
||||||
|
    def handle_streaming(
        self, agent, response: Any, tools_dict: Dict, messages: List[Dict]
    ) -> Generator:
        """
        Handle streaming response flow.

        Accumulates partial tool-call deltas across chunks (keyed by their
        stream index), executes the completed calls, then recursively streams
        the follow-up LLM response. Plain text content is yielded as it
        arrives.

        Args:
            agent: The agent instance
            response: Current LLM response
            tools_dict: Available tools dictionary
            messages: Conversation history

        Yields:
            Streaming response chunks
        """
        buffer = ""
        tool_calls = {}

        for chunk in self._iterate_stream(response):
            # Some providers yield ready-made strings; pass them straight on.
            if isinstance(chunk, str):
                yield chunk
                continue
            parsed = self.parse_response(chunk)

            if parsed.tool_calls:
                for call in parsed.tool_calls:
                    if call.index not in tool_calls:
                        tool_calls[call.index] = call
                    else:
                        # Merge this delta into the partially-built call:
                        # id/name overwrite when present, arguments append.
                        existing = tool_calls[call.index]
                        if call.id:
                            existing.id = call.id
                        if call.name:
                            existing.name = call.name
                        if call.arguments:
                            existing.arguments += call.arguments
            if parsed.finish_reason == "tool_calls":
                # All deltas received: run the accumulated calls, forwarding
                # their events and capturing the updated history from the
                # generator's StopIteration value.
                tool_handler_gen = self.handle_tool_calls(
                    agent, list(tool_calls.values()), tools_dict, messages
                )
                while True:
                    try:
                        yield next(tool_handler_gen)
                    except StopIteration as e:
                        messages = e.value
                        break
                tool_calls = {}

                response = agent.llm.gen_stream(
                    model=agent.gpt_model, messages=messages, tools=agent.tools
                )
                self.llm_calls.append(build_stack_data(agent.llm))

                # Recurse to stream the follow-up response (which may itself
                # request more tool calls), then stop consuming this stream.
                yield from self.handle_streaming(agent, response, tools_dict, messages)
                return
            if parsed.content:
                # NOTE(review): buffer is emptied immediately after each yield,
                # so it never spans chunks — presumably kept for symmetry or a
                # future batching change; confirm before simplifying.
                buffer += parsed.content
                yield buffer
                buffer = ""
            if parsed.finish_reason == "stop":
                return
|
||||||
78
application/llm/handlers/google.py
Normal file
78
application/llm/handlers/google.py
Normal file
@@ -0,0 +1,78 @@
|
|||||||
|
import uuid
|
||||||
|
from typing import Any, Dict, Generator
|
||||||
|
|
||||||
|
from application.llm.handlers.base import LLMHandler, LLMResponse, ToolCall
|
||||||
|
|
||||||
|
|
||||||
|
class GoogleLLMHandler(LLMHandler):
    """Handler for Google's GenAI API."""

    def parse_response(self, response: Any) -> LLMResponse:
        """Parse Google response into standardized format."""
        if isinstance(response, str):
            return LLMResponse(
                content=response,
                tool_calls=[],
                finish_reason="stop",
                raw_response=response,
            )

        if hasattr(response, "candidates"):
            # Full candidate responses: walk the first candidate's parts once,
            # collecting function calls and text fragments together.
            parts = response.candidates[0].content.parts if response.candidates else []

            calls = []
            texts = []
            for part in parts:
                if hasattr(part, "function_call") and part.function_call is not None:
                    calls.append(
                        ToolCall(
                            id=str(uuid.uuid4()),
                            name=part.function_call.name,
                            arguments=part.function_call.args,
                        )
                    )
                if hasattr(part, "text") and part.text is not None:
                    texts.append(part.text)

            return LLMResponse(
                content=" ".join(texts),
                tool_calls=calls,
                finish_reason="tool_calls" if calls else "stop",
                raw_response=response,
            )

        # Fallback shape: a bare object that may expose a single
        # function_call and/or a text attribute directly.
        calls = []
        if hasattr(response, "function_call"):
            calls.append(
                ToolCall(
                    id=str(uuid.uuid4()),
                    name=response.function_call.name,
                    arguments=response.function_call.args,
                )
            )
        return LLMResponse(
            content=getattr(response, "text", ""),
            tool_calls=calls,
            finish_reason="tool_calls" if calls else "stop",
            raw_response=response,
        )

    def create_tool_message(self, tool_call: ToolCall, result: Any) -> Dict:
        """Create Google-style tool message."""
        function_response = {
            "name": tool_call.name,
            "response": {"result": result},
        }
        return {
            "role": "model",
            "content": [{"function_response": function_response}],
        }

    def _iterate_stream(self, response: Any) -> Generator:
        """Iterate through Google streaming response."""
        yield from response
|
||||||
18
application/llm/handlers/handler_creator.py
Normal file
18
application/llm/handlers/handler_creator.py
Normal file
@@ -0,0 +1,18 @@
|
|||||||
|
from application.llm.handlers.base import LLMHandler
|
||||||
|
from application.llm.handlers.google import GoogleLLMHandler
|
||||||
|
from application.llm.handlers.openai import OpenAILLMHandler
|
||||||
|
|
||||||
|
|
||||||
|
class LLMHandlerCreator:
    """Factory mapping an LLM type string to its handler class."""

    handlers = {
        "openai": OpenAILLMHandler,
        "google": GoogleLLMHandler,
        "default": OpenAILLMHandler,
    }

    @classmethod
    def create_handler(cls, llm_type: str, *args, **kwargs) -> LLMHandler:
        """Instantiate the handler registered for ``llm_type``
        (case-insensitive), falling back to the OpenAI handler for
        unknown types."""
        handler_class = cls.handlers.get(llm_type.lower(), OpenAILLMHandler)
        return handler_class(*args, **kwargs)
|
||||||
57
application/llm/handlers/openai.py
Normal file
57
application/llm/handlers/openai.py
Normal file
@@ -0,0 +1,57 @@
|
|||||||
|
from typing import Any, Dict, Generator
|
||||||
|
|
||||||
|
from application.llm.handlers.base import LLMHandler, LLMResponse, ToolCall
|
||||||
|
|
||||||
|
|
||||||
|
class OpenAILLMHandler(LLMHandler):
    """Handler for OpenAI API."""

    def parse_response(self, response: Any) -> LLMResponse:
        """Parse OpenAI response into standardized format."""
        if isinstance(response, str):
            return LLMResponse(
                content=response,
                tool_calls=[],
                finish_reason="stop",
                raw_response=response,
            )

        # Non-streaming choices carry ``message``; streaming deltas carry
        # ``delta`` — whichever is present is the payload.
        message = getattr(response, "message", None) or getattr(response, "delta", None)

        parsed_calls = []
        if hasattr(message, "tool_calls"):
            for tc in message.tool_calls or []:
                parsed_calls.append(
                    ToolCall(
                        id=getattr(tc, "id", ""),
                        name=getattr(tc.function, "name", ""),
                        arguments=getattr(tc.function, "arguments", ""),
                        index=getattr(tc, "index", None),
                    )
                )
        return LLMResponse(
            content=getattr(message, "content", ""),
            tool_calls=parsed_calls,
            finish_reason=getattr(response, "finish_reason", ""),
            raw_response=response,
        )

    def create_tool_message(self, tool_call: ToolCall, result: Any) -> Dict:
        """Create OpenAI-style tool message."""
        function_response = {
            "name": tool_call.name,
            "response": {"result": result},
            "call_id": tool_call.id,
        }
        return {
            "role": "tool",
            "content": [{"function_response": function_response}],
        }

    def _iterate_stream(self, response: Any) -> Generator:
        """Iterate through OpenAI streaming response."""
        yield from response
|
||||||
68
application/llm/huggingface.py
Normal file
68
application/llm/huggingface.py
Normal file
@@ -0,0 +1,68 @@
|
|||||||
|
from application.llm.base import BaseLLM
|
||||||
|
|
||||||
|
|
||||||
|
class HuggingFaceLLM(BaseLLM):
    # Local text-generation backend built on a transformers pipeline wrapped
    # by langchain's HuggingFacePipeline. The pipeline is stored in the
    # module-level global ``hf`` (set in __init__), so only one model can be
    # active per process; a second instantiation replaces it for all users.

    def __init__(
        self,
        api_key=None,
        user_api_key=None,
        llm_name="Arc53/DocsGPT-7B",
        q=False,
        *args,
        **kwargs,
    ):
        """Load ``llm_name`` into a text-generation pipeline.

        Args:
            api_key: Stored on the instance; not used for local inference.
            user_api_key: Stored on the instance; not used for local inference.
            llm_name: HuggingFace model id or local path to load.
            q: When True, load the model 4-bit quantized (bitsandbytes NF4).
        """
        global hf

        from langchain.llms import HuggingFacePipeline

        if q:
            # Quantized path: 4-bit NF4 with double quantization, computing
            # in bfloat16 (requires bitsandbytes + a CUDA-capable setup).
            import torch
            from transformers import (
                AutoModelForCausalLM,
                AutoTokenizer,
                pipeline,
                BitsAndBytesConfig,
            )

            tokenizer = AutoTokenizer.from_pretrained(llm_name)
            bnb_config = BitsAndBytesConfig(
                load_in_4bit=True,
                bnb_4bit_use_double_quant=True,
                bnb_4bit_quant_type="nf4",
                bnb_4bit_compute_dtype=torch.bfloat16,
            )
            model = AutoModelForCausalLM.from_pretrained(
                llm_name, quantization_config=bnb_config
            )
        else:
            # Full-precision path.
            from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

            tokenizer = AutoTokenizer.from_pretrained(llm_name)
            model = AutoModelForCausalLM.from_pretrained(llm_name)

        super().__init__(*args, **kwargs)
        self.api_key = api_key
        self.user_api_key = user_api_key
        pipe = pipeline(
            "text-generation",
            model=model,
            tokenizer=tokenizer,
            max_new_tokens=2000,
            device_map="auto",
            eos_token_id=tokenizer.eos_token_id,
        )
        hf = HuggingFacePipeline(pipeline=pipe)

    def _raw_gen(self, baseself, model, messages, stream=False, **kwargs):
        """Generate a completion; the first message is treated as context and
        the last as the user question."""
        context = messages[0]["content"]
        user_question = messages[-1]["content"]
        prompt = f"### Instruction \n {user_question} \n ### Context \n {context} \n ### Answer \n"

        result = hf(prompt)

        # NOTE(review): langchain's HuggingFacePipeline.__call__ typically
        # returns a plain string, which has no ``.content`` attribute —
        # confirm this accessor against the pinned langchain version.
        return result.content

    def _raw_gen_stream(self, baseself, model, messages, stream=True, **kwargs):
        # Streaming is intentionally unsupported for the local HF backend.
        raise NotImplementedError("HuggingFaceLLM Streaming is not implemented yet.")
|
||||||
60
application/llm/llama_cpp.py
Normal file
60
application/llm/llama_cpp.py
Normal file
@@ -0,0 +1,60 @@
|
|||||||
|
from application.llm.base import BaseLLM
|
||||||
|
from application.core.settings import settings
|
||||||
|
import threading
|
||||||
|
|
||||||
|
|
||||||
|
class LlamaSingleton:
    """Process-wide cache of ``llama_cpp.Llama`` instances, one per model path.

    Models are expensive to load, so each is created lazily exactly once and
    shared; all creation and inference goes through ``_lock`` so concurrent
    callers are serialized.
    """

    _instances = {}
    _lock = threading.Lock()  # Guards both instance creation and model queries.

    @classmethod
    def get_instance(cls, llm_name):
        """Return the cached ``Llama`` for ``llm_name``, loading it on first use.

        Bug fix: the check-and-create now happens under ``_lock``; previously
        two threads could race past the membership test and load the same
        model twice.

        Raises:
            ImportError: If ``llama-cpp-python`` is not installed.
        """
        with cls._lock:
            if llm_name not in cls._instances:
                try:
                    from llama_cpp import Llama
                except ImportError:
                    raise ImportError(
                        "Please install llama_cpp using pip install llama-cpp-python"
                    )
                cls._instances[llm_name] = Llama(model_path=llm_name, n_ctx=2048)
            return cls._instances[llm_name]

    @classmethod
    def query_model(cls, llm, prompt, **kwargs):
        """Run ``llm(prompt, **kwargs)`` while holding the shared lock."""
        with cls._lock:
            return llm(prompt, **kwargs)
|
||||||
|
|
||||||
|
|
||||||
|
class LlamaCpp(BaseLLM):
    """LLM backed by a local llama.cpp model shared via ``LlamaSingleton``."""

    def __init__(
        self,
        api_key=None,
        user_api_key=None,
        llm_name=settings.LLM_PATH,
        *args,
        **kwargs,
    ):
        super().__init__(*args, **kwargs)
        self.api_key = api_key
        self.user_api_key = user_api_key
        self.llama = LlamaSingleton.get_instance(llm_name)

    @staticmethod
    def _build_prompt(messages):
        # The first message supplies the context, the last one the question.
        context = messages[0]["content"]
        user_question = messages[-1]["content"]
        return f"### Instruction \n {user_question} \n ### Context \n {context} \n ### Answer \n"

    def _raw_gen(self, baseself, model, messages, stream=False, **kwargs):
        """Generate a complete answer and return the text after the
        "### Answer" marker."""
        result = LlamaSingleton.query_model(
            self.llama, self._build_prompt(messages), max_tokens=150, echo=False
        )
        return result["choices"][0]["text"].split("### Answer \n")[-1]

    def _raw_gen_stream(self, baseself, model, messages, stream=True, **kwargs):
        """Yield answer text chunks as the model streams them."""
        result = LlamaSingleton.query_model(
            self.llama,
            self._build_prompt(messages),
            max_tokens=150,
            echo=False,
            stream=stream,
        )
        for item in result:
            for choice in item["choices"]:
                yield choice["text"]
|
||||||
35
application/llm/llm_creator.py
Normal file
35
application/llm/llm_creator.py
Normal file
@@ -0,0 +1,35 @@
|
|||||||
|
from application.llm.groq import GroqLLM
|
||||||
|
from application.llm.openai import OpenAILLM, AzureOpenAILLM
|
||||||
|
from application.llm.sagemaker import SagemakerAPILLM
|
||||||
|
from application.llm.huggingface import HuggingFaceLLM
|
||||||
|
from application.llm.llama_cpp import LlamaCpp
|
||||||
|
from application.llm.anthropic import AnthropicLLM
|
||||||
|
from application.llm.docsgpt_provider import DocsGPTAPILLM
|
||||||
|
from application.llm.premai import PremAILLM
|
||||||
|
from application.llm.google_ai import GoogleLLM
|
||||||
|
from application.llm.novita import NovitaLLM
|
||||||
|
|
||||||
|
|
||||||
|
class LLMCreator:
    """Factory mapping provider identifiers to LLM implementation classes."""

    llms = {
        "openai": OpenAILLM,
        "azure_openai": AzureOpenAILLM,
        "sagemaker": SagemakerAPILLM,
        "huggingface": HuggingFaceLLM,
        "llama.cpp": LlamaCpp,
        "anthropic": AnthropicLLM,
        "docsgpt": DocsGPTAPILLM,
        "premai": PremAILLM,
        "groq": GroqLLM,
        "google": GoogleLLM,
        "novita": NovitaLLM,
    }

    @classmethod
    def create_llm(cls, type, api_key, user_api_key, decoded_token, *args, **kwargs):
        """Instantiate the LLM registered under ``type`` (case-insensitive).

        Raises:
            ValueError: If no implementation is registered for ``type``.
        """
        llm_class = cls.llms.get(type.lower())
        if llm_class is None:
            raise ValueError(f"No LLM class found for type {type}")
        return llm_class(
            api_key, user_api_key, decoded_token=decoded_token, *args, **kwargs
        )
|
||||||
32
application/llm/novita.py
Normal file
32
application/llm/novita.py
Normal file
@@ -0,0 +1,32 @@
|
|||||||
|
from application.llm.base import BaseLLM
|
||||||
|
from openai import OpenAI
|
||||||
|
|
||||||
|
|
||||||
|
class NovitaLLM(BaseLLM):
    """LLM provider backed by Novita's OpenAI-compatible chat API."""

    def __init__(self, api_key=None, user_api_key=None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.client = OpenAI(
            api_key=api_key, base_url="https://api.novita.ai/v3/openai"
        )
        self.api_key = api_key
        self.user_api_key = user_api_key

    def _raw_gen(self, baseself, model, messages, stream=False, tools=None, **kwargs):
        """Run a chat completion.

        Returns the raw first choice when ``tools`` are supplied (so the
        caller can inspect tool calls), otherwise just the message text.
        """
        request_params = {
            "model": model,
            "messages": messages,
            "stream": stream,
            **kwargs,
        }
        if tools:
            request_params["tools"] = tools
        response = self.client.chat.completions.create(**request_params)
        if tools:
            return response.choices[0]
        return response.choices[0].message.content

    def _raw_gen_stream(
        self, baseself, model, messages, stream=True, tools=None, **kwargs
    ):
        """Yield content chunks from a streaming chat completion.

        Bug fix: ``tools`` was previously accepted but silently dropped;
        it is now forwarded to the API, matching ``_raw_gen``.
        """
        request_params = {
            "model": model,
            "messages": messages,
            "stream": stream,
            **kwargs,
        }
        if tools:
            request_params["tools"] = tools
        response = self.client.chat.completions.create(**request_params)
        for line in response:
            # Guard against keep-alive/usage chunks that carry no choices.
            if line.choices and line.choices[0].delta.content is not None:
                yield line.choices[0].delta.content
|
||||||
423
application/llm/openai.py
Normal file
423
application/llm/openai.py
Normal file
@@ -0,0 +1,423 @@
|
|||||||
|
import base64
|
||||||
|
import json
|
||||||
|
import logging
|
||||||
|
|
||||||
|
from application.core.settings import settings
|
||||||
|
from application.llm.base import BaseLLM
|
||||||
|
from application.storage.storage_creator import StorageCreator
|
||||||
|
|
||||||
|
|
||||||
|
class OpenAILLM(BaseLLM):
    """OpenAI chat-completions client.

    Supports plain and streaming generation, tool calling, strict structured
    output (JSON schema), and multimodal attachments: images are inlined as
    base64 ``image_url`` parts, PDFs are uploaded through OpenAI's file API.
    """

    def __init__(self, api_key=None, user_api_key=None, *args, **kwargs):
        from openai import OpenAI

        super().__init__(*args, **kwargs)
        # An explicitly configured, non-blank base URL wins; otherwise use
        # the public OpenAI endpoint.
        if (
            isinstance(settings.OPENAI_BASE_URL, str)
            and settings.OPENAI_BASE_URL.strip()
        ):
            self.client = OpenAI(api_key=api_key, base_url=settings.OPENAI_BASE_URL)
        else:
            DEFAULT_OPENAI_API_BASE = "https://api.openai.com/v1"
            self.client = OpenAI(api_key=api_key, base_url=DEFAULT_OPENAI_API_BASE)
        self.api_key = api_key
        self.user_api_key = user_api_key
        self.storage = StorageCreator.get_storage()

    def _clean_messages_openai(self, messages):
        """Normalize provider-neutral history into OpenAI's message schema.

        Converts the "model" role to "assistant", flattens list content into
        text / tool_call / tool-result messages, and passes typed multimodal
        parts (text, file, image_url) through as content parts. Messages
        missing a role or content are dropped.

        Raises:
            ValueError: For content of an unexpected type or dict shape.
        """
        cleaned_messages = []
        for message in messages:
            role = message.get("role")
            content = message.get("content")

            if role == "model":
                role = "assistant"

            if role and content is not None:
                if isinstance(content, str):
                    cleaned_messages.append({"role": role, "content": content})
                elif isinstance(content, list):
                    for item in content:
                        if "text" in item:
                            cleaned_messages.append(
                                {"role": role, "content": item["text"]}
                            )
                        elif "function_call" in item:
                            # Provider-neutral function call -> OpenAI
                            # assistant message with a tool_calls entry.
                            tool_call = {
                                "id": item["function_call"]["call_id"],
                                "type": "function",
                                "function": {
                                    "name": item["function_call"]["name"],
                                    "arguments": json.dumps(
                                        item["function_call"]["args"]
                                    ),
                                },
                            }
                            cleaned_messages.append(
                                {
                                    "role": "assistant",
                                    "content": None,
                                    "tool_calls": [tool_call],
                                }
                            )
                        elif "function_response" in item:
                            # Tool result -> OpenAI "tool" role message.
                            cleaned_messages.append(
                                {
                                    "role": "tool",
                                    "tool_call_id": item["function_response"][
                                        "call_id"
                                    ],
                                    "content": json.dumps(
                                        item["function_response"]["response"]["result"]
                                    ),
                                }
                            )
                        elif isinstance(item, dict):
                            # Typed multimodal parts are forwarded unchanged.
                            content_parts = []
                            if "text" in item:
                                # (unreachable: "text" is handled by the first
                                # branch above; kept for defensive parity)
                                content_parts.append(
                                    {"type": "text", "text": item["text"]}
                                )
                            elif (
                                "type" in item
                                and item["type"] == "text"
                                and "text" in item
                            ):
                                content_parts.append(item)
                            elif (
                                "type" in item
                                and item["type"] == "file"
                                and "file" in item
                            ):
                                content_parts.append(item)
                            elif (
                                "type" in item
                                and item["type"] == "image_url"
                                and "image_url" in item
                            ):
                                content_parts.append(item)
                            cleaned_messages.append(
                                {"role": role, "content": content_parts}
                            )
                        else:
                            raise ValueError(
                                f"Unexpected content dictionary format: {item}"
                            )
                else:
                    raise ValueError(f"Unexpected content type: {type(content)}")

        return cleaned_messages

    def _raw_gen(
        self,
        baseself,
        model,
        messages,
        stream=False,
        tools=None,
        engine=settings.AZURE_DEPLOYMENT_NAME,
        response_format=None,
        **kwargs,
    ):
        """Run a chat completion.

        Returns the raw first choice when ``tools`` are supplied (so callers
        can inspect tool calls), otherwise the message text.
        """
        messages = self._clean_messages_openai(messages)

        request_params = {
            "model": model,
            "messages": messages,
            "stream": stream,
            **kwargs,
        }

        if tools:
            request_params["tools"] = tools

        if response_format:
            request_params["response_format"] = response_format

        response = self.client.chat.completions.create(**request_params)

        if tools:
            return response.choices[0]
        else:
            return response.choices[0].message.content

    def _raw_gen_stream(
        self,
        baseself,
        model,
        messages,
        stream=True,
        tools=None,
        engine=settings.AZURE_DEPLOYMENT_NAME,
        response_format=None,
        **kwargs,
    ):
        """Stream a chat completion, yielding text chunks.

        Non-text chunks (e.g. tool-call deltas) are yielded as raw choice
        objects for the response handler to merge.
        """
        messages = self._clean_messages_openai(messages)

        request_params = {
            "model": model,
            "messages": messages,
            "stream": stream,
            **kwargs,
        }

        if tools:
            request_params["tools"] = tools

        if response_format:
            request_params["response_format"] = response_format

        response = self.client.chat.completions.create(**request_params)

        for line in response:
            if (
                len(line.choices) > 0
                and line.choices[0].delta.content is not None
                and len(line.choices[0].delta.content) > 0
            ):
                yield line.choices[0].delta.content
            elif len(line.choices) > 0:
                yield line.choices[0]

    def _supports_tools(self):
        """OpenAI models support tool calling."""
        return True

    def _supports_structured_output(self):
        """OpenAI models support JSON-schema structured output."""
        return True

    def prepare_structured_output_format(self, json_schema):
        """Convert a JSON schema into OpenAI's ``response_format`` payload.

        OpenAI strict mode requires every object schema to declare
        ``additionalProperties: false`` and to list all of its properties as
        ``required``; this walks the schema recursively (properties, items,
        anyOf/oneOf/allOf) and enforces both. Returns None when no schema is
        given or preparation fails.
        """
        if not json_schema:
            return None

        try:

            def add_additional_properties_false(schema_obj):
                if isinstance(schema_obj, dict):
                    schema_copy = schema_obj.copy()

                    if schema_copy.get("type") == "object":
                        schema_copy["additionalProperties"] = False
                        # Ensure 'required' includes all properties for OpenAI strict mode
                        if "properties" in schema_copy:
                            schema_copy["required"] = list(
                                schema_copy["properties"].keys()
                            )

                    for key, value in schema_copy.items():
                        if key == "properties" and isinstance(value, dict):
                            schema_copy[key] = {
                                prop_name: add_additional_properties_false(prop_schema)
                                for prop_name, prop_schema in value.items()
                            }
                        elif key == "items" and isinstance(value, dict):
                            schema_copy[key] = add_additional_properties_false(value)
                        elif key in ["anyOf", "oneOf", "allOf"] and isinstance(
                            value, list
                        ):
                            schema_copy[key] = [
                                add_additional_properties_false(sub_schema)
                                for sub_schema in value
                            ]

                    return schema_copy
                return schema_obj

            processed_schema = add_additional_properties_false(json_schema)

            result = {
                "type": "json_schema",
                "json_schema": {
                    "name": processed_schema.get("name", "response"),
                    "description": processed_schema.get(
                        "description", "Structured response"
                    ),
                    "schema": processed_schema,
                    "strict": True,
                },
            }

            return result

        except Exception as e:
            logging.error(f"Error preparing structured output format: {e}")
            return None

    def get_supported_attachment_types(self):
        """
        Return a list of MIME types supported by OpenAI for file uploads.

        Returns:
            list: List of supported MIME types
        """
        return [
            "application/pdf",
            "image/png",
            "image/jpeg",
            "image/jpg",
            "image/webp",
            "image/gif",
        ]

    def prepare_messages_with_attachments(self, messages, attachments=None):
        """
        Process attachments using OpenAI's file API for more efficient handling.

        Images are inlined as base64 ``image_url`` parts; PDFs are uploaded
        via the file API and referenced by ``file_id``. If processing fails
        and the attachment carries extracted text, that text is appended as a
        plain-text fallback. All parts go onto the last user message (one is
        created if none exists).

        Args:
            messages (list): List of message dictionaries.
            attachments (list): List of attachment dictionaries with content and metadata.

        Returns:
            list: Messages formatted with file references for OpenAI API.
        """
        if not attachments:
            return messages

        prepared_messages = messages.copy()

        # Find the last user message so attachments land on it.
        user_message_index = None
        for i in range(len(prepared_messages) - 1, -1, -1):
            if prepared_messages[i].get("role") == "user":
                user_message_index = i
                break

        if user_message_index is None:
            user_message = {"role": "user", "content": []}
            prepared_messages.append(user_message)
            user_message_index = len(prepared_messages) - 1

        # Normalize the target message content to the list-of-parts form.
        if isinstance(prepared_messages[user_message_index].get("content"), str):
            text_content = prepared_messages[user_message_index]["content"]
            prepared_messages[user_message_index]["content"] = [
                {"type": "text", "text": text_content}
            ]
        elif not isinstance(prepared_messages[user_message_index].get("content"), list):
            prepared_messages[user_message_index]["content"] = []

        for attachment in attachments:
            mime_type = attachment.get("mime_type")

            if mime_type and mime_type.startswith("image/"):
                try:
                    base64_image = self._get_base64_image(attachment)
                    prepared_messages[user_message_index]["content"].append(
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": f"data:{mime_type};base64,{base64_image}"
                            },
                        }
                    )
                except Exception as e:
                    logging.error(
                        f"Error processing image attachment: {e}", exc_info=True
                    )
                    if "content" in attachment:
                        prepared_messages[user_message_index]["content"].append(
                            {
                                "type": "text",
                                "text": f"[Image could not be processed: {attachment.get('path', 'unknown')}]",
                            }
                        )
            # Handle PDFs using the file API
            elif mime_type == "application/pdf":
                try:
                    file_id = self._upload_file_to_openai(attachment)
                    prepared_messages[user_message_index]["content"].append(
                        {"type": "file", "file": {"file_id": file_id}}
                    )
                except Exception as e:
                    logging.error(f"Error uploading PDF to OpenAI: {e}", exc_info=True)
                    if "content" in attachment:
                        prepared_messages[user_message_index]["content"].append(
                            {
                                "type": "text",
                                "text": f"File content:\n\n{attachment['content']}",
                            }
                        )

        return prepared_messages

    def _get_base64_image(self, attachment):
        """
        Convert an image file to base64 encoding.

        Args:
            attachment (dict): Attachment dictionary with path and metadata.

        Returns:
            str: Base64-encoded image data.

        Raises:
            ValueError: If the attachment has no "path".
            FileNotFoundError: If the file cannot be found in storage.
        """
        file_path = attachment.get("path")
        if not file_path:
            raise ValueError("No file path provided in attachment")

        try:
            with self.storage.get_file(file_path) as image_file:
                return base64.b64encode(image_file.read()).decode("utf-8")
        except FileNotFoundError:
            raise FileNotFoundError(f"File not found: {file_path}")

    def _upload_file_to_openai(self, attachment):
        """
        Upload a file to OpenAI and return the file_id.

        The resulting id is cached on the attachment's MongoDB document
        (``openai_file_id``), so repeated requests reuse the same upload.

        Args:
            attachment (dict): Attachment dictionary with path and metadata.
                Expected keys:
                - path: Path to the file
                - _id: Optional MongoDB ID for caching

        Returns:
            str: OpenAI file_id for the uploaded file.
        """
        if "openai_file_id" in attachment:
            return attachment["openai_file_id"]

        file_path = attachment.get("path")

        if not self.storage.file_exists(file_path):
            raise FileNotFoundError(f"File not found: {file_path}")

        try:

            def _upload(local_path, **kwargs):
                # Use a context manager so the handle is closed even if the
                # upload fails (the previous version leaked the open file).
                with open(local_path, "rb") as file_obj:
                    return self.client.files.create(
                        file=file_obj, purpose="assistants"
                    ).id

            file_id = self.storage.process_file(file_path, _upload)

            from application.core.mongo_db import MongoDB

            mongo = MongoDB.get_client()
            db = mongo[settings.MONGO_DB_NAME]
            attachments_collection = db["attachments"]
            if "_id" in attachment:
                attachments_collection.update_one(
                    {"_id": attachment["_id"]}, {"$set": {"openai_file_id": file_id}}
                )

            return file_id
        except Exception as e:
            logging.error(f"Error uploading file to OpenAI: {e}", exc_info=True)
            raise
|
||||||
|
|
||||||
|
|
||||||
|
class AzureOpenAILLM(OpenAILLM):
    """OpenAI-compatible LLM backed by an Azure OpenAI deployment.

    Endpoint, API version and deployment name come from application settings.
    """

    def __init__(self, api_key, user_api_key, *args, **kwargs):

        super().__init__(api_key)
        # BUG FIX: these assignments previously ended with trailing commas,
        # which stored 1-tuples (e.g. ("https://...",)) instead of the
        # plain string settings values.
        self.api_base = settings.OPENAI_API_BASE
        self.api_version = settings.OPENAI_API_VERSION
        self.deployment_name = settings.AZURE_DEPLOYMENT_NAME
        from openai import AzureOpenAI

        # Replace the base-class client with an Azure-specific one.
        self.client = AzureOpenAI(
            api_key=api_key,
            api_version=settings.OPENAI_API_VERSION,
            azure_endpoint=settings.OPENAI_API_BASE,
        )
|
||||||
38
application/llm/premai.py
Normal file
38
application/llm/premai.py
Normal file
@@ -0,0 +1,38 @@
|
|||||||
|
from application.llm.base import BaseLLM
|
||||||
|
from application.core.settings import settings
|
||||||
|
|
||||||
|
|
||||||
|
class PremAILLM(BaseLLM):
    """LLM backend for the PremAI hosted chat-completions API.

    The PremAI project id is taken from application settings; the API key
    is supplied per instance.
    """

    def __init__(self, api_key=None, user_api_key=None, *args, **kwargs):
        # Imported lazily so the premai package is only required when this
        # backend is actually used.
        from premai import Prem

        super().__init__(*args, **kwargs)
        self.client = Prem(api_key=api_key)
        self.api_key = api_key
        self.user_api_key = user_api_key
        # Project id comes from app settings, not per-request input.
        self.project_id = settings.PREMAI_PROJECT_ID

    def _raw_gen(self, baseself, model, messages, stream=False, **kwargs):
        # Non-streaming completion: returns the message text of the first
        # choice only.
        response = self.client.chat.completions.create(
            model=model,
            project_id=self.project_id,
            messages=messages,
            stream=stream,
            **kwargs
        )

        return response.choices[0].message["content"]

    def _raw_gen_stream(self, baseself, model, messages, stream=True, **kwargs):
        # Streaming completion: yields each non-None text delta as it
        # arrives from the API.
        response = self.client.chat.completions.create(
            model=model,
            project_id=self.project_id,
            messages=messages,
            stream=stream,
            **kwargs
        )

        for line in response:
            if line.choices[0].delta["content"] is not None:
                yield line.choices[0].delta["content"]
|
||||||
140
application/llm/sagemaker.py
Normal file
140
application/llm/sagemaker.py
Normal file
@@ -0,0 +1,140 @@
|
|||||||
|
from application.llm.base import BaseLLM
|
||||||
|
from application.core.settings import settings
|
||||||
|
import json
|
||||||
|
import io
|
||||||
|
|
||||||
|
|
||||||
|
class LineIterator:
    """
    A helper class for parsing the byte stream input.

    The output of the model will be in the following format:
    ```
    b'{"outputs": [" a"]}\n'
    b'{"outputs": [" challenging"]}\n'
    b'{"outputs": [" problem"]}\n'
    ...
    ```

    While usually each PayloadPart event from the event stream will contain a byte array
    with a full json, this is not guaranteed and some of the json objects may be split across
    PayloadPart events. For example:
    ```
    {'PayloadPart': {'Bytes': b'{"outputs": '}}
    {'PayloadPart': {'Bytes': b'[" problem"]}\n'}}
    ```

    This class accounts for this by buffering incoming bytes and only returning
    complete lines (terminated by a '\n' character), while remembering the last
    read position so previous bytes are never exposed again.
    """

    def __init__(self, stream):
        self.byte_iterator = iter(stream)
        self.buffer = io.BytesIO()
        # Offset of the first byte that has not yet been returned.
        self.read_pos = 0

    def __iter__(self):
        return self

    def __next__(self):
        while True:
            self.buffer.seek(self.read_pos)
            line = self.buffer.readline()
            if line and line[-1] == ord("\n"):
                self.read_pos += len(line)
                return line[:-1]
            try:
                chunk = next(self.byte_iterator)
            except StopIteration:
                # NOTE(review): if the stream ends with a partial line (no
                # trailing '\n'), this loops forever — confirm upstream
                # always terminates output with a newline.
                if self.read_pos < self.buffer.getbuffer().nbytes:
                    continue
                raise
            if "PayloadPart" not in chunk:
                # BUG FIX: the original did `"..." + chunk`, concatenating
                # str + dict and raising TypeError on the very path that was
                # supposed to report the unknown event.
                print("Unknown event type:" + str(chunk))
                continue
            # Append new bytes at the end without disturbing read_pos.
            self.buffer.seek(0, io.SEEK_END)
            self.buffer.write(chunk["PayloadPart"]["Bytes"])
|
||||||
|
|
||||||
|
|
||||||
|
class SagemakerAPILLM(BaseLLM):
    """LLM backend that invokes a SageMaker inference endpoint.

    The endpoint name comes from application settings; both sync and
    streaming generation build an instruction/context/answer prompt from
    the first and last chat messages.
    """

    def __init__(self, api_key=None, user_api_key=None, *args, **kwargs):
        import boto3

        # SECURITY FIX: credentials were hard-coded placeholder strings
        # ("xxx"). Rely on boto3's default credential chain (environment
        # variables, shared config, instance profile) instead of embedding
        # secrets in source.
        runtime = boto3.client(
            "runtime.sagemaker",
            region_name="us-west-2",
        )

        super().__init__(*args, **kwargs)
        self.api_key = api_key
        self.user_api_key = user_api_key
        self.endpoint = settings.SAGEMAKER_ENDPOINT
        self.runtime = runtime

    def _build_request(self, messages, stream, max_new_tokens):
        """Build the (prompt, JSON body bytes) pair shared by both
        generation paths. The first message supplies the context and the
        last message the user question."""
        context = messages[0]["content"]
        user_question = messages[-1]["content"]
        prompt = f"### Instruction \n {user_question} \n ### Context \n {context} \n ### Answer \n"

        payload = {
            "inputs": prompt,
            "stream": stream,
            "parameters": {
                "do_sample": True,
                "temperature": 0.1,
                "max_new_tokens": max_new_tokens,
                "repetition_penalty": 1.03,
                "stop": ["</s>", "###"],
            },
        }
        return prompt, json.dumps(payload).encode("utf-8")

    def _raw_gen(self, baseself, model, messages, stream=False, tools=None, **kwargs):
        """Invoke the endpoint once and return the completion text with the
        prompt prefix stripped.

        NOTE(review): max_new_tokens=30 is kept from the original and seems
        very low for full answers — confirm it is intentional.
        """
        prompt, body_bytes = self._build_request(messages, stream=False, max_new_tokens=30)

        response = self.runtime.invoke_endpoint(
            EndpointName=self.endpoint, ContentType="application/json", Body=body_bytes
        )
        result = json.loads(response["Body"].read().decode())
        # Leftover debug print of the generated text to stderr removed.
        return result[0]["generated_text"][len(prompt):]

    def _raw_gen_stream(self, baseself, model, messages, stream=True, tools=None, **kwargs):
        """Invoke the streaming endpoint and yield token texts as they
        arrive, skipping the configured stop sequences."""
        prompt, body_bytes = self._build_request(messages, stream=True, max_new_tokens=512)

        response = self.runtime.invoke_endpoint_with_response_stream(
            EndpointName=self.endpoint, ContentType="application/json", Body=body_bytes
        )
        event_stream = response["Body"]
        start_json = b"{"
        for line in LineIterator(event_stream):
            # Only lines containing a JSON object carry token data.
            if line != b"" and start_json in line:
                data = json.loads(line[line.find(start_json):].decode("utf-8"))
                token_text = data["token"]["text"]
                if token_text not in ["</s>", "###"]:
                    # Leftover debug print of each token removed.
                    yield token_text
|
||||||
161
application/logging.py
Normal file
161
application/logging.py
Normal file
@@ -0,0 +1,161 @@
|
|||||||
|
import datetime
|
||||||
|
import functools
|
||||||
|
import inspect
|
||||||
|
|
||||||
|
import logging
|
||||||
|
import uuid
|
||||||
|
from typing import Any, Callable, Dict, Generator, List
|
||||||
|
|
||||||
|
from application.core.mongo_db import MongoDB
|
||||||
|
from application.core.settings import settings
|
||||||
|
|
||||||
|
logging.basicConfig(
|
||||||
|
level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s"
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
class LogContext:
    """Mutable container for data collected while logging one request.

    Holds request identity (endpoint, activity id, user, api key, query)
    plus a ``stacks`` list to which components append entries of the form
    ``{"component": ..., "data": {...}}`` as the request progresses.
    """

    def __init__(self, endpoint, activity_id, user, api_key, query):
        self.endpoint, self.activity_id = endpoint, activity_id
        self.user, self.api_key = user, api_key
        self.query = query
        self.stacks = []
|
||||||
|
|
||||||
|
|
||||||
|
def build_stack_data(
    obj: Any,
    include_attributes: List[str] = None,
    exclude_attributes: List[str] = None,
    custom_data: Dict = None,
) -> Dict:
    """Serialize an object's public attributes into a JSON-friendly dict.

    Args:
        obj: Object to inspect. Must not be None.
        include_attributes: Explicit attribute names to serialize. When
            None, all public non-callable attributes are auto-discovered.
        exclude_attributes: Attribute names to skip.
        custom_data: Extra key/value pairs merged into the result last
            (overriding colliding attribute keys).

    Returns:
        Dict mapping attribute name to serialized value. Ints, floats,
        strings and bools are kept as-is; lists are kept (of dicts),
        converted via __dict__, or stringified; dict values are
        stringified. None values and other types are skipped.

    Raises:
        ValueError: If obj is None.
    """
    if obj is None:
        raise ValueError("The 'obj' parameter cannot be None")
    data = {}
    # BUG FIX: the discovery loop previously ran unconditionally, appending
    # every public attribute to a caller-supplied include_attributes list —
    # mutating the caller's argument and defeating the filter. Discover
    # attributes only when none were requested.
    if include_attributes is None:
        include_attributes = [
            name
            for name, value in inspect.getmembers(obj)
            if not name.startswith("_")
            and not inspect.ismethod(value)
            and not inspect.isfunction(value)
        ]
    for attr_name in include_attributes:
        if exclude_attributes and attr_name in exclude_attributes:
            continue
        try:
            attr_value = getattr(obj, attr_name)
            if attr_value is not None:
                if isinstance(attr_value, (int, float, str, bool)):
                    data[attr_name] = attr_value
                elif isinstance(attr_value, list):
                    if all(isinstance(item, dict) for item in attr_value):
                        data[attr_name] = attr_value
                    elif all(hasattr(item, "__dict__") for item in attr_value):
                        data[attr_name] = [item.__dict__ for item in attr_value]
                    else:
                        data[attr_name] = [str(item) for item in attr_value]
                elif isinstance(attr_value, dict):
                    data[attr_name] = {k: str(v) for k, v in attr_value.items()}
        except AttributeError as e:
            # The original had a second, unreachable `except AttributeError:
            # pass` clause after this one; it has been removed.
            logging.warning(f"AttributeError while accessing {attr_name}: {e}")
    if custom_data:
        data.update(custom_data)
    return data
|
||||||
|
|
||||||
|
|
||||||
|
def log_activity() -> Callable:
    """Decorator factory for generator endpoints that records each request
    (endpoint, user, api key, query and accumulated stack data) to MongoDB.

    The wrapped function must be a generator and must accept a
    ``log_context`` keyword argument; a fresh :class:`LogContext` is
    injected on every call.
    """

    def decorator(func: Callable) -> Callable:
        @functools.wraps(func)
        def wrapper(*args: Any, **kwargs: Any) -> Any:
            # NOTE(review): `wrapper` contains `yield from`, so it is itself
            # a generator function — nothing below executes until the caller
            # starts iterating the returned generator.
            activity_id = str(uuid.uuid4())
            # Pull endpoint/user/api-key off the first positional argument
            # (typically the handler/resource instance).
            data = build_stack_data(args[0])
            endpoint = data.get("endpoint", "")
            user = data.get("user", "local")
            api_key = data.get("user_api_key", "")
            # Prefer an explicit query kwarg, falling back to an attribute
            # on the first argument.
            query = kwargs.get("query", getattr(args[0], "query", ""))

            context = LogContext(endpoint, activity_id, user, api_key, query)
            kwargs["log_context"] = context

            logging.info(
                f"Starting activity: {endpoint} - {activity_id} - User: {user}"
            )

            generator = func(*args, **kwargs)
            # Re-yield items while capturing errors and persisting the log.
            yield from _consume_and_log(generator, context)

        return wrapper

    return decorator
|
||||||
|
|
||||||
|
|
||||||
|
def _consume_and_log(generator: Generator, context: "LogContext"):
    """Drain *generator*, re-yielding each item, then persist the collected
    log context to MongoDB.

    On error: the exception is logged, an "error" stack entry is appended,
    an error-level record is written, and the exception is re-raised.

    NOTE(review): the ``finally`` block also runs on the error path, so a
    failing request is written twice (once at "error" level, then again at
    "info") — confirm whether the double write is intentional.
    """
    try:
        for item in generator:
            yield item
    except Exception as e:
        logging.exception(f"Error in {context.endpoint} - {context.activity_id}: {e}")
        context.stacks.append({"component": "error", "data": {"message": str(e)}})
        _log_to_mongodb(
            endpoint=context.endpoint,
            activity_id=context.activity_id,
            user=context.user,
            api_key=context.api_key,
            query=context.query,
            stacks=context.stacks,
            level="error",
        )
        raise
    finally:
        # Always write an info-level record once the generator is exhausted
        # (or closed).
        _log_to_mongodb(
            endpoint=context.endpoint,
            activity_id=context.activity_id,
            user=context.user,
            api_key=context.api_key,
            query=context.query,
            stacks=context.stacks,
            level="info",
        )
|
||||||
|
|
||||||
|
|
||||||
|
def _log_to_mongodb(
    endpoint: str,
    activity_id: str,
    user: str,
    api_key: str,
    query: str,
    stacks: List[Dict],
    level: str,
) -> None:
    """Write one activity record to the ``stack_logs`` collection.

    Top-level string fields longer than 10000 characters are truncated
    before the insert. Any failure is logged and swallowed so that logging
    can never break the request itself.
    """
    try:
        mongo = MongoDB.get_client()
        collection = mongo[settings.MONGO_DB_NAME]["stack_logs"]

        log_entry = {
            "endpoint": endpoint,
            "id": activity_id,
            "level": level,
            "user": user,
            "api_key": api_key,
            "query": query,
            "stacks": stacks,
            "timestamp": datetime.datetime.now(datetime.timezone.utc),
        }
        # clean up text fields to be no longer than 10000 characters
        log_entry = {
            key: (value[:10000] if isinstance(value, str) and len(value) > 10000 else value)
            for key, value in log_entry.items()
        }

        collection.insert_one(log_entry)
        logging.debug(f"Logged activity to MongoDB: {activity_id}")

    except Exception as e:
        logging.error(f"Failed to log to MongoDB: {e}", exc_info=True)
|
||||||
1331
application/package-lock.json
generated
1331
application/package-lock.json
generated
File diff suppressed because it is too large
Load Diff
@@ -1,5 +0,0 @@
|
|||||||
{
|
|
||||||
"devDependencies": {
|
|
||||||
"tailwindcss": "^3.2.4"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
94
application/parser/chunking.py
Normal file
94
application/parser/chunking.py
Normal file
@@ -0,0 +1,94 @@
|
|||||||
|
import re
|
||||||
|
from typing import List, Tuple
|
||||||
|
import logging
|
||||||
|
from application.parser.schema.base import Document
|
||||||
|
from application.utils import get_encoding
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
class Chunker:
    """Split parsed documents into token-bounded chunks.

    Only the "classic_chunk" strategy is supported: documents at or below
    ``max_tokens`` pass through annotated with their token count, while
    oversized documents are split into pieces of at most ``max_tokens``.
    """

    def __init__(
        self,
        chunking_strategy: str = "classic_chunk",
        max_tokens: int = 2000,
        min_tokens: int = 150,
        duplicate_headers: bool = False,
    ):
        if chunking_strategy not in ["classic_chunk"]:
            raise ValueError(f"Unsupported chunking strategy: {chunking_strategy}")
        self.chunking_strategy = chunking_strategy
        self.max_tokens = max_tokens
        self.min_tokens = min_tokens
        # When True, the detected header is prepended to every chunk of a
        # split document, not just the first.
        self.duplicate_headers = duplicate_headers
        self.encoding = get_encoding()

    def separate_header_and_body(self, text: str) -> Tuple[str, str]:
        """Treat the first three lines as a header; return (header, body)."""
        header_pattern = r"^(.*?\n){3}"
        match = re.match(header_pattern, text)
        if match:
            header = match.group(0)
            body = text[len(header):]
        else:
            header, body = "", text  # No header, treat entire text as body
        return header, body

    def split_document(self, doc: Document) -> List[Document]:
        """Split one oversized document into chunks of at most max_tokens,
        annotating each chunk with its token count and a -<part> doc_id."""
        split_docs = []
        header, body = self.separate_header_and_body(doc.text)
        header_tokens = self.encoding.encode(header) if header else []
        body_tokens = self.encoding.encode(body)

        current_position = 0
        part_index = 0
        while current_position < len(body_tokens):
            # Reserve room for the header when it will be prepended.
            end_position = current_position + self.max_tokens - len(header_tokens)
            chunk_tokens = (
                header_tokens + body_tokens[current_position:end_position]
                if self.duplicate_headers or part_index == 0
                else body_tokens[current_position:end_position]
            )
            chunk_text = self.encoding.decode(chunk_tokens)
            new_doc = Document(
                text=chunk_text,
                doc_id=f"{doc.doc_id}-{part_index}",
                embedding=doc.embedding,
                extra_info={**(doc.extra_info or {}), "token_count": len(chunk_tokens)},
            )
            split_docs.append(new_doc)
            current_position = end_position
            part_index += 1
            # BUG FIX: header_tokens was cleared unconditionally, so
            # duplicate_headers=True never actually duplicated the header
            # beyond the first chunk. Keep the tokens when duplication is on.
            if not self.duplicate_headers:
                header_tokens = []
        return split_docs

    def classic_chunk(self, documents: List[Document]) -> List[Document]:
        """Annotate or split documents so every output chunk respects
        max_tokens."""
        processed_docs = []
        for doc in documents:
            token_count = len(self.encoding.encode(doc.text))

            if token_count <= self.max_tokens:
                # Covers both the in-range and undersized cases, which the
                # original handled with two identical branches.
                doc.extra_info = doc.extra_info or {}
                doc.extra_info["token_count"] = token_count
                processed_docs.append(doc)
            else:
                # Split large documents
                processed_docs.extend(self.split_document(doc))
        return processed_docs

    def chunk(
        self,
        documents: List[Document]
    ) -> List[Document]:
        """Apply the configured chunking strategy to *documents*."""
        if self.chunking_strategy == "classic_chunk":
            return self.classic_chunk(documents)
        else:
            raise ValueError("Unsupported chunking strategy")
|
||||||
18
application/parser/connectors/__init__.py
Normal file
18
application/parser/connectors/__init__.py
Normal file
@@ -0,0 +1,18 @@
|
|||||||
|
"""
|
||||||
|
External knowledge base connectors for DocsGPT.
|
||||||
|
|
||||||
|
This module contains connectors for external knowledge bases and document storage systems
|
||||||
|
that require authentication and specialized handling, separate from simple web scrapers.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from .base import BaseConnectorAuth, BaseConnectorLoader
|
||||||
|
from .connector_creator import ConnectorCreator
|
||||||
|
from .google_drive import GoogleDriveAuth, GoogleDriveLoader
|
||||||
|
|
||||||
|
__all__ = [
|
||||||
|
'BaseConnectorAuth',
|
||||||
|
'BaseConnectorLoader',
|
||||||
|
'ConnectorCreator',
|
||||||
|
'GoogleDriveAuth',
|
||||||
|
'GoogleDriveLoader'
|
||||||
|
]
|
||||||
129
application/parser/connectors/base.py
Normal file
129
application/parser/connectors/base.py
Normal file
@@ -0,0 +1,129 @@
|
|||||||
|
"""
|
||||||
|
Base classes for external knowledge base connectors.
|
||||||
|
|
||||||
|
This module provides minimal abstract base classes that define the essential
|
||||||
|
interface for external knowledge base connectors.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from abc import ABC, abstractmethod
|
||||||
|
from typing import Any, Dict, List, Optional
|
||||||
|
|
||||||
|
from application.parser.schema.base import Document
|
||||||
|
|
||||||
|
|
||||||
|
class BaseConnectorAuth(ABC):
    """Minimal authentication interface for external knowledge-base
    connectors (OAuth-style flows).

    Concrete connectors implement every abstract method below.
    """

    @abstractmethod
    def get_authorization_url(self, state: Optional[str] = None) -> str:
        """Return the authorization URL for the OAuth flow.

        Args:
            state: Optional state parameter for CSRF protection.
        """

    @abstractmethod
    def exchange_code_for_tokens(self, authorization_code: str) -> Dict[str, Any]:
        """Trade the OAuth callback *authorization_code* for a dictionary
        of token information."""

    @abstractmethod
    def refresh_access_token(self, refresh_token: str) -> Dict[str, Any]:
        """Return refreshed token information for *refresh_token*."""

    @abstractmethod
    def is_token_expired(self, token_info: Dict[str, Any]) -> bool:
        """Return True when *token_info* describes an expired token."""
|
||||||
|
|
||||||
|
|
||||||
|
class BaseConnectorLoader(ABC):
    """Minimal loading interface for external knowledge-base connectors.

    Concrete loaders implement every abstract method below.
    """

    @abstractmethod
    def __init__(self, session_token: str):
        """Initialize the loader with an authentication *session_token*."""

    @abstractmethod
    def load_data(self, inputs: Dict[str, Any]) -> List[Document]:
        """Load documents from the external knowledge base.

        Args:
            inputs: Configuration dictionary; recognized keys include
                ``file_ids`` (specific files to load), ``folder_ids``
                (folders to browse/download), ``limit`` (maximum items),
                ``list_only`` (metadata without content) and ``recursive``
                (whether to descend into folders).

        Returns:
            List of Document objects.
        """

    @abstractmethod
    def download_to_directory(self, local_dir: str, source_config: Dict[str, Any] = None) -> Dict[str, Any]:
        """Download files/folders into *local_dir*.

        Args:
            local_dir: Local directory path to download files to.
            source_config: Configuration for what to download.

        Returns:
            Result dictionary with keys such as ``files_downloaded``,
            ``directory_path``, ``empty_result``, ``source_type``,
            ``config_used`` and an optional ``error`` message.
        """
|
||||||
81
application/parser/connectors/connector_creator.py
Normal file
81
application/parser/connectors/connector_creator.py
Normal file
@@ -0,0 +1,81 @@
|
|||||||
|
from application.parser.connectors.google_drive.loader import GoogleDriveLoader
|
||||||
|
from application.parser.connectors.google_drive.auth import GoogleDriveAuth
|
||||||
|
|
||||||
|
|
||||||
|
class ConnectorCreator:
    """
    Factory class for creating external knowledge base connectors and auth providers.

    These are different from remote loaders as they typically require
    authentication and connect to external document storage systems.
    """

    # Registry of loader classes keyed by lower-case connector type.
    connectors = {
        "google_drive": GoogleDriveLoader,
    }

    # Registry of auth-provider classes keyed by lower-case connector type.
    auth_providers = {
        "google_drive": GoogleDriveAuth,
    }

    @classmethod
    def create_connector(cls, connector_type, *args, **kwargs):
        """Instantiate the loader registered for *connector_type*.

        Raises:
            ValueError: If the connector type is not registered.
        """
        loader_cls = cls.connectors.get(connector_type.lower())
        if loader_cls is None:
            raise ValueError(f"No connector class found for type {connector_type}")
        return loader_cls(*args, **kwargs)

    @classmethod
    def create_auth(cls, connector_type):
        """Instantiate the auth provider registered for *connector_type*.

        Raises:
            ValueError: If the connector type has no auth provider.
        """
        provider_cls = cls.auth_providers.get(connector_type.lower())
        if provider_cls is None:
            raise ValueError(f"No auth class found for type {connector_type}")
        return provider_cls()

    @classmethod
    def get_supported_connectors(cls):
        """Return the list of registered connector type names."""
        return list(cls.connectors)

    @classmethod
    def is_supported(cls, connector_type):
        """Return True when *connector_type* is registered (case-insensitive)."""
        return connector_type.lower() in cls.connectors
|
||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user