Mirror of https://github.com/arc53/DocsGPT.git, synced 2026-05-07 06:30:03 +00:00
Compare commits
0.8.0...v1-mini-im (2946 commits)
[Commit table omitted: 2946 entries, SHA1 only (the author, date, and message cells were not captured in this export), running from 689dd79597 at the top of the comparison through 60cfea1126 at the bottom.]
15  .devcontainer/Dockerfile  Normal file
@@ -0,0 +1,15 @@
FROM python:3.12-bookworm

# Install Node.js 20.x
RUN curl -fsSL https://deb.nodesource.com/setup_20.x | bash - \
    && apt-get install -y nodejs \
    && rm -rf /var/lib/apt/lists/*

# Install global npm packages
RUN npm install -g husky vite

# Create and activate Python virtual environment
RUN python -m venv /opt/venv
ENV PATH="/opt/venv/bin:$PATH"

WORKDIR /workspace
49  .devcontainer/devc-welcome.md  Normal file
@@ -0,0 +1,49 @@
# Welcome to DocsGPT Devcontainer

Welcome to the DocsGPT development environment! This guide will help you get started quickly.

## Starting Services

To run DocsGPT, you need to start three main services: Flask (backend), Celery (task queue), and Vite (frontend). Here are the commands to start each service within the devcontainer:

### Vite (Frontend)

```bash
cd frontend
npm run dev -- --host
```

### Flask (Backend)

```bash
flask --app application/app.py run --host=0.0.0.0 --port=7091
```

### Celery (Task Queue)

```bash
celery -A application.app.celery worker -l INFO
```

## Github Codespaces Instructions

### 1. Make Ports Public:

Go to the "Ports" panel in Codespaces (usually located at the bottom of the VS Code window).

For both port 5173 and 7091, right-click on the port and select "Make Public".



### 2. Update VITE_API_HOST:

After making port 7091 public, copy the public URL provided by Codespaces for port 7091.

Open the file frontend/.env.development.

Find the line VITE_API_HOST=http://localhost:7091.

Replace http://localhost:7091 with the public URL you copied from Codespaces.


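To double-check the value written in step 2, a minimal sketch along these lines can be run from the repository root; the fallback URL and the idea of treating any HTTP response as "reachable" are assumptions for illustration, not part of the devcontainer setup:

```python
# Minimal sketch: read VITE_API_HOST from frontend/.env.development and check
# whether the backend answers at that URL. Run from the repository root.
import urllib.error
import urllib.request
from pathlib import Path

env_text = Path("frontend/.env.development").read_text()
api_host = next(
    (line.split("=", 1)[1].strip() for line in env_text.splitlines() if line.startswith("VITE_API_HOST=")),
    "http://localhost:7091",  # fallback value from the guide above
)

try:
    with urllib.request.urlopen(api_host, timeout=5) as resp:
        print(f"{api_host} responded with HTTP {resp.status}")
except urllib.error.HTTPError as exc:
    # An HTTP error status still means the server is up and reachable.
    print(f"{api_host} responded with HTTP {exc.code}")
except Exception as exc:
    print(f"{api_host} is not reachable yet: {exc}")
```

A connection error here usually means the port is still private in Codespaces or the Flask service has not been started yet.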
24  .devcontainer/devcontainer.json  Normal file
@@ -0,0 +1,24 @@
{
  "name": "DocsGPT Dev Container",
  "dockerComposeFile": ["docker-compose-dev.yaml", "docker-compose.override.yaml"],
  "service": "dev",
  "workspaceFolder": "/workspace",
  "postCreateCommand": ".devcontainer/post-create-command.sh",
  "forwardPorts": [7091, 5173, 6379, 27017],
  "customizations": {
    "vscode": {
      "extensions": [
        "ms-python.python",
        "ms-toolsai.jupyter",
        "esbenp.prettier-vscode",
        "dbaeumer.vscode-eslint"
      ]
    },
    "codespaces": {
      "openFiles": [
        ".devcontainer/devc-welcome.md",
        "CONTRIBUTING.md"
      ]
    }
  }
}
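A small way to confirm that the ports listed in `forwardPorts` are actually being served once the stack is up; this is only a sketch, and the port-to-service labels are assumptions based on the welcome guide and the compose files:

```python
# Minimal sketch: probe the forwarded ports from devcontainer.json on localhost.
# The port-to-service labels are assumptions, not read from the configuration.
import socket

FORWARDED_PORTS = {7091: "Flask API", 5173: "Vite dev server", 6379: "Redis", 27017: "MongoDB"}

for port, label in FORWARDED_PORTS.items():
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        sock.settimeout(1)
        status = "listening" if sock.connect_ex(("127.0.0.1", port)) == 0 else "not reachable"
    print(f"{label} (port {port}): {status}")
```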
@@ -1,5 +1,3 @@
version: "3.9"

services:

  redis:
40  .devcontainer/docker-compose.override.yaml  Normal file
@@ -0,0 +1,40 @@
version: '3.8'

services:
  dev:
    build:
      context: .
      dockerfile: Dockerfile
    volumes:
      - ../:/workspace:cached
    command: sleep infinity
    depends_on:
      redis:
        condition: service_healthy
      mongo:
        condition: service_healthy
    environment:
      - CELERY_BROKER_URL=redis://redis:6379/0
      - CELERY_RESULT_BACKEND=redis://redis:6379/1
      - MONGO_URI=mongodb://mongo:27017/docsgpt
      - CACHE_REDIS_URL=redis://redis:6379/2
    networks:
      - default

  redis:
    healthcheck:
      test: ["CMD", "redis-cli", "ping"]
      interval: 5s
      timeout: 30s
      retries: 5

  mongo:
    healthcheck:
      test: ["CMD", "mongosh", "--eval", "db.adminCommand('ping')"]
      interval: 5s
      timeout: 30s
      retries: 5

networks:
  default:
    name: docsgpt-dev-network
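The same health checks can be repeated from inside the `dev` container using the connection URLs from the `environment` block above; this is only a sketch and assumes the `redis` and `pymongo` client packages are available (for example via `application/requirements.txt`):

```python
# Minimal sketch: mirror the compose healthchecks from inside the dev container,
# reusing the URLs from the environment block above. Assumes the redis and
# pymongo client libraries are installed.
import pymongo
import redis

redis_client = redis.Redis.from_url("redis://redis:6379/0")
print("redis ping:", redis_client.ping())

mongo_client = pymongo.MongoClient("mongodb://mongo:27017/docsgpt", serverSelectionTimeoutMS=5000)
print("mongo ping:", mongo_client.admin.command("ping"))
```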
32  .devcontainer/post-create-command.sh  Executable file
@@ -0,0 +1,32 @@
#!/bin/bash

set -e  # Exit immediately if a command exits with a non-zero status

if [ ! -f frontend/.env.development ]; then
  cp -n .env-template frontend/.env.development || true  # Assuming .env-template is in the root
fi

# Determine VITE_API_HOST based on environment
if [ -n "$CODESPACES" ]; then
  # Running in Codespaces
  CODESPACE_NAME=$(echo "$CODESPACES" | cut -d'-' -f1)  # Extract codespace name
  PUBLIC_API_HOST="https://${CODESPACE_NAME}-7091.${GITHUB_CODESPACES_PORT_FORWARDING_DOMAIN}"
  echo "Setting VITE_API_HOST for Codespaces: $PUBLIC_API_HOST in frontend/.env.development"
  sed -i "s|VITE_API_HOST=.*|VITE_API_HOST=$PUBLIC_API_HOST|" frontend/.env.development
else
  # Not running in Codespaces (local devcontainer)
  DEFAULT_API_HOST="http://localhost:7091"
  echo "Setting VITE_API_HOST for local dev: $DEFAULT_API_HOST in frontend/.env.development"
  sed -i "s|VITE_API_HOST=.*|VITE_API_HOST=$DEFAULT_API_HOST|" frontend/.env.development
fi


mkdir -p model
if [ ! -d model/all-mpnet-base-v2 ]; then
  wget -q https://d3dg1063dc54p9.cloudfront.net/models/embeddings/mpnet-base-v2.zip -O model/mpnet-base-v2.zip
  unzip -q model/mpnet-base-v2.zip -d model
  rm model/mpnet-base-v2.zip
fi
pip install -r application/requirements.txt
cd frontend
npm install --include=dev
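Where GNU `sed -i` semantics are not available, the same rewrite can be sketched in Python. This is only an illustration: it assumes the repository root as working directory, and it reads a `CODESPACE_NAME` environment variable, whereas the shell script above derives the name from `$CODESPACES` instead.

```python
# Minimal sketch of the VITE_API_HOST rewrite performed with sed above.
# CODESPACE_NAME is an assumption; the shell script parses $CODESPACES instead.
import os
import re
from pathlib import Path

env_file = Path("frontend/.env.development")

if os.environ.get("CODESPACES"):
    name = os.environ.get("CODESPACE_NAME", "")
    domain = os.environ.get("GITHUB_CODESPACES_PORT_FORWARDING_DOMAIN", "")
    api_host = f"https://{name}-7091.{domain}"
else:
    api_host = "http://localhost:7091"

text = env_file.read_text()
env_file.write_text(re.sub(r"VITE_API_HOST=.*", f"VITE_API_HOST={api_host}", text))
print(f"Set VITE_API_HOST to {api_host}")
```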
@@ -1,9 +1,36 @@
API_KEY=<LLM api key (for example, open ai key)>
LLM_NAME=docsgpt
VITE_API_STREAMING=true
INTERNAL_KEY=<internal key for worker-to-backend authentication>

# Provider-specific API keys (optional - use these to enable multiple providers)
# OPENAI_API_KEY=<your-openai-api-key>
# ANTHROPIC_API_KEY=<your-anthropic-api-key>
# GOOGLE_API_KEY=<your-google-api-key>
# GROQ_API_KEY=<your-groq-api-key>
# NOVITA_API_KEY=<your-novita-api-key>
# OPEN_ROUTER_API_KEY=<your-openrouter-api-key>

# Remote Embeddings (Optional - for using a remote embeddings API instead of local SentenceTransformer)
# When set, the app will use the remote API and won't load SentenceTransformer (saves RAM)
EMBEDDINGS_BASE_URL=
EMBEDDINGS_KEY=

#For Azure (you can delete it if you don't use Azure)
OPENAI_API_BASE=
OPENAI_API_VERSION=
AZURE_DEPLOYMENT_NAME=
AZURE_EMBEDDINGS_DEPLOYMENT_NAME=
AZURE_EMBEDDINGS_DEPLOYMENT_NAME=

#Azure AD Application (client) ID
MICROSOFT_CLIENT_ID=your-azure-ad-client-id
#Azure AD Application client secret
MICROSOFT_CLIENT_SECRET=your-azure-ad-client-secret
#Azure AD Tenant ID (or 'common' for multi-tenant)
MICROSOFT_TENANT_ID=your-azure-ad-tenant-id
#If you are using a Microsoft Entra ID tenant,
#configure the AUTHORITY variable as
#"https://login.microsoftonline.com/TENANT_GUID"
#or "https://login.microsoftonline.com/contoso.onmicrosoft.com".
#Alternatively, use "https://login.microsoftonline.com/common" for multi-tenant app.
MICROSOFT_AUTHORITY=https://{tenantId}.ciamlogin.com/{tenantId}

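AGENTS.md later in this diff notes that settings and env vars are read via Pydantic in `application/core/settings.py`. As a purely illustrative sketch (the class below is hypothetical and is not the actual settings module), a few of the variables above could map onto a `pydantic-settings` model like this:

```python
# Hypothetical sketch: map part of the env template onto pydantic-settings.
# Field names mirror the template above; the class itself is illustrative only.
from typing import Optional

from pydantic_settings import BaseSettings, SettingsConfigDict


class ExampleSettings(BaseSettings):
    model_config = SettingsConfigDict(env_file=".env", extra="ignore")

    API_KEY: Optional[str] = None
    LLM_NAME: str = "docsgpt"
    EMBEDDINGS_BASE_URL: Optional[str] = None
    EMBEDDINGS_KEY: Optional[str] = None
    AZURE_DEPLOYMENT_NAME: Optional[str] = None


settings = ExampleSettings()
print(settings.LLM_NAME)
```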
2
.gitattributes
vendored
Normal file
2
.gitattributes
vendored
Normal file
@@ -0,0 +1,2 @@
|
||||
# Auto detect text files and perform LF normalization
|
||||
* text=auto
|
||||
3
.github/FUNDING.yml
vendored
Normal file
3
.github/FUNDING.yml
vendored
Normal file
@@ -0,0 +1,3 @@
|
||||
# These are supported funding model platforms
|
||||
|
||||
github: arc53
|
||||
23
.github/dependabot.yml
vendored
Normal file
23
.github/dependabot.yml
vendored
Normal file
@@ -0,0 +1,23 @@
|
||||
# To get started with Dependabot version updates, you'll need to specify which
|
||||
# package ecosystems to update and where the package manifests are located.
|
||||
# Please see the documentation for all configuration options:
|
||||
# https://docs.github.com/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file
|
||||
|
||||
version: 2
|
||||
updates:
|
||||
- package-ecosystem: "pip" # See documentation for possible values
|
||||
directory: "/application" # Location of package manifests
|
||||
schedule:
|
||||
interval: "daily"
|
||||
- package-ecosystem: "npm" # See documentation for possible values
|
||||
directory: "/frontend" # Location of package manifests
|
||||
schedule:
|
||||
interval: "daily"
|
||||
- package-ecosystem: "npm"
|
||||
directory: "/extensions/react-widget"
|
||||
schedule:
|
||||
interval: "daily"
|
||||
- package-ecosystem: "github-actions"
|
||||
directory: "/"
|
||||
schedule:
|
||||
interval: "daily"
|
||||
14
.github/holopin.yml
vendored
14
.github/holopin.yml
vendored
@@ -1,5 +1,11 @@
|
||||
organization: arc53
|
||||
defaultSticker: clqmdf0ed34290glbvqh0kzxd
|
||||
organization: docsgpt
|
||||
defaultSticker: cm1ulwkkl180570cl82rtzympu
|
||||
stickers:
|
||||
- id: clqmdf0ed34290glbvqh0kzxd
|
||||
alias: festive
|
||||
- id: cm1ulwkkl180570cl82rtzympu
|
||||
alias: contributor2024
|
||||
- id: cm1ureg8o130450cl8c1po6mil
|
||||
alias: api
|
||||
- id: cm1urhmag148240cl8yvqxkthx
|
||||
alias: lpc
|
||||
- id: cm1urlcpq622090cl2tvu4w71y
|
||||
alias: lexeu
|
||||
|
||||
24
.github/labeler.yml
vendored
24
.github/labeler.yml
vendored
@@ -1,23 +1,31 @@
|
||||
repo:
|
||||
- '*'
|
||||
- changed-files:
|
||||
- any-glob-to-any-file: '*'
|
||||
|
||||
github:
|
||||
- .github/**/*
|
||||
- changed-files:
|
||||
- any-glob-to-any-file: '.github/**/*'
|
||||
|
||||
application:
|
||||
- application/**/*
|
||||
- changed-files:
|
||||
- any-glob-to-any-file: 'application/**/*'
|
||||
|
||||
docs:
|
||||
- docs/**/*
|
||||
- changed-files:
|
||||
- any-glob-to-any-file: 'docs/**/*'
|
||||
|
||||
extensions:
|
||||
- extensions/**/*
|
||||
- changed-files:
|
||||
- any-glob-to-any-file: 'extensions/**/*'
|
||||
|
||||
frontend:
|
||||
- frontend/**/*
|
||||
- changed-files:
|
||||
- any-glob-to-any-file: 'frontend/**/*'
|
||||
|
||||
scripts:
|
||||
- scripts/**/*
|
||||
- changed-files:
|
||||
- any-glob-to-any-file: 'scripts/**/*'
|
||||
|
||||
tests:
|
||||
- tests/**/*
|
||||
- changed-files:
|
||||
- any-glob-to-any-file: 'tests/**/*'
|
||||
|
||||
11
.github/styles/DocsGPT/Spelling.yml
vendored
Normal file
11
.github/styles/DocsGPT/Spelling.yml
vendored
Normal file
@@ -0,0 +1,11 @@
|
||||
extends: spelling
|
||||
level: warning
|
||||
message: "Did you really mean '%s'?"
|
||||
ignore:
|
||||
- "**/node_modules/**"
|
||||
- "**/dist/**"
|
||||
- "**/build/**"
|
||||
- "**/coverage/**"
|
||||
- "**/public/**"
|
||||
- "**/static/**"
|
||||
vocab: DocsGPT
|
||||
46
.github/styles/config/vocabularies/DocsGPT/accept.txt
vendored
Normal file
46
.github/styles/config/vocabularies/DocsGPT/accept.txt
vendored
Normal file
@@ -0,0 +1,46 @@
|
||||
Ollama
|
||||
Qdrant
|
||||
Milvus
|
||||
Chatwoot
|
||||
Nextra
|
||||
VSCode
|
||||
npm
|
||||
LLMs
|
||||
APIs
|
||||
Groq
|
||||
SGLang
|
||||
LMDeploy
|
||||
OAuth
|
||||
Vite
|
||||
LLM
|
||||
JSONPath
|
||||
UIs
|
||||
configs
|
||||
uncomment
|
||||
qdrant
|
||||
vectorstore
|
||||
docsgpt
|
||||
llm
|
||||
GPUs
|
||||
kubectl
|
||||
Lightsail
|
||||
enqueues
|
||||
chatbot
|
||||
VSCode's
|
||||
Shareability
|
||||
feedbacks
|
||||
automations
|
||||
Premade
|
||||
Signup
|
||||
Repo
|
||||
repo
|
||||
env
|
||||
URl
|
||||
agentic
|
||||
llama_cpp
|
||||
parsable
|
||||
SDKs
|
||||
boolean
|
||||
bool
|
||||
hardcode
|
||||
EOL
|
||||
40
.github/workflows/bandit.yaml
vendored
Normal file
40
.github/workflows/bandit.yaml
vendored
Normal file
@@ -0,0 +1,40 @@
|
||||
name: Bandit Security Scan
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
pull_request:
|
||||
types: [opened, synchronize, reopened]
|
||||
|
||||
jobs:
|
||||
bandit_scan:
|
||||
if: ${{ github.repository == 'arc53/DocsGPT' }}
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
security-events: write
|
||||
actions: read
|
||||
contents: read
|
||||
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Set up Python
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: '3.12'
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
python -m pip install --upgrade pip
|
||||
pip install bandit # Bandit is needed for this action
|
||||
if [ -f application/requirements.txt ]; then pip install -r application/requirements.txt; fi
|
||||
|
||||
- name: Run Bandit scan
|
||||
uses: PyCQA/bandit-action@v1
|
||||
with:
|
||||
severity: medium
|
||||
confidence: medium
|
||||
targets: application/
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
101
.github/workflows/ci.yml
vendored
101
.github/workflows/ci.yml
vendored
@@ -1,49 +1,112 @@
|
||||
name: Build and push DocsGPT Docker image
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
release:
|
||||
types: [published]
|
||||
|
||||
jobs:
|
||||
deploy:
|
||||
build:
|
||||
if: github.repository == 'arc53/DocsGPT'
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
matrix:
|
||||
include:
|
||||
- platform: linux/amd64
|
||||
runner: ubuntu-latest
|
||||
suffix: amd64
|
||||
- platform: linux/arm64
|
||||
runner: ubuntu-24.04-arm
|
||||
suffix: arm64
|
||||
runs-on: ${{ matrix.runner }}
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v1
|
||||
- name: Set up QEMU # Only needed for emulation, not for native arm64 builds
|
||||
if: matrix.platform == 'linux/arm64'
|
||||
uses: docker/setup-qemu-action@v3
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v1
|
||||
uses: docker/setup-buildx-action@v3
|
||||
with:
|
||||
driver: docker-container
|
||||
install: true
|
||||
|
||||
- name: Login to DockerHub
|
||||
uses: docker/login-action@v2
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
username: ${{ secrets.DOCKER_USERNAME }}
|
||||
password: ${{ secrets.DOCKER_PASSWORD }}
|
||||
|
||||
- name: Login to ghcr.io
|
||||
uses: docker/login-action@v2
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.repository_owner }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
# Runs a single command using the runners shell
|
||||
- name: Build and push Docker images to docker.io and ghcr.io
|
||||
uses: docker/build-push-action@v4
|
||||
- name: Build and push platform-specific images
|
||||
uses: docker/build-push-action@v6
|
||||
with:
|
||||
file: './application/Dockerfile'
|
||||
platforms: linux/amd64
|
||||
platforms: ${{ matrix.platform }}
|
||||
context: ./application
|
||||
push: true
|
||||
tags: |
|
||||
${{ secrets.DOCKER_USERNAME }}/docsgpt:latest
|
||||
ghcr.io/${{ github.repository_owner }}/docsgpt:latest
|
||||
${{ secrets.DOCKER_USERNAME }}/docsgpt:${{ github.event.release.tag_name }}-${{ matrix.suffix }}
|
||||
ghcr.io/${{ github.repository_owner }}/docsgpt:${{ github.event.release.tag_name }}-${{ matrix.suffix }}
|
||||
provenance: false
|
||||
sbom: false
|
||||
cache-from: type=registry,ref=${{ secrets.DOCKER_USERNAME }}/docsgpt:latest
|
||||
cache-to: type=inline
|
||||
|
||||
manifest:
|
||||
if: github.repository == 'arc53/DocsGPT'
|
||||
needs: build
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
packages: write
|
||||
steps:
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
with:
|
||||
driver: docker-container
|
||||
install: true
|
||||
|
||||
- name: Login to DockerHub
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
username: ${{ secrets.DOCKER_USERNAME }}
|
||||
password: ${{ secrets.DOCKER_PASSWORD }}
|
||||
|
||||
- name: Login to ghcr.io
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.repository_owner }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Create and push manifest for DockerHub
|
||||
run: |
|
||||
set -e
|
||||
docker manifest create ${{ secrets.DOCKER_USERNAME }}/docsgpt:${{ github.event.release.tag_name }} \
|
||||
--amend ${{ secrets.DOCKER_USERNAME }}/docsgpt:${{ github.event.release.tag_name }}-amd64 \
|
||||
--amend ${{ secrets.DOCKER_USERNAME }}/docsgpt:${{ github.event.release.tag_name }}-arm64
|
||||
docker manifest push ${{ secrets.DOCKER_USERNAME }}/docsgpt:${{ github.event.release.tag_name }}
|
||||
docker manifest create ${{ secrets.DOCKER_USERNAME }}/docsgpt:latest \
|
||||
--amend ${{ secrets.DOCKER_USERNAME }}/docsgpt:${{ github.event.release.tag_name }}-amd64 \
|
||||
--amend ${{ secrets.DOCKER_USERNAME }}/docsgpt:${{ github.event.release.tag_name }}-arm64
|
||||
docker manifest push ${{ secrets.DOCKER_USERNAME }}/docsgpt:latest
|
||||
|
||||
- name: Create and push manifest for ghcr.io
|
||||
run: |
|
||||
set -e
|
||||
docker manifest create ghcr.io/${{ github.repository_owner }}/docsgpt:${{ github.event.release.tag_name }} \
|
||||
--amend ghcr.io/${{ github.repository_owner }}/docsgpt:${{ github.event.release.tag_name }}-amd64 \
|
||||
--amend ghcr.io/${{ github.repository_owner }}/docsgpt:${{ github.event.release.tag_name }}-arm64
|
||||
docker manifest push ghcr.io/${{ github.repository_owner }}/docsgpt:${{ github.event.release.tag_name }}
|
||||
docker manifest create ghcr.io/${{ github.repository_owner }}/docsgpt:latest \
|
||||
--amend ghcr.io/${{ github.repository_owner }}/docsgpt:${{ github.event.release.tag_name }}-amd64 \
|
||||
--amend ghcr.io/${{ github.repository_owner }}/docsgpt:${{ github.event.release.tag_name }}-arm64
|
||||
docker manifest push ghcr.io/${{ github.repository_owner }}/docsgpt:latest
|
||||
102
.github/workflows/cife.yml
vendored
102
.github/workflows/cife.yml
vendored
@@ -1,48 +1,112 @@
|
||||
name: Build and push DocsGPT-FE Docker image
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
release:
|
||||
types: [published]
|
||||
|
||||
jobs:
|
||||
deploy:
|
||||
runs-on: ubuntu-latest
|
||||
build:
|
||||
if: github.repository == 'arc53/DocsGPT'
|
||||
strategy:
|
||||
matrix:
|
||||
include:
|
||||
- platform: linux/amd64
|
||||
runner: ubuntu-latest
|
||||
suffix: amd64
|
||||
- platform: linux/arm64
|
||||
runner: ubuntu-24.04-arm
|
||||
suffix: arm64
|
||||
runs-on: ${{ matrix.runner }}
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v1
|
||||
- name: Set up QEMU # Only needed for emulation, not for native arm64 builds
|
||||
if: matrix.platform == 'linux/arm64'
|
||||
uses: docker/setup-qemu-action@v3
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v1
|
||||
uses: docker/setup-buildx-action@v3
|
||||
with:
|
||||
driver: docker-container
|
||||
install: true
|
||||
|
||||
- name: Login to DockerHub
|
||||
uses: docker/login-action@v2
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
username: ${{ secrets.DOCKER_USERNAME }}
|
||||
password: ${{ secrets.DOCKER_PASSWORD }}
|
||||
|
||||
- name: Login to ghcr.io
|
||||
uses: docker/login-action@v2
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.repository_owner }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
# Runs a single command using the runners shell
|
||||
- name: Build and push Docker images to docker.io and ghcr.io
|
||||
uses: docker/build-push-action@v4
|
||||
- name: Build and push platform-specific images
|
||||
uses: docker/build-push-action@v6
|
||||
with:
|
||||
file: './frontend/Dockerfile'
|
||||
platforms: linux/amd64
|
||||
platforms: ${{ matrix.platform }}
|
||||
context: ./frontend
|
||||
push: true
|
||||
tags: |
|
||||
${{ secrets.DOCKER_USERNAME }}/docsgpt-fe:latest
|
||||
ghcr.io/${{ github.repository_owner }}/docsgpt-fe:latest
|
||||
${{ secrets.DOCKER_USERNAME }}/docsgpt-fe:${{ github.event.release.tag_name }}-${{ matrix.suffix }}
|
||||
ghcr.io/${{ github.repository_owner }}/docsgpt-fe:${{ github.event.release.tag_name }}-${{ matrix.suffix }}
|
||||
provenance: false
|
||||
sbom: false
|
||||
cache-from: type=registry,ref=${{ secrets.DOCKER_USERNAME }}/docsgpt-fe:latest
|
||||
cache-to: type=inline
|
||||
|
||||
manifest:
|
||||
if: github.repository == 'arc53/DocsGPT'
|
||||
needs: build
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
packages: write
|
||||
steps:
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
with:
|
||||
driver: docker-container
|
||||
install: true
|
||||
|
||||
- name: Login to DockerHub
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
username: ${{ secrets.DOCKER_USERNAME }}
|
||||
password: ${{ secrets.DOCKER_PASSWORD }}
|
||||
|
||||
- name: Login to ghcr.io
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.repository_owner }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Create and push manifest for DockerHub
|
||||
run: |
|
||||
set -e
|
||||
docker manifest create ${{ secrets.DOCKER_USERNAME }}/docsgpt-fe:${{ github.event.release.tag_name }} \
|
||||
--amend ${{ secrets.DOCKER_USERNAME }}/docsgpt-fe:${{ github.event.release.tag_name }}-amd64 \
|
||||
--amend ${{ secrets.DOCKER_USERNAME }}/docsgpt-fe:${{ github.event.release.tag_name }}-arm64
|
||||
docker manifest push ${{ secrets.DOCKER_USERNAME }}/docsgpt-fe:${{ github.event.release.tag_name }}
|
||||
docker manifest create ${{ secrets.DOCKER_USERNAME }}/docsgpt-fe:latest \
|
||||
--amend ${{ secrets.DOCKER_USERNAME }}/docsgpt-fe:${{ github.event.release.tag_name }}-amd64 \
|
||||
--amend ${{ secrets.DOCKER_USERNAME }}/docsgpt-fe:${{ github.event.release.tag_name }}-arm64
|
||||
docker manifest push ${{ secrets.DOCKER_USERNAME }}/docsgpt-fe:latest
|
||||
|
||||
- name: Create and push manifest for ghcr.io
|
||||
run: |
|
||||
set -e
|
||||
docker manifest create ghcr.io/${{ github.repository_owner }}/docsgpt-fe:${{ github.event.release.tag_name }} \
|
||||
--amend ghcr.io/${{ github.repository_owner }}/docsgpt-fe:${{ github.event.release.tag_name }}-amd64 \
|
||||
--amend ghcr.io/${{ github.repository_owner }}/docsgpt-fe:${{ github.event.release.tag_name }}-arm64
|
||||
docker manifest push ghcr.io/${{ github.repository_owner }}/docsgpt-fe:${{ github.event.release.tag_name }}
|
||||
docker manifest create ghcr.io/${{ github.repository_owner }}/docsgpt-fe:latest \
|
||||
--amend ghcr.io/${{ github.repository_owner }}/docsgpt-fe:${{ github.event.release.tag_name }}-amd64 \
|
||||
--amend ghcr.io/${{ github.repository_owner }}/docsgpt-fe:${{ github.event.release.tag_name }}-arm64
|
||||
docker manifest push ghcr.io/${{ github.repository_owner }}/docsgpt-fe:latest
|
||||
100
.github/workflows/docker-develop-build.yml
vendored
Normal file
100
.github/workflows/docker-develop-build.yml
vendored
Normal file
@@ -0,0 +1,100 @@
|
||||
name: Build and push multi-arch DocsGPT Docker image
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
|
||||
jobs:
|
||||
build:
|
||||
if: github.repository == 'arc53/DocsGPT'
|
||||
strategy:
|
||||
matrix:
|
||||
include:
|
||||
- platform: linux/amd64
|
||||
runner: ubuntu-latest
|
||||
suffix: amd64
|
||||
- platform: linux/arm64
|
||||
runner: ubuntu-24.04-arm
|
||||
suffix: arm64
|
||||
runs-on: ${{ matrix.runner }}
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
with:
|
||||
driver: docker-container
|
||||
install: true
|
||||
|
||||
- name: Login to DockerHub
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
username: ${{ secrets.DOCKER_USERNAME }}
|
||||
password: ${{ secrets.DOCKER_PASSWORD }}
|
||||
|
||||
- name: Login to ghcr.io
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.repository_owner }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Build and push platform-specific images
|
||||
uses: docker/build-push-action@v6
|
||||
with:
|
||||
file: './application/Dockerfile'
|
||||
platforms: ${{ matrix.platform }}
|
||||
context: ./application
|
||||
push: true
|
||||
tags: |
|
||||
${{ secrets.DOCKER_USERNAME }}/docsgpt:develop-${{ matrix.suffix }}
|
||||
ghcr.io/${{ github.repository_owner }}/docsgpt:develop-${{ matrix.suffix }}
|
||||
provenance: false
|
||||
sbom: false
|
||||
cache-from: type=registry,ref=${{ secrets.DOCKER_USERNAME }}/docsgpt:develop
|
||||
cache-to: type=inline
|
||||
|
||||
manifest:
|
||||
if: github.repository == 'arc53/DocsGPT'
|
||||
needs: build
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
packages: write
|
||||
steps:
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
with:
|
||||
driver: docker-container
|
||||
install: true
|
||||
|
||||
- name: Login to DockerHub
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
username: ${{ secrets.DOCKER_USERNAME }}
|
||||
password: ${{ secrets.DOCKER_PASSWORD }}
|
||||
|
||||
- name: Login to ghcr.io
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.repository_owner }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Create and push manifest for DockerHub
|
||||
run: |
|
||||
docker manifest create ${{ secrets.DOCKER_USERNAME }}/docsgpt:develop \
|
||||
--amend ${{ secrets.DOCKER_USERNAME }}/docsgpt:develop-amd64 \
|
||||
--amend ${{ secrets.DOCKER_USERNAME }}/docsgpt:develop-arm64
|
||||
docker manifest push ${{ secrets.DOCKER_USERNAME }}/docsgpt:develop
|
||||
|
||||
- name: Create and push manifest for ghcr.io
|
||||
run: |
|
||||
docker manifest create ghcr.io/${{ github.repository_owner }}/docsgpt:develop \
|
||||
--amend ghcr.io/${{ github.repository_owner }}/docsgpt:develop-amd64 \
|
||||
--amend ghcr.io/${{ github.repository_owner }}/docsgpt:develop-arm64
|
||||
docker manifest push ghcr.io/${{ github.repository_owner }}/docsgpt:develop
|
||||
104
.github/workflows/docker-develop-fe-build.yml
vendored
Normal file
104
.github/workflows/docker-develop-fe-build.yml
vendored
Normal file
@@ -0,0 +1,104 @@
|
||||
name: Build and push DocsGPT FE Docker image for development
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
|
||||
jobs:
|
||||
build:
|
||||
if: github.repository == 'arc53/DocsGPT'
|
||||
strategy:
|
||||
matrix:
|
||||
include:
|
||||
- platform: linux/amd64
|
||||
runner: ubuntu-latest
|
||||
suffix: amd64
|
||||
- platform: linux/arm64
|
||||
runner: ubuntu-24.04-arm
|
||||
suffix: arm64
|
||||
runs-on: ${{ matrix.runner }}
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: Set up QEMU # Only needed for emulation, not for native arm64 builds
|
||||
if: matrix.platform == 'linux/arm64'
|
||||
uses: docker/setup-qemu-action@v3
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
with:
|
||||
driver: docker-container
|
||||
install: true
|
||||
|
||||
- name: Login to DockerHub
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
username: ${{ secrets.DOCKER_USERNAME }}
|
||||
password: ${{ secrets.DOCKER_PASSWORD }}
|
||||
|
||||
- name: Login to ghcr.io
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.repository_owner }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Build and push platform-specific images
|
||||
uses: docker/build-push-action@v6
|
||||
with:
|
||||
file: './frontend/Dockerfile'
|
||||
platforms: ${{ matrix.platform }}
|
||||
context: ./frontend
|
||||
push: true
|
||||
tags: |
|
||||
${{ secrets.DOCKER_USERNAME }}/docsgpt-fe:develop-${{ matrix.suffix }}
|
||||
ghcr.io/${{ github.repository_owner }}/docsgpt-fe:develop-${{ matrix.suffix }}
|
||||
provenance: false
|
||||
sbom: false
|
||||
cache-from: type=registry,ref=${{ secrets.DOCKER_USERNAME }}/docsgpt-fe:develop
|
||||
cache-to: type=inline
|
||||
|
||||
manifest:
|
||||
if: github.repository == 'arc53/DocsGPT'
|
||||
needs: build
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
packages: write
|
||||
steps:
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
with:
|
||||
driver: docker-container
|
||||
install: true
|
||||
|
||||
- name: Login to DockerHub
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
username: ${{ secrets.DOCKER_USERNAME }}
|
||||
password: ${{ secrets.DOCKER_PASSWORD }}
|
||||
|
||||
- name: Login to ghcr.io
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.repository_owner }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Create and push manifest for DockerHub
|
||||
run: |
|
||||
docker manifest create ${{ secrets.DOCKER_USERNAME }}/docsgpt-fe:develop \
|
||||
--amend ${{ secrets.DOCKER_USERNAME }}/docsgpt-fe:develop-amd64 \
|
||||
--amend ${{ secrets.DOCKER_USERNAME }}/docsgpt-fe:develop-arm64
|
||||
docker manifest push ${{ secrets.DOCKER_USERNAME }}/docsgpt-fe:develop
|
||||
|
||||
- name: Create and push manifest for ghcr.io
|
||||
run: |
|
||||
docker manifest create ghcr.io/${{ github.repository_owner }}/docsgpt-fe:develop \
|
||||
--amend ghcr.io/${{ github.repository_owner }}/docsgpt-fe:develop-amd64 \
|
||||
--amend ghcr.io/${{ github.repository_owner }}/docsgpt-fe:develop-arm64
|
||||
docker manifest push ghcr.io/${{ github.repository_owner }}/docsgpt-fe:develop
|
||||
3
.github/workflows/labeler.yml
vendored
3
.github/workflows/labeler.yml
vendored
@@ -4,12 +4,13 @@ on:
|
||||
- pull_request_target
|
||||
jobs:
|
||||
triage:
|
||||
if: github.repository == 'arc53/DocsGPT'
|
||||
permissions:
|
||||
contents: read
|
||||
pull-requests: write
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/labeler@v4
|
||||
- uses: actions/labeler@v5
|
||||
with:
|
||||
repo-token: "${{ secrets.GITHUB_TOKEN }}"
|
||||
sync-labels: true
|
||||
|
||||
5
.github/workflows/lint.yml
vendored
5
.github/workflows/lint.yml
vendored
@@ -7,11 +7,14 @@ on:
|
||||
pull_request:
|
||||
types: [ opened, synchronize ]
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
ruff:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: Lint with Ruff
|
||||
uses: chartboost/ruff-action@v1
|
||||
|
||||
114
.github/workflows/npm-publish.yml
vendored
Normal file
114
.github/workflows/npm-publish.yml
vendored
Normal file
@@ -0,0 +1,114 @@
|
||||
name: Publish npm libraries
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
version:
|
||||
description: >
|
||||
Version bump type (patch | minor | major) or explicit semver (e.g. 1.2.3).
|
||||
Applies to both docsgpt and docsgpt-react.
|
||||
required: true
|
||||
default: patch
|
||||
|
||||
permissions:
|
||||
contents: write
|
||||
pull-requests: write
|
||||
|
||||
jobs:
|
||||
publish:
|
||||
runs-on: ubuntu-latest
|
||||
environment: npm-release
|
||||
defaults:
|
||||
run:
|
||||
working-directory: extensions/react-widget
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- uses: actions/setup-node@v4
|
||||
with:
|
||||
node-version: 20
|
||||
registry-url: https://registry.npmjs.org
|
||||
|
||||
- name: Install dependencies
|
||||
run: npm ci
|
||||
|
||||
# ── docsgpt (HTML embedding bundle) ──────────────────────────────────
|
||||
# Uses the `build` script (parcel build src/browser.tsx) and keeps
|
||||
# the `targets` field so Parcel produces browser-optimised bundles.
|
||||
|
||||
- name: Set package name → docsgpt
|
||||
run: jq --arg n "docsgpt" '.name=$n' package.json > _tmp.json && mv _tmp.json package.json
|
||||
|
||||
- name: Bump version (docsgpt)
|
||||
id: version_docsgpt
|
||||
run: |
|
||||
VERSION="${{ github.event.inputs.version }}"
|
||||
NEW_VER=$(npm version "${VERSION:-patch}" --no-git-tag-version)
|
||||
echo "version=${NEW_VER#v}" >> "$GITHUB_OUTPUT"
|
||||
|
||||
- name: Build docsgpt
|
||||
run: npm run build
|
||||
|
||||
- name: Publish docsgpt
|
||||
run: npm publish --verbose
|
||||
env:
|
||||
NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }}
|
||||
|
||||
# ── docsgpt-react (React library bundle) ─────────────────────────────
|
||||
# Uses `build:react` script (parcel build src/index.ts) and strips
|
||||
# the `targets` field so Parcel treats the output as a plain library
|
||||
# without browser-specific target resolution, producing a smaller bundle.
|
||||
|
||||
- name: Reset package.json from source control
|
||||
run: git checkout -- package.json
|
||||
|
||||
- name: Set package name → docsgpt-react
|
||||
run: jq --arg n "docsgpt-react" '.name=$n' package.json > _tmp.json && mv _tmp.json package.json
|
||||
|
||||
- name: Remove targets field (react library build)
|
||||
run: jq 'del(.targets)' package.json > _tmp.json && mv _tmp.json package.json
|
||||
|
||||
- name: Bump version (docsgpt-react) to match docsgpt
|
||||
run: npm version "${{ steps.version_docsgpt.outputs.version }}" --no-git-tag-version
|
||||
|
||||
- name: Clean dist before react build
|
||||
run: rm -rf dist
|
||||
|
||||
- name: Build docsgpt-react
|
||||
run: npm run build:react
|
||||
|
||||
- name: Publish docsgpt-react
|
||||
run: npm publish --verbose
|
||||
env:
|
||||
NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }}
|
||||
|
||||
# ── Commit the bumped version back to the repository ─────────────────
|
||||
|
||||
- name: Reset package.json and write final version
|
||||
run: |
|
||||
git checkout -- package.json
|
||||
jq --arg v "${{ steps.version_docsgpt.outputs.version }}" '.version=$v' \
|
||||
package.json > _tmp.json && mv _tmp.json package.json
|
||||
npm install --package-lock-only
|
||||
|
||||
- name: Commit version bump and create PR
|
||||
run: |
|
||||
git config user.name "github-actions[bot]"
|
||||
git config user.email "github-actions[bot]@users.noreply.github.com"
|
||||
BRANCH="chore/bump-npm-v${{ steps.version_docsgpt.outputs.version }}"
|
||||
git checkout -b "$BRANCH"
|
||||
git add package.json package-lock.json
|
||||
git commit -m "chore: bump npm libraries to v${{ steps.version_docsgpt.outputs.version }}"
|
||||
git push origin "$BRANCH"
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Create PR
|
||||
run: |
|
||||
gh pr create \
|
||||
--title "chore: bump npm libraries to v${{ steps.version_docsgpt.outputs.version }}" \
|
||||
--body "Automated version bump after npm publish." \
|
||||
--base main
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
20
.github/workflows/pytest.yml
vendored
20
.github/workflows/pytest.yml
vendored
@@ -1,30 +1,34 @@
|
||||
name: Run python tests with pytest
|
||||
on: [push, pull_request]
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
pytest_and_coverage:
|
||||
name: Run tests and count coverage
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
matrix:
|
||||
python-version: ["3.9", "3.10", "3.11"]
|
||||
python-version: ["3.12"]
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/checkout@v4
|
||||
- name: Set up Python ${{ matrix.python-version }}
|
||||
uses: actions/setup-python@v4
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: ${{ matrix.python-version }}
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
python -m pip install --upgrade pip
|
||||
pip install pytest pytest-cov
|
||||
cd application
|
||||
if [ -f requirements.txt ]; then pip install -r requirements.txt; fi
|
||||
cd ../tests
|
||||
if [ -f requirements.txt ]; then pip install -r requirements.txt; fi
|
||||
- name: Test with pytest and generate coverage report
|
||||
run: |
|
||||
python -m pytest --cov=application --cov=scripts --cov=extensions --cov-report=xml
|
||||
python -m pytest --cov=application --cov-report=xml --cov-report=term-missing
|
||||
- name: Upload coverage reports to Codecov
|
||||
if: github.event_name == 'pull_request' && matrix.python-version == '3.11'
|
||||
uses: codecov/codecov-action@v3
|
||||
if: github.event_name == 'pull_request' && matrix.python-version == '3.12'
|
||||
uses: codecov/codecov-action@v5
|
||||
env:
|
||||
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
|
||||
|
||||
|
||||
34
.github/workflows/react-widget-build.yml
vendored
Normal file
34
.github/workflows/react-widget-build.yml
vendored
Normal file
@@ -0,0 +1,34 @@
|
||||
name: React Widget Build
|
||||
|
||||
on:
|
||||
push:
|
||||
paths:
|
||||
- 'extensions/react-widget/**'
|
||||
pull_request:
|
||||
paths:
|
||||
- 'extensions/react-widget/**'
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
build:
|
||||
runs-on: ubuntu-latest
|
||||
defaults:
|
||||
run:
|
||||
working-directory: extensions/react-widget
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- uses: actions/setup-node@v4
|
||||
with:
|
||||
node-version: 20
|
||||
cache: npm
|
||||
cache-dependency-path: extensions/react-widget/package-lock.json
|
||||
|
||||
- name: Install dependencies
|
||||
run: npm ci
|
||||
|
||||
- name: Build
|
||||
run: npm run build
|
||||
2
.github/workflows/sync_fork.yaml
vendored
2
.github/workflows/sync_fork.yaml
vendored
@@ -17,7 +17,7 @@ jobs:
|
||||
steps:
|
||||
# Step 1: run a standard checkout action
|
||||
- name: Checkout target repo
|
||||
uses: actions/checkout@v3
|
||||
uses: actions/checkout@v4
|
||||
|
||||
# Step 2: run the sync action
|
||||
- name: Sync upstream changes
|
||||
|
||||
30
.github/workflows/vale.yml
vendored
Normal file
30
.github/workflows/vale.yml
vendored
Normal file
@@ -0,0 +1,30 @@
|
||||
name: Vale Documentation Linter
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
paths:
|
||||
- 'docs/**/*.md'
|
||||
- 'docs/**/*.mdx'
|
||||
- '**/*.md'
|
||||
- '.vale.ini'
|
||||
- '.github/styles/**'
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
pull-requests: write
|
||||
|
||||
jobs:
|
||||
vale:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Vale linter
|
||||
uses: errata-ai/vale-action@v2
|
||||
with:
|
||||
files: docs
|
||||
fail_on_error: false
|
||||
version: 3.0.5
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
12
.gitignore
vendored
12
.gitignore
vendored
@@ -2,7 +2,10 @@
|
||||
__pycache__/
|
||||
*.py[cod]
|
||||
*$py.class
|
||||
results.txt
|
||||
experiments/
|
||||
|
||||
experiments
|
||||
# C extensions
|
||||
*.so
|
||||
*.next
|
||||
@@ -69,12 +72,14 @@ instance/
|
||||
|
||||
# Sphinx documentation
|
||||
docs/_build/
|
||||
docs/public/_pagefind/
|
||||
|
||||
# PyBuilder
|
||||
target/
|
||||
|
||||
# Jupyter Notebook
|
||||
.ipynb_checkpoints
|
||||
**/*.ipynb
|
||||
|
||||
# IPython
|
||||
profile_default/
|
||||
@@ -112,6 +117,7 @@ venv.bak/
|
||||
# Spyder project settings
|
||||
.spyderproject
|
||||
.spyproject
|
||||
.jwt_secret_key
|
||||
|
||||
# Rope project settings
|
||||
.ropeproject
|
||||
@@ -143,6 +149,10 @@ frontend/yarn-error.log*
|
||||
frontend/pnpm-debug.log*
|
||||
frontend/lerna-debug.log*
|
||||
|
||||
# Keep frontend utility helpers tracked (overrides global lib/ ignore)
|
||||
!frontend/src/lib/
|
||||
!frontend/src/lib/**
|
||||
|
||||
frontend/node_modules
|
||||
frontend/dist
|
||||
frontend/dist-ssr
|
||||
@@ -171,5 +181,5 @@ application/vectors/
|
||||
|
||||
node_modules/
|
||||
.vscode/settings.json
|
||||
models/
|
||||
/models/
|
||||
model/
|
||||
|
||||
@@ -1,2 +1,6 @@
|
||||
# Allow lines to be as long as 120 characters.
|
||||
line-length = 120
|
||||
line-length = 120
|
||||
|
||||
[lint.per-file-ignores]
|
||||
# Integration tests use sys.path.insert() before imports for standalone execution
|
||||
"tests/integration/*.py" = ["E402"]
|
||||
5
.vale.ini
Normal file
5
.vale.ini
Normal file
@@ -0,0 +1,5 @@
|
||||
MinAlertLevel = warning
|
||||
StylesPath = .github/styles
|
||||
|
||||
[*.{md,mdx}]
|
||||
BasedOnStyles = DocsGPT
|
||||
71
.vscode/launch.json
vendored
Normal file
71
.vscode/launch.json
vendored
Normal file
@@ -0,0 +1,71 @@
|
||||
{
|
||||
"version": "0.2.0",
|
||||
"configurations": [
|
||||
{
|
||||
"name": "Frontend Debug (npm)",
|
||||
"type": "node-terminal",
|
||||
"request": "launch",
|
||||
"command": "npm run dev",
|
||||
"cwd": "${workspaceFolder}/frontend"
|
||||
},
|
||||
{
|
||||
"name": "Flask Debugger",
|
||||
"type": "debugpy",
|
||||
"request": "launch",
|
||||
"module": "flask",
|
||||
"env": {
|
||||
"FLASK_APP": "application/app.py",
|
||||
"PYTHONPATH": "${workspaceFolder}",
|
||||
"FLASK_ENV": "development",
|
||||
"FLASK_DEBUG": "1",
|
||||
"FLASK_RUN_PORT": "7091",
|
||||
"FLASK_RUN_HOST": "0.0.0.0"
|
||||
|
||||
},
|
||||
"args": [
|
||||
"run",
|
||||
"--no-debugger"
|
||||
],
|
||||
"cwd": "${workspaceFolder}",
|
||||
},
|
||||
{
|
||||
"name": "Celery Debugger",
|
||||
"type": "debugpy",
|
||||
"request": "launch",
|
||||
"module": "celery",
|
||||
"env": {
|
||||
"PYTHONPATH": "${workspaceFolder}",
|
||||
},
|
||||
"args": [
|
||||
"-A",
|
||||
"application.app.celery",
|
||||
"worker",
|
||||
"-l",
|
||||
"INFO",
|
||||
"--pool=solo"
|
||||
],
|
||||
"cwd": "${workspaceFolder}"
|
||||
},
|
||||
{
|
||||
"name": "Dev Containers (Mongo + Redis)",
|
||||
"type": "node-terminal",
|
||||
"request": "launch",
|
||||
"command": "docker compose -f deployment/docker-compose-dev.yaml up --build",
|
||||
"cwd": "${workspaceFolder}"
|
||||
}
|
||||
],
|
||||
"compounds": [
|
||||
{
|
||||
"name": "DocsGPT: Full Stack",
|
||||
"configurations": [
|
||||
"Frontend Debug (npm)",
|
||||
"Flask Debugger",
|
||||
"Celery Debugger"
|
||||
],
|
||||
"presentation": {
|
||||
"group": "DocsGPT",
|
||||
"order": 1
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
134  AGENTS.md  Normal file
@@ -0,0 +1,134 @@
# AGENTS.md

- Read `CONTRIBUTING.md` before making non-trivial changes.
- For day-to-day development and feature work, follow the development-environment workflow rather than defaulting to `setup.sh` / `setup.ps1`.
- Avoid using the setup scripts during normal feature work unless the user explicitly asks for them. Users usually configure `.env` themselves.
- Try to follow red/green TDD.

### Check existing dev prerequisites first

For feature work, do **not** assume the environment needs to be recreated.

- Check whether the user already has a Python virtual environment such as `venv/` or `.venv/`.
- Check whether MongoDB is already running.
- Check whether Redis is already running.
- Reuse what is already working. Do not stop or recreate MongoDB, Redis, or the Python environment unless the task is environment setup or troubleshooting.

## Normal local development commands

Use these commands once the dev prerequisites above are satisfied.

### Backend

```bash
source .venv/bin/activate  # macOS/Linux
uv pip install -r application/requirements.txt  # or: pip install -r application/requirements.txt
```

Run the Flask API (if needed):

```bash
flask --app application/app.py run --host=0.0.0.0 --port=7091
```

Run the Celery worker in a separate terminal (if needed):

```bash
celery -A application.app.celery worker -l INFO
```

On macOS, prefer the solo pool for Celery:

```bash
python -m celery -A application.app.celery worker -l INFO --pool=solo
```

### Frontend

Install dependencies only when needed, then run the dev server:

```bash
cd frontend
npm install --include=dev
npm run dev
```

### Docs site

```bash
cd docs
npm install
```

### Python / backend changes validation

```bash
ruff check .
python -m pytest
```

### Frontend changes

```bash
cd frontend && npm run lint
cd frontend && npm run build
```

### Documentation changes

```bash
cd docs && npm run build
```

If Vale is installed locally and you edited prose, also run:

```bash
vale .
```

## Repository map

- `application/`: Flask backend, API routes, agent logic, retrieval, parsing, security, storage, Celery worker, and WSGI entrypoints.
- `tests/`: backend unit/integration tests and test-only Python dependencies.
- `frontend/`: Vite + React + TypeScript application.
- `frontend/src/`: main UI code, including `components`, `conversation`, `hooks`, `locale`, `settings`, `upload`, and Redux store wiring in `store.ts`.
- `docs/`: separate documentation site built with Next.js/Nextra.
- `extensions/`: integrations and widgets such as Chatwoot, Chrome, Discord, React widget, Slack bot, and web widget.
- `deployment/`: Docker Compose variants and Kubernetes manifests.

## Coding rules

### Backend

- Follow PEP 8 and keep Python line length at or under 120 characters.
- Use type hints for function arguments and return values.
- Add Google-style docstrings to new or substantially changed functions and classes.
- Add or update tests under `tests/` for backend behavior changes.
- Keep changes narrow in `api`, `auth`, `security`, `parser`, `retriever`, and `storage` areas.

### Backend Abstractions

- LLM providers implement a common interface in `application/llm/` (add new providers by extending the base class; see the sketch after this list).
- Vector stores are abstracted in `application/vectorstore/`.
- Parsers live in `application/parser/` and handle different document formats in the ingestion stage.
- Agents and tools are in `application/agents/` and `application/agents/tools/`.
- Celery setup/config lives in `application/celery_init.py` and `application/celeryconfig.py`.
- Settings and env vars are managed via Pydantic in `application/core/settings.py`.

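A minimal sketch of what extending the base class could look like. The `BaseLLM` name and the `gen`/`gen_stream` signatures are assumptions for illustration only; mirror the actual interface in `application/llm/` when adding a real provider.

```python
# Hypothetical sketch of a new LLM provider. BaseLLM and the gen/gen_stream
# signatures are assumptions; copy the real base class from application/llm/.
from typing import Iterator


class BaseLLM:  # stand-in for the real base class in application/llm/
    def gen(self, model: str, messages: list[dict], **kwargs) -> str:
        raise NotImplementedError

    def gen_stream(self, model: str, messages: list[dict], **kwargs) -> Iterator[str]:
        raise NotImplementedError


class EchoLLM(BaseLLM):
    """Toy provider that echoes the last user message; a shape example only."""

    def gen(self, model: str, messages: list[dict], **kwargs) -> str:
        return messages[-1]["content"] if messages else ""

    def gen_stream(self, model: str, messages: list[dict], **kwargs) -> Iterator[str]:
        yield self.gen(model, messages, **kwargs)
```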
### Frontend

- Follow the existing ESLint + Prettier setup.
- Prefer small, reusable functional components and hooks.
- If shared state must be added, use Redux rather than introducing a new global state library.
- Avoid broad UI refactors unless the task explicitly asks for them.
- Do not re-create components that already exist in the app.

## PR readiness

Before opening a PR:

- run the relevant validation commands above
- confirm backend changes still work end-to-end after ingesting sample data when applicable
- clearly summarize user-visible behavior changes
- mention any config, dependency, or deployment implications
- ask the user to attach a screenshot or a video demonstrating the change
Binary file not shown (before: 88 KiB).
Binary file not shown (before: 21 KiB).
@@ -6,7 +6,7 @@ Thank you for choosing to contribute to DocsGPT! We are all very grateful!
|
||||
|
||||
📣 **Discussions** - Engage in conversations, start new topics, or help answer questions.
|
||||
|
||||
🐞 **Issues** - This is where we keep track of tasks. It could be bugs,fixes or suggestions for new features.
|
||||
🐞 **Issues** - This is where we keep track of tasks. It could be bugs, fixes or suggestions for new features.
|
||||
|
||||
🛠️ **Pull requests** - Suggest changes to our repository, either by working on existing issues or adding new features.
|
||||
|
||||
@@ -21,11 +21,18 @@ Thank you for choosing to contribute to DocsGPT! We are all very grateful!
|
||||
- If you're interested in contributing code, here are some important things to know:
|
||||
|
||||
- We have a frontend built on React (Vite) and a backend in Python.
|
||||
=======
|
||||
Before creating issues, please check out how the latest version of our app looks and works by launching it via [Quickstart](https://github.com/arc53/DocsGPT#quickstart) the version on our live demo is slightly modified with login. Your issues should relate to the version that you can launch via [Quickstart](https://github.com/arc53/DocsGPT#quickstart).
|
||||
|
||||
> **Required for every PR:** Please attach screenshots or a short screen
|
||||
> recording that shows the working version of your changes. This makes the
|
||||
> requirement visible to reviewers and helps them quickly verify what you are
|
||||
> submitting.
|
||||
|
||||
|
||||
Before creating issues, please check out how the latest version of our app looks and works by launching it via [Quickstart](https://github.com/arc53/DocsGPT#quickstart) the version on our live demo is slightly modified with login. Your issues should relate to the version you can launch via [Quickstart](https://github.com/arc53/DocsGPT#quickstart).
|
||||
|
||||
### 👨💻 If you're interested in contributing code, here are some important things to know:
|
||||
|
||||
For instructions on setting up a development environment, please refer to our [Development Deployment Guide](https://docs.docsgpt.cloud/Deploying/Development-Environment).
|
||||
|
||||
Tech Stack Overview:
|
||||
|
||||
@@ -33,19 +40,40 @@ Tech Stack Overview:
|
||||
|
||||
- 🖥 Backend: Developed in Python 🐍
|
||||
|
||||
### 🌐 If you are looking to contribute to frontend (⚛️React, Vite):
|
||||
### 🌐 Frontend Contributions (⚛️ React, Vite)
|
||||
|
||||
- The current frontend is being migrated from [`/application`](https://github.com/arc53/DocsGPT/tree/main/application) to [`/frontend`](https://github.com/arc53/DocsGPT/tree/main/frontend) with a new design, so please contribute to the new one.
|
||||
- Check out this [milestone](https://github.com/arc53/DocsGPT/milestone/1) and its issues.
|
||||
- The updated Figma design can be found [here](https://www.figma.com/file/OXLtrl1EAy885to6S69554/DocsGPT?node-id=0%3A1&t=hjWVuxRg9yi5YkJ9-1).
|
||||
* The updated Figma design can be found [here](https://www.figma.com/file/OXLtrl1EAy885to6S69554/DocsGPT?node-id=0%3A1&t=hjWVuxRg9yi5YkJ9-1). Please try to follow the guidelines.
|
||||
* **Coding Style:** We follow a strict coding style enforced by ESLint and Prettier. Please ensure your code adheres to the configuration provided in our repository's `fronetend/.eslintrc.js` file. We recommend configuring your editor with ESLint and Prettier to help with this.
|
||||
* **Component Structure:** Strive for small, reusable components. Favor functional components and hooks over class components where possible.
|
||||
* **State Management** If you need to add stores, please use Redux.
|
||||
|
||||
Please try to follow the guidelines.
|
||||
### 🖥 Backend Contributions (🐍 Python)
|
||||
|
||||
### 🖥 If you are looking to contribute to Backend (🐍 Python):
|
||||
|
||||
- Review our issues and contribute to [`/application`](https://github.com/arc53/DocsGPT/tree/main/application) or [`/scripts`](https://github.com/arc53/DocsGPT/tree/main/scripts) (please disregard old [`ingest_rst.py`](https://github.com/arc53/DocsGPT/blob/main/scripts/old/ingest_rst.py) [`ingest_rst_sphinx.py`](https://github.com/arc53/DocsGPT/blob/main/scripts/old/ingest_rst_sphinx.py) files; they will be deprecated soon).
|
||||
- Review our issues and contribute to [`/application`](https://github.com/arc53/DocsGPT/tree/main/application)
|
||||
- All new code should be covered with unit tests ([pytest](https://github.com/pytest-dev/pytest)). Please find tests under [`/tests`](https://github.com/arc53/DocsGPT/tree/main/tests) folder.
|
||||
- Before submitting your Pull Request, ensure it can be queried after ingesting some test data.
|
||||
- **Coding Style:** We adhere to the [PEP 8](https://www.python.org/dev/peps/pep-0008/) style guide for Python code. We use `ruff` as our linter and code formatter. Please ensure your code is formatted correctly and passes `ruff` checks before submitting.
|
||||
- **Type Hinting:** Please use type hints for all function arguments and return values. This improves code readability and helps catch errors early. Example:
|
||||
|
||||
```python
|
||||
def my_function(name: str, count: int) -> list[str]:
|
||||
...
|
||||
```
|
||||
- **Docstrings:** All functions and classes should have docstrings explaining their purpose, parameters, and return values. We prefer the [Google style docstrings](https://sphinxcontrib-napoleon.readthedocs.io/en/latest/example_google.html). Example:
|
||||
|
||||
```python
|
||||
def my_function(name: str, count: int) -> list[str]:
|
||||
"""Does something with a name and a count.
|
||||
|
||||
Args:
|
||||
name: The name to use.
|
||||
count: The number of times to do it.
|
||||
|
||||
Returns:
|
||||
A list of strings.
|
||||
"""
|
||||
...
|
||||
```
|
||||
|
||||
### Testing
|
||||
|
||||
@@ -102,7 +130,7 @@ Here's a step-by-step guide on how to contribute to DocsGPT:
|
||||
```
|
||||
|
||||
9. **Submit a Pull Request (PR):**
|
||||
- Create a Pull Request from your branch to the main repository. Make sure to include a detailed description of your changes and reference any related issues.
|
||||
- Create a Pull Request from your branch to the main repository. Make sure to include a detailed description of your changes, reference any related issues, and attach screenshots or a screen recording showing the working version.
|
||||
|
||||
10. **Collaborate:**
|
||||
- Be responsive to comments and feedback on your PR.
|
||||
@@ -124,5 +152,5 @@ Here's a step-by-step guide on how to contribute to DocsGPT:
|
||||
Thank you for considering contributing to DocsGPT! 🙏
|
||||
|
||||
## Questions/collaboration
|
||||
Feel free to join our [Discord](https://discord.gg/n5BX8dh8rU). We're very friendly and welcoming to new contributors, so don't hesitate to reach out.
|
||||
# Thank you so much for considering contributing to DocsGPT! 🙏
|
||||
Feel free to join our [Discord](https://discord.gg/vN7YFfdMpj). We're very friendly and welcoming to new contributors, so don't hesitate to reach out.
|
||||
# Thank you so much for considering contributing to DocsGPT! 🙏
|
||||
|
||||
39
HACKTOBERFEST.md
Normal file
@@ -0,0 +1,39 @@
|
||||
# **🎉 Join the Hacktoberfest with DocsGPT and win a Free T-shirt for a meaningful PR! 🎉**
|
||||
|
||||
Welcome, contributors! We're excited to announce that DocsGPT is participating in Hacktoberfest. Get involved by submitting meaningful pull requests.
|
||||
|
||||
All meaningful contributors with accepted PRs that were created for issues with the `hacktoberfest` label (set by our maintainer team: dartpain, siiddhantt, pabik, ManishMadan2882) will receive a cool T-shirt! 🤩
|
||||
<img width="1331" height="678" alt="hacktoberfest-mocks-preview" src="https://github.com/user-attachments/assets/633f6377-38db-48f5-b519-a8b3855a9eb4" />
|
||||
|
||||
Please fill in [this form](https://forms.gle/Npaba4n9Epfyx56S8) after your PR has been merged.
|
||||
|
||||
If you are in doubt, don't hesitate to ping us on Discord; you can ping me directly - Alex (dartpain).
|
||||
|
||||
## 📜 Here's How to Contribute:
|
||||
```text
|
||||
🛠️ Code: This is the golden ticket! Make meaningful contributions through PRs.
|
||||
|
||||
🧩 API extension: Build an app utilising the DocsGPT API (a minimal Python sketch follows this block). We prefer submissions that showcase original ideas and turn the API into an AI agent.
These can be completely separate repos.
|
||||
For example:
|
||||
https://github.com/arc53/tg-bot-docsgpt-extenstion or
|
||||
https://github.com/arc53/DocsGPT-cli
|
||||
|
||||
Non-Code Contributions:
|
||||
|
||||
📚 Wiki: Improve our documentation, create a guide.
|
||||
|
||||
🖥️ Design: Improve the UI/UX or design a new feature.
|
||||
```
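For the API-extension idea above, here is a minimal Python sketch. It assumes a DocsGPT backend reachable on port 7091; the route and payload fields are placeholders, so check the API documentation for the exact schema:

```python
# Hypothetical API-extension sketch; endpoint path and payload fields are assumptions.
import requests

API_BASE = "http://localhost:7091"  # assumes a locally running DocsGPT backend


def ask(question: str) -> str:
    resp = requests.post(
        f"{API_BASE}/api/answer",     # placeholder route - verify against the API docs
        json={"question": question},  # placeholder payload
        timeout=60,
    )
    resp.raise_for_status()
    return resp.json().get("answer", "")


if __name__ == "__main__":
    print(ask("How do I deploy DocsGPT with Docker?"))
```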
|
||||
|
||||
### 📝 Guidelines for Pull Requests:
|
||||
- Familiarize yourself with the current contributions and our [Roadmap](https://github.com/orgs/arc53/projects/2).
|
||||
- Before contributing, check existing [issues](https://github.com/arc53/DocsGPT/issues) or [create](https://github.com/arc53/DocsGPT/issues/new/choose) an issue and wait to get assigned.
|
||||
- Once you are finished with your contribution, please fill in this [form](https://forms.gle/Npaba4n9Epfyx56S8).
|
||||
- Refer to the [Documentation](https://docs.docsgpt.cloud/).
|
||||
- Feel free to join our [Discord](https://discord.gg/vN7YFfdMpj) server. We're here to help newcomers, so don't hesitate to jump in!
|
||||
|
||||
Thank you very much for considering contributing to DocsGPT during Hacktoberfest! 🙏 Your contributions (not just simple typos) could earn you a stylish new t-shirt.
|
||||
|
||||
We will publish a t-shirt design later in October.
|
||||
229
README.md
@@ -3,13 +3,11 @@
|
||||
</h1>
|
||||
|
||||
<p align="center">
|
||||
<strong>Open-Source Documentation Assistant</strong>
|
||||
<strong>Private AI for agents, assistants and enterprise search</strong>
|
||||
</p>
|
||||
|
||||
<p align="left">
|
||||
<strong><a href="https://docsgpt.arc53.com/">DocsGPT</a></strong> is a cutting-edge open-source solution that streamlines the process of finding information in the project documentation. With its integration of the powerful <strong>GPT</strong> models, developers can easily ask questions about a project and receive accurate answers.
|
||||
|
||||
Say goodbye to time-consuming manual searches, and let <strong><a href="https://docsgpt.arc53.com/">DocsGPT</a></strong> help you quickly find the information you need. Try it out and see how it revolutionizes your project documentation experience. Contribute to its development and be a part of the future of AI-powered assistance.
|
||||
<strong><a href="https://www.docsgpt.cloud/">DocsGPT</a></strong> is an open-source AI platform for building intelligent agents and assistants. Features Agent Builder, deep research tools, document analysis (PDF, Office, web content, and audio), Multi-model support (choose your provider or run locally), and rich API connectivity for agents with actionable tools and integrations. Deploy anywhere with complete privacy control.
|
||||
</p>
|
||||
|
||||
<div align="center">
|
||||
@@ -17,172 +15,123 @@ Say goodbye to time-consuming manual searches, and let <strong><a href="https://
|
||||
<a href="https://github.com/arc53/DocsGPT"></a>
|
||||
<a href="https://github.com/arc53/DocsGPT"></a>
|
||||
<a href="https://github.com/arc53/DocsGPT/blob/main/LICENSE"></a>
|
||||
<a href="https://discord.gg/n5BX8dh8rU"></a>
|
||||
<a href="https://twitter.com/docsgptai"></a>
|
||||
<a href="https://www.bestpractices.dev/projects/9907"><img src="https://www.bestpractices.dev/projects/9907/badge"></a>
|
||||
<a href="https://discord.gg/vN7YFfdMpj"></a>
|
||||
<a href="https://x.com/docsgptai"></a>
|
||||
|
||||
<a href="https://docs.docsgpt.cloud/quickstart">⚡️ Quickstart</a> • <a href="https://app.docsgpt.cloud/">☁️ Cloud Version</a> • <a href="https://discord.gg/vN7YFfdMpj">💬 Discord</a>
|
||||
<br>
|
||||
<a href="https://docs.docsgpt.cloud/">📖 Documentation</a> • <a href="https://github.com/arc53/DocsGPT/blob/main/CONTRIBUTING.md">👫 Contribute</a> • <a href="https://blog.docsgpt.cloud/">🗞 Blog</a>
|
||||
<br>
|
||||
|
||||
|
||||
</div>
|
||||
|
||||
|
||||
<div align="center">
|
||||
<br>
|
||||
<img src="https://d3dg1063dc54p9.cloudfront.net/videos/demo-26.gif" alt="video-example-of-docs-gpt" width="800" height="480">
|
||||
</div>
|
||||
<h3 align="left">
|
||||
<strong>Key Features:</strong>
|
||||
</h3>
|
||||
<ul align="left">
|
||||
<li><strong>🗂️ Wide Format Support:</strong> Reads PDF, DOCX, CSV, XLSX, EPUB, MD, RST, HTML, MDX, JSON, PPTX, images, and audio files such as MP3, WAV, M4A, OGG, and WebM.</li>
|
||||
<li><strong>🎙️ Speech Workflows:</strong> Record voice input into chat, transcribe audio on the backend, and ingest meeting recordings or voice notes as searchable knowledge.</li>
|
||||
<li><strong>🌐 Web & Data Integration:</strong> Ingests from URLs, sitemaps, Reddit, GitHub and web crawlers.</li>
|
||||
<li><strong>✅ Reliable Answers:</strong> Get accurate, hallucination-free responses with source citations viewable in a clean UI.</li>
|
||||
<li><strong>🔑 Streamlined API Keys:</strong> Generate keys linked to your settings, documents, and models, simplifying chatbot and integration setup.</li>
|
||||
<li><strong>🔗 Actionable Tooling:</strong> Connect to APIs, tools, and other services to enable LLM actions.</li>
|
||||
<li><strong>🧩 Pre-built Integrations:</strong> Use readily available HTML/React chat widgets, search tools, Discord/Telegram bots, and more.</li>
|
||||
<li><strong>🔌 Flexible Deployment:</strong> Works with major LLMs (OpenAI, Google, Anthropic) and local models (Ollama, llama_cpp).</li>
|
||||
<li><strong>🏢 Secure & Scalable:</strong> Run privately and securely with Kubernetes support, designed for enterprise-grade reliability.</li>
|
||||
</ul>
|
||||
|
||||
## Roadmap
|
||||
- [x] Add OAuth 2.0 authentication for MCP ( September 2025 )
|
||||
- [x] Deep Agents ( October 2025 )
|
||||
- [x] Prompt Templating ( October 2025 )
|
||||
- [x] Full api tooling ( Dec 2025 )
|
||||
- [ ] Agent scheduling ( Jan 2026 )
|
||||
|
||||
You can find our full roadmap [here](https://github.com/orgs/arc53/projects/2). Please don't hesitate to contribute or create issues, it helps us improve DocsGPT!
|
||||
|
||||
### Production Support / Help for Companies:
|
||||
|
||||
We're eager to provide personalized assistance when deploying your DocsGPT to a live environment.
|
||||
|
||||
- [Book Demo :wave:](https://airtable.com/appdeaL0F1qV8Bl2C/shrrJF1Ll7btCJRbP)
|
||||
- [Send Email :email:](mailto:contact@arc53.com?subject=DocsGPT%20support%2Fsolutions)
|
||||
[Get a Demo :wave:](https://www.docsgpt.cloud/contact)
|
||||
|
||||

|
||||
[Send Email :email:](mailto:support@docsgpt.cloud?subject=DocsGPT%20support%2Fsolutions)
|
||||
|
||||
## Roadmap
|
||||
## Join the Lighthouse Program 🌟
|
||||
|
||||
You can find our roadmap [here](https://github.com/orgs/arc53/projects/2). Please don't hesitate to contribute or create issues, it helps us improve DocsGPT!
|
||||
Calling all developers and GenAI innovators! The **DocsGPT Lighthouse Program** connects technical leaders actively deploying or extending DocsGPT in real-world scenarios. Collaborate directly with our team to shape the roadmap, access priority support, and build enterprise-ready solutions with exclusive community insights.
|
||||
|
||||
## Our Open-Source Models Optimized for DocsGPT:
|
||||
|
||||
| Name                                                                   | Base Model  | Requirements (or similar) |
| ---------------------------------------------------------------------- | ----------- | ------------------------- |
| [Docsgpt-7b-mistral](https://huggingface.co/Arc53/docsgpt-7b-mistral)  | Mistral-7b  | 1x A10G GPU               |
| [Docsgpt-14b](https://huggingface.co/Arc53/docsgpt-14b)                | llama-2-14b | 2x A10 GPUs               |
| [Docsgpt-40b-falcon](https://huggingface.co/Arc53/docsgpt-40b-falcon)  | falcon-40b  | 8x A10G GPUs              |
|
||||
|
||||
If you don't have enough resources to run it, you can use bitsandbytes to quantize.
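A minimal sketch of that, assuming the `transformers`, `accelerate`, and `bitsandbytes` packages are installed (this is not an official recipe):

```python
# Load one of the DocsGPT models with 4-bit quantization so it fits on a smaller GPU.
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

model_id = "Arc53/docsgpt-7b-mistral"
bnb_config = BitsAndBytesConfig(load_in_4bit=True)

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    quantization_config=bnb_config,
    device_map="auto",  # requires `accelerate`
)
```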
|
||||
|
||||
## Features
|
||||
|
||||

|
||||
|
||||
## Useful Links
|
||||
|
||||
- :mag: :fire: [Live preview](https://docsgpt.arc53.com/)
|
||||
|
||||
- :speech_balloon: :tada: [Join our Discord](https://discord.gg/n5BX8dh8rU)
|
||||
|
||||
- :books: :sunglasses: [Guides](https://docs.docsgpt.co.uk/)
|
||||
|
||||
- :couple: [Interested in contributing?](https://github.com/arc53/DocsGPT/blob/main/CONTRIBUTING.md)
|
||||
|
||||
- :file_folder: :rocket: [How to use any other documentation](https://docs.docsgpt.co.uk/Guides/How-to-train-on-other-documentation)
|
||||
|
||||
- :house: :closed_lock_with_key: [How to host it locally (so all data will stay on-premises)](https://docs.docsgpt.co.uk/Guides/How-to-use-different-LLM)
|
||||
|
||||
## Project Structure
|
||||
|
||||
- Application - Flask app (main application).
|
||||
|
||||
- Extensions - Chrome extension.
|
||||
|
||||
- Scripts - A script that creates the similarity search index for other libraries.
|
||||
|
||||
- Frontend - Frontend uses <a href="https://vitejs.dev/">Vite</a> and <a href="https://react.dev/">React</a>.
|
||||
[Learn More & Apply →](https://docs.google.com/forms/d/1KAADiJinUJ8EMQyfTXUIGyFbqINNClNR3jBNWq7DgTE)
|
||||
|
||||
## QuickStart
|
||||
|
||||
> [!Note]
|
||||
> Make sure you have [Docker](https://docs.docker.com/engine/install/) installed
|
||||
|
||||
On Mac OS or Linux, write:
|
||||
A more detailed [Quickstart](https://docs.docsgpt.cloud/quickstart) is available in our documentation.
|
||||
|
||||
`./setup.sh`
|
||||
1. **Clone the repository:**
|
||||
|
||||
It will install all the dependencies and allow you to download the local model, use OpenAI or use our LLM API.
|
||||
|
||||
Otherwise, refer to this Guide:
|
||||
|
||||
1. Download and open this repository with `git clone https://github.com/arc53/DocsGPT.git`
|
||||
2. Create a `.env` file in your root directory and set the environment variables, including `VITE_API_STREAMING` (set it to true or false depending on whether you want streaming answers).
|
||||
It should look like this inside:
|
||||
|
||||
```
LLM_NAME=[docsgpt or openai or others]
VITE_API_STREAMING=true
API_KEY=[if LLM_NAME is openai]
```

```bash
git clone https://github.com/arc53/DocsGPT.git
cd DocsGPT
```
|
||||
|
||||
See optional environment variables in the [/.env-template](https://github.com/arc53/DocsGPT/blob/main/.env-template) and [/application/.env_sample](https://github.com/arc53/DocsGPT/blob/main/application/.env_sample) files.
|
||||
**For macOS and Linux:**
|
||||
|
||||
3. Run [./run-with-docker-compose.sh](https://github.com/arc53/DocsGPT/blob/main/run-with-docker-compose.sh).
|
||||
4. Navigate to http://localhost:5173/.
|
||||
2. **Run the setup script:**
|
||||
|
||||
To stop, just run `Ctrl + C`.
|
||||
```bash
|
||||
./setup.sh
|
||||
```
|
||||
|
||||
## Development Environments
|
||||
**For Windows:**
|
||||
|
||||
### Spin up Mongo and Redis
|
||||
2. **Run the PowerShell setup script:**
|
||||
|
||||
For development, only two containers are used from [docker-compose.yaml](https://github.com/arc53/DocsGPT/blob/main/docker-compose.yaml) (by deleting all services except for Redis and Mongo).
|
||||
See file [docker-compose-dev.yaml](./docker-compose-dev.yaml).
|
||||
```powershell
|
||||
PowerShell -ExecutionPolicy Bypass -File .\setup.ps1
|
||||
```
|
||||
|
||||
Run
|
||||
Either script will guide you through setting up DocsGPT. Five options are available: using the public API, running locally, connecting to a local inference engine, using a cloud API provider, or building the Docker image locally. The scripts will automatically configure your `.env` file and handle the necessary downloads and installations based on your chosen option.
|
||||
|
||||
```
docker compose -f docker-compose-dev.yaml build
docker compose -f docker-compose-dev.yaml up -d
```

**Navigate to http://localhost:5173/**
|
||||
|
||||
To stop DocsGPT, open a terminal in the `DocsGPT` directory and run:
|
||||
|
||||
```bash
|
||||
docker compose -f deployment/docker-compose.yaml down
|
||||
```
|
||||
|
||||
### Run the Backend
|
||||
(or use the specific `docker compose down` command shown after running the setup script).
|
||||
|
||||
> [!Note]
|
||||
> Make sure you have Python 3.10 or 3.11 installed.
|
||||
|
||||
1. Export required environment variables or prepare a `.env` file in the project folder:
|
||||
- Copy [.env_sample](https://github.com/arc53/DocsGPT/blob/main/application/.env_sample) and create `.env`.
|
||||
|
||||
(check out [`application/core/settings.py`](application/core/settings.py) if you want to see more config options.)
|
||||
|
||||
2. (optional) Create a Python virtual environment:
|
||||
You can follow the [Python official documentation](https://docs.python.org/3/tutorial/venv.html) for virtual environments.
|
||||
|
||||
a) On Mac OS and Linux
|
||||
|
||||
```commandline
|
||||
python -m venv venv
|
||||
. venv/bin/activate
|
||||
```
|
||||
|
||||
b) On Windows
|
||||
|
||||
```commandline
|
||||
python -m venv venv
|
||||
venv/Scripts/activate
|
||||
```
|
||||
|
||||
3. Download embedding model and save it in the `model/` folder:
|
||||
You can use the script below, or download it manually from [here](https://d3dg1063dc54p9.cloudfront.net/models/embeddings/mpnet-base-v2.zip), unzip it and save it in the `model/` folder.
|
||||
|
||||
```commandline
|
||||
wget https://d3dg1063dc54p9.cloudfront.net/models/embeddings/mpnet-base-v2.zip
|
||||
unzip mpnet-base-v2.zip -d model
|
||||
rm mpnet-base-v2.zip
|
||||
```
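To sanity-check the download, a quick sketch (assuming the unzipped folder is a standard sentence-transformers checkpoint and the `sentence-transformers` package is installed):

```python
# Load the local embedding model and embed a test sentence.
from sentence_transformers import SentenceTransformer

# Path created by the unzip step above; adjust if the archive unpacks into a subfolder.
model = SentenceTransformer("model/")
vector = model.encode("DocsGPT test sentence")
print(len(vector))  # embedding dimensionality (768 for mpnet-base-v2)
```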
|
||||
|
||||
4. Install dependencies for the backend:
|
||||
|
||||
```commandline
|
||||
pip install -r application/requirements.txt
|
||||
```
|
||||
|
||||
5. Run the app using `flask --app application/app.py run --host=0.0.0.0 --port=7091`.
|
||||
6. Start worker with `celery -A application.app.celery worker -l INFO`.
|
||||
|
||||
### Start Frontend
|
||||
|
||||
> [!Note]
|
||||
> Make sure you have Node version 16 or higher.
|
||||
|
||||
1. Navigate to the [/frontend](https://github.com/arc53/DocsGPT/tree/main/frontend) folder.
|
||||
2. Install the required packages `husky` and `vite` (ignore if already installed).
|
||||
|
||||
```commandline
|
||||
npm install husky -g
|
||||
npm install vite -g
|
||||
```
|
||||
|
||||
3. Install dependencies by running `npm install --include=dev`.
|
||||
4. Run the app using `npm run dev`.
|
||||
> For development environment setup instructions, please refer to the [Development Environment Guide](https://docs.docsgpt.cloud/Deploying/Development-Environment).
|
||||
|
||||
## Contributing
|
||||
|
||||
Please refer to the [CONTRIBUTING.md](CONTRIBUTING.md) file for information about how to get involved. We welcome issues, questions, and pull requests.
|
||||
|
||||
## Architecture
|
||||
|
||||

|
||||
|
||||
## Project Structure
|
||||
|
||||
- Application - Flask app (main application).
|
||||
|
||||
- Extensions - Extensions, like react widget or discord bot.
|
||||
|
||||
- Frontend - Frontend uses <a href="https://vitejs.dev/">Vite</a> and <a href="https://react.dev/">React</a>.
|
||||
|
||||
- Scripts - Miscellaneous scripts.
|
||||
|
||||
## Code Of Conduct
|
||||
|
||||
We as members, contributors, and leaders, pledge to make participation in our community a harassment-free experience for everyone, regardless of age, body size, visible or invisible disability, ethnicity, sex characteristics, gender identity and expression, level of experience, education, socio-economic status, nationality, personal appearance, race, religion, or sexual identity and orientation. Please refer to the [CODE_OF_CONDUCT.md](CODE_OF_CONDUCT.md) file for more information about contributing.
|
||||
@@ -197,4 +146,16 @@ We as members, contributors, and leaders, pledge to make participation in our co
|
||||
|
||||
The source code license is [MIT](https://opensource.org/license/mit/), as described in the [LICENSE](LICENSE) file.
|
||||
|
||||
Built with [:bird: :link: LangChain](https://github.com/hwchase17/langchain)
|
||||
## This project is supported by:
|
||||
|
||||
<p>
|
||||
<a href="https://www.digitalocean.com/?utm_medium=opensource&utm_source=DocsGPT">
|
||||
<img src="https://opensource.nyc3.cdn.digitaloceanspaces.com/attribution/assets/SVG/DO_Logo_horizontal_blue.svg" width="201px">
|
||||
</a>
|
||||
</p>
|
||||
<p>
|
||||
<a href="https://get.neon.com/docsgpt">
|
||||
<img width="201" alt="color" src="https://github.com/user-attachments/assets/7d9813b7-0e6d-403f-b5af-68af066b326f" />
|
||||
</a>
|
||||
|
||||
</p>
|
||||
|
||||
BIN
Readme Logo.png
Binary file not shown.
|
Before Width: | Height: | Size: 23 KiB |
20
SECURITY.md
Normal file
@@ -0,0 +1,20 @@
|
||||
# Security Policy
|
||||
|
||||
## Supported Versions
|
||||
|
||||
Supported Versions:
|
||||
|
||||
Currently, we provide security patches by committing fixes and bumping the version published on GitHub.
|
||||
|
||||
## Reporting a Vulnerability
|
||||
|
||||
Preferred method: use GitHub's private vulnerability reporting flow:
|
||||
https://github.com/arc53/DocsGPT/security
|
||||
|
||||
Then click **Report a vulnerability**.
|
||||
|
||||
|
||||
Alternatively:
|
||||
|
||||
security@arc53.com
|
||||
|
||||
@@ -1,11 +0,0 @@
|
||||
API_KEY=your_api_key
|
||||
EMBEDDINGS_KEY=your_api_key
|
||||
API_URL=http://localhost:7091
|
||||
FLASK_APP=application/app.py
|
||||
FLASK_DEBUG=true
|
||||
|
||||
#For OPENAI on Azure
|
||||
OPENAI_API_BASE=
|
||||
OPENAI_API_VERSION=
|
||||
AZURE_DEPLOYMENT_NAME=
|
||||
AZURE_EMBEDDINGS_DEPLOYMENT_NAME=
|
||||
@@ -1,31 +1,92 @@
|
||||
FROM python:3.11-slim-bullseye as builder
|
||||
# Builder Stage
|
||||
FROM ubuntu:24.04 as builder
|
||||
|
||||
# Tiktoken requires Rust toolchain, so build it in a separate stage
|
||||
RUN apt-get update && apt-get install -y gcc curl
|
||||
RUN apt-get install -y wget unzip
|
||||
RUN wget https://d3dg1063dc54p9.cloudfront.net/models/embeddings/mpnet-base-v2.zip
|
||||
RUN unzip mpnet-base-v2.zip -d model
|
||||
RUN rm mpnet-base-v2.zip
|
||||
RUN curl https://sh.rustup.rs -sSf | sh -s -- -y && apt-get install --reinstall libc6-dev -y
|
||||
ENV PATH="/root/.cargo/bin:${PATH}"
|
||||
RUN pip install --upgrade pip && pip install tiktoken==0.5.2
|
||||
ENV DEBIAN_FRONTEND=noninteractive
|
||||
|
||||
RUN apt-get update && \
|
||||
apt-get install -y software-properties-common && \
|
||||
add-apt-repository ppa:deadsnakes/ppa && \
|
||||
apt-get update && \
|
||||
apt-get install -y --no-install-recommends gcc g++ wget unzip libc6-dev python3.12 python3.12-venv python3.12-dev && \
|
||||
rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# Verify Python installation and setup symlink
|
||||
RUN if [ -f /usr/bin/python3.12 ]; then \
|
||||
ln -s /usr/bin/python3.12 /usr/bin/python; \
|
||||
else \
|
||||
echo "Python 3.12 not found"; exit 1; \
|
||||
fi
|
||||
|
||||
# Download and unzip the model
|
||||
RUN wget https://d3dg1063dc54p9.cloudfront.net/models/embeddings/mpnet-base-v2.zip && \
|
||||
unzip mpnet-base-v2.zip -d models && \
|
||||
rm mpnet-base-v2.zip
|
||||
|
||||
# Install Rust
|
||||
RUN wget -q -O - https://sh.rustup.rs | sh -s -- -y
|
||||
|
||||
# Clean up to reduce container size
|
||||
RUN apt-get remove --purge -y wget unzip && apt-get autoremove -y && rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# Copy requirements.txt
|
||||
COPY requirements.txt .
|
||||
RUN pip install -r requirements.txt
|
||||
|
||||
# Setup Python virtual environment
|
||||
RUN python3.12 -m venv /venv
|
||||
|
||||
# Activate virtual environment and install Python packages
|
||||
ENV PATH="/venv/bin:$PATH"
|
||||
|
||||
FROM python:3.11-slim-bullseye
|
||||
# Install Python packages
|
||||
RUN pip install --no-cache-dir --upgrade pip && \
|
||||
pip install --no-cache-dir tiktoken && \
|
||||
pip install --no-cache-dir -r requirements.txt
|
||||
|
||||
# Copy pre-built packages and binaries from builder stage
|
||||
COPY --from=builder /usr/local/ /usr/local/
|
||||
# Final Stage
|
||||
FROM ubuntu:24.04 as final
|
||||
|
||||
RUN apt-get update && \
|
||||
apt-get install -y software-properties-common && \
|
||||
add-apt-repository ppa:deadsnakes/ppa && \
|
||||
apt-get update && apt-get install -y --no-install-recommends \
|
||||
python3.12 \
|
||||
libgl1 \
|
||||
libglib2.0-0 \
|
||||
poppler-utils \
|
||||
&& \
|
||||
ln -s /usr/bin/python3.12 /usr/bin/python && \
|
||||
rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# Set working directory
|
||||
WORKDIR /app
|
||||
COPY --from=builder /model /app/model
|
||||
|
||||
# Create a non-root user: `appuser` (Feel free to choose a name)
|
||||
RUN groupadd -r appuser && \
|
||||
useradd -r -g appuser -d /app -s /sbin/nologin -c "Docker image user" appuser
|
||||
|
||||
# Copy the virtual environment and model from the builder stage
|
||||
COPY --from=builder /venv /venv
|
||||
|
||||
COPY --from=builder /models /app/models
|
||||
|
||||
# Copy your application code
|
||||
COPY . /app/application
|
||||
ENV FLASK_APP=app.py
|
||||
ENV FLASK_DEBUG=true
|
||||
|
||||
# Change the ownership of the /app directory to the appuser
|
||||
|
||||
RUN mkdir -p /app/application/inputs/local
|
||||
RUN chown -R appuser:appuser /app
|
||||
|
||||
# Set environment variables
|
||||
ENV FLASK_APP=app.py \
|
||||
FLASK_DEBUG=true \
|
||||
PATH="/venv/bin:$PATH"
|
||||
|
||||
# Expose the port the app runs on
|
||||
EXPOSE 7091
|
||||
|
||||
CMD ["gunicorn", "-w", "2", "--timeout", "120", "--bind", "0.0.0.0:7091", "application.wsgi:app"]
|
||||
# Switch to non-root user
|
||||
USER appuser
|
||||
|
||||
# Start Gunicorn
|
||||
CMD ["gunicorn", "-w", "1", "--timeout", "120", "--bind", "0.0.0.0:7091", "--preload", "application.wsgi:app"]
|
||||
|
||||
25
application/agents/agent_creator.py
Normal file
@@ -0,0 +1,25 @@
|
||||
import logging
|
||||
|
||||
from application.agents.agentic_agent import AgenticAgent
|
||||
from application.agents.classic_agent import ClassicAgent
|
||||
from application.agents.research_agent import ResearchAgent
|
||||
from application.agents.workflow_agent import WorkflowAgent
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class AgentCreator:
|
||||
agents = {
|
||||
"classic": ClassicAgent,
|
||||
"react": ClassicAgent, # backwards compat: react falls back to classic
|
||||
"agentic": AgenticAgent,
|
||||
"research": ResearchAgent,
|
||||
"workflow": WorkflowAgent,
|
||||
}
|
||||
|
||||
@classmethod
|
||||
def create_agent(cls, type, *args, **kwargs):
|
||||
agent_class = cls.agents.get(type.lower())
|
||||
if not agent_class:
|
||||
raise ValueError(f"No agent class found for type {type}")
|
||||
return agent_class(*args, **kwargs)
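A usage sketch of this factory follows; the keyword values are placeholders, the accepted arguments come from `BaseAgent.__init__` later in this diff, and calling `gen` directly may additionally depend on the logging context that the `log_activity` decorator manages:

```python
# Hypothetical usage sketch — argument values are placeholders, not working credentials.
agent = AgentCreator.create_agent(
    "classic",                  # or "agentic", "research", "workflow"
    endpoint="",
    llm_name="openai",
    model_id="gpt-4o-mini",     # placeholder model id
    api_key="sk-...",           # placeholder key
    prompt="You are a helpful assistant.",
)

for event in agent.gen("What does DocsGPT do?"):
    # The agent streams dicts such as {"answer": ...}, {"sources": [...]}, {"tool_calls": [...]}.
    print(event)
```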
|
||||
63
application/agents/agentic_agent.py
Normal file
@@ -0,0 +1,63 @@
|
||||
import logging
|
||||
from typing import Dict, Generator, Optional
|
||||
|
||||
from application.agents.base import BaseAgent
|
||||
from application.agents.tools.internal_search import (
|
||||
INTERNAL_TOOL_ID,
|
||||
add_internal_search_tool,
|
||||
)
|
||||
from application.logging import LogContext
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class AgenticAgent(BaseAgent):
|
||||
"""Agent where the LLM controls retrieval via tools.
|
||||
|
||||
Unlike ClassicAgent which pre-fetches docs into the prompt,
|
||||
AgenticAgent gives the LLM an internal_search tool so it can
|
||||
decide when, what, and whether to search.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
retriever_config: Optional[Dict] = None,
|
||||
*args,
|
||||
**kwargs,
|
||||
):
|
||||
super().__init__(*args, **kwargs)
|
||||
self.retriever_config = retriever_config or {}
|
||||
|
||||
def _gen_inner(
|
||||
self, query: str, log_context: LogContext
|
||||
) -> Generator[Dict, None, None]:
|
||||
tools_dict = self.tool_executor.get_tools()
|
||||
add_internal_search_tool(tools_dict, self.retriever_config)
|
||||
self._prepare_tools(tools_dict)
|
||||
|
||||
# 4. Build messages (prompt has NO pre-fetched docs)
|
||||
messages = self._build_messages(self.prompt, query)
|
||||
|
||||
# 5. Call LLM — the handler manages the tool loop
|
||||
llm_response = self._llm_gen(messages, log_context)
|
||||
|
||||
yield from self._handle_response(
|
||||
llm_response, tools_dict, messages, log_context
|
||||
)
|
||||
|
||||
# 6. Collect sources from internal search tool results
|
||||
self._collect_internal_sources()
|
||||
|
||||
yield {"sources": self.retrieved_docs}
|
||||
yield {"tool_calls": self._get_truncated_tool_calls()}
|
||||
|
||||
log_context.stacks.append(
|
||||
{"component": "agent", "data": {"tool_calls": self.tool_calls.copy()}}
|
||||
)
|
||||
|
||||
def _collect_internal_sources(self):
|
||||
"""Collect retrieved docs from the cached InternalSearchTool instance."""
|
||||
cache_key = f"internal_search:{INTERNAL_TOOL_ID}:{self.user or ''}"
|
||||
tool = self.tool_executor._loaded_tools.get(cache_key)
|
||||
if tool and hasattr(tool, "retrieved_docs") and tool.retrieved_docs:
|
||||
self.retrieved_docs = tool.retrieved_docs
|
||||
585
application/agents/base.py
Normal file
@@ -0,0 +1,585 @@
|
||||
import json
|
||||
import logging
|
||||
import uuid
|
||||
from abc import ABC, abstractmethod
|
||||
from typing import Any, Dict, Generator, List, Optional
|
||||
|
||||
from application.agents.tool_executor import ToolExecutor
|
||||
from application.core.json_schema_utils import (
|
||||
JsonSchemaValidationError,
|
||||
normalize_json_schema_payload,
|
||||
)
|
||||
from application.core.settings import settings
|
||||
from application.llm.handlers.base import ToolCall
|
||||
from application.llm.handlers.handler_creator import LLMHandlerCreator
|
||||
from application.llm.llm_creator import LLMCreator
|
||||
from application.logging import build_stack_data, log_activity, LogContext
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class BaseAgent(ABC):
|
||||
def __init__(
|
||||
self,
|
||||
endpoint: str,
|
||||
llm_name: str,
|
||||
model_id: str,
|
||||
api_key: str,
|
||||
agent_id: Optional[str] = None,
|
||||
user_api_key: Optional[str] = None,
|
||||
prompt: str = "",
|
||||
chat_history: Optional[List[Dict]] = None,
|
||||
retrieved_docs: Optional[List[Dict]] = None,
|
||||
decoded_token: Optional[Dict] = None,
|
||||
attachments: Optional[List[Dict]] = None,
|
||||
json_schema: Optional[Dict] = None,
|
||||
limited_token_mode: Optional[bool] = False,
|
||||
token_limit: Optional[int] = settings.DEFAULT_AGENT_LIMITS["token_limit"],
|
||||
limited_request_mode: Optional[bool] = False,
|
||||
request_limit: Optional[int] = settings.DEFAULT_AGENT_LIMITS["request_limit"],
|
||||
compressed_summary: Optional[str] = None,
|
||||
llm=None,
|
||||
llm_handler=None,
|
||||
tool_executor: Optional[ToolExecutor] = None,
|
||||
backup_models: Optional[List[str]] = None,
|
||||
):
|
||||
self.endpoint = endpoint
|
||||
self.llm_name = llm_name
|
||||
self.model_id = model_id
|
||||
self.api_key = api_key
|
||||
self.agent_id = agent_id
|
||||
self.user_api_key = user_api_key
|
||||
self.prompt = prompt
|
||||
self.decoded_token = decoded_token or {}
|
||||
self.user: str = self.decoded_token.get("sub")
|
||||
self.tools: List[Dict] = []
|
||||
self.chat_history: List[Dict] = chat_history if chat_history is not None else []
|
||||
|
||||
# Dependency injection for LLM — fall back to creating if not provided
|
||||
if llm is not None:
|
||||
self.llm = llm
|
||||
else:
|
||||
self.llm = LLMCreator.create_llm(
|
||||
llm_name,
|
||||
api_key=api_key,
|
||||
user_api_key=user_api_key,
|
||||
decoded_token=decoded_token,
|
||||
model_id=model_id,
|
||||
agent_id=agent_id,
|
||||
backup_models=backup_models,
|
||||
)
|
||||
|
||||
self.retrieved_docs = retrieved_docs or []
|
||||
|
||||
if llm_handler is not None:
|
||||
self.llm_handler = llm_handler
|
||||
else:
|
||||
self.llm_handler = LLMHandlerCreator.create_handler(
|
||||
llm_name if llm_name else "default"
|
||||
)
|
||||
|
||||
# Tool executor — injected or created
|
||||
if tool_executor is not None:
|
||||
self.tool_executor = tool_executor
|
||||
else:
|
||||
self.tool_executor = ToolExecutor(
|
||||
user_api_key=user_api_key,
|
||||
user=self.user,
|
||||
decoded_token=decoded_token,
|
||||
)
|
||||
|
||||
self.attachments = attachments or []
|
||||
self.json_schema = None
|
||||
if json_schema is not None:
|
||||
try:
|
||||
self.json_schema = normalize_json_schema_payload(json_schema)
|
||||
except JsonSchemaValidationError as exc:
|
||||
logger.warning("Ignoring invalid JSON schema payload: %s", exc)
|
||||
self.limited_token_mode = limited_token_mode
|
||||
self.token_limit = token_limit
|
||||
self.limited_request_mode = limited_request_mode
|
||||
self.request_limit = request_limit
|
||||
self.compressed_summary = compressed_summary
|
||||
self.current_token_count = 0
|
||||
self.context_limit_reached = False
|
||||
|
||||
@log_activity()
|
||||
def gen(
|
||||
self, query: str, log_context: LogContext = None
|
||||
) -> Generator[Dict, None, None]:
|
||||
yield from self._gen_inner(query, log_context)
|
||||
|
||||
@abstractmethod
|
||||
def _gen_inner(
|
||||
self, query: str, log_context: LogContext
|
||||
) -> Generator[Dict, None, None]:
|
||||
pass
|
||||
|
||||
def gen_continuation(
|
||||
self,
|
||||
messages: List[Dict],
|
||||
tools_dict: Dict,
|
||||
pending_tool_calls: List[Dict],
|
||||
tool_actions: List[Dict],
|
||||
) -> Generator[Dict, None, None]:
|
||||
"""Resume generation after tool actions are resolved.
|
||||
|
||||
Processes the client-provided *tool_actions* (approvals, denials,
|
||||
or client-side results), appends the resulting messages, then
|
||||
hands back to the LLM to continue the conversation.
|
||||
|
||||
Args:
|
||||
messages: The saved messages array from the pause point.
|
||||
tools_dict: The saved tools dictionary.
|
||||
pending_tool_calls: The pending tool call descriptors from the pause.
|
||||
tool_actions: Client-provided actions resolving the pending calls.
|
||||
"""
|
||||
self._prepare_tools(tools_dict)
|
||||
|
||||
actions_by_id = {a["call_id"]: a for a in tool_actions}
|
||||
|
||||
# Build a single assistant message containing all tool calls so
|
||||
# the message history matches the format LLM providers expect
|
||||
# (one assistant message with N tool_calls, followed by N tool results).
|
||||
tc_objects: List[Dict[str, Any]] = []
|
||||
for pending in pending_tool_calls:
|
||||
call_id = pending["call_id"]
|
||||
args = pending["arguments"]
|
||||
args_str = (
|
||||
json.dumps(args) if isinstance(args, dict) else (args or "{}")
|
||||
)
|
||||
tc_obj: Dict[str, Any] = {
|
||||
"id": call_id,
|
||||
"type": "function",
|
||||
"function": {
|
||||
"name": pending["name"],
|
||||
"arguments": args_str,
|
||||
},
|
||||
}
|
||||
if pending.get("thought_signature"):
|
||||
tc_obj["thought_signature"] = pending["thought_signature"]
|
||||
tc_objects.append(tc_obj)
|
||||
|
||||
messages.append({
|
||||
"role": "assistant",
|
||||
"content": None,
|
||||
"tool_calls": tc_objects,
|
||||
})
|
||||
|
||||
# Now process each pending call and append tool result messages
|
||||
for pending in pending_tool_calls:
|
||||
call_id = pending["call_id"]
|
||||
args = pending["arguments"]
|
||||
action = actions_by_id.get(call_id)
|
||||
if not action:
|
||||
action = {
|
||||
"call_id": call_id,
|
||||
"decision": "denied",
|
||||
"comment": "No response provided",
|
||||
}
|
||||
|
||||
if action.get("decision") == "approved":
|
||||
# Execute the tool server-side
|
||||
tc = ToolCall(
|
||||
id=call_id,
|
||||
name=pending["name"],
|
||||
arguments=(
|
||||
json.dumps(args) if isinstance(args, dict) else args
|
||||
),
|
||||
)
|
||||
tool_gen = self._execute_tool_action(tools_dict, tc)
|
||||
tool_response = None
|
||||
while True:
|
||||
try:
|
||||
event = next(tool_gen)
|
||||
yield event
|
||||
except StopIteration as e:
|
||||
tool_response, _ = e.value
|
||||
break
|
||||
messages.append(
|
||||
self.llm_handler.create_tool_message(tc, tool_response)
|
||||
)
|
||||
|
||||
elif action.get("decision") == "denied":
|
||||
comment = action.get("comment", "")
|
||||
denial = (
|
||||
f"Tool execution denied by user. Reason: {comment}"
|
||||
if comment
|
||||
else "Tool execution denied by user."
|
||||
)
|
||||
tc = ToolCall(
|
||||
id=call_id, name=pending["name"], arguments=args
|
||||
)
|
||||
messages.append(
|
||||
self.llm_handler.create_tool_message(tc, denial)
|
||||
)
|
||||
yield {
|
||||
"type": "tool_call",
|
||||
"data": {
|
||||
"tool_name": pending.get("tool_name", "unknown"),
|
||||
"call_id": call_id,
|
||||
"action_name": pending.get("llm_name", pending["name"]),
|
||||
"arguments": args,
|
||||
"status": "denied",
|
||||
},
|
||||
}
|
||||
|
||||
elif "result" in action:
|
||||
result = action["result"]
|
||||
result_str = (
|
||||
json.dumps(result)
|
||||
if not isinstance(result, str)
|
||||
else result
|
||||
)
|
||||
tc = ToolCall(
|
||||
id=call_id, name=pending["name"], arguments=args
|
||||
)
|
||||
messages.append(
|
||||
self.llm_handler.create_tool_message(tc, result_str)
|
||||
)
|
||||
yield {
|
||||
"type": "tool_call",
|
||||
"data": {
|
||||
"tool_name": pending.get("tool_name", "unknown"),
|
||||
"call_id": call_id,
|
||||
"action_name": pending.get("llm_name", pending["name"]),
|
||||
"arguments": args,
|
||||
"result": (
|
||||
result_str[:50] + "..."
|
||||
if len(result_str) > 50
|
||||
else result_str
|
||||
),
|
||||
"status": "completed",
|
||||
},
|
||||
}
|
||||
|
||||
# Resume the LLM loop with the updated messages
|
||||
llm_response = self._llm_gen(messages)
|
||||
yield from self._handle_response(
|
||||
llm_response, tools_dict, messages, None
|
||||
)
|
||||
|
||||
yield {"sources": self.retrieved_docs}
|
||||
yield {"tool_calls": self._get_truncated_tool_calls()}
|
||||
|
||||
# ---- Tool delegation (thin wrappers around ToolExecutor) ----
|
||||
|
||||
@property
|
||||
def tool_calls(self) -> List[Dict]:
|
||||
return self.tool_executor.tool_calls
|
||||
|
||||
@tool_calls.setter
|
||||
def tool_calls(self, value: List[Dict]):
|
||||
self.tool_executor.tool_calls = value
|
||||
|
||||
def _get_tools(self, api_key: str = None) -> Dict[str, Dict]:
|
||||
return self.tool_executor._get_tools_by_api_key(api_key or self.user_api_key)
|
||||
|
||||
def _get_user_tools(self, user="local"):
|
||||
return self.tool_executor._get_user_tools(user)
|
||||
|
||||
def _build_tool_parameters(self, action):
|
||||
return self.tool_executor._build_tool_parameters(action)
|
||||
|
||||
def _prepare_tools(self, tools_dict):
|
||||
self.tools = self.tool_executor.prepare_tools_for_llm(tools_dict)
|
||||
|
||||
def _execute_tool_action(self, tools_dict, call):
|
||||
return self.tool_executor.execute(
|
||||
tools_dict, call, self.llm.__class__.__name__
|
||||
)
|
||||
|
||||
def _get_truncated_tool_calls(self):
|
||||
return self.tool_executor.get_truncated_tool_calls()
|
||||
|
||||
# ---- Context / token management ----
|
||||
|
||||
def _calculate_current_context_tokens(self, messages: List[Dict]) -> int:
|
||||
from application.api.answer.services.compression.token_counter import (
|
||||
TokenCounter,
|
||||
)
|
||||
return TokenCounter.count_message_tokens(messages)
|
||||
|
||||
def _check_context_limit(self, messages: List[Dict]) -> bool:
|
||||
from application.core.model_utils import get_token_limit
|
||||
|
||||
try:
|
||||
current_tokens = self._calculate_current_context_tokens(messages)
|
||||
self.current_token_count = current_tokens
|
||||
context_limit = get_token_limit(self.model_id)
|
||||
threshold = int(context_limit * settings.COMPRESSION_THRESHOLD_PERCENTAGE)
|
||||
|
||||
if current_tokens >= threshold:
|
||||
logger.warning(
|
||||
f"Context limit approaching: {current_tokens}/{context_limit} tokens "
|
||||
f"({(current_tokens/context_limit)*100:.1f}%)"
|
||||
)
|
||||
return True
|
||||
return False
|
||||
except Exception as e:
|
||||
logger.error(f"Error checking context limit: {str(e)}", exc_info=True)
|
||||
return False
|
||||
|
||||
def _validate_context_size(self, messages: List[Dict]) -> None:
|
||||
from application.core.model_utils import get_token_limit
|
||||
|
||||
current_tokens = self._calculate_current_context_tokens(messages)
|
||||
self.current_token_count = current_tokens
|
||||
context_limit = get_token_limit(self.model_id)
|
||||
percentage = (current_tokens / context_limit) * 100
|
||||
|
||||
if current_tokens >= context_limit:
|
||||
logger.warning(
|
||||
f"Context at limit: {current_tokens:,}/{context_limit:,} tokens "
|
||||
f"({percentage:.1f}%). Model: {self.model_id}"
|
||||
)
|
||||
elif current_tokens >= int(
|
||||
context_limit * settings.COMPRESSION_THRESHOLD_PERCENTAGE
|
||||
):
|
||||
logger.info(
|
||||
f"Context approaching limit: {current_tokens:,}/{context_limit:,} tokens "
|
||||
f"({percentage:.1f}%)"
|
||||
)
|
||||
|
||||
def _truncate_text_middle(self, text: str, max_tokens: int) -> str:
|
||||
from application.utils import num_tokens_from_string
|
||||
|
||||
current_tokens = num_tokens_from_string(text)
|
||||
if current_tokens <= max_tokens:
|
||||
return text
|
||||
|
||||
chars_per_token = len(text) / current_tokens if current_tokens > 0 else 4
|
||||
target_chars = int(max_tokens * chars_per_token * 0.95)
|
||||
|
||||
if target_chars <= 0:
|
||||
return ""
|
||||
|
||||
start_chars = int(target_chars * 0.4)
|
||||
end_chars = int(target_chars * 0.4)
|
||||
|
||||
truncation_marker = "\n\n[... content truncated to fit context limit ...]\n\n"
|
||||
truncated = text[:start_chars] + truncation_marker + text[-end_chars:]
|
||||
|
||||
logger.info(
|
||||
f"Truncated text from {current_tokens:,} to ~{max_tokens:,} tokens "
|
||||
f"(removed middle section)"
|
||||
)
|
||||
return truncated
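    # Worked example (illustrative, not part of the original code): a 10,000-token text
    # of 40,000 characters capped at max_tokens=2,000 gives ~4 chars/token, so
    # target_chars = int(2000 * 4 * 0.95) = 7,600; the method keeps the first 3,040 and
    # the last 3,040 characters with the truncation marker in between.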
|
||||
|
||||
# ---- Message building ----
|
||||
|
||||
def _build_messages(
|
||||
self,
|
||||
system_prompt: str,
|
||||
query: str,
|
||||
) -> List[Dict]:
|
||||
"""Build messages using pre-rendered system prompt"""
|
||||
from application.core.model_utils import get_token_limit
|
||||
from application.utils import num_tokens_from_string
|
||||
|
||||
if self.compressed_summary:
|
||||
compression_context = (
|
||||
"\n\n---\n\n"
|
||||
"This session is being continued from a previous conversation that "
|
||||
"has been compressed to fit within context limits. "
|
||||
"The conversation is summarized below:\n\n"
|
||||
f"{self.compressed_summary}"
|
||||
)
|
||||
system_prompt = system_prompt + compression_context
|
||||
|
||||
context_limit = get_token_limit(self.model_id)
|
||||
system_tokens = num_tokens_from_string(system_prompt)
|
||||
|
||||
safety_buffer = int(context_limit * 0.1)
|
||||
available_after_system = context_limit - system_tokens - safety_buffer
|
||||
|
||||
max_query_tokens = int(available_after_system * 0.8)
|
||||
query_tokens = num_tokens_from_string(query)
|
||||
|
||||
if query_tokens > max_query_tokens:
|
||||
query = self._truncate_text_middle(query, max_query_tokens)
|
||||
query_tokens = num_tokens_from_string(query)
|
||||
|
||||
available_for_history = max(available_after_system - query_tokens, 0)
|
||||
|
||||
working_history = self._truncate_history_to_fit(
|
||||
self.chat_history,
|
||||
available_for_history,
|
||||
)
|
||||
|
||||
messages = [{"role": "system", "content": system_prompt}]
|
||||
|
||||
for i in working_history:
|
||||
if "prompt" in i and "response" in i:
|
||||
messages.append({"role": "user", "content": i["prompt"]})
|
||||
messages.append({"role": "assistant", "content": i["response"]})
|
||||
if "tool_calls" in i:
|
||||
for tool_call in i["tool_calls"]:
|
||||
call_id = tool_call.get("call_id") or str(uuid.uuid4())
|
||||
args = tool_call.get("arguments")
|
||||
args_str = (
|
||||
json.dumps(args)
|
||||
if isinstance(args, dict)
|
||||
else (args or "{}")
|
||||
)
|
||||
messages.append({
|
||||
"role": "assistant",
|
||||
"content": None,
|
||||
"tool_calls": [{
|
||||
"id": call_id,
|
||||
"type": "function",
|
||||
"function": {
|
||||
"name": tool_call.get("action_name", ""),
|
||||
"arguments": args_str,
|
||||
},
|
||||
}],
|
||||
})
|
||||
result = tool_call.get("result")
|
||||
result_str = (
|
||||
json.dumps(result)
|
||||
if not isinstance(result, str)
|
||||
else (result or "")
|
||||
)
|
||||
messages.append({
|
||||
"role": "tool",
|
||||
"tool_call_id": call_id,
|
||||
"content": result_str,
|
||||
})
|
||||
messages.append({"role": "user", "content": query})
|
||||
return messages
|
||||
|
||||
def _truncate_history_to_fit(
|
||||
self,
|
||||
history: List[Dict],
|
||||
max_tokens: int,
|
||||
) -> List[Dict]:
|
||||
from application.utils import num_tokens_from_string
|
||||
|
||||
if not history or max_tokens <= 0:
|
||||
return []
|
||||
|
||||
truncated = []
|
||||
current_tokens = 0
|
||||
|
||||
for message in reversed(history):
|
||||
message_tokens = 0
|
||||
|
||||
if "prompt" in message and "response" in message:
|
||||
message_tokens += num_tokens_from_string(message["prompt"])
|
||||
message_tokens += num_tokens_from_string(message["response"])
|
||||
|
||||
if "tool_calls" in message:
|
||||
for tool_call in message["tool_calls"]:
|
||||
tool_str = (
|
||||
f"Tool: {tool_call.get('tool_name')} | "
|
||||
f"Action: {tool_call.get('action_name')} | "
|
||||
f"Args: {tool_call.get('arguments')} | "
|
||||
f"Response: {tool_call.get('result')}"
|
||||
)
|
||||
message_tokens += num_tokens_from_string(tool_str)
|
||||
|
||||
if current_tokens + message_tokens <= max_tokens:
|
||||
current_tokens += message_tokens
|
||||
truncated.insert(0, message)
|
||||
else:
|
||||
break
|
||||
|
||||
if len(truncated) < len(history):
|
||||
logger.info(
|
||||
f"Truncated chat history from {len(history)} to {len(truncated)} messages "
|
||||
f"to fit within {max_tokens:,} token budget"
|
||||
)
|
||||
|
||||
return truncated
|
||||
|
||||
# ---- LLM generation ----
|
||||
|
||||
def _llm_gen(self, messages: List[Dict], log_context: Optional[LogContext] = None):
|
||||
self._validate_context_size(messages)
|
||||
|
||||
gen_kwargs = {"model": self.model_id, "messages": messages}
|
||||
if self.attachments:
|
||||
gen_kwargs["_usage_attachments"] = self.attachments
|
||||
|
||||
if (
|
||||
hasattr(self.llm, "_supports_tools")
|
||||
and self.llm._supports_tools
|
||||
and self.tools
|
||||
):
|
||||
gen_kwargs["tools"] = self.tools
|
||||
if (
|
||||
self.json_schema
|
||||
and hasattr(self.llm, "_supports_structured_output")
|
||||
and self.llm._supports_structured_output()
|
||||
):
|
||||
structured_format = self.llm.prepare_structured_output_format(
|
||||
self.json_schema
|
||||
)
|
||||
if structured_format:
|
||||
if self.llm_name == "openai":
|
||||
gen_kwargs["response_format"] = structured_format
|
||||
elif self.llm_name == "google":
|
||||
gen_kwargs["response_schema"] = structured_format
|
||||
resp = self.llm.gen_stream(**gen_kwargs)
|
||||
|
||||
if log_context:
|
||||
data = build_stack_data(self.llm, exclude_attributes=["client"])
|
||||
log_context.stacks.append({"component": "llm", "data": data})
|
||||
return resp
|
||||
|
||||
def _llm_handler(
|
||||
self,
|
||||
resp,
|
||||
tools_dict: Dict,
|
||||
messages: List[Dict],
|
||||
log_context: Optional[LogContext] = None,
|
||||
attachments: Optional[List[Dict]] = None,
|
||||
):
|
||||
resp = self.llm_handler.process_message_flow(
|
||||
self, resp, tools_dict, messages, attachments, True
|
||||
)
|
||||
if log_context:
|
||||
data = build_stack_data(self.llm_handler, exclude_attributes=["tool_calls"])
|
||||
log_context.stacks.append({"component": "llm_handler", "data": data})
|
||||
return resp
|
||||
|
||||
def _handle_response(self, response, tools_dict, messages, log_context):
|
||||
is_structured_output = (
|
||||
self.json_schema is not None
|
||||
and hasattr(self.llm, "_supports_structured_output")
|
||||
and self.llm._supports_structured_output()
|
||||
)
|
||||
|
||||
if isinstance(response, str):
|
||||
answer_data = {"answer": response}
|
||||
if is_structured_output:
|
||||
answer_data["structured"] = True
|
||||
answer_data["schema"] = self.json_schema
|
||||
yield answer_data
|
||||
return
|
||||
if hasattr(response, "message") and getattr(response.message, "content", None):
|
||||
answer_data = {"answer": response.message.content}
|
||||
if is_structured_output:
|
||||
answer_data["structured"] = True
|
||||
answer_data["schema"] = self.json_schema
|
||||
yield answer_data
|
||||
return
|
||||
processed_response_gen = self._llm_handler(
|
||||
response, tools_dict, messages, log_context, self.attachments
|
||||
)
|
||||
|
||||
for event in processed_response_gen:
|
||||
if isinstance(event, str):
|
||||
answer_data = {"answer": event}
|
||||
if is_structured_output:
|
||||
answer_data["structured"] = True
|
||||
answer_data["schema"] = self.json_schema
|
||||
yield answer_data
|
||||
elif hasattr(event, "message") and getattr(event.message, "content", None):
|
||||
answer_data = {"answer": event.message.content}
|
||||
if is_structured_output:
|
||||
answer_data["structured"] = True
|
||||
answer_data["schema"] = self.json_schema
|
||||
yield answer_data
|
||||
elif isinstance(event, dict) and "type" in event:
|
||||
yield event
|
||||
33
application/agents/classic_agent.py
Normal file
@@ -0,0 +1,33 @@
|
||||
import logging
|
||||
from typing import Dict, Generator
|
||||
|
||||
from application.agents.base import BaseAgent
|
||||
from application.logging import LogContext
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class ClassicAgent(BaseAgent):
|
||||
"""A simplified agent with clear execution flow"""
|
||||
|
||||
def _gen_inner(
|
||||
self, query: str, log_context: LogContext
|
||||
) -> Generator[Dict, None, None]:
|
||||
"""Core generator function for ClassicAgent execution flow"""
|
||||
|
||||
tools_dict = self.tool_executor.get_tools()
|
||||
self._prepare_tools(tools_dict)
|
||||
|
||||
messages = self._build_messages(self.prompt, query)
|
||||
llm_response = self._llm_gen(messages, log_context)
|
||||
|
||||
yield from self._handle_response(
|
||||
llm_response, tools_dict, messages, log_context
|
||||
)
|
||||
|
||||
yield {"sources": self.retrieved_docs}
|
||||
yield {"tool_calls": self._get_truncated_tool_calls()}
|
||||
|
||||
log_context.stacks.append(
|
||||
{"component": "agent", "data": {"tool_calls": self.tool_calls.copy()}}
|
||||
)
|
||||
698
application/agents/research_agent.py
Normal file
@@ -0,0 +1,698 @@
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import time
|
||||
from typing import Dict, Generator, List, Optional
|
||||
|
||||
from application.agents.base import BaseAgent
|
||||
from application.agents.tool_executor import ToolExecutor
|
||||
from application.agents.tools.internal_search import (
|
||||
INTERNAL_TOOL_ID,
|
||||
add_internal_search_tool,
|
||||
)
|
||||
from application.agents.tools.think import THINK_TOOL_ENTRY, THINK_TOOL_ID
|
||||
from application.logging import LogContext
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Defaults (can be overridden via constructor)
|
||||
DEFAULT_MAX_STEPS = 6
|
||||
DEFAULT_MAX_SUB_ITERATIONS = 5
|
||||
DEFAULT_TIMEOUT_SECONDS = 300 # 5 minutes
|
||||
DEFAULT_TOKEN_BUDGET = 100_000
|
||||
DEFAULT_PARALLEL_WORKERS = 3
|
||||
|
||||
# Adaptive depth caps per complexity level
|
||||
COMPLEXITY_CAPS = {
|
||||
"simple": 2,
|
||||
"moderate": 4,
|
||||
"complex": 6,
|
||||
}
|
||||
|
||||
_PROMPTS_DIR = os.path.join(
|
||||
os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
|
||||
"prompts",
|
||||
"research",
|
||||
)
|
||||
|
||||
|
||||
def _load_prompt(name: str) -> str:
|
||||
with open(os.path.join(_PROMPTS_DIR, name), "r") as f:
|
||||
return f.read()
|
||||
|
||||
|
||||
CLARIFICATION_PROMPT = _load_prompt("clarification.txt")
|
||||
PLANNING_PROMPT = _load_prompt("planning.txt")
|
||||
STEP_PROMPT = _load_prompt("step.txt")
|
||||
SYNTHESIS_PROMPT = _load_prompt("synthesis.txt")
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# CitationManager
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class CitationManager:
|
||||
"""Tracks and deduplicates citations across research steps."""
|
||||
|
||||
def __init__(self):
|
||||
self.citations: Dict[int, Dict] = {}
|
||||
self._counter = 0
|
||||
|
||||
def add(self, doc: Dict) -> int:
|
||||
"""Register a source, return its citation number. Deduplicates by source."""
|
||||
source = doc.get("source", "")
|
||||
title = doc.get("title", "")
|
||||
for num, existing in self.citations.items():
|
||||
if existing.get("source") == source and existing.get("title") == title:
|
||||
return num
|
||||
self._counter += 1
|
||||
self.citations[self._counter] = doc
|
||||
return self._counter
|
||||
|
||||
def add_docs(self, docs: List[Dict]) -> str:
|
||||
"""Register multiple docs, return formatted citation mapping text."""
|
||||
mapping_lines = []
|
||||
for doc in docs:
|
||||
num = self.add(doc)
|
||||
title = doc.get("title", "Untitled")
|
||||
mapping_lines.append(f"[{num}] {title}")
|
||||
return "\n".join(mapping_lines)
|
||||
|
||||
def format_references(self) -> str:
|
||||
"""Generate [N] -> source mapping for report footer."""
|
||||
if not self.citations:
|
||||
return "No sources found."
|
||||
lines = []
|
||||
for num, doc in sorted(self.citations.items()):
|
||||
title = doc.get("title", "Untitled")
|
||||
source = doc.get("source", "Unknown")
|
||||
filename = doc.get("filename", "")
|
||||
display = filename or title
|
||||
lines.append(f"[{num}] {display} — {source}")
|
||||
return "\n".join(lines)
|
||||
|
||||
def get_all_docs(self) -> List[Dict]:
|
||||
return list(self.citations.values())
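A small illustration of the deduplication behaviour above (titles and URLs are invented for the example):

```python
# Illustrative only — not part of research_agent.py.
cm = CitationManager()
docs = [
    {"title": "Quickstart", "source": "https://docs.docsgpt.cloud/quickstart"},
    {"title": "API Reference", "source": "https://docs.docsgpt.cloud/api"},
    {"title": "Quickstart", "source": "https://docs.docsgpt.cloud/quickstart"},  # duplicate, reuses [1]
]
print(cm.add_docs(docs))       # "[1] Quickstart", "[2] API Reference", "[1] Quickstart"
print(cm.format_references())  # one numbered line per unique doc, using the format above
```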
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# ResearchAgent
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class ResearchAgent(BaseAgent):
|
||||
"""Multi-step research agent with parallel execution and budget controls.
|
||||
|
||||
Orchestrates: Plan -> Research (per step, optionally parallel) -> Synthesize.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
retriever_config: Optional[Dict] = None,
|
||||
max_steps: int = DEFAULT_MAX_STEPS,
|
||||
max_sub_iterations: int = DEFAULT_MAX_SUB_ITERATIONS,
|
||||
timeout_seconds: int = DEFAULT_TIMEOUT_SECONDS,
|
||||
token_budget: int = DEFAULT_TOKEN_BUDGET,
|
||||
parallel_workers: int = DEFAULT_PARALLEL_WORKERS,
|
||||
*args,
|
||||
**kwargs,
|
||||
):
|
||||
super().__init__(*args, **kwargs)
|
||||
self.retriever_config = retriever_config or {}
|
||||
self.max_steps = max_steps
|
||||
self.max_sub_iterations = max_sub_iterations
|
||||
self.timeout_seconds = timeout_seconds
|
||||
self.token_budget = token_budget
|
||||
self.parallel_workers = parallel_workers
|
||||
self.citations = CitationManager()
|
||||
self._start_time: float = 0
|
||||
self._tokens_used: int = 0
|
||||
self._last_token_snapshot: int = 0
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# Budget & timeout helpers
|
||||
# ------------------------------------------------------------------
|
||||
|
||||
def _is_timed_out(self) -> bool:
|
||||
return (time.monotonic() - self._start_time) >= self.timeout_seconds
|
||||
|
||||
def _elapsed(self) -> float:
|
||||
return round(time.monotonic() - self._start_time, 1)
|
||||
|
||||
def _track_tokens(self, count: int):
|
||||
self._tokens_used += count
|
||||
|
||||
def _budget_remaining(self) -> int:
|
||||
return max(self.token_budget - self._tokens_used, 0)
|
||||
|
||||
def _is_over_budget(self) -> bool:
|
||||
return self._tokens_used >= self.token_budget
|
||||
|
||||
def _snapshot_llm_tokens(self) -> int:
|
||||
"""Read current token usage from LLM and return delta since last snapshot."""
|
||||
current = self.llm.token_usage.get("prompt_tokens", 0) + self.llm.token_usage.get("generated_tokens", 0)
|
||||
delta = current - self._last_token_snapshot
|
||||
self._last_token_snapshot = current
|
||||
return delta
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# Main orchestration
|
||||
# ------------------------------------------------------------------
|
||||
|
||||
def _gen_inner(
|
||||
self, query: str, log_context: LogContext
|
||||
) -> Generator[Dict, None, None]:
|
||||
self._start_time = time.monotonic()
|
||||
tools_dict = self._setup_tools()
|
||||
|
||||
# Phase 0: Clarification (skip if user is responding to a prior clarification)
|
||||
if not self._is_follow_up():
|
||||
clarification = self._clarification_phase(query)
|
||||
if clarification:
|
||||
yield {"metadata": {"is_clarification": True}}
|
||||
yield {"answer": clarification}
|
||||
yield {"sources": []}
|
||||
yield {"tool_calls": []}
|
||||
log_context.stacks.append(
|
||||
{"component": "agent", "data": {"clarification": True}}
|
||||
)
|
||||
return
|
||||
|
||||
# Phase 1: Planning (with adaptive depth)
|
||||
yield {"type": "research_progress", "data": {"status": "planning"}}
|
||||
plan, complexity = self._planning_phase(query)
|
||||
|
||||
if not plan:
|
||||
logger.warning("ResearchAgent: Planning produced no steps, falling back")
|
||||
plan = [{"query": query, "rationale": "Direct investigation"}]
|
||||
complexity = "simple"
|
||||
|
||||
yield {
|
||||
"type": "research_plan",
|
||||
"data": {"steps": plan, "complexity": complexity},
|
||||
}
|
||||
|
||||
# Phase 2: Research each step (yields progress events in real-time)
|
||||
intermediate_reports = []
|
||||
for i, step in enumerate(plan):
|
||||
step_num = i + 1
|
||||
step_query = step.get("query", query)
|
||||
|
||||
if self._is_timed_out():
|
||||
logger.warning(
|
||||
f"ResearchAgent: Timeout at step {step_num}/{len(plan)} "
|
||||
f"({self._elapsed()}s)"
|
||||
)
|
||||
break
|
||||
if self._is_over_budget():
|
||||
logger.warning(
|
||||
f"ResearchAgent: Token budget exhausted at step {step_num}/{len(plan)}"
|
||||
)
|
||||
break
|
||||
|
||||
yield {
|
||||
"type": "research_progress",
|
||||
"data": {
|
||||
"step": step_num,
|
||||
"total": len(plan),
|
||||
"query": step_query,
|
||||
"status": "researching",
|
||||
},
|
||||
}
|
||||
|
||||
report = self._research_step(step_query, tools_dict)
|
||||
intermediate_reports.append({"step": step, "content": report})
|
||||
|
||||
yield {
|
||||
"type": "research_progress",
|
||||
"data": {
|
||||
"step": step_num,
|
||||
"total": len(plan),
|
||||
"query": step_query,
|
||||
"status": "complete",
|
||||
},
|
||||
}
|
||||
|
||||
# Phase 3: Synthesis (streaming)
|
||||
if self._is_timed_out():
|
||||
logger.warning(
|
||||
f"ResearchAgent: Timeout ({self._elapsed()}s) before synthesis, "
|
||||
f"synthesizing with {len(intermediate_reports)} reports"
|
||||
)
|
||||
yield {
|
||||
"type": "research_progress",
|
||||
"data": {
|
||||
"status": "synthesizing",
|
||||
"elapsed_seconds": self._elapsed(),
|
||||
"tokens_used": self._tokens_used,
|
||||
},
|
||||
}
|
||||
yield from self._synthesis_phase(
|
||||
query, plan, intermediate_reports, tools_dict, log_context
|
||||
)
|
||||
|
||||
# Sources and tool calls
|
||||
self.retrieved_docs = self.citations.get_all_docs()
|
||||
yield {"sources": self.retrieved_docs}
|
||||
yield {"tool_calls": self._get_truncated_tool_calls()}
|
||||
|
||||
logger.info(
|
||||
f"ResearchAgent completed: {len(intermediate_reports)}/{len(plan)} steps, "
|
||||
f"{self._elapsed()}s, ~{self._tokens_used} tokens"
|
||||
)
|
||||
log_context.stacks.append(
|
||||
{"component": "agent", "data": {"tool_calls": self.tool_calls.copy()}}
|
||||
)
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# Tool setup
|
||||
# ------------------------------------------------------------------
|
||||
|
||||
def _setup_tools(self) -> Dict:
|
||||
"""Build tools_dict with user tools + internal search + think."""
|
||||
tools_dict = self.tool_executor.get_tools()
|
||||
|
||||
add_internal_search_tool(tools_dict, self.retriever_config)
|
||||
|
||||
think_entry = dict(THINK_TOOL_ENTRY)
|
||||
think_entry["config"] = {}
|
||||
tools_dict[THINK_TOOL_ID] = think_entry
|
||||
|
||||
self._prepare_tools(tools_dict)
|
||||
return tools_dict
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# Phase 0: Clarification
|
||||
# ------------------------------------------------------------------
|
||||
|
||||
def _is_follow_up(self) -> bool:
|
||||
"""Check if the user is responding to a prior clarification.
|
||||
|
||||
Uses the metadata flag stored in the conversation DB — no string matching.
|
||||
Only skip clarification when the last query was explicitly flagged
|
||||
as a clarification by this agent.
|
||||
"""
|
||||
if not self.chat_history:
|
||||
return False
|
||||
last = self.chat_history[-1]
|
||||
meta = last.get("metadata", {})
|
||||
return bool(meta.get("is_clarification"))
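# Illustrative sketch of the history shape this check relies on; the exact
# keys stored by the conversation DB are assumed here.
history = [
    {"prompt": "Compare X and Y", "response": "Which version of Y do you mean?",
     "metadata": {"is_clarification": True}},
]
# bool(history[-1].get("metadata", {}).get("is_clarification")) -> True,
# so the next user message is treated as the answer and Phase 0 is skipped.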
|
||||
|
||||
def _clarification_phase(self, question: str) -> Optional[str]:
|
||||
"""Ask the LLM whether the question needs clarification.
|
||||
|
||||
Returns formatted clarification text if needed, or None to proceed.
|
||||
Uses response_format to force valid JSON output.
|
||||
"""
|
||||
messages = [
|
||||
{"role": "system", "content": CLARIFICATION_PROMPT},
|
||||
{"role": "user", "content": question},
|
||||
]
|
||||
|
||||
try:
|
||||
response = self.llm.gen(
|
||||
model=self.model_id,
|
||||
messages=messages,
|
||||
tools=None,
|
||||
response_format={"type": "json_object"},
|
||||
)
|
||||
text = self._extract_text(response)
|
||||
self._track_tokens(self._snapshot_llm_tokens())
|
||||
logger.info(f"ResearchAgent clarification response: {text[:300]}")
|
||||
|
||||
data = self._parse_clarification_json(text)
|
||||
if not data or not data.get("needs_clarification"):
|
||||
return None
|
||||
|
||||
questions = data.get("questions", [])
|
||||
if not questions:
|
||||
return None
|
||||
|
||||
# Format as a friendly response
|
||||
lines = [
|
||||
"Before I begin researching, I'd like to clarify a few things:\n"
|
||||
]
|
||||
for i, q in enumerate(questions[:3], 1):
|
||||
lines.append(f"{i}. {q}")
|
||||
lines.append(
|
||||
"\nPlease provide these details and I'll start the research."
|
||||
)
|
||||
return "\n".join(lines)
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Clarification phase failed: {e}", exc_info=True)
|
||||
return None # proceed with research on failure
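# Illustrative sketch of the JSON this phase expects back; the exact schema is
# set by CLARIFICATION_PROMPT (not shown here), so the keys below are assumptions.
example = json.loads(
    '{"needs_clarification": true,'
    ' "questions": ["Which cloud provider?", "What is the budget?"]}'
)
# example["needs_clarification"] is True, and the two questions would be rendered
# as a numbered list (at most three are kept) by _clarification_phase above.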
|
||||
|
||||
def _parse_clarification_json(self, text: str) -> Optional[Dict]:
|
||||
"""Parse clarification JSON from LLM response."""
|
||||
try:
|
||||
return json.loads(text)
|
||||
except json.JSONDecodeError:
|
||||
pass
|
||||
|
||||
# Try extracting from code fences
|
||||
for marker in ["```json", "```"]:
|
||||
if marker in text:
|
||||
start = text.index(marker) + len(marker)
|
||||
end = text.index("```", start) if "```" in text[start:] else len(text)
|
||||
try:
|
||||
return json.loads(text[start:end].strip())
|
||||
except (json.JSONDecodeError, ValueError):
|
||||
pass
|
||||
|
||||
# Try finding JSON object
|
||||
for i, ch in enumerate(text):
|
||||
if ch == "{":
|
||||
for j in range(len(text) - 1, i, -1):
|
||||
if text[j] == "}":
|
||||
try:
|
||||
return json.loads(text[i : j + 1])
|
||||
except json.JSONDecodeError:
|
||||
continue
|
||||
break
|
||||
|
||||
return None
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# Phase 1: Planning (with adaptive depth)
|
||||
# ------------------------------------------------------------------
|
||||
|
||||
def _planning_phase(self, question: str) -> tuple[List[Dict], str]:
|
||||
"""Decompose the question into research steps via LLM.
|
||||
|
||||
Returns (steps, complexity) where complexity is simple/moderate/complex.
|
||||
"""
|
||||
messages = [
|
||||
{"role": "system", "content": PLANNING_PROMPT},
|
||||
{"role": "user", "content": question},
|
||||
]
|
||||
|
||||
try:
|
||||
response = self.llm.gen(
|
||||
model=self.model_id,
|
||||
messages=messages,
|
||||
tools=None,
|
||||
response_format={"type": "json_object"},
|
||||
)
|
||||
text = self._extract_text(response)
|
||||
self._track_tokens(self._snapshot_llm_tokens())
|
||||
logger.info(f"ResearchAgent planning LLM response: {text[:500]}")
|
||||
|
||||
plan_data = self._parse_plan_json(text)
|
||||
if isinstance(plan_data, dict):
|
||||
complexity = plan_data.get("complexity", "moderate")
|
||||
steps = plan_data.get("steps", [])
|
||||
else:
|
||||
complexity = "moderate"
|
||||
steps = plan_data
|
||||
|
||||
# Adaptive depth: cap steps based on assessed complexity
|
||||
cap = COMPLEXITY_CAPS.get(complexity, self.max_steps)
|
||||
cap = min(cap, self.max_steps)
|
||||
steps = steps[:cap]
|
||||
|
||||
logger.info(
|
||||
f"ResearchAgent plan: complexity={complexity}, "
|
||||
f"steps={len(steps)} (cap={cap})"
|
||||
)
|
||||
return steps, complexity
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Planning phase failed: {e}", exc_info=True)
|
||||
return (
|
||||
[{"query": question, "rationale": "Direct investigation (planning failed)"}],
|
||||
"simple",
|
||||
)
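# Illustrative sketch of the adaptive cap; COMPLEXITY_CAPS is defined elsewhere,
# so the values below are assumptions, not the project's real numbers.
example_caps = {"simple": 2, "moderate": 4, "complex": 6}
example_max_steps = 5
cap_demo = min(example_caps.get("complex", example_max_steps), example_max_steps)
# cap_demo == 5: the agent-level max_steps still bounds the complexity cap,
# and the planned steps list is truncated to that length.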
|
||||
|
||||
def _parse_plan_json(self, text: str):
|
||||
"""Extract JSON plan from LLM response. Returns dict or list."""
|
||||
# Try direct parse
|
||||
try:
|
||||
data = json.loads(text)
|
||||
if isinstance(data, dict) and "steps" in data:
|
||||
return data
|
||||
if isinstance(data, list):
|
||||
return data
|
||||
except json.JSONDecodeError:
|
||||
pass
|
||||
|
||||
# Try extracting from markdown code fences
|
||||
for marker in ["```json", "```"]:
|
||||
if marker in text:
|
||||
start = text.index(marker) + len(marker)
|
||||
end = text.index("```", start) if "```" in text[start:] else len(text)
|
||||
try:
|
||||
data = json.loads(text[start:end].strip())
|
||||
if isinstance(data, dict) and "steps" in data:
|
||||
return data
|
||||
if isinstance(data, list):
|
||||
return data
|
||||
except (json.JSONDecodeError, ValueError):
|
||||
pass
|
||||
|
||||
# Try finding JSON object in text
|
||||
for i, ch in enumerate(text):
|
||||
if ch == "{":
|
||||
for j in range(len(text) - 1, i, -1):
|
||||
if text[j] == "}":
|
||||
try:
|
||||
data = json.loads(text[i : j + 1])
|
||||
if isinstance(data, dict) and "steps" in data:
|
||||
return data
|
||||
except json.JSONDecodeError:
|
||||
continue
|
||||
break
|
||||
|
||||
logger.warning(f"Could not parse plan JSON from: {text[:200]}")
|
||||
return []
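# Illustrative sketch of the two payload shapes accepted above; the keys mirror
# what _planning_phase reads from the result.
dict_shape = json.loads(
    '{"complexity": "moderate", "steps": [{"query": "q1", "rationale": "r1"}]}'
)
list_shape = json.loads('[{"query": "q1", "rationale": "r1"}]')
# dict_shape is returned because it contains "steps"; list_shape is returned as-is;
# anything else falls through to the [] fallback above.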
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# Phase 2: Research step (core loop)
|
||||
# ------------------------------------------------------------------
|
||||
|
||||
def _research_step(self, step_query: str, tools_dict: Dict) -> str:
|
||||
"""Run a focused research loop for one sub-question (sequential path)."""
|
||||
report = self._research_step_with_executor(
|
||||
step_query, tools_dict, self.tool_executor
|
||||
)
|
||||
self._collect_step_sources()
|
||||
return report
|
||||
|
||||
def _research_step_with_executor(
|
||||
self, step_query: str, tools_dict: Dict, executor: ToolExecutor
|
||||
) -> str:
|
||||
"""Core research loop. Works with any ToolExecutor instance."""
|
||||
system_prompt = STEP_PROMPT.replace("{step_query}", step_query)
|
||||
messages = [
|
||||
{"role": "system", "content": system_prompt},
|
||||
{"role": "user", "content": step_query},
|
||||
]
|
||||
|
||||
last_search_empty = False
|
||||
|
||||
for iteration in range(self.max_sub_iterations):
|
||||
# Check timeout and budget
|
||||
if self._is_timed_out():
|
||||
logger.info(
|
||||
f"Research step '{step_query[:50]}' timed out at iteration {iteration}"
|
||||
)
|
||||
break
|
||||
if self._is_over_budget():
|
||||
logger.info(
|
||||
f"Research step '{step_query[:50]}' hit token budget at iteration {iteration}"
|
||||
)
|
||||
break
|
||||
|
||||
try:
|
||||
response = self.llm.gen(
|
||||
model=self.model_id,
|
||||
messages=messages,
|
||||
tools=self.tools if self.tools else None,
|
||||
)
|
||||
self._track_tokens(self._snapshot_llm_tokens())
|
||||
except Exception as e:
|
||||
logger.error(
|
||||
f"Research step LLM call failed (iteration {iteration}): {e}",
|
||||
exc_info=True,
|
||||
)
|
||||
break
|
||||
|
||||
parsed = self.llm_handler.parse_response(response)
|
||||
|
||||
if not parsed.requires_tool_call:
|
||||
return parsed.content or "No findings for this step."
|
||||
|
||||
# Execute tool calls
|
||||
messages, last_search_empty = self._execute_step_tools_with_refinement(
|
||||
parsed.tool_calls, tools_dict, messages, executor, last_search_empty
|
||||
)
|
||||
|
||||
# Max iterations / timeout / budget — ask for summary
|
||||
messages.append(
|
||||
{
|
||||
"role": "user",
|
||||
"content": "Please summarize your findings so far based on the information gathered.",
|
||||
}
|
||||
)
|
||||
try:
|
||||
response = self.llm.gen(
|
||||
model=self.model_id, messages=messages, tools=None
|
||||
)
|
||||
self._track_tokens(self._snapshot_llm_tokens())
|
||||
text = self._extract_text(response)
|
||||
return text or "Research step completed."
|
||||
except Exception:
|
||||
return "Research step completed."
|
||||
|
||||
def _execute_step_tools_with_refinement(
|
||||
self,
|
||||
tool_calls,
|
||||
tools_dict: Dict,
|
||||
messages: List[Dict],
|
||||
executor: ToolExecutor,
|
||||
last_search_empty: bool,
|
||||
) -> tuple[List[Dict], bool]:
|
||||
"""Execute tool calls with query refinement on empty results.
|
||||
|
||||
Returns (updated_messages, was_last_search_empty).
|
||||
"""
|
||||
search_returned_empty = False
|
||||
|
||||
for call in tool_calls:
|
||||
gen = executor.execute(
|
||||
tools_dict, call, self.llm.__class__.__name__
|
||||
)
|
||||
result = None
|
||||
call_id = None
|
||||
while True:
|
||||
try:
|
||||
event = next(gen)
|
||||
# Log tool_call status events instead of discarding them
|
||||
if isinstance(event, dict) and event.get("type") == "tool_call":
|
||||
logger.debug(
|
||||
"Tool %s status: %s",
|
||||
event.get("data", {}).get("action_name", ""),
|
||||
event.get("data", {}).get("status", ""),
|
||||
)
|
||||
except StopIteration as e:
|
||||
result, call_id = e.value
|
||||
break
|
||||
|
||||
# Detect empty search results for refinement
|
||||
is_search = "search" in (call.name or "").lower()
|
||||
result_str = str(result) if result else ""
|
||||
if is_search and "No documents found" in result_str:
|
||||
search_returned_empty = True
|
||||
if last_search_empty:
|
||||
# Two consecutive empty searches — inject refinement hint
|
||||
result_str += (
|
||||
"\n\nHint: Previous search also returned no results. "
|
||||
"Try a very different query with different keywords, "
|
||||
"or broaden your search terms."
|
||||
)
|
||||
result = result_str
|
||||
|
||||
import json as _json
|
||||
|
||||
args_str = (
|
||||
_json.dumps(call.arguments)
|
||||
if isinstance(call.arguments, dict)
|
||||
else call.arguments
|
||||
)
|
||||
messages.append({
|
||||
"role": "assistant",
|
||||
"content": None,
|
||||
"tool_calls": [{
|
||||
"id": call_id,
|
||||
"type": "function",
|
||||
"function": {"name": call.name, "arguments": args_str},
|
||||
}],
|
||||
})
|
||||
tool_message = self.llm_handler.create_tool_message(call, result)
|
||||
messages.append(tool_message)
|
||||
|
||||
return messages, search_returned_empty
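# The executor is a generator whose final value travels in StopIteration.value.
# A standalone sketch of that pattern (not the real executor):
def _demo_execute():
    yield {"type": "tool_call", "data": {"status": "pending"}}
    return "tool output", "call-123"

_gen = _demo_execute()
while True:
    try:
        _event = next(_gen)  # intermediate status events, logged above
    except StopIteration as stop:
        demo_result, demo_call_id = stop.value  # ("tool output", "call-123")
        break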
|
||||
|
||||
def _collect_step_sources(self):
|
||||
"""Collect sources from InternalSearchTool and register with CitationManager."""
|
||||
cache_key = f"internal_search:{INTERNAL_TOOL_ID}:{self.user or ''}"
|
||||
tool = self.tool_executor._loaded_tools.get(cache_key)
|
||||
if tool and hasattr(tool, "retrieved_docs"):
|
||||
for doc in tool.retrieved_docs:
|
||||
self.citations.add(doc)
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# Phase 3: Synthesis
|
||||
# ------------------------------------------------------------------
|
||||
|
||||
def _synthesis_phase(
|
||||
self,
|
||||
question: str,
|
||||
plan: List[Dict],
|
||||
intermediate_reports: List[Dict],
|
||||
tools_dict: Dict,
|
||||
log_context: LogContext,
|
||||
) -> Generator[Dict, None, None]:
|
||||
"""Compile all findings into a final cited report (streaming)."""
|
||||
plan_lines = []
|
||||
for i, step in enumerate(plan, 1):
|
||||
plan_lines.append(
|
||||
f"{i}. {step.get('query', 'Unknown')} — {step.get('rationale', '')}"
|
||||
)
|
||||
plan_summary = "\n".join(plan_lines)
|
||||
|
||||
findings_parts = []
|
||||
for i, report in enumerate(intermediate_reports, 1):
|
||||
step_query = report["step"].get("query", "Unknown")
|
||||
content = report["content"]
|
||||
findings_parts.append(
|
||||
f"--- Step {i}: {step_query} ---\n{content}"
|
||||
)
|
||||
findings = "\n\n".join(findings_parts)
|
||||
|
||||
references = self.citations.format_references()
|
||||
|
||||
synthesis_prompt = SYNTHESIS_PROMPT.replace("{question}", question)
|
||||
synthesis_prompt = synthesis_prompt.replace("{plan_summary}", plan_summary)
|
||||
synthesis_prompt = synthesis_prompt.replace("{findings}", findings)
|
||||
synthesis_prompt = synthesis_prompt.replace("{references}", references)
|
||||
|
||||
messages = [
|
||||
{"role": "system", "content": synthesis_prompt},
|
||||
{"role": "user", "content": f"Please write the research report for: {question}"},
|
||||
]
|
||||
|
||||
llm_response = self.llm.gen_stream(
|
||||
model=self.model_id, messages=messages, tools=None
|
||||
)
|
||||
|
||||
if log_context:
|
||||
from application.logging import build_stack_data
|
||||
|
||||
log_context.stacks.append(
|
||||
{"component": "synthesis_llm", "data": build_stack_data(self.llm)}
|
||||
)
|
||||
|
||||
yield from self._handle_response(
|
||||
llm_response, tools_dict, messages, log_context
|
||||
)
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# Helpers
|
||||
# ------------------------------------------------------------------
|
||||
|
||||
def _extract_text(self, response) -> str:
|
||||
"""Extract text content from a non-streaming LLM response."""
|
||||
if isinstance(response, str):
|
||||
return response
|
||||
if hasattr(response, "message") and hasattr(response.message, "content"):
|
||||
return response.message.content or ""
|
||||
if hasattr(response, "choices") and response.choices:
|
||||
choice = response.choices[0]
|
||||
if hasattr(choice, "message") and hasattr(choice.message, "content"):
|
||||
return choice.message.content or ""
|
||||
if hasattr(response, "content") and isinstance(response.content, list):
|
||||
if response.content and hasattr(response.content[0], "text"):
|
||||
return response.content[0].text or ""
|
||||
return str(response) if response else ""
|
||||
477
application/agents/tool_executor.py
Normal file
@@ -0,0 +1,477 @@
|
||||
import logging
|
||||
import uuid
|
||||
from collections import Counter
|
||||
from typing import Dict, List, Optional, Tuple
|
||||
|
||||
from bson.objectid import ObjectId
|
||||
|
||||
from application.agents.tools.tool_action_parser import ToolActionParser
|
||||
from application.agents.tools.tool_manager import ToolManager
|
||||
from application.core.mongo_db import MongoDB
|
||||
from application.core.settings import settings
|
||||
from application.security.encryption import decrypt_credentials
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class ToolExecutor:
|
||||
"""Handles tool discovery, preparation, and execution.
|
||||
|
||||
Extracted from BaseAgent to separate concerns and enable tool caching.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
user_api_key: Optional[str] = None,
|
||||
user: Optional[str] = None,
|
||||
decoded_token: Optional[Dict] = None,
|
||||
):
|
||||
self.user_api_key = user_api_key
|
||||
self.user = user
|
||||
self.decoded_token = decoded_token
|
||||
self.tool_calls: List[Dict] = []
|
||||
self._loaded_tools: Dict[str, object] = {}
|
||||
self.conversation_id: Optional[str] = None
|
||||
self.client_tools: Optional[List[Dict]] = None
|
||||
self._name_to_tool: Dict[str, Tuple[str, str]] = {}
|
||||
self._tool_to_name: Dict[Tuple[str, str], str] = {}
|
||||
|
||||
def get_tools(self) -> Dict[str, Dict]:
|
||||
"""Load tool configs from DB based on user context.
|
||||
|
||||
If *client_tools* have been set on this executor, they are
|
||||
automatically merged into the returned dict.
|
||||
"""
|
||||
if self.user_api_key:
|
||||
tools = self._get_tools_by_api_key(self.user_api_key)
|
||||
else:
|
||||
tools = self._get_user_tools(self.user or "local")
|
||||
if self.client_tools:
|
||||
self.merge_client_tools(tools, self.client_tools)
|
||||
return tools
|
||||
|
||||
def _get_tools_by_api_key(self, api_key: str) -> Dict[str, Dict]:
|
||||
mongo = MongoDB.get_client()
|
||||
db = mongo[settings.MONGO_DB_NAME]
|
||||
agents_collection = db["agents"]
|
||||
tools_collection = db["user_tools"]
|
||||
|
||||
agent_data = agents_collection.find_one({"key": api_key})
|
||||
tool_ids = agent_data.get("tools", []) if agent_data else []
|
||||
|
||||
tools = (
|
||||
tools_collection.find(
|
||||
{"_id": {"$in": [ObjectId(tool_id) for tool_id in tool_ids]}}
|
||||
)
|
||||
if tool_ids
|
||||
else []
|
||||
)
|
||||
tools = list(tools)
|
||||
return {str(tool["_id"]): tool for tool in tools} if tools else {}
|
||||
|
||||
def _get_user_tools(self, user: str = "local") -> Dict[str, Dict]:
|
||||
mongo = MongoDB.get_client()
|
||||
db = mongo[settings.MONGO_DB_NAME]
|
||||
user_tools_collection = db["user_tools"]
|
||||
user_tools = user_tools_collection.find({"user": user, "status": True})
|
||||
user_tools = list(user_tools)
|
||||
return {str(i): tool for i, tool in enumerate(user_tools)}
|
||||
|
||||
def merge_client_tools(
|
||||
self, tools_dict: Dict, client_tools: List[Dict]
|
||||
) -> Dict:
|
||||
"""Merge client-provided tool definitions into tools_dict.
|
||||
|
||||
Client tools use the standard function-calling format::
|
||||
|
||||
[{"type": "function", "function": {"name": "get_weather",
|
||||
"description": "...", "parameters": {...}}}]
|
||||
|
||||
They are stored in *tools_dict* with ``client_side: True`` so that
|
||||
:meth:`check_pause` returns a pause signal instead of trying to
|
||||
execute them server-side.
|
||||
|
||||
Args:
|
||||
tools_dict: The mutable server tools dict (will be modified in place).
|
||||
client_tools: List of tool definitions in function-calling format.
|
||||
|
||||
Returns:
|
||||
The updated *tools_dict* (same reference, for convenience).
|
||||
"""
|
||||
for i, ct in enumerate(client_tools):
|
||||
func = ct.get("function", ct) # tolerate bare {"name":..} too
|
||||
name = func.get("name", f"clienttool{i}")
|
||||
tool_id = f"ct{i}"
|
||||
|
||||
tools_dict[tool_id] = {
|
||||
"name": name,
|
||||
"client_side": True,
|
||||
"actions": [
|
||||
{
|
||||
"name": name,
|
||||
"description": func.get("description", ""),
|
||||
"active": True,
|
||||
"parameters": func.get("parameters", {}),
|
||||
}
|
||||
],
|
||||
}
|
||||
return tools_dict
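# Usage sketch; the tool definition below is hypothetical and only mirrors the
# function-calling format described in the docstring.
demo_executor = ToolExecutor(user="local")
demo_tools = demo_executor.merge_client_tools({}, [{
    "type": "function",
    "function": {"name": "get_weather", "description": "Fetch weather",
                 "parameters": {"type": "object", "properties": {}}},
}])
# demo_tools == {"ct0": {"name": "get_weather", "client_side": True, "actions": [...]}}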
|
||||
|
||||
def prepare_tools_for_llm(self, tools_dict: Dict) -> List[Dict]:
|
||||
"""Convert tool configs to LLM function schemas.
|
||||
|
||||
Action names are kept clean for the LLM:
|
||||
- Unique action names appear as-is (e.g. ``get_weather``).
|
||||
- Duplicate action names get numbered suffixes (e.g. ``search_1``,
|
||||
``search_2``).
|
||||
|
||||
A reverse mapping is stored in ``_name_to_tool`` so that tool calls
|
||||
can be routed back to the correct ``(tool_id, action_name)`` without
|
||||
brittle string splitting.
|
||||
"""
|
||||
# Pass 1: collect entries and count action name occurrences
|
||||
entries: List[Tuple[str, str, Dict, bool]] = [] # (tool_id, action_name, action, is_client)
|
||||
name_counts: Counter = Counter()
|
||||
|
||||
for tool_id, tool in tools_dict.items():
|
||||
is_api = tool["name"] == "api_tool"
|
||||
is_client = tool.get("client_side", False)
|
||||
|
||||
if is_api and "actions" not in tool.get("config", {}):
|
||||
continue
|
||||
if not is_api and "actions" not in tool:
|
||||
continue
|
||||
|
||||
actions = (
|
||||
tool["config"]["actions"].values()
|
||||
if is_api
|
||||
else tool["actions"]
|
||||
)
|
||||
|
||||
for action in actions:
|
||||
if not action.get("active", True):
|
||||
continue
|
||||
entries.append((tool_id, action["name"], action, is_client))
|
||||
name_counts[action["name"]] += 1
|
||||
|
||||
# Pass 2: assign LLM-visible names and build mappings
|
||||
self._name_to_tool = {}
|
||||
self._tool_to_name = {}
|
||||
collision_counters: Dict[str, int] = {}
|
||||
all_llm_names: set = set()
|
||||
|
||||
result = []
|
||||
for tool_id, action_name, action, is_client in entries:
|
||||
if name_counts[action_name] == 1:
|
||||
llm_name = action_name
|
||||
else:
|
||||
counter = collision_counters.get(action_name, 1)
|
||||
candidate = f"{action_name}_{counter}"
|
||||
# Skip if candidate collides with a unique action name
|
||||
while candidate in all_llm_names or (
|
||||
candidate in name_counts and name_counts[candidate] == 1
|
||||
):
|
||||
counter += 1
|
||||
candidate = f"{action_name}_{counter}"
|
||||
collision_counters[action_name] = counter + 1
|
||||
llm_name = candidate
|
||||
|
||||
all_llm_names.add(llm_name)
|
||||
self._name_to_tool[llm_name] = (tool_id, action_name)
|
||||
self._tool_to_name[(tool_id, action_name)] = llm_name
|
||||
|
||||
if is_client:
|
||||
params = action.get("parameters", {})
|
||||
else:
|
||||
params = self._build_tool_parameters(action)
|
||||
|
||||
result.append({
|
||||
"type": "function",
|
||||
"function": {
|
||||
"name": llm_name,
|
||||
"description": action.get("description", ""),
|
||||
"parameters": params,
|
||||
},
|
||||
})
|
||||
return result
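# Sketch of the collision handling; the tool configs below are trimmed
# assumptions, not real user_tools documents.
demo_exec = ToolExecutor(user="local")
demo_schemas = demo_exec.prepare_tools_for_llm({
    "a": {"name": "toolA", "actions": [{"name": "search", "active": True}]},
    "b": {"name": "toolB", "actions": [{"name": "search", "active": True}]},
    "c": {"name": "toolC", "actions": [{"name": "get_weather", "active": True}]},
})
# LLM-visible names become "search_1", "search_2" and "get_weather";
# demo_exec._name_to_tool maps each back to its (tool_id, action_name) pair.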
|
||||
|
||||
def _build_tool_parameters(self, action: Dict) -> Dict:
|
||||
params = {"type": "object", "properties": {}, "required": []}
|
||||
for param_type in ["query_params", "headers", "body", "parameters"]:
|
||||
if param_type in action and action[param_type].get("properties"):
|
||||
for k, v in action[param_type]["properties"].items():
|
||||
if v.get("filled_by_llm", True):
|
||||
params["properties"][k] = {
|
||||
key: value
|
||||
for key, value in v.items()
|
||||
if key not in ("filled_by_llm", "value", "required")
|
||||
}
|
||||
if v.get("required", False):
|
||||
params["required"].append(k)
|
||||
return params
|
||||
|
||||
def check_pause(
|
||||
self, tools_dict: Dict, call, llm_class_name: str
|
||||
) -> Optional[Dict]:
|
||||
"""Check if a tool call requires pausing for approval or client execution.
|
||||
|
||||
Returns a dict describing the pending action if pause is needed, None otherwise.
|
||||
"""
|
||||
parser = ToolActionParser(llm_class_name, name_mapping=self._name_to_tool)
|
||||
tool_id, action_name, call_args = parser.parse_args(call)
|
||||
call_id = getattr(call, "id", None) or str(uuid.uuid4())
|
||||
llm_name = getattr(call, "name", "")
|
||||
|
||||
if tool_id is None or action_name is None or tool_id not in tools_dict:
|
||||
return None # Will be handled as error by execute()
|
||||
|
||||
tool_data = tools_dict[tool_id]
|
||||
|
||||
# Client-side tools
|
||||
if tool_data.get("client_side"):
|
||||
return {
|
||||
"call_id": call_id,
|
||||
"name": llm_name,
|
||||
"tool_name": tool_data.get("name", "unknown"),
|
||||
"tool_id": tool_id,
|
||||
"action_name": action_name,
|
||||
"llm_name": llm_name,
|
||||
"arguments": call_args if isinstance(call_args, dict) else {},
|
||||
"pause_type": "requires_client_execution",
|
||||
"thought_signature": getattr(call, "thought_signature", None),
|
||||
}
|
||||
|
||||
# Approval required
|
||||
if tool_data["name"] == "api_tool":
|
||||
action_data = tool_data.get("config", {}).get("actions", {}).get(
|
||||
action_name, {}
|
||||
)
|
||||
else:
|
||||
action_data = next(
|
||||
(a for a in tool_data.get("actions", []) if a["name"] == action_name),
|
||||
{},
|
||||
)
|
||||
|
||||
if action_data.get("require_approval"):
|
||||
return {
|
||||
"call_id": call_id,
|
||||
"name": llm_name,
|
||||
"tool_name": tool_data.get("name", "unknown"),
|
||||
"tool_id": tool_id,
|
||||
"action_name": action_name,
|
||||
"llm_name": llm_name,
|
||||
"arguments": call_args if isinstance(call_args, dict) else {},
|
||||
"pause_type": "awaiting_approval",
|
||||
"thought_signature": getattr(call, "thought_signature", None),
|
||||
}
|
||||
|
||||
return None
|
||||
|
||||
def execute(self, tools_dict: Dict, call, llm_class_name: str):
|
||||
"""Execute a tool call. Yields status events, returns (result, call_id)."""
|
||||
parser = ToolActionParser(llm_class_name, name_mapping=self._name_to_tool)
|
||||
tool_id, action_name, call_args = parser.parse_args(call)
|
||||
llm_name = getattr(call, "name", "unknown")
|
||||
|
||||
call_id = getattr(call, "id", None) or str(uuid.uuid4())
|
||||
|
||||
if tool_id is None or action_name is None:
|
||||
error_message = f"Error: Failed to parse LLM tool call. Tool name: {llm_name}"
|
||||
logger.error(error_message)
|
||||
|
||||
tool_call_data = {
|
||||
"tool_name": "unknown",
|
||||
"call_id": call_id,
|
||||
"action_name": llm_name,
|
||||
"arguments": call_args or {},
|
||||
"result": f"Failed to parse tool call. Invalid tool name format: {llm_name}",
|
||||
}
|
||||
yield {"type": "tool_call", "data": {**tool_call_data, "status": "error"}}
|
||||
self.tool_calls.append(tool_call_data)
|
||||
return "Failed to parse tool call.", call_id
|
||||
|
||||
if tool_id not in tools_dict:
|
||||
error_message = f"Error: Tool ID '{tool_id}' extracted from LLM call not found in available tools_dict. Available IDs: {list(tools_dict.keys())}"
|
||||
logger.error(error_message)
|
||||
|
||||
tool_call_data = {
|
||||
"tool_name": "unknown",
|
||||
"call_id": call_id,
|
||||
"action_name": llm_name,
|
||||
"arguments": call_args,
|
||||
"result": f"Tool with ID {tool_id} not found. Available tools: {list(tools_dict.keys())}",
|
||||
}
|
||||
yield {"type": "tool_call", "data": {**tool_call_data, "status": "error"}}
|
||||
self.tool_calls.append(tool_call_data)
|
||||
return f"Tool with ID {tool_id} not found.", call_id
|
||||
|
||||
tool_call_data = {
|
||||
"tool_name": tools_dict[tool_id]["name"],
|
||||
"call_id": call_id,
|
||||
"action_name": llm_name,
|
||||
"arguments": call_args,
|
||||
}
|
||||
yield {"type": "tool_call", "data": {**tool_call_data, "status": "pending"}}
|
||||
|
||||
tool_data = tools_dict[tool_id]
|
||||
action_data = (
|
||||
tool_data["config"]["actions"][action_name]
|
||||
if tool_data["name"] == "api_tool"
|
||||
else next(
|
||||
action
|
||||
for action in tool_data["actions"]
|
||||
if action["name"] == action_name
|
||||
)
|
||||
)
|
||||
|
||||
query_params, headers, body, parameters = {}, {}, {}, {}
|
||||
param_types = {
|
||||
"query_params": query_params,
|
||||
"headers": headers,
|
||||
"body": body,
|
||||
"parameters": parameters,
|
||||
}
|
||||
|
||||
for param_type, target_dict in param_types.items():
|
||||
if param_type in action_data and action_data[param_type].get("properties"):
|
||||
for param, details in action_data[param_type]["properties"].items():
|
||||
if (
|
||||
param not in call_args
|
||||
and "value" in details
|
||||
and details["value"]
|
||||
):
|
||||
target_dict[param] = details["value"]
|
||||
for param, value in call_args.items():
|
||||
for param_type, target_dict in param_types.items():
|
||||
if param_type in action_data and param in action_data[param_type].get(
|
||||
"properties", {}
|
||||
):
|
||||
target_dict[param] = value
|
||||
|
||||
# Load tool (with caching)
|
||||
tool = self._get_or_load_tool(
|
||||
tool_data, tool_id, action_name,
|
||||
headers=headers, query_params=query_params,
|
||||
)
|
||||
|
||||
resolved_arguments = (
|
||||
{"query_params": query_params, "headers": headers, "body": body}
|
||||
if tool_data["name"] == "api_tool"
|
||||
else parameters
|
||||
)
|
||||
if tool_data["name"] == "api_tool":
|
||||
logger.debug(
|
||||
f"Executing api: {action_name} with query_params: {query_params}, headers: {headers}, body: {body}"
|
||||
)
|
||||
result = tool.execute_action(action_name, **body)
|
||||
else:
|
||||
logger.debug(f"Executing tool: {action_name} with args: {call_args}")
|
||||
result = tool.execute_action(action_name, **parameters)
|
||||
|
||||
get_artifact_id = (
|
||||
getattr(tool, "get_artifact_id", None)
|
||||
if tool_data["name"] != "api_tool"
|
||||
else None
|
||||
)
|
||||
|
||||
artifact_id = None
|
||||
if callable(get_artifact_id):
|
||||
try:
|
||||
artifact_id = get_artifact_id(action_name, **parameters)
|
||||
except Exception:
|
||||
logger.exception(
|
||||
"Failed to extract artifact_id from tool %s for action %s",
|
||||
tool_data["name"],
|
||||
action_name,
|
||||
)
|
||||
|
||||
artifact_id = str(artifact_id).strip() if artifact_id is not None else ""
|
||||
if artifact_id:
|
||||
tool_call_data["artifact_id"] = artifact_id
|
||||
result_full = str(result)
|
||||
tool_call_data["resolved_arguments"] = resolved_arguments
|
||||
tool_call_data["result_full"] = result_full
|
||||
tool_call_data["result"] = (
|
||||
f"{result_full[:50]}..." if len(result_full) > 50 else result_full
|
||||
)
|
||||
|
||||
stream_tool_call_data = {
|
||||
key: value
|
||||
for key, value in tool_call_data.items()
|
||||
if key not in {"result_full", "resolved_arguments"}
|
||||
}
|
||||
yield {"type": "tool_call", "data": {**stream_tool_call_data, "status": "completed"}}
|
||||
self.tool_calls.append(tool_call_data)
|
||||
|
||||
return result, call_id
|
||||
|
||||
def _get_or_load_tool(
|
||||
self, tool_data: Dict, tool_id: str, action_name: str,
|
||||
headers: Optional[Dict] = None, query_params: Optional[Dict] = None,
|
||||
):
|
||||
"""Load a tool, using cache when possible."""
|
||||
cache_key = f"{tool_data['name']}:{tool_id}:{self.user or ''}"
|
||||
if cache_key in self._loaded_tools:
|
||||
return self._loaded_tools[cache_key]
|
||||
|
||||
tm = ToolManager(config={})
|
||||
|
||||
if tool_data["name"] == "api_tool":
|
||||
action_config = tool_data["config"]["actions"][action_name]
|
||||
tool_config = {
|
||||
"url": action_config["url"],
|
||||
"method": action_config["method"],
|
||||
"headers": headers or {},
|
||||
"query_params": query_params or {},
|
||||
}
|
||||
if "body_content_type" in action_config:
|
||||
tool_config["body_content_type"] = action_config.get(
|
||||
"body_content_type", "application/json"
|
||||
)
|
||||
tool_config["body_encoding_rules"] = action_config.get(
|
||||
"body_encoding_rules", {}
|
||||
)
|
||||
else:
|
||||
tool_config = tool_data["config"].copy() if tool_data["config"] else {}
|
||||
if tool_config.get("encrypted_credentials") and self.user:
|
||||
decrypted = decrypt_credentials(
|
||||
tool_config["encrypted_credentials"], self.user
|
||||
)
|
||||
tool_config.update(decrypted)
|
||||
tool_config["auth_credentials"] = decrypted
|
||||
tool_config.pop("encrypted_credentials", None)
|
||||
tool_config["tool_id"] = str(tool_data.get("_id", tool_id))
|
||||
if self.conversation_id:
|
||||
tool_config["conversation_id"] = self.conversation_id
|
||||
if tool_data["name"] == "mcp_tool":
|
||||
tool_config["query_mode"] = True
|
||||
|
||||
tool = tm.load_tool(
|
||||
tool_data["name"],
|
||||
tool_config=tool_config,
|
||||
user_id=self.user,
|
||||
)
|
||||
|
||||
# Don't cache api_tool since config varies by action
|
||||
if tool_data["name"] != "api_tool":
|
||||
self._loaded_tools[cache_key] = tool
|
||||
|
||||
return tool
|
||||
|
||||
def get_truncated_tool_calls(self) -> List[Dict]:
|
||||
return [
|
||||
{
|
||||
"tool_name": tool_call.get("tool_name"),
|
||||
"call_id": tool_call.get("call_id"),
|
||||
"action_name": tool_call.get("action_name"),
|
||||
"arguments": tool_call.get("arguments"),
|
||||
"artifact_id": tool_call.get("artifact_id"),
|
||||
"result": (
|
||||
f"{str(tool_call['result'])[:50]}..."
|
||||
if len(str(tool_call["result"])) > 50
|
||||
else tool_call["result"]
|
||||
),
|
||||
"status": "completed",
|
||||
}
|
||||
for tool_call in self.tool_calls
|
||||
]
|
||||
323
application/agents/tools/api_body_serializer.py
Normal file
@@ -0,0 +1,323 @@
|
||||
import base64
|
||||
import json
|
||||
import logging
|
||||
from enum import Enum
|
||||
from typing import Any, Dict, Optional, Union
|
||||
from urllib.parse import quote, urlencode
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class ContentType(str, Enum):
|
||||
"""Supported content types for request bodies."""
|
||||
|
||||
JSON = "application/json"
|
||||
FORM_URLENCODED = "application/x-www-form-urlencoded"
|
||||
MULTIPART_FORM_DATA = "multipart/form-data"
|
||||
TEXT_PLAIN = "text/plain"
|
||||
XML = "application/xml"
|
||||
OCTET_STREAM = "application/octet-stream"
|
||||
|
||||
|
||||
class RequestBodySerializer:
|
||||
"""Serializes request bodies according to content-type and OpenAPI 3.1 spec."""
|
||||
|
||||
@staticmethod
|
||||
def serialize(
|
||||
body_data: Dict[str, Any],
|
||||
content_type: str = ContentType.JSON,
|
||||
encoding_rules: Optional[Dict[str, Dict[str, Any]]] = None,
|
||||
) -> tuple[Union[str, bytes], Dict[str, str]]:
|
||||
"""
|
||||
Serialize body data to appropriate format.
|
||||
|
||||
Args:
|
||||
body_data: Dictionary of body parameters
|
||||
content_type: Content-Type header value
|
||||
encoding_rules: OpenAPI Encoding Object rules per field
|
||||
|
||||
Returns:
|
||||
Tuple of (serialized_body, updated_headers_dict)
|
||||
|
||||
Raises:
|
||||
ValueError: If serialization fails
|
||||
"""
|
||||
if not body_data:
|
||||
return None, {}
|
||||
|
||||
try:
|
||||
content_type_lower = content_type.lower().split(";")[0].strip()
|
||||
|
||||
if content_type_lower == ContentType.JSON:
|
||||
return RequestBodySerializer._serialize_json(body_data)
|
||||
|
||||
elif content_type_lower == ContentType.FORM_URLENCODED:
|
||||
return RequestBodySerializer._serialize_form_urlencoded(
|
||||
body_data, encoding_rules
|
||||
)
|
||||
|
||||
elif content_type_lower == ContentType.MULTIPART_FORM_DATA:
|
||||
return RequestBodySerializer._serialize_multipart_form_data(
|
||||
body_data, encoding_rules
|
||||
)
|
||||
|
||||
elif content_type_lower == ContentType.TEXT_PLAIN:
|
||||
return RequestBodySerializer._serialize_text_plain(body_data)
|
||||
|
||||
elif content_type_lower == ContentType.XML:
|
||||
return RequestBodySerializer._serialize_xml(body_data)
|
||||
|
||||
elif content_type_lower == ContentType.OCTET_STREAM:
|
||||
return RequestBodySerializer._serialize_octet_stream(body_data)
|
||||
|
||||
else:
|
||||
logger.warning(
|
||||
f"Unknown content type: {content_type}, treating as JSON"
|
||||
)
|
||||
return RequestBodySerializer._serialize_json(body_data)
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error serializing body: {str(e)}", exc_info=True)
|
||||
raise ValueError(f"Failed to serialize request body: {str(e)}")
|
||||
|
||||
@staticmethod
|
||||
def _serialize_json(body_data: Dict[str, Any]) -> tuple[str, Dict[str, str]]:
|
||||
"""Serialize body as JSON per OpenAPI spec."""
|
||||
try:
|
||||
serialized = json.dumps(
|
||||
body_data, separators=(",", ":"), ensure_ascii=False
|
||||
)
|
||||
headers = {"Content-Type": ContentType.JSON.value}
|
||||
return serialized, headers
|
||||
except (TypeError, ValueError) as e:
|
||||
raise ValueError(f"Failed to serialize JSON body: {str(e)}")
|
||||
|
||||
@staticmethod
|
||||
def _serialize_form_urlencoded(
|
||||
body_data: Dict[str, Any],
|
||||
encoding_rules: Optional[Dict[str, Dict[str, Any]]] = None,
|
||||
) -> tuple[str, Dict[str, str]]:
|
||||
"""Serialize body as application/x-www-form-urlencoded per RFC1866/RFC3986."""
|
||||
encoding_rules = encoding_rules or {}
|
||||
params = []
|
||||
|
||||
for key, value in body_data.items():
|
||||
if value is None:
|
||||
continue
|
||||
|
||||
rule = encoding_rules.get(key, {})
|
||||
style = rule.get("style", "form")
|
||||
explode = rule.get("explode", style == "form")
|
||||
content_type = rule.get("contentType", "text/plain")
|
||||
|
||||
serialized_value = RequestBodySerializer._serialize_form_value(
|
||||
value, style, explode, content_type, key
|
||||
)
|
||||
|
||||
if isinstance(serialized_value, list):
|
||||
for sv in serialized_value:
|
||||
params.append((key, sv))
|
||||
else:
|
||||
params.append((key, serialized_value))
|
||||
|
||||
# Use standard urlencode (replaces space with +)
|
||||
serialized = urlencode(params, safe="")
|
||||
headers = {"Content-Type": ContentType.FORM_URLENCODED.value}
|
||||
return serialized, headers
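# For example, with simple values and the default form/explode rules:
demo_form, _ = RequestBodySerializer._serialize_form_urlencoded(
    {"tags": ["a", "b"], "limit": 10}
)
# demo_form == "tags=a&tags=b&limit=10", and the returned headers set
# Content-Type to application/x-www-form-urlencoded.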
|
||||
|
||||
@staticmethod
|
||||
def _serialize_form_value(
|
||||
value: Any, style: str, explode: bool, content_type: str, key: str
|
||||
) -> Union[str, list]:
|
||||
"""Serialize individual form value with encoding rules."""
|
||||
if isinstance(value, dict):
|
||||
if content_type == "application/json":
|
||||
return json.dumps(value, separators=(",", ":"))
|
||||
elif content_type == "application/xml":
|
||||
return RequestBodySerializer._dict_to_xml(value)
|
||||
else:
|
||||
if style == "deepObject" and explode:
|
||||
return [
|
||||
f"{RequestBodySerializer._percent_encode(str(v))}"
|
||||
for v in value.values()
|
||||
]
|
||||
elif explode:
|
||||
return [
|
||||
f"{RequestBodySerializer._percent_encode(str(v))}"
|
||||
for v in value.values()
|
||||
]
|
||||
else:
|
||||
pairs = [f"{k},{v}" for k, v in value.items()]
|
||||
return RequestBodySerializer._percent_encode(",".join(pairs))
|
||||
|
||||
elif isinstance(value, (list, tuple)):
|
||||
if explode:
|
||||
return [
|
||||
RequestBodySerializer._percent_encode(str(item)) for item in value
|
||||
]
|
||||
else:
|
||||
return RequestBodySerializer._percent_encode(
|
||||
",".join(str(v) for v in value)
|
||||
)
|
||||
|
||||
else:
|
||||
return RequestBodySerializer._percent_encode(str(value))
|
||||
|
||||
@staticmethod
|
||||
def _serialize_multipart_form_data(
|
||||
body_data: Dict[str, Any],
|
||||
encoding_rules: Optional[Dict[str, Dict[str, Any]]] = None,
|
||||
) -> tuple[bytes, Dict[str, str]]:
|
||||
"""
|
||||
Serialize body as multipart/form-data per RFC7578.
|
||||
|
||||
Supports file uploads and encoding rules.
|
||||
"""
|
||||
import secrets
|
||||
|
||||
encoding_rules = encoding_rules or {}
|
||||
boundary = f"----DocsGPT{secrets.token_hex(16)}"
|
||||
parts = []
|
||||
|
||||
for key, value in body_data.items():
|
||||
if value is None:
|
||||
continue
|
||||
|
||||
rule = encoding_rules.get(key, {})
|
||||
content_type = rule.get("contentType", "text/plain")
|
||||
headers_rule = rule.get("headers", {})
|
||||
|
||||
part = RequestBodySerializer._create_multipart_part(
|
||||
key, value, content_type, headers_rule
|
||||
)
|
||||
parts.append(part)
|
||||
|
||||
body_bytes = f"--{boundary}\r\n".encode("utf-8")
|
||||
body_bytes += f"--{boundary}\r\n".join(parts).encode("utf-8")
|
||||
body_bytes += f"\r\n--{boundary}--\r\n".encode("utf-8")
|
||||
|
||||
headers = {
|
||||
"Content-Type": f"multipart/form-data; boundary={boundary}",
|
||||
}
|
||||
return body_bytes, headers
|
||||
|
||||
@staticmethod
|
||||
def _create_multipart_part(
|
||||
name: str, value: Any, content_type: str, headers_rule: Dict[str, Any]
|
||||
) -> str:
|
||||
"""Create a single multipart/form-data part."""
|
||||
headers = [
|
||||
f'Content-Disposition: form-data; name="{RequestBodySerializer._percent_encode(name)}"'
|
||||
]
|
||||
|
||||
if isinstance(value, bytes):
|
||||
if content_type == "application/octet-stream":
|
||||
value_encoded = base64.b64encode(value).decode("utf-8")
|
||||
else:
|
||||
value_encoded = value.decode("utf-8", errors="replace")
|
||||
headers.append(f"Content-Type: {content_type}")
|
||||
headers.append("Content-Transfer-Encoding: base64")
|
||||
elif isinstance(value, dict):
|
||||
if content_type == "application/json":
|
||||
value_encoded = json.dumps(value, separators=(",", ":"))
|
||||
elif content_type == "application/xml":
|
||||
value_encoded = RequestBodySerializer._dict_to_xml(value)
|
||||
else:
|
||||
value_encoded = str(value)
|
||||
headers.append(f"Content-Type: {content_type}")
|
||||
elif isinstance(value, str) and content_type != "text/plain":
|
||||
try:
|
||||
if content_type == "application/json":
|
||||
json.loads(value)
|
||||
value_encoded = value
|
||||
elif content_type == "application/xml":
|
||||
value_encoded = value
|
||||
else:
|
||||
value_encoded = str(value)
|
||||
except json.JSONDecodeError:
|
||||
value_encoded = str(value)
|
||||
headers.append(f"Content-Type: {content_type}")
|
||||
else:
|
||||
value_encoded = str(value)
|
||||
if content_type != "text/plain":
|
||||
headers.append(f"Content-Type: {content_type}")
|
||||
|
||||
part = "\r\n".join(headers) + "\r\n\r\n" + value_encoded + "\r\n"
|
||||
return part
|
||||
|
||||
@staticmethod
|
||||
def _serialize_text_plain(body_data: Dict[str, Any]) -> tuple[str, Dict[str, str]]:
|
||||
"""Serialize body as plain text."""
|
||||
if len(body_data) == 1:
|
||||
value = list(body_data.values())[0]
|
||||
return str(value), {"Content-Type": ContentType.TEXT_PLAIN.value}
|
||||
else:
|
||||
text = "\n".join(f"{k}: {v}" for k, v in body_data.items())
|
||||
return text, {"Content-Type": ContentType.TEXT_PLAIN.value}
|
||||
|
||||
@staticmethod
|
||||
def _serialize_xml(body_data: Dict[str, Any]) -> tuple[str, Dict[str, str]]:
|
||||
"""Serialize body as XML."""
|
||||
xml_str = RequestBodySerializer._dict_to_xml(body_data)
|
||||
return xml_str, {"Content-Type": ContentType.XML.value}
|
||||
|
||||
@staticmethod
|
||||
def _serialize_octet_stream(
|
||||
body_data: Dict[str, Any],
|
||||
) -> tuple[bytes, Dict[str, str]]:
|
||||
"""Serialize body as binary octet stream."""
|
||||
if isinstance(body_data, bytes):
|
||||
return body_data, {"Content-Type": ContentType.OCTET_STREAM.value}
|
||||
elif isinstance(body_data, str):
|
||||
return body_data.encode("utf-8"), {
|
||||
"Content-Type": ContentType.OCTET_STREAM.value
|
||||
}
|
||||
else:
|
||||
serialized = json.dumps(body_data)
|
||||
return serialized.encode("utf-8"), {
|
||||
"Content-Type": ContentType.OCTET_STREAM.value
|
||||
}
|
||||
|
||||
@staticmethod
|
||||
def _percent_encode(value: str, safe_chars: str = "") -> str:
|
||||
"""
|
||||
Percent-encode per RFC3986.
|
||||
|
||||
Args:
|
||||
value: String to encode
|
||||
safe_chars: Additional characters to not encode
|
||||
"""
|
||||
return quote(value, safe=safe_chars)
|
||||
|
||||
@staticmethod
|
||||
def _dict_to_xml(data: Dict[str, Any], root_name: str = "root") -> str:
|
||||
"""
|
||||
Convert dict to simple XML format.
|
||||
"""
|
||||
|
||||
def build_xml(obj: Any, name: str) -> str:
|
||||
if isinstance(obj, dict):
|
||||
inner = "".join(build_xml(v, k) for k, v in obj.items())
|
||||
return f"<{name}>{inner}</{name}>"
|
||||
elif isinstance(obj, (list, tuple)):
|
||||
items = "".join(
|
||||
build_xml(item, f"{name[:-1] if name.endswith('s') else name}")
|
||||
for item in obj
|
||||
)
|
||||
return items
|
||||
else:
|
||||
return f"<{name}>{RequestBodySerializer._escape_xml(str(obj))}</{name}>"
|
||||
|
||||
root = build_xml(data, root_name)
|
||||
return f'<?xml version="1.0" encoding="UTF-8"?>{root}'
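# For instance, list keys are singularized and everything is wrapped in a
# default <root> element:
demo_xml = RequestBodySerializer._dict_to_xml(
    {"user": {"name": "Ada", "roles": ["admin", "dev"]}}
)
# demo_xml == '<?xml version="1.0" encoding="UTF-8"?>'
#             '<root><user><name>Ada</name><role>admin</role><role>dev</role></user></root>'
# (one concatenated string)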
|
||||
|
||||
@staticmethod
|
||||
def _escape_xml(value: str) -> str:
|
||||
"""Escape XML special characters."""
|
||||
return (
|
||||
value.replace("&", "&")
|
||||
.replace("<", "<")
|
||||
.replace(">", ">")
|
||||
.replace('"', """)
|
||||
.replace("'", "'")
|
||||
)
|
||||
280
application/agents/tools/api_tool.py
Normal file
@@ -0,0 +1,280 @@
|
||||
import json
|
||||
import logging
|
||||
import re
|
||||
from typing import Any, Dict, Optional
|
||||
from urllib.parse import urlencode
|
||||
|
||||
import requests
|
||||
|
||||
from application.agents.tools.api_body_serializer import (
|
||||
ContentType,
|
||||
RequestBodySerializer,
|
||||
)
|
||||
from application.agents.tools.base import Tool
|
||||
from application.core.url_validation import validate_url, SSRFError
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
DEFAULT_TIMEOUT = 90 # seconds
|
||||
|
||||
|
||||
class APITool(Tool):
|
||||
"""
|
||||
API Tool
|
||||
A flexible tool for performing various API actions (e.g., sending messages, retrieving data) via custom user-specified APIs.
|
||||
"""
|
||||
|
||||
def __init__(self, config):
|
||||
self.config = config
|
||||
self.url = config.get("url", "")
|
||||
self.method = config.get("method", "GET")
|
||||
self.headers = config.get("headers", {})
|
||||
self.query_params = config.get("query_params", {})
|
||||
self.body_content_type = config.get("body_content_type", ContentType.JSON)
|
||||
self.body_encoding_rules = config.get("body_encoding_rules", {})
|
||||
|
||||
def execute_action(self, action_name, **kwargs):
|
||||
"""Execute an API action with the given arguments."""
|
||||
return self._make_api_call(
|
||||
self.url,
|
||||
self.method,
|
||||
self.headers,
|
||||
self.query_params,
|
||||
kwargs,
|
||||
self.body_content_type,
|
||||
self.body_encoding_rules,
|
||||
)
|
||||
|
||||
def _make_api_call(
|
||||
self,
|
||||
url: str,
|
||||
method: str,
|
||||
headers: Dict[str, str],
|
||||
query_params: Dict[str, Any],
|
||||
body: Dict[str, Any],
|
||||
content_type: str = ContentType.JSON,
|
||||
encoding_rules: Optional[Dict[str, Dict[str, Any]]] = None,
|
||||
) -> Dict[str, Any]:
|
||||
"""
|
||||
Make an API call with proper body serialization and error handling.
|
||||
|
||||
Args:
|
||||
url: API endpoint URL
|
||||
method: HTTP method (GET, POST, PUT, DELETE, PATCH, HEAD, OPTIONS)
|
||||
headers: Request headers dict
|
||||
query_params: URL query parameters
|
||||
body: Request body as dict
|
||||
content_type: Content-Type for serialization
|
||||
encoding_rules: OpenAPI encoding rules
|
||||
|
||||
Returns:
|
||||
Dict with status_code, data, and message
|
||||
"""
|
||||
request_url = url
|
||||
request_headers = headers.copy() if headers else {}
|
||||
response = None
|
||||
|
||||
# Validate URL to prevent SSRF attacks
|
||||
try:
|
||||
validate_url(request_url)
|
||||
except SSRFError as e:
|
||||
logger.error(f"URL validation failed: {e}")
|
||||
return {
|
||||
"status_code": None,
|
||||
"message": f"URL validation error: {e}",
|
||||
"data": None,
|
||||
}
|
||||
|
||||
try:
|
||||
path_params_used = set()
|
||||
if query_params:
|
||||
for match in re.finditer(r"\{([^}]+)\}", request_url):
|
||||
param_name = match.group(1)
|
||||
if param_name in query_params:
|
||||
request_url = request_url.replace(
|
||||
f"{{{param_name}}}", str(query_params[param_name])
|
||||
)
|
||||
path_params_used.add(param_name)
|
||||
remaining_params = {
|
||||
k: v for k, v in query_params.items() if k not in path_params_used
|
||||
}
|
||||
if remaining_params:
|
||||
query_string = urlencode(remaining_params)
|
||||
separator = "&" if "?" in request_url else "?"
|
||||
request_url = f"{request_url}{separator}{query_string}"
|
||||
|
||||
# Re-validate URL after parameter substitution to prevent SSRF via path params
|
||||
try:
|
||||
validate_url(request_url)
|
||||
except SSRFError as e:
|
||||
logger.error(f"URL validation failed after parameter substitution: {e}")
|
||||
return {
|
||||
"status_code": None,
|
||||
"message": f"URL validation error: {e}",
|
||||
"data": None,
|
||||
}
|
||||
|
||||
# Serialize body based on content type
|
||||
|
||||
if body and body != {}:
|
||||
try:
|
||||
serialized_body, body_headers = RequestBodySerializer.serialize(
|
||||
body, content_type, encoding_rules
|
||||
)
|
||||
request_headers.update(body_headers)
|
||||
except ValueError as e:
|
||||
logger.error(f"Body serialization failed: {str(e)}")
|
||||
return {
|
||||
"status_code": None,
|
||||
"message": f"Body serialization error: {str(e)}",
|
||||
"data": None,
|
||||
}
|
||||
else:
|
||||
serialized_body = None
|
||||
if "Content-Type" not in request_headers and method not in [
|
||||
"GET",
|
||||
"HEAD",
|
||||
"DELETE",
|
||||
]:
|
||||
request_headers["Content-Type"] = ContentType.JSON
|
||||
logger.debug(
|
||||
f"API Call: {method} {request_url} | Content-Type: {request_headers.get('Content-Type', 'N/A')}"
|
||||
)
|
||||
|
||||
if method.upper() == "GET":
|
||||
response = requests.get(
|
||||
request_url, headers=request_headers, timeout=DEFAULT_TIMEOUT
|
||||
)
|
||||
elif method.upper() == "POST":
|
||||
response = requests.post(
|
||||
request_url,
|
||||
data=serialized_body,
|
||||
headers=request_headers,
|
||||
timeout=DEFAULT_TIMEOUT,
|
||||
)
|
||||
elif method.upper() == "PUT":
|
||||
response = requests.put(
|
||||
request_url,
|
||||
data=serialized_body,
|
||||
headers=request_headers,
|
||||
timeout=DEFAULT_TIMEOUT,
|
||||
)
|
||||
elif method.upper() == "DELETE":
|
||||
response = requests.delete(
|
||||
request_url, headers=request_headers, timeout=DEFAULT_TIMEOUT
|
||||
)
|
||||
elif method.upper() == "PATCH":
|
||||
response = requests.patch(
|
||||
request_url,
|
||||
data=serialized_body,
|
||||
headers=request_headers,
|
||||
timeout=DEFAULT_TIMEOUT,
|
||||
)
|
||||
elif method.upper() == "HEAD":
|
||||
response = requests.head(
|
||||
request_url, headers=request_headers, timeout=DEFAULT_TIMEOUT
|
||||
)
|
||||
elif method.upper() == "OPTIONS":
|
||||
response = requests.options(
|
||||
request_url, headers=request_headers, timeout=DEFAULT_TIMEOUT
|
||||
)
|
||||
else:
|
||||
return {
|
||||
"status_code": None,
|
||||
"message": f"Unsupported HTTP method: {method}",
|
||||
"data": None,
|
||||
}
|
||||
response.raise_for_status()
|
||||
|
||||
data = self._parse_response(response)
|
||||
|
||||
return {
|
||||
"status_code": response.status_code,
|
||||
"data": data,
|
||||
"message": "API call successful.",
|
||||
}
|
||||
except requests.exceptions.Timeout:
|
||||
logger.error(f"Request timeout for {request_url}")
|
||||
return {
|
||||
"status_code": None,
|
||||
"message": f"Request timeout ({DEFAULT_TIMEOUT}s exceeded)",
|
||||
"data": None,
|
||||
}
|
||||
except requests.exceptions.ConnectionError as e:
|
||||
logger.error(f"Connection error: {str(e)}")
|
||||
return {
|
||||
"status_code": None,
|
||||
"message": f"Connection error: {str(e)}",
|
||||
"data": None,
|
||||
}
|
||||
except requests.exceptions.HTTPError as e:
|
||||
logger.error(f"HTTP error {response.status_code}: {str(e)}")
|
||||
try:
|
||||
error_data = response.json()
|
||||
except (json.JSONDecodeError, ValueError):
|
||||
error_data = response.text
|
||||
return {
|
||||
"status_code": response.status_code,
|
||||
"message": f"HTTP Error {response.status_code}",
|
||||
"data": error_data,
|
||||
}
|
||||
except requests.exceptions.RequestException as e:
|
||||
logger.error(f"Request failed: {str(e)}")
|
||||
return {
|
||||
"status_code": response.status_code if response else None,
|
||||
"message": f"API call failed: {str(e)}",
|
||||
"data": None,
|
||||
}
|
||||
except Exception as e:
|
||||
logger.error(f"Unexpected error in API call: {str(e)}", exc_info=True)
|
||||
return {
|
||||
"status_code": None,
|
||||
"message": f"Unexpected error: {str(e)}",
|
||||
"data": None,
|
||||
}
|
||||
|
||||
def _parse_response(self, response: requests.Response) -> Any:
|
||||
"""
|
||||
Parse response based on Content-Type header.
|
||||
|
||||
Supports: JSON, XML, plain text, binary data.
|
||||
"""
|
||||
content_type = response.headers.get("Content-Type", "").lower()
|
||||
|
||||
if not response.content:
|
||||
return None
|
||||
# JSON response
|
||||
|
||||
if "application/json" in content_type:
|
||||
try:
|
||||
return response.json()
|
||||
except json.JSONDecodeError as e:
|
||||
logger.warning(f"Failed to parse JSON response: {str(e)}")
|
||||
return response.text
|
||||
# XML response
|
||||
|
||||
elif "application/xml" in content_type or "text/xml" in content_type:
|
||||
return response.text
|
||||
# Plain text response
|
||||
|
||||
elif "text/plain" in content_type or "text/html" in content_type:
|
||||
return response.text
|
||||
# Binary/unknown response
|
||||
|
||||
else:
|
||||
# Try to decode as text first, fall back to base64
|
||||
|
||||
try:
|
||||
return response.text
|
||||
except (UnicodeDecodeError, AttributeError):
|
||||
import base64
|
||||
|
||||
return base64.b64encode(response.content).decode("utf-8")
|
||||
|
||||
def get_actions_metadata(self):
|
||||
"""Return metadata for available actions (none for API Tool - actions are user-defined)."""
|
||||
return []
|
||||
|
||||
def get_config_requirements(self):
|
||||
"""Return configuration requirements for the tool."""
|
||||
return {}
|
||||
23
application/agents/tools/base.py
Normal file
@@ -0,0 +1,23 @@
|
||||
from abc import ABC, abstractmethod
|
||||
|
||||
|
||||
class Tool(ABC):
|
||||
internal: bool = False
|
||||
|
||||
@abstractmethod
|
||||
def execute_action(self, action_name: str, **kwargs):
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def get_actions_metadata(self):
|
||||
"""
|
||||
Returns a list of JSON objects describing the actions supported by the tool.
|
||||
"""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def get_config_requirements(self):
|
||||
"""
|
||||
Returns a dictionary describing the configuration requirements for the tool.
|
||||
"""
|
||||
pass
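# Minimal concrete subclass, shown only to illustrate the contract above.
class EchoTool(Tool):
    def execute_action(self, action_name: str, **kwargs):
        return {"action": action_name, "args": kwargs}

    def get_actions_metadata(self):
        return [{"name": "echo", "description": "Echo arguments back",
                 "parameters": {"type": "object", "properties": {}}}]

    def get_config_requirements(self):
        return {}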
|
||||
191
application/agents/tools/brave.py
Normal file
@@ -0,0 +1,191 @@
|
||||
import logging
|
||||
|
||||
import requests
|
||||
|
||||
from application.agents.tools.base import Tool
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class BraveSearchTool(Tool):
|
||||
"""
|
||||
Brave Search
|
||||
A tool for performing web and image searches using the Brave Search API.
|
||||
Requires an API key for authentication.
|
||||
"""
|
||||
|
||||
def __init__(self, config):
|
||||
self.config = config
|
||||
self.token = config.get("token", "")
|
||||
self.base_url = "https://api.search.brave.com/res/v1"
|
||||
|
||||
def execute_action(self, action_name, **kwargs):
|
||||
actions = {
|
||||
"brave_web_search": self._web_search,
|
||||
"brave_image_search": self._image_search,
|
||||
}
|
||||
|
||||
if action_name in actions:
|
||||
return actions[action_name](**kwargs)
|
||||
else:
|
||||
raise ValueError(f"Unknown action: {action_name}")
|
||||
|
||||
def _web_search(
|
||||
self,
|
||||
query,
|
||||
country="ALL",
|
||||
search_lang="en",
|
||||
count=10,
|
||||
offset=0,
|
||||
safesearch="off",
|
||||
freshness=None,
|
||||
result_filter=None,
|
||||
extra_snippets=False,
|
||||
summary=False,
|
||||
):
|
||||
"""
|
||||
Performs a web search using the Brave Search API.
|
||||
"""
|
||||
logger.debug("Performing Brave web search for: %s", query)
|
||||
|
||||
url = f"{self.base_url}/web/search"
|
||||
|
||||
params = {
|
||||
"q": query,
|
||||
"country": country,
|
||||
"search_lang": search_lang,
|
||||
"count": min(count, 20),
|
||||
"offset": min(offset, 9),
|
||||
"safesearch": safesearch,
|
||||
}
|
||||
|
||||
if freshness:
|
||||
params["freshness"] = freshness
|
||||
if result_filter:
|
||||
params["result_filter"] = result_filter
|
||||
if extra_snippets:
|
||||
params["extra_snippets"] = 1
|
||||
if summary:
|
||||
params["summary"] = 1
|
||||
headers = {
|
||||
"Accept": "application/json",
|
||||
"Accept-Encoding": "gzip",
|
||||
"X-Subscription-Token": self.token,
|
||||
}
|
||||
|
||||
response = requests.get(url, params=params, headers=headers)
|
||||
|
||||
if response.status_code == 200:
|
||||
return {
|
||||
"status_code": response.status_code,
|
||||
"results": response.json(),
|
||||
"message": "Search completed successfully.",
|
||||
}
|
||||
else:
|
||||
return {
|
||||
"status_code": response.status_code,
|
||||
"message": f"Search failed with status code: {response.status_code}.",
|
||||
}
|
||||
|
||||
def _image_search(
|
||||
self,
|
||||
query,
|
||||
country="ALL",
|
||||
search_lang="en",
|
||||
count=5,
|
||||
safesearch="off",
|
||||
spellcheck=False,
|
||||
):
|
||||
"""
|
||||
Performs an image search using the Brave Search API.
|
||||
"""
|
||||
logger.debug("Performing Brave image search for: %s", query)
|
||||
|
||||
url = f"{self.base_url}/images/search"
|
||||
|
||||
params = {
|
||||
"q": query,
|
||||
"country": country,
|
||||
"search_lang": search_lang,
|
||||
"count": min(count, 100), # API max is 100
|
||||
"safesearch": safesearch,
|
||||
"spellcheck": 1 if spellcheck else 0,
|
||||
}
|
||||
|
||||
headers = {
|
||||
"Accept": "application/json",
|
||||
"Accept-Encoding": "gzip",
|
||||
"X-Subscription-Token": self.token,
|
||||
}
|
||||
|
||||
response = requests.get(url, params=params, headers=headers)
|
||||
|
||||
if response.status_code == 200:
|
||||
return {
|
||||
"status_code": response.status_code,
|
||||
"results": response.json(),
|
||||
"message": "Image search completed successfully.",
|
||||
}
|
||||
else:
|
||||
return {
|
||||
"status_code": response.status_code,
|
||||
"message": f"Image search failed with status code: {response.status_code}.",
|
||||
}
|
||||
|
||||
def get_actions_metadata(self):
|
||||
return [
|
||||
{
|
||||
"name": "brave_web_search",
|
||||
"description": "Perform a web search using Brave Search",
|
||||
"parameters": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"query": {
|
||||
"type": "string",
|
||||
"description": "The search query (max 400 characters, 50 words)",
|
||||
},
|
||||
"search_lang": {
|
||||
"type": "string",
|
||||
"description": "The search language preference (default: en)",
|
||||
},
|
||||
"freshness": {
|
||||
"type": "string",
|
||||
"description": "Time filter for results (pd: last 24h, pw: last week, pm: last month, py: last year)",
|
||||
},
|
||||
},
|
||||
"required": ["query"],
|
||||
"additionalProperties": False,
|
||||
},
|
||||
},
|
||||
{
|
||||
"name": "brave_image_search",
|
||||
"description": "Perform an image search using Brave Search",
|
||||
"parameters": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"query": {
|
||||
"type": "string",
|
||||
"description": "The search query (max 400 characters, 50 words)",
|
||||
},
|
||||
"count": {
|
||||
"type": "integer",
|
||||
"description": "Number of results to return (max 100, default: 5)",
|
||||
},
|
||||
},
|
||||
"required": ["query"],
|
||||
"additionalProperties": False,
|
||||
},
|
||||
},
|
||||
]
|
||||
|
||||
def get_config_requirements(self):
|
||||
return {
|
||||
"token": {
|
||||
"type": "string",
|
||||
"label": "API Key",
|
||||
"description": "Brave Search API key for authentication",
|
||||
"required": True,
|
||||
"secret": True,
|
||||
"order": 1,
|
||||
},
|
||||
}
|
||||
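The tool can also be exercised directly, outside the agent loop. The sketch below is illustrative and not part of this changeset: the token value is a placeholder, and the nested web/results keys are an assumption about the Brave Search response shape rather than anything defined in this file.

# Illustrative only: direct call to BraveSearchTool (token is a placeholder).
from application.agents.tools.brave import BraveSearchTool

tool = BraveSearchTool({"token": "<brave-subscription-token>"})
result = tool.execute_action(
    "brave_web_search",
    query="DocsGPT retrieval augmented generation",
    count=5,
    freshness="pw",  # past week, per the freshness parameter description above
)
if result["status_code"] == 200:
    # Assumption: Brave returns web hits under results["web"]["results"].
    for item in result["results"].get("web", {}).get("results", []):
        print(item.get("title"), item.get("url"))
else:
    print(result["message"])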
application/agents/tools/cryptoprice.py (new file, 76 lines)
@@ -0,0 +1,76 @@
import requests
from application.agents.tools.base import Tool


class CryptoPriceTool(Tool):
    """
    CryptoPrice
    A tool for retrieving cryptocurrency prices using the CryptoCompare public API
    """

    def __init__(self, config):
        self.config = config

    def execute_action(self, action_name, **kwargs):
        actions = {"cryptoprice_get": self._get_price}

        if action_name in actions:
            return actions[action_name](**kwargs)
        else:
            raise ValueError(f"Unknown action: {action_name}")

    def _get_price(self, symbol, currency):
        """
        Fetches the current price of a given cryptocurrency symbol in the specified currency.
        Example:
        symbol = "BTC"
        currency = "USD"
        returns price in USD.
        """
        url = f"https://min-api.cryptocompare.com/data/price?fsym={symbol.upper()}&tsyms={currency.upper()}"
        response = requests.get(url)
        if response.status_code == 200:
            data = response.json()
            if currency.upper() in data:
                return {
                    "status_code": response.status_code,
                    "price": data[currency.upper()],
                    "message": f"Price of {symbol.upper()} in {currency.upper()} retrieved successfully.",
                }
            else:
                return {
                    "status_code": response.status_code,
                    "message": f"Couldn't find price for {symbol.upper()} in {currency.upper()}.",
                }
        else:
            return {
                "status_code": response.status_code,
                "message": "Failed to retrieve price.",
            }

    def get_actions_metadata(self):
        return [
            {
                "name": "cryptoprice_get",
                "description": "Retrieve the price of a specified cryptocurrency in a given currency",
                "parameters": {
                    "type": "object",
                    "properties": {
                        "symbol": {
                            "type": "string",
                            "description": "The cryptocurrency symbol (e.g. BTC)",
                        },
                        "currency": {
                            "type": "string",
                            "description": "The currency in which you want the price (e.g. USD)",
                        },
                    },
                    "required": ["symbol", "currency"],
                    "additionalProperties": False,
                },
            }
        ]

    def get_config_requirements(self):
        # No specific configuration needed for this tool as it just queries a public endpoint
        return {}
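A quick sanity check of the CryptoPrice tool looks like this. It is a sketch rather than part of the changeset and only assumes network access to the public CryptoCompare endpoint used above.

# Illustrative only: fetch a single spot price through the tool interface.
from application.agents.tools.cryptoprice import CryptoPriceTool

tool = CryptoPriceTool(config={})
result = tool.execute_action("cryptoprice_get", symbol="btc", currency="usd")
if result["status_code"] == 200 and "price" in result:
    print(f"BTC/USD: {result['price']}")
else:
    print(result["message"])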
application/agents/tools/duckduckgo.py (new file, 209 lines)
@@ -0,0 +1,209 @@
|
||||
import logging
|
||||
import time
|
||||
from typing import Any, Dict, Optional
|
||||
|
||||
from application.agents.tools.base import Tool
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
MAX_RETRIES = 3
|
||||
RETRY_DELAY = 2.0
|
||||
DEFAULT_TIMEOUT = 15
|
||||
|
||||
|
||||
class DuckDuckGoSearchTool(Tool):
|
||||
"""
|
||||
DuckDuckGo Search
|
||||
A tool for performing web and image searches using DuckDuckGo.
|
||||
"""
|
||||
|
||||
def __init__(self, config):
|
||||
self.config = config
|
||||
self.timeout = config.get("timeout", DEFAULT_TIMEOUT)
|
||||
|
||||
def _get_ddgs_client(self):
|
||||
from ddgs import DDGS
|
||||
|
||||
return DDGS(timeout=self.timeout)
|
||||
|
||||
def _execute_with_retry(self, operation, operation_name: str) -> Dict[str, Any]:
|
||||
last_error = None
|
||||
for attempt in range(1, MAX_RETRIES + 1):
|
||||
try:
|
||||
results = operation()
|
||||
return {
|
||||
"status_code": 200,
|
||||
"results": list(results) if results else [],
|
||||
"message": f"{operation_name} completed successfully.",
|
||||
}
|
||||
except Exception as e:
|
||||
last_error = e
|
||||
error_str = str(e).lower()
|
||||
if "ratelimit" in error_str or "429" in error_str:
|
||||
if attempt < MAX_RETRIES:
|
||||
delay = RETRY_DELAY * attempt
|
||||
logger.warning(
|
||||
f"{operation_name} rate limited, retrying in {delay}s (attempt {attempt}/{MAX_RETRIES})"
|
||||
)
|
||||
time.sleep(delay)
|
||||
continue
|
||||
logger.error(f"{operation_name} failed: {e}")
|
||||
break
|
||||
return {
|
||||
"status_code": 500,
|
||||
"results": [],
|
||||
"message": f"{operation_name} failed: {str(last_error)}",
|
||||
}
|
||||
|
||||
def execute_action(self, action_name, **kwargs):
|
||||
actions = {
|
||||
"ddg_web_search": self._web_search,
|
||||
"ddg_image_search": self._image_search,
|
||||
"ddg_news_search": self._news_search,
|
||||
}
|
||||
if action_name not in actions:
|
||||
raise ValueError(f"Unknown action: {action_name}")
|
||||
return actions[action_name](**kwargs)
|
||||
|
||||
def _web_search(
|
||||
self,
|
||||
query: str,
|
||||
max_results: int = 5,
|
||||
region: str = "wt-wt",
|
||||
safesearch: str = "moderate",
|
||||
timelimit: Optional[str] = None,
|
||||
) -> Dict[str, Any]:
|
||||
logger.info(f"DuckDuckGo web search: {query}")
|
||||
|
||||
def operation():
|
||||
client = self._get_ddgs_client()
|
||||
return client.text(
|
||||
query,
|
||||
region=region,
|
||||
safesearch=safesearch,
|
||||
timelimit=timelimit,
|
||||
max_results=min(max_results, 20),
|
||||
)
|
||||
|
||||
return self._execute_with_retry(operation, "Web search")
|
||||
|
||||
def _image_search(
|
||||
self,
|
||||
query: str,
|
||||
max_results: int = 5,
|
||||
region: str = "wt-wt",
|
||||
safesearch: str = "moderate",
|
||||
timelimit: Optional[str] = None,
|
||||
) -> Dict[str, Any]:
|
||||
logger.info(f"DuckDuckGo image search: {query}")
|
||||
|
||||
def operation():
|
||||
client = self._get_ddgs_client()
|
||||
return client.images(
|
||||
query,
|
||||
region=region,
|
||||
safesearch=safesearch,
|
||||
timelimit=timelimit,
|
||||
max_results=min(max_results, 50),
|
||||
)
|
||||
|
||||
return self._execute_with_retry(operation, "Image search")
|
||||
|
||||
def _news_search(
|
||||
self,
|
||||
query: str,
|
||||
max_results: int = 5,
|
||||
region: str = "wt-wt",
|
||||
safesearch: str = "moderate",
|
||||
timelimit: Optional[str] = None,
|
||||
) -> Dict[str, Any]:
|
||||
logger.info(f"DuckDuckGo news search: {query}")
|
||||
|
||||
def operation():
|
||||
client = self._get_ddgs_client()
|
||||
return client.news(
|
||||
query,
|
||||
region=region,
|
||||
safesearch=safesearch,
|
||||
timelimit=timelimit,
|
||||
max_results=min(max_results, 20),
|
||||
)
|
||||
|
||||
return self._execute_with_retry(operation, "News search")
|
||||
|
||||
def get_actions_metadata(self):
|
||||
return [
|
||||
{
|
||||
"name": "ddg_web_search",
|
||||
"description": "Search the web using DuckDuckGo. Returns titles, URLs, and snippets.",
|
||||
"parameters": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"query": {
|
||||
"type": "string",
|
||||
"description": "Search query",
|
||||
},
|
||||
"max_results": {
|
||||
"type": "integer",
|
||||
"description": "Number of results (default: 5, max: 20)",
|
||||
},
|
||||
"region": {
|
||||
"type": "string",
|
||||
"description": "Region code (default: wt-wt for worldwide, us-en for US)",
|
||||
},
|
||||
"timelimit": {
|
||||
"type": "string",
|
||||
"description": "Time filter: d (day), w (week), m (month), y (year)",
|
||||
},
|
||||
},
|
||||
"required": ["query"],
|
||||
},
|
||||
},
|
||||
{
|
||||
"name": "ddg_image_search",
|
||||
"description": "Search for images using DuckDuckGo. Returns image URLs and metadata.",
|
||||
"parameters": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"query": {
|
||||
"type": "string",
|
||||
"description": "Image search query",
|
||||
},
|
||||
"max_results": {
|
||||
"type": "integer",
|
||||
"description": "Number of results (default: 5, max: 50)",
|
||||
},
|
||||
"region": {
|
||||
"type": "string",
|
||||
"description": "Region code (default: wt-wt for worldwide)",
|
||||
},
|
||||
},
|
||||
"required": ["query"],
|
||||
},
|
||||
},
|
||||
{
|
||||
"name": "ddg_news_search",
|
||||
"description": "Search for news articles using DuckDuckGo. Returns recent news.",
|
||||
"parameters": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"query": {
|
||||
"type": "string",
|
||||
"description": "News search query",
|
||||
},
|
||||
"max_results": {
|
||||
"type": "integer",
|
||||
"description": "Number of results (default: 5, max: 20)",
|
||||
},
|
||||
"timelimit": {
|
||||
"type": "string",
|
||||
"description": "Time filter: d (day), w (week), m (month)",
|
||||
},
|
||||
},
|
||||
"required": ["query"],
|
||||
},
|
||||
},
|
||||
]
|
||||
|
||||
def get_config_requirements(self):
|
||||
return {}
|
||||
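Because _execute_with_retry converts failures into a status_code 500 payload instead of raising, callers can treat every action the same way. The sketch below is illustrative and not part of this changeset; it assumes the ddgs package is installed, since _get_ddgs_client imports it lazily.

# Illustrative only: a news query through DuckDuckGoSearchTool.
from application.agents.tools.duckduckgo import DuckDuckGoSearchTool

tool = DuckDuckGoSearchTool({"timeout": 10})
news = tool.execute_action(
    "ddg_news_search", query="open source LLM", max_results=3, timelimit="w"
)
print(news["message"])        # success or failure summary from _execute_with_retry
for item in news["results"]:  # empty list when the search failed
    print(item)               # each item is a dict produced by the ddgs client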
application/agents/tools/internal_search.py (new file, 438 lines)
@@ -0,0 +1,438 @@
|
||||
import json
|
||||
import logging
|
||||
from typing import Dict, List, Optional
|
||||
|
||||
from application.agents.tools.base import Tool
|
||||
from application.core.settings import settings
|
||||
from application.retriever.retriever_creator import RetrieverCreator
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class InternalSearchTool(Tool):
|
||||
"""Wraps the ClassicRAG retriever as an LLM-callable tool.
|
||||
|
||||
Instead of pre-fetching docs into the prompt, the LLM decides
|
||||
when and what to search. Supports multiple searches per session.
|
||||
|
||||
Optional capabilities (enabled when sources have directory_structure):
|
||||
- path_filter on search: restrict results to a specific file/folder
|
||||
- list_files action: browse the file/folder structure
|
||||
"""
|
||||
|
||||
internal = True
|
||||
|
||||
def __init__(self, config: Dict):
|
||||
self.config = config
|
||||
self.retrieved_docs: List[Dict] = []
|
||||
self._retriever = None
|
||||
self._directory_structure: Optional[Dict] = None
|
||||
self._dir_structure_loaded = False
|
||||
|
||||
def _get_retriever(self):
|
||||
if self._retriever is None:
|
||||
self._retriever = RetrieverCreator.create_retriever(
|
||||
self.config.get("retriever_name", "classic"),
|
||||
source=self.config.get("source", {}),
|
||||
chat_history=[],
|
||||
prompt="",
|
||||
chunks=int(self.config.get("chunks", 2)),
|
||||
doc_token_limit=int(self.config.get("doc_token_limit", 50000)),
|
||||
model_id=self.config.get("model_id", "docsgpt-local"),
|
||||
user_api_key=self.config.get("user_api_key"),
|
||||
agent_id=self.config.get("agent_id"),
|
||||
llm_name=self.config.get("llm_name", settings.LLM_PROVIDER),
|
||||
api_key=self.config.get("api_key", settings.API_KEY),
|
||||
decoded_token=self.config.get("decoded_token"),
|
||||
)
|
||||
return self._retriever
|
||||
|
||||
def _get_directory_structure(self) -> Optional[Dict]:
|
||||
"""Load directory structure from MongoDB for the configured sources."""
|
||||
if self._dir_structure_loaded:
|
||||
return self._directory_structure
|
||||
|
||||
self._dir_structure_loaded = True
|
||||
source = self.config.get("source", {})
|
||||
active_docs = source.get("active_docs", [])
|
||||
if not active_docs:
|
||||
return None
|
||||
|
||||
try:
|
||||
from bson.objectid import ObjectId
|
||||
from application.core.mongo_db import MongoDB
|
||||
|
||||
mongo = MongoDB.get_client()
|
||||
db = mongo[settings.MONGO_DB_NAME]
|
||||
sources_collection = db["sources"]
|
||||
|
||||
if isinstance(active_docs, str):
|
||||
active_docs = [active_docs]
|
||||
|
||||
merged_structure = {}
|
||||
for doc_id in active_docs:
|
||||
try:
|
||||
source_doc = sources_collection.find_one(
|
||||
{"_id": ObjectId(doc_id)}
|
||||
)
|
||||
if not source_doc:
|
||||
continue
|
||||
dir_str = source_doc.get("directory_structure")
|
||||
if dir_str:
|
||||
if isinstance(dir_str, str):
|
||||
dir_str = json.loads(dir_str)
|
||||
source_name = source_doc.get("name", doc_id)
|
||||
if len(active_docs) > 1:
|
||||
merged_structure[source_name] = dir_str
|
||||
else:
|
||||
merged_structure = dir_str
|
||||
except Exception as e:
|
||||
logger.debug(f"Could not load dir structure for {doc_id}: {e}")
|
||||
|
||||
self._directory_structure = merged_structure if merged_structure else None
|
||||
except Exception as e:
|
||||
logger.debug(f"Failed to load directory structures: {e}")
|
||||
|
||||
return self._directory_structure
|
||||
|
||||
def execute_action(self, action_name: str, **kwargs):
|
||||
if action_name == "search":
|
||||
return self._execute_search(**kwargs)
|
||||
elif action_name == "list_files":
|
||||
return self._execute_list_files(**kwargs)
|
||||
return f"Unknown action: {action_name}"
|
||||
|
||||
def _execute_search(self, **kwargs) -> str:
|
||||
query = kwargs.get("query", "")
|
||||
path_filter = kwargs.get("path_filter", "")
|
||||
|
||||
if not query:
|
||||
return "Error: 'query' parameter is required."
|
||||
|
||||
try:
|
||||
retriever = self._get_retriever()
|
||||
docs = retriever.search(query)
|
||||
except Exception as e:
|
||||
logger.error(f"Internal search failed: {e}", exc_info=True)
|
||||
return "Search failed: an internal error occurred."
|
||||
|
||||
if not docs:
|
||||
return "No documents found matching your query."
|
||||
|
||||
# Apply path filter if specified
|
||||
if path_filter:
|
||||
path_lower = path_filter.lower()
|
||||
docs = [
|
||||
d
|
||||
for d in docs
|
||||
if path_lower in d.get("source", "").lower()
|
||||
or path_lower in d.get("filename", "").lower()
|
||||
or path_lower in d.get("title", "").lower()
|
||||
]
|
||||
if not docs:
|
||||
return f"No documents found matching query '{query}' in path '{path_filter}'."
|
||||
|
||||
# Accumulate for source tracking
|
||||
for doc in docs:
|
||||
if doc not in self.retrieved_docs:
|
||||
self.retrieved_docs.append(doc)
|
||||
|
||||
# Format results for the LLM
|
||||
formatted = []
|
||||
for i, doc in enumerate(docs, 1):
|
||||
title = doc.get("title", "Untitled")
|
||||
text = doc.get("text", "")
|
||||
source = doc.get("source", "Unknown")
|
||||
filename = doc.get("filename", "")
|
||||
header = filename or title
|
||||
formatted.append(f"[{i}] {header} (source: {source})\n{text}")
|
||||
|
||||
return "\n\n---\n\n".join(formatted)
|
||||
|
||||
def _execute_list_files(self, **kwargs) -> str:
|
||||
path = kwargs.get("path", "")
|
||||
dir_structure = self._get_directory_structure()
|
||||
|
||||
if not dir_structure:
|
||||
return "No file structure available for the current sources."
|
||||
|
||||
# Navigate to the requested path
|
||||
current = dir_structure
|
||||
if path:
|
||||
for part in path.strip("/").split("/"):
|
||||
if not part:
|
||||
continue
|
||||
if isinstance(current, dict) and part in current:
|
||||
current = current[part]
|
||||
else:
|
||||
return f"Path '{path}' not found in the file structure."
|
||||
|
||||
# Format the structure for the LLM
|
||||
return self._format_structure(current, path or "/")
|
||||
|
||||
def _format_structure(self, node: Dict, current_path: str) -> str:
|
||||
if not isinstance(node, dict):
|
||||
return f"'{current_path}' is a file, not a directory."
|
||||
|
||||
lines = [f"File structure at '{current_path}':\n"]
|
||||
folders = []
|
||||
files = []
|
||||
|
||||
for name, value in sorted(node.items()):
|
||||
if isinstance(value, dict):
|
||||
# Check if it's a file metadata dict or a folder
|
||||
if "type" in value or "size_bytes" in value or "token_count" in value:
|
||||
# It's a file with metadata
|
||||
size = value.get("token_count", "")
|
||||
ftype = value.get("type", "")
|
||||
info_parts = []
|
||||
if ftype:
|
||||
info_parts.append(ftype)
|
||||
if size:
|
||||
info_parts.append(f"{size} tokens")
|
||||
info = f" ({', '.join(info_parts)})" if info_parts else ""
|
||||
files.append(f" {name}{info}")
|
||||
else:
|
||||
# It's a folder
|
||||
count = self._count_files(value)
|
||||
folders.append(f" {name}/ ({count} items)")
|
||||
else:
|
||||
files.append(f" {name}")
|
||||
|
||||
if folders:
|
||||
lines.append("Folders:")
|
||||
lines.extend(folders)
|
||||
if files:
|
||||
lines.append("Files:")
|
||||
lines.extend(files)
|
||||
if not folders and not files:
|
||||
lines.append(" (empty)")
|
||||
|
||||
return "\n".join(lines)
|
||||
|
||||
def _count_files(self, node: Dict) -> int:
|
||||
count = 0
|
||||
for value in node.values():
|
||||
if isinstance(value, dict):
|
||||
if "type" in value or "size_bytes" in value or "token_count" in value:
|
||||
count += 1
|
||||
else:
|
||||
count += self._count_files(value)
|
||||
else:
|
||||
count += 1
|
||||
return count
|
||||
|
||||
def get_actions_metadata(self):
|
||||
actions = [
|
||||
{
|
||||
"name": "search",
|
||||
"description": (
|
||||
"Search the user's uploaded documents and knowledge base. "
|
||||
"Use this to find relevant information before answering questions. "
|
||||
"You can call this multiple times with different queries."
|
||||
),
|
||||
"parameters": {
|
||||
"properties": {
|
||||
"query": {
|
||||
"type": "string",
|
||||
"description": "The search query. Be specific and focused.",
|
||||
"filled_by_llm": True,
|
||||
"required": True,
|
||||
},
|
||||
}
|
||||
},
|
||||
}
|
||||
]
|
||||
|
||||
# Add path_filter and list_files only if directory structure exists
|
||||
has_structure = self.config.get("has_directory_structure", False)
|
||||
if has_structure:
|
||||
actions[0]["parameters"]["properties"]["path_filter"] = {
|
||||
"type": "string",
|
||||
"description": (
|
||||
"Optional: filter results to a specific file or folder path. "
|
||||
"Use list_files first to see available paths."
|
||||
),
|
||||
"filled_by_llm": True,
|
||||
"required": False,
|
||||
}
|
||||
actions.append(
|
||||
{
|
||||
"name": "list_files",
|
||||
"description": (
|
||||
"Browse the file and folder structure of the knowledge base. "
|
||||
"Use this to see what files are available before searching. "
|
||||
"Optionally provide a path to browse a specific folder."
|
||||
),
|
||||
"parameters": {
|
||||
"properties": {
|
||||
"path": {
|
||||
"type": "string",
|
||||
"description": "Optional: folder path to browse. Leave empty for root.",
|
||||
"filled_by_llm": True,
|
||||
"required": False,
|
||||
}
|
||||
}
|
||||
},
|
||||
}
|
||||
)
|
||||
|
||||
return actions
|
||||
|
||||
def get_config_requirements(self):
|
||||
return {}
|
||||
|
||||
|
||||
# Constants for building synthetic tools_dict entries
|
||||
INTERNAL_TOOL_ID = "internal"
|
||||
|
||||
|
||||
def build_internal_tool_entry(has_directory_structure: bool = False) -> Dict:
|
||||
"""Build the tools_dict entry for InternalSearchTool.
|
||||
|
||||
Dynamically includes list_files and path_filter based on
|
||||
whether the sources have directory structure.
|
||||
"""
|
||||
search_params = {
|
||||
"properties": {
|
||||
"query": {
|
||||
"type": "string",
|
||||
"description": "The search query. Be specific and focused.",
|
||||
"filled_by_llm": True,
|
||||
"required": True,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
actions = [
|
||||
{
|
||||
"name": "search",
|
||||
"description": (
|
||||
"Search the user's uploaded documents and knowledge base. "
|
||||
"Use this to find relevant information before answering questions. "
|
||||
"You can call this multiple times with different queries."
|
||||
),
|
||||
"active": True,
|
||||
"parameters": search_params,
|
||||
}
|
||||
]
|
||||
|
||||
if has_directory_structure:
|
||||
search_params["properties"]["path_filter"] = {
|
||||
"type": "string",
|
||||
"description": (
|
||||
"Optional: filter results to a specific file or folder path. "
|
||||
"Use list_files first to see available paths."
|
||||
),
|
||||
"filled_by_llm": True,
|
||||
"required": False,
|
||||
}
|
||||
actions.append(
|
||||
{
|
||||
"name": "list_files",
|
||||
"description": (
|
||||
"Browse the file and folder structure of the knowledge base. "
|
||||
"Use this to see what files are available before searching. "
|
||||
"Optionally provide a path to browse a specific folder."
|
||||
),
|
||||
"active": True,
|
||||
"parameters": {
|
||||
"properties": {
|
||||
"path": {
|
||||
"type": "string",
|
||||
"description": "Optional: folder path to browse. Leave empty for root.",
|
||||
"filled_by_llm": True,
|
||||
"required": False,
|
||||
}
|
||||
}
|
||||
},
|
||||
}
|
||||
)
|
||||
|
||||
return {"name": "internal_search", "actions": actions}
|
||||
|
||||
|
||||
# Keep backward compat
|
||||
INTERNAL_TOOL_ENTRY = build_internal_tool_entry(has_directory_structure=False)
|
||||
|
||||
|
||||
def sources_have_directory_structure(source: Dict) -> bool:
|
||||
"""Check if any of the active sources have directory_structure in MongoDB."""
|
||||
active_docs = source.get("active_docs", [])
|
||||
if not active_docs:
|
||||
return False
|
||||
|
||||
try:
|
||||
from bson.objectid import ObjectId
|
||||
from application.core.mongo_db import MongoDB
|
||||
|
||||
mongo = MongoDB.get_client()
|
||||
db = mongo[settings.MONGO_DB_NAME]
|
||||
sources_collection = db["sources"]
|
||||
|
||||
if isinstance(active_docs, str):
|
||||
active_docs = [active_docs]
|
||||
|
||||
for doc_id in active_docs:
|
||||
try:
|
||||
source_doc = sources_collection.find_one(
|
||||
{"_id": ObjectId(doc_id)},
|
||||
{"directory_structure": 1},
|
||||
)
|
||||
if source_doc and source_doc.get("directory_structure"):
|
||||
return True
|
||||
except Exception:
|
||||
continue
|
||||
except Exception as e:
|
||||
logger.debug(f"Could not check directory structure: {e}")
|
||||
|
||||
return False
|
||||
|
||||
|
||||
def add_internal_search_tool(tools_dict: Dict, retriever_config: Dict) -> None:
|
||||
"""Add the internal search tool to tools_dict if sources are configured.
|
||||
|
||||
Shared by AgenticAgent and ResearchAgent to avoid duplicate setup logic.
|
||||
Mutates tools_dict in place.
|
||||
"""
|
||||
source = retriever_config.get("source", {})
|
||||
has_sources = bool(source.get("active_docs"))
|
||||
if not retriever_config or not has_sources:
|
||||
return
|
||||
|
||||
has_dir = sources_have_directory_structure(source)
|
||||
internal_entry = build_internal_tool_entry(has_directory_structure=has_dir)
|
||||
internal_entry["config"] = build_internal_tool_config(
|
||||
**retriever_config,
|
||||
has_directory_structure=has_dir,
|
||||
)
|
||||
tools_dict[INTERNAL_TOOL_ID] = internal_entry
|
||||
|
||||
|
||||
def build_internal_tool_config(
|
||||
source: Dict,
|
||||
retriever_name: str = "classic",
|
||||
chunks: int = 2,
|
||||
doc_token_limit: int = 50000,
|
||||
model_id: str = "docsgpt-local",
|
||||
user_api_key: Optional[str] = None,
|
||||
agent_id: Optional[str] = None,
|
||||
llm_name: str = None,
|
||||
api_key: str = None,
|
||||
decoded_token: Optional[Dict] = None,
|
||||
has_directory_structure: bool = False,
|
||||
) -> Dict:
|
||||
"""Build the config dict for InternalSearchTool."""
|
||||
return {
|
||||
"source": source,
|
||||
"retriever_name": retriever_name,
|
||||
"chunks": chunks,
|
||||
"doc_token_limit": doc_token_limit,
|
||||
"model_id": model_id,
|
||||
"user_api_key": user_api_key,
|
||||
"agent_id": agent_id,
|
||||
"llm_name": llm_name or settings.LLM_PROVIDER,
|
||||
"api_key": api_key or settings.API_KEY,
|
||||
"decoded_token": decoded_token,
|
||||
"has_directory_structure": has_directory_structure,
|
||||
}
|
||||
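The helper functions at the bottom of the file are what agents call to wire the tool in. The sketch below shows that wiring and is not part of the changeset; the source id is a placeholder, and a reachable MongoDB is assumed for the directory-structure probe (when it is unreachable, sources_have_directory_structure simply returns False).

# Illustrative only: registering the internal search tool for an agent run.
from application.agents.tools.internal_search import (
    INTERNAL_TOOL_ID,
    add_internal_search_tool,
)

tools_dict = {}
retriever_config = {
    "source": {"active_docs": ["<source-object-id>"]},  # placeholder id
    "chunks": 2,
    "doc_token_limit": 50000,
}
add_internal_search_tool(tools_dict, retriever_config)

if INTERNAL_TOOL_ID in tools_dict:
    entry = tools_dict[INTERNAL_TOOL_ID]
    # ["search"] without directory structure, ["search", "list_files"] with it.
    print([action["name"] for action in entry["actions"]])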
application/agents/tools/mcp_tool.py (new file, 996 lines)
@@ -0,0 +1,996 @@
|
||||
import asyncio
|
||||
import base64
|
||||
import concurrent.futures
|
||||
import json
|
||||
import logging
|
||||
import time
|
||||
from typing import Any, Dict, List, Optional
|
||||
from urllib.parse import parse_qs, urlparse
|
||||
|
||||
from fastmcp import Client
|
||||
from fastmcp.client.auth import BearerAuth
|
||||
from fastmcp.client.transports import (
|
||||
SSETransport,
|
||||
StdioTransport,
|
||||
StreamableHttpTransport,
|
||||
)
|
||||
from mcp.client.auth import OAuthClientProvider, TokenStorage
|
||||
from mcp.shared.auth import OAuthClientInformationFull, OAuthClientMetadata, OAuthToken
|
||||
from pydantic import AnyHttpUrl, ValidationError
|
||||
from redis import Redis
|
||||
|
||||
from application.agents.tools.base import Tool
|
||||
from application.api.user.tasks import mcp_oauth_status_task, mcp_oauth_task
|
||||
from application.cache import get_redis_instance
|
||||
from application.core.mongo_db import MongoDB
|
||||
from application.core.settings import settings
|
||||
from application.core.url_validation import SSRFError, validate_url
|
||||
from application.security.encryption import decrypt_credentials
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
mongo = MongoDB.get_client()
|
||||
db = mongo[settings.MONGO_DB_NAME]
|
||||
|
||||
_mcp_clients_cache = {}
|
||||
|
||||
|
||||
class MCPTool(Tool):
|
||||
"""
|
||||
MCP Tool
|
||||
Connect to remote Model Context Protocol (MCP) servers to access dynamic tools and resources.
|
||||
"""
|
||||
|
||||
def __init__(self, config: Dict[str, Any], user_id: Optional[str] = None):
|
||||
"""
|
||||
Initialize the MCP Tool with configuration.
|
||||
|
||||
Args:
|
||||
config: Dictionary containing MCP server configuration:
|
||||
- server_url: URL of the remote MCP server
|
||||
- transport_type: Transport type (auto, sse, http, stdio)
|
||||
- auth_type: Type of authentication (bearer, oauth, api_key, basic, none)
|
||||
- encrypted_credentials: Encrypted credentials (if available)
|
||||
- timeout: Request timeout in seconds (default: 30)
|
||||
- headers: Custom headers for requests
|
||||
- command: Command for STDIO transport
|
||||
- args: Arguments for STDIO transport
|
||||
- oauth_scopes: OAuth scopes for oauth auth type
|
||||
- oauth_client_name: OAuth client name for oauth auth type
|
||||
- query_mode: If True, use non-interactive OAuth (fail-fast on 401)
|
||||
user_id: User ID for decrypting credentials (required if encrypted_credentials exist)
|
||||
"""
|
||||
self.config = config
|
||||
self.user_id = user_id
|
||||
raw_url = config.get("server_url", "")
|
||||
self.server_url = self._validate_server_url(raw_url) if raw_url else ""
|
||||
self.transport_type = config.get("transport_type", "auto")
|
||||
self.auth_type = config.get("auth_type", "none")
|
||||
self.timeout = config.get("timeout", 30)
|
||||
self.custom_headers = config.get("headers", {})
|
||||
|
||||
self.auth_credentials = {}
|
||||
if config.get("encrypted_credentials") and user_id:
|
||||
self.auth_credentials = decrypt_credentials(
|
||||
config["encrypted_credentials"], user_id
|
||||
)
|
||||
else:
|
||||
self.auth_credentials = config.get("auth_credentials", {})
|
||||
self.oauth_scopes = config.get("oauth_scopes", [])
|
||||
self.oauth_task_id = config.get("oauth_task_id", None)
|
||||
self.oauth_client_name = config.get("oauth_client_name", "DocsGPT-MCP")
|
||||
self.redirect_uri = self._resolve_redirect_uri(config.get("redirect_uri"))
|
||||
|
||||
self.available_tools = []
|
||||
self._cache_key = self._generate_cache_key()
|
||||
self._client = None
|
||||
self.query_mode = config.get("query_mode", False)
|
||||
|
||||
if self.server_url and self.auth_type != "oauth":
|
||||
self._setup_client()
|
||||
|
||||
@staticmethod
|
||||
def _validate_server_url(server_url: str) -> str:
|
||||
"""Validate server_url to prevent SSRF to internal networks.
|
||||
|
||||
Raises:
|
||||
ValueError: If the URL points to a private/internal address.
|
||||
"""
|
||||
try:
|
||||
return validate_url(server_url)
|
||||
except SSRFError as exc:
|
||||
raise ValueError(f"Invalid MCP server URL: {exc}") from exc
|
||||
|
||||
def _resolve_redirect_uri(self, configured_redirect_uri: Optional[str]) -> str:
|
||||
if configured_redirect_uri:
|
||||
return configured_redirect_uri.rstrip("/")
|
||||
|
||||
explicit = getattr(settings, "MCP_OAUTH_REDIRECT_URI", None)
|
||||
if explicit:
|
||||
return explicit.rstrip("/")
|
||||
|
||||
connector_base = getattr(settings, "CONNECTOR_REDIRECT_BASE_URI", None)
|
||||
if connector_base:
|
||||
parsed = urlparse(connector_base)
|
||||
if parsed.scheme and parsed.netloc:
|
||||
return f"{parsed.scheme}://{parsed.netloc}/api/mcp_server/callback"
|
||||
|
||||
return f"{settings.API_URL.rstrip('/')}/api/mcp_server/callback"
|
||||
|
||||
def _generate_cache_key(self) -> str:
|
||||
"""Generate a unique cache key for this MCP server configuration."""
|
||||
auth_key = ""
|
||||
if self.auth_type == "oauth":
|
||||
scopes_str = ",".join(self.oauth_scopes) if self.oauth_scopes else "none"
|
||||
oauth_identity = self.user_id or self.oauth_task_id or "anonymous"
|
||||
auth_key = (
|
||||
f"oauth:{oauth_identity}:{self.oauth_client_name}:{scopes_str}:{self.redirect_uri}"
|
||||
)
|
||||
elif self.auth_type in ["bearer"]:
|
||||
token = self.auth_credentials.get(
|
||||
"bearer_token", ""
|
||||
) or self.auth_credentials.get("access_token", "")
|
||||
auth_key = f"bearer:{token[:10]}..." if token else "bearer:none"
|
||||
elif self.auth_type == "api_key":
|
||||
api_key = self.auth_credentials.get("api_key", "")
|
||||
auth_key = f"apikey:{api_key[:10]}..." if api_key else "apikey:none"
|
||||
elif self.auth_type == "basic":
|
||||
username = self.auth_credentials.get("username", "")
|
||||
auth_key = f"basic:{username}"
|
||||
else:
|
||||
auth_key = "none"
|
||||
return f"{self.server_url}#{self.transport_type}#{auth_key}"
|
||||
|
||||
def _setup_client(self):
|
||||
global _mcp_clients_cache
|
||||
if self._cache_key in _mcp_clients_cache:
|
||||
cached_data = _mcp_clients_cache[self._cache_key]
|
||||
if time.time() - cached_data["created_at"] < 300:
|
||||
self._client = cached_data["client"]
|
||||
return
|
||||
else:
|
||||
del _mcp_clients_cache[self._cache_key]
|
||||
transport = self._create_transport()
|
||||
auth = None
|
||||
|
||||
if self.auth_type == "oauth":
|
||||
redis_client = get_redis_instance()
|
||||
if self.query_mode:
|
||||
auth = NonInteractiveOAuth(
|
||||
mcp_url=self.server_url,
|
||||
scopes=self.oauth_scopes,
|
||||
redis_client=redis_client,
|
||||
redirect_uri=self.redirect_uri,
|
||||
db=db,
|
||||
user_id=self.user_id,
|
||||
)
|
||||
else:
|
||||
auth = DocsGPTOAuth(
|
||||
mcp_url=self.server_url,
|
||||
scopes=self.oauth_scopes,
|
||||
redis_client=redis_client,
|
||||
redirect_uri=self.redirect_uri,
|
||||
task_id=self.oauth_task_id,
|
||||
db=db,
|
||||
user_id=self.user_id,
|
||||
)
|
||||
elif self.auth_type == "bearer":
|
||||
token = self.auth_credentials.get(
|
||||
"bearer_token", ""
|
||||
) or self.auth_credentials.get("access_token", "")
|
||||
if token:
|
||||
auth = BearerAuth(token)
|
||||
self._client = Client(transport, auth=auth)
|
||||
_mcp_clients_cache[self._cache_key] = {
|
||||
"client": self._client,
|
||||
"created_at": time.time(),
|
||||
}
|
||||
|
||||
def _create_transport(self):
|
||||
"""Create appropriate transport based on configuration."""
|
||||
headers = {"Content-Type": "application/json", "User-Agent": "DocsGPT-MCP/1.0"}
|
||||
headers.update(self.custom_headers)
|
||||
|
||||
if self.auth_type == "api_key":
|
||||
api_key = self.auth_credentials.get("api_key", "")
|
||||
header_name = self.auth_credentials.get("api_key_header", "X-API-Key")
|
||||
if api_key:
|
||||
headers[header_name] = api_key
|
||||
elif self.auth_type == "basic":
|
||||
username = self.auth_credentials.get("username", "")
|
||||
password = self.auth_credentials.get("password", "")
|
||||
if username and password:
|
||||
credentials = base64.b64encode(
|
||||
f"{username}:{password}".encode()
|
||||
).decode()
|
||||
headers["Authorization"] = f"Basic {credentials}"
|
||||
if self.transport_type == "auto":
|
||||
if "sse" in self.server_url.lower() or self.server_url.endswith("/sse"):
|
||||
transport_type = "sse"
|
||||
else:
|
||||
transport_type = "http"
|
||||
else:
|
||||
transport_type = self.transport_type
|
||||
if transport_type == "stdio":
|
||||
raise ValueError("STDIO transport is disabled")
|
||||
if transport_type == "sse":
|
||||
headers.update({"Accept": "text/event-stream", "Cache-Control": "no-cache"})
|
||||
return SSETransport(url=self.server_url, headers=headers)
|
||||
elif transport_type == "http":
|
||||
return StreamableHttpTransport(url=self.server_url, headers=headers)
|
||||
elif transport_type == "stdio":
|
||||
command = self.config.get("command", "python")
|
||||
args = self.config.get("args", [])
|
||||
env = self.auth_credentials if self.auth_credentials else None
|
||||
return StdioTransport(command=command, args=args, env=env)
|
||||
else:
|
||||
return StreamableHttpTransport(url=self.server_url, headers=headers)
|
||||
|
||||
def _format_tools(self, tools_response) -> List[Dict]:
|
||||
"""Format tools response to match expected format."""
|
||||
if hasattr(tools_response, "tools"):
|
||||
tools = tools_response.tools
|
||||
elif isinstance(tools_response, list):
|
||||
tools = tools_response
|
||||
else:
|
||||
tools = []
|
||||
tools_dict = []
|
||||
for tool in tools:
|
||||
if hasattr(tool, "name"):
|
||||
tool_dict = {
|
||||
"name": tool.name,
|
||||
"description": tool.description,
|
||||
}
|
||||
if hasattr(tool, "inputSchema"):
|
||||
tool_dict["inputSchema"] = tool.inputSchema
|
||||
tools_dict.append(tool_dict)
|
||||
elif isinstance(tool, dict):
|
||||
tools_dict.append(tool)
|
||||
else:
|
||||
if hasattr(tool, "model_dump"):
|
||||
tools_dict.append(tool.model_dump())
|
||||
else:
|
||||
tools_dict.append({"name": str(tool), "description": ""})
|
||||
return tools_dict
|
||||
|
||||
async def _execute_with_client(self, operation: str, *args, **kwargs):
|
||||
"""Execute operation with FastMCP client."""
|
||||
if not self._client:
|
||||
raise Exception("FastMCP client not initialized")
|
||||
async with self._client:
|
||||
if operation == "ping":
|
||||
return await self._client.ping()
|
||||
elif operation == "list_tools":
|
||||
tools_response = await self._client.list_tools()
|
||||
self.available_tools = self._format_tools(tools_response)
|
||||
return self.available_tools
|
||||
elif operation == "call_tool":
|
||||
tool_name = args[0]
|
||||
tool_args = kwargs
|
||||
return await self._client.call_tool(tool_name, tool_args)
|
||||
elif operation == "list_resources":
|
||||
return await self._client.list_resources()
|
||||
elif operation == "list_prompts":
|
||||
return await self._client.list_prompts()
|
||||
else:
|
||||
raise Exception(f"Unknown operation: {operation}")
|
||||
|
||||
_ERROR_MAP = [
|
||||
(concurrent.futures.TimeoutError, lambda op, t, _: f"Timed out after {t}s"),
|
||||
(ConnectionRefusedError, lambda *_: "Connection refused"),
|
||||
]
|
||||
|
||||
_ERROR_PATTERNS = {
|
||||
("403", "Forbidden"): "Access denied (403 Forbidden)",
|
||||
("401", "Unauthorized"): "Authentication failed (401 Unauthorized)",
|
||||
("ECONNREFUSED",): "Connection refused",
|
||||
("SSL", "certificate"): "SSL/TLS error",
|
||||
}
|
||||
|
||||
def _run_async_operation(self, operation: str, *args, **kwargs):
|
||||
try:
|
||||
try:
|
||||
asyncio.get_running_loop()
|
||||
with concurrent.futures.ThreadPoolExecutor() as executor:
|
||||
future = executor.submit(
|
||||
self._run_in_new_loop, operation, *args, **kwargs
|
||||
)
|
||||
return future.result(timeout=self.timeout)
|
||||
except RuntimeError:
|
||||
return self._run_in_new_loop(operation, *args, **kwargs)
|
||||
except Exception as e:
|
||||
raise self._map_error(operation, e) from e
|
||||
|
||||
def _run_in_new_loop(self, operation, *args, **kwargs):
|
||||
loop = asyncio.new_event_loop()
|
||||
asyncio.set_event_loop(loop)
|
||||
try:
|
||||
return loop.run_until_complete(
|
||||
self._execute_with_client(operation, *args, **kwargs)
|
||||
)
|
||||
finally:
|
||||
loop.close()
|
||||
|
||||
def _map_error(self, operation: str, exc: Exception) -> Exception:
|
||||
for exc_type, msg_fn in self._ERROR_MAP:
|
||||
if isinstance(exc, exc_type):
|
||||
return Exception(msg_fn(operation, self.timeout, exc))
|
||||
error_msg = str(exc)
|
||||
for patterns, friendly in self._ERROR_PATTERNS.items():
|
||||
if any(p.lower() in error_msg.lower() for p in patterns):
|
||||
return Exception(friendly)
|
||||
logger.error("MCP %s failed: %s", operation, exc)
|
||||
return exc
|
||||
|
||||
def discover_tools(self) -> List[Dict]:
|
||||
"""
|
||||
Discover available tools from the MCP server using FastMCP.
|
||||
|
||||
Returns:
|
||||
List of tool definitions from the server
|
||||
"""
|
||||
if not self.server_url:
|
||||
return []
|
||||
if not self._client:
|
||||
self._setup_client()
|
||||
try:
|
||||
tools = self._run_async_operation("list_tools")
|
||||
self.available_tools = tools
|
||||
return self.available_tools
|
||||
except Exception as e:
|
||||
raise Exception(f"Failed to discover tools from MCP server: {str(e)}")
|
||||
|
||||
def execute_action(self, action_name: str, **kwargs) -> Any:
|
||||
if not self.server_url:
|
||||
raise Exception("No MCP server configured")
|
||||
if not self._client:
|
||||
self._setup_client()
|
||||
cleaned_kwargs = {}
|
||||
for key, value in kwargs.items():
|
||||
if value == "" or value is None:
|
||||
continue
|
||||
cleaned_kwargs[key] = value
|
||||
try:
|
||||
result = self._run_async_operation(
|
||||
"call_tool", action_name, **cleaned_kwargs
|
||||
)
|
||||
return self._format_result(result)
|
||||
except Exception as e:
|
||||
error_msg = str(e)
|
||||
lower_msg = error_msg.lower()
|
||||
is_auth_error = (
|
||||
"401" in error_msg
|
||||
or "unauthorized" in lower_msg
|
||||
or "session expired" in lower_msg
|
||||
or "re-authorize" in lower_msg
|
||||
)
|
||||
if is_auth_error:
|
||||
if self.auth_type == "oauth":
|
||||
raise Exception(
|
||||
f"Action '{action_name}' failed: OAuth session expired. "
|
||||
"Please re-authorize this MCP server in tool settings."
|
||||
) from e
|
||||
global _mcp_clients_cache
|
||||
_mcp_clients_cache.pop(self._cache_key, None)
|
||||
self._client = None
|
||||
self._setup_client()
|
||||
try:
|
||||
result = self._run_async_operation(
|
||||
"call_tool", action_name, **cleaned_kwargs
|
||||
)
|
||||
return self._format_result(result)
|
||||
except Exception as retry_e:
|
||||
raise Exception(
|
||||
f"Action '{action_name}' failed after re-auth attempt: {retry_e}. "
|
||||
"Your credentials may have expired — please re-authorize in tool settings."
|
||||
) from retry_e
|
||||
raise Exception(
|
||||
f"Failed to execute action '{action_name}': {error_msg}"
|
||||
) from e
|
||||
|
||||
def _format_result(self, result) -> Dict:
|
||||
"""Format FastMCP result to match expected format."""
|
||||
if hasattr(result, "content"):
|
||||
content_list = []
|
||||
for content_item in result.content:
|
||||
if hasattr(content_item, "text"):
|
||||
content_list.append({"type": "text", "text": content_item.text})
|
||||
elif hasattr(content_item, "data"):
|
||||
content_list.append({"type": "data", "data": content_item.data})
|
||||
else:
|
||||
content_list.append(
|
||||
{"type": "unknown", "content": str(content_item)}
|
||||
)
|
||||
return {
|
||||
"content": content_list,
|
||||
"isError": getattr(result, "isError", False),
|
||||
}
|
||||
else:
|
||||
return result
|
||||
|
||||
def test_connection(self) -> Dict:
|
||||
if not self.server_url:
|
||||
return {
|
||||
"success": False,
|
||||
"message": "No server URL configured",
|
||||
"tools_count": 0,
|
||||
}
|
||||
try:
|
||||
parsed = urlparse(self.server_url)
|
||||
if parsed.scheme not in ("http", "https"):
|
||||
return {
|
||||
"success": False,
|
||||
"message": f"Invalid URL scheme '{parsed.scheme}' — use http:// or https://",
|
||||
"tools_count": 0,
|
||||
}
|
||||
except Exception:
|
||||
return {
|
||||
"success": False,
|
||||
"message": "Invalid URL format",
|
||||
"tools_count": 0,
|
||||
}
|
||||
if not self._client:
|
||||
try:
|
||||
self._setup_client()
|
||||
except Exception as e:
|
||||
return {
|
||||
"success": False,
|
||||
"message": f"Client init failed: {str(e)}",
|
||||
"tools_count": 0,
|
||||
}
|
||||
try:
|
||||
if self.auth_type == "oauth":
|
||||
return self._test_oauth_connection()
|
||||
else:
|
||||
return self._test_regular_connection()
|
||||
except Exception as e:
|
||||
return {
|
||||
"success": False,
|
||||
"message": f"Connection failed: {str(e)}",
|
||||
"tools_count": 0,
|
||||
}
|
||||
|
||||
def _test_regular_connection(self) -> Dict:
|
||||
ping_ok = False
|
||||
ping_error = None
|
||||
try:
|
||||
self._run_async_operation("ping")
|
||||
ping_ok = True
|
||||
except Exception as e:
|
||||
ping_error = str(e)
|
||||
|
||||
try:
|
||||
tools = self.discover_tools()
|
||||
except Exception as e:
|
||||
return {
|
||||
"success": False,
|
||||
"message": f"Connection failed: {ping_error or str(e)}",
|
||||
"tools_count": 0,
|
||||
}
|
||||
|
||||
if not tools and not ping_ok:
|
||||
return {
|
||||
"success": False,
|
||||
"message": f"Connection failed: {ping_error or 'No tools found'}",
|
||||
"tools_count": 0,
|
||||
}
|
||||
|
||||
return {
|
||||
"success": True,
|
||||
"message": f"Connected — found {len(tools)} tool{'s' if len(tools) != 1 else ''}.",
|
||||
"tools_count": len(tools),
|
||||
"tools": [
|
||||
{
|
||||
"name": tool.get("name", "unknown"),
|
||||
"description": tool.get("description", ""),
|
||||
}
|
||||
for tool in tools
|
||||
],
|
||||
}
|
||||
|
||||
def _test_oauth_connection(self) -> Dict:
|
||||
storage = DBTokenStorage(
|
||||
server_url=self.server_url, user_id=self.user_id, db_client=db
|
||||
)
|
||||
loop = asyncio.new_event_loop()
|
||||
try:
|
||||
tokens = loop.run_until_complete(storage.get_tokens())
|
||||
finally:
|
||||
loop.close()
|
||||
|
||||
if tokens and tokens.access_token:
|
||||
self.query_mode = True
|
||||
_mcp_clients_cache.pop(self._cache_key, None)
|
||||
self._client = None
|
||||
self._setup_client()
|
||||
try:
|
||||
tools = self.discover_tools()
|
||||
return {
|
||||
"success": True,
|
||||
"message": f"Connected — found {len(tools)} tool{'s' if len(tools) != 1 else ''}.",
|
||||
"tools_count": len(tools),
|
||||
"tools": [
|
||||
{
|
||||
"name": t.get("name", "unknown"),
|
||||
"description": t.get("description", ""),
|
||||
}
|
||||
for t in tools
|
||||
],
|
||||
}
|
||||
except Exception as e:
|
||||
logger.warning("OAuth token validation failed: %s", e)
|
||||
_mcp_clients_cache.pop(self._cache_key, None)
|
||||
self._client = None
|
||||
|
||||
return self._start_oauth_task()
|
||||
|
||||
def _start_oauth_task(self) -> Dict:
|
||||
task_config = self.config.copy()
|
||||
task_config.pop("query_mode", None)
|
||||
result = mcp_oauth_task.delay(task_config, self.user_id)
|
||||
return {
|
||||
"success": False,
|
||||
"requires_oauth": True,
|
||||
"task_id": result.id,
|
||||
"message": "OAuth authorization required.",
|
||||
"tools_count": 0,
|
||||
}
|
||||
|
||||
def get_actions_metadata(self) -> List[Dict]:
|
||||
"""
|
||||
Get metadata for all available actions.
|
||||
|
||||
Returns:
|
||||
List of action metadata dictionaries
|
||||
"""
|
||||
actions = []
|
||||
for tool in self.available_tools:
|
||||
input_schema = (
|
||||
tool.get("inputSchema")
|
||||
or tool.get("input_schema")
|
||||
or tool.get("schema")
|
||||
or tool.get("parameters")
|
||||
)
|
||||
|
||||
parameters_schema = {
|
||||
"type": "object",
|
||||
"properties": {},
|
||||
"required": [],
|
||||
}
|
||||
|
||||
if input_schema:
|
||||
if isinstance(input_schema, dict):
|
||||
if "properties" in input_schema:
|
||||
parameters_schema = {
|
||||
"type": input_schema.get("type", "object"),
|
||||
"properties": input_schema.get("properties", {}),
|
||||
"required": input_schema.get("required", []),
|
||||
}
|
||||
|
||||
for key in ["additionalProperties", "description"]:
|
||||
if key in input_schema:
|
||||
parameters_schema[key] = input_schema[key]
|
||||
else:
|
||||
parameters_schema["properties"] = input_schema
|
||||
action = {
|
||||
"name": tool.get("name", ""),
|
||||
"description": tool.get("description", ""),
|
||||
"parameters": parameters_schema,
|
||||
}
|
||||
actions.append(action)
|
||||
return actions
|
||||
|
||||
def get_config_requirements(self) -> Dict:
|
||||
return {
|
||||
"server_url": {
|
||||
"type": "string",
|
||||
"label": "Server URL",
|
||||
"description": "URL of the remote MCP server",
|
||||
"required": True,
|
||||
"secret": False,
|
||||
"order": 1,
|
||||
},
|
||||
"auth_type": {
|
||||
"type": "string",
|
||||
"label": "Authentication Type",
|
||||
"description": "Authentication method for the MCP server",
|
||||
"enum": ["none", "bearer", "oauth", "api_key", "basic"],
|
||||
"default": "none",
|
||||
"required": True,
|
||||
"secret": False,
|
||||
"order": 2,
|
||||
},
|
||||
"api_key": {
|
||||
"type": "string",
|
||||
"label": "API Key",
|
||||
"description": "API key for authentication",
|
||||
"required": False,
|
||||
"secret": True,
|
||||
"order": 3,
|
||||
"depends_on": {"auth_type": "api_key"},
|
||||
},
|
||||
"api_key_header": {
|
||||
"type": "string",
|
||||
"label": "API Key Header",
|
||||
"description": "Header name for API key (default: X-API-Key)",
|
||||
"default": "X-API-Key",
|
||||
"required": False,
|
||||
"secret": False,
|
||||
"order": 4,
|
||||
"depends_on": {"auth_type": "api_key"},
|
||||
},
|
||||
"bearer_token": {
|
||||
"type": "string",
|
||||
"label": "Bearer Token",
|
||||
"description": "Bearer token for authentication",
|
||||
"required": False,
|
||||
"secret": True,
|
||||
"order": 3,
|
||||
"depends_on": {"auth_type": "bearer"},
|
||||
},
|
||||
"username": {
|
||||
"type": "string",
|
||||
"label": "Username",
|
||||
"description": "Username for basic authentication",
|
||||
"required": False,
|
||||
"secret": False,
|
||||
"order": 3,
|
||||
"depends_on": {"auth_type": "basic"},
|
||||
},
|
||||
"password": {
|
||||
"type": "string",
|
||||
"label": "Password",
|
||||
"description": "Password for basic authentication",
|
||||
"required": False,
|
||||
"secret": True,
|
||||
"order": 4,
|
||||
"depends_on": {"auth_type": "basic"},
|
||||
},
|
||||
"oauth_scopes": {
|
||||
"type": "string",
|
||||
"label": "OAuth Scopes",
|
||||
"description": "Comma-separated OAuth scopes to request",
|
||||
"required": False,
|
||||
"secret": False,
|
||||
"order": 3,
|
||||
"depends_on": {"auth_type": "oauth"},
|
||||
},
|
||||
"timeout": {
|
||||
"type": "number",
|
||||
"label": "Timeout (seconds)",
|
||||
"description": "Request timeout in seconds (1-300)",
|
||||
"default": 30,
|
||||
"required": False,
|
||||
"secret": False,
|
||||
"order": 10,
|
||||
},
|
||||
}
|
||||
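Before the OAuth helpers below, here is an illustrative sketch of the non-OAuth path through MCPTool; it is not part of this changeset. The URL and bearer token are placeholders, and importing the module assumes a configured DocsGPT backend, since it creates a MongoDB client at import time.

# Illustrative only: bearer-token connection to a remote MCP server.
from application.agents.tools.mcp_tool import MCPTool

config = {
    "server_url": "https://mcp.example.com/mcp",  # placeholder URL
    "transport_type": "auto",                     # resolves to streamable HTTP here
    "auth_type": "bearer",
    "auth_credentials": {"bearer_token": "<token>"},
    "timeout": 30,
}
tool = MCPTool(config, user_id="<user-id>")

status = tool.test_connection()
print(status["message"])
if status["success"]:
    # Action names come from the server's discovered tool list.
    first_action = status["tools"][0]["name"]
    print(tool.execute_action(first_action))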
|
||||
|
||||
class DocsGPTOAuth(OAuthClientProvider):
|
||||
"""
|
||||
Custom OAuth handler for DocsGPT that uses frontend redirect instead of browser.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
mcp_url: str,
|
||||
redirect_uri: str,
|
||||
redis_client: Redis | None = None,
|
||||
redis_prefix: str = "mcp_oauth:",
|
||||
task_id: str = None,
|
||||
scopes: str | list[str] | None = None,
|
||||
client_name: str = "DocsGPT-MCP",
|
||||
user_id=None,
|
||||
db=None,
|
||||
additional_client_metadata: dict[str, Any] | None = None,
|
||||
skip_redirect_validation: bool = False,
|
||||
):
|
||||
self.redirect_uri = redirect_uri
|
||||
self.redis_client = redis_client
|
||||
self.redis_prefix = redis_prefix
|
||||
self.task_id = task_id
|
||||
self.user_id = user_id
|
||||
self.db = db
|
||||
|
||||
parsed_url = urlparse(mcp_url)
|
||||
self.server_base_url = f"{parsed_url.scheme}://{parsed_url.netloc}"
|
||||
|
||||
if isinstance(scopes, list):
|
||||
scopes = " ".join(scopes)
|
||||
client_metadata = OAuthClientMetadata(
|
||||
client_name=client_name,
|
||||
redirect_uris=[AnyHttpUrl(redirect_uri)],
|
||||
grant_types=["authorization_code", "refresh_token"],
|
||||
response_types=["code"],
|
||||
scope=scopes,
|
||||
**(additional_client_metadata or {}),
|
||||
)
|
||||
|
||||
storage = DBTokenStorage(
|
||||
server_url=self.server_base_url,
|
||||
user_id=self.user_id,
|
||||
db_client=self.db,
|
||||
expected_redirect_uri=None if skip_redirect_validation else redirect_uri,
|
||||
)
|
||||
|
||||
super().__init__(
|
||||
server_url=self.server_base_url,
|
||||
client_metadata=client_metadata,
|
||||
storage=storage,
|
||||
redirect_handler=self.redirect_handler,
|
||||
callback_handler=self.callback_handler,
|
||||
)
|
||||
|
||||
self.auth_url = None
|
||||
self.extracted_state = None
|
||||
|
||||
def _process_auth_url(self, authorization_url: str) -> tuple[str, str]:
|
||||
"""Process authorization URL to extract state"""
|
||||
try:
|
||||
parsed_url = urlparse(authorization_url)
|
||||
query_params = parse_qs(parsed_url.query)
|
||||
|
||||
state_params = query_params.get("state", [])
|
||||
if state_params:
|
||||
state = state_params[0]
|
||||
else:
|
||||
raise ValueError("No state in auth URL")
|
||||
return authorization_url, state
|
||||
except Exception as e:
|
||||
raise Exception(f"Failed to process auth URL: {e}")
|
||||
|
||||
async def redirect_handler(self, authorization_url: str) -> None:
|
||||
"""Store auth URL and state in Redis for frontend to use."""
|
||||
auth_url, state = self._process_auth_url(authorization_url)
|
||||
logger.info("Processed auth_url: %s, state: %s", auth_url, state)
|
||||
self.auth_url = auth_url
|
||||
self.extracted_state = state
|
||||
|
||||
if self.redis_client and self.extracted_state:
|
||||
key = f"{self.redis_prefix}auth_url:{self.extracted_state}"
|
||||
self.redis_client.setex(key, 600, auth_url)
|
||||
logger.info("Stored auth_url in Redis: %s", key)
|
||||
|
||||
if self.task_id:
|
||||
status_key = f"mcp_oauth_status:{self.task_id}"
|
||||
status_data = {
|
||||
"status": "requires_redirect",
|
||||
"message": "Authorization required",
|
||||
"authorization_url": self.auth_url,
|
||||
"state": self.extracted_state,
|
||||
"requires_oauth": True,
|
||||
"task_id": self.task_id,
|
||||
}
|
||||
self.redis_client.setex(status_key, 600, json.dumps(status_data))
|
||||
|
||||
async def callback_handler(self) -> tuple[str, str | None]:
|
||||
"""Wait for auth code from Redis using the state value."""
|
||||
if not self.redis_client or not self.extracted_state:
|
||||
raise Exception("Redis client or state not configured for OAuth")
|
||||
poll_interval = 1
|
||||
max_wait_time = 300
|
||||
code_key = f"{self.redis_prefix}code:{self.extracted_state}"
|
||||
|
||||
if self.task_id:
|
||||
status_key = f"mcp_oauth_status:{self.task_id}"
|
||||
status_data = {
|
||||
"status": "awaiting_callback",
|
||||
"message": "Waiting for authorization...",
|
||||
"authorization_url": self.auth_url,
|
||||
"state": self.extracted_state,
|
||||
"requires_oauth": True,
|
||||
"task_id": self.task_id,
|
||||
}
|
||||
self.redis_client.setex(status_key, 600, json.dumps(status_data))
|
||||
start_time = time.time()
|
||||
while time.time() - start_time < max_wait_time:
|
||||
code_data = self.redis_client.get(code_key)
|
||||
if code_data:
|
||||
code = code_data.decode()
|
||||
returned_state = self.extracted_state
|
||||
|
||||
self.redis_client.delete(code_key)
|
||||
self.redis_client.delete(
|
||||
f"{self.redis_prefix}auth_url:{self.extracted_state}"
|
||||
)
|
||||
self.redis_client.delete(
|
||||
f"{self.redis_prefix}state:{self.extracted_state}"
|
||||
)
|
||||
|
||||
if self.task_id:
|
||||
status_data = {
|
||||
"status": "callback_received",
|
||||
"message": "Completing authentication...",
|
||||
"task_id": self.task_id,
|
||||
}
|
||||
self.redis_client.setex(status_key, 600, json.dumps(status_data))
|
||||
return code, returned_state
|
||||
error_key = f"{self.redis_prefix}error:{self.extracted_state}"
|
||||
error_data = self.redis_client.get(error_key)
|
||||
if error_data:
|
||||
error_msg = error_data.decode()
|
||||
self.redis_client.delete(error_key)
|
||||
self.redis_client.delete(
|
||||
f"{self.redis_prefix}auth_url:{self.extracted_state}"
|
||||
)
|
||||
self.redis_client.delete(
|
||||
f"{self.redis_prefix}state:{self.extracted_state}"
|
||||
)
|
||||
raise Exception(f"OAuth error: {error_msg}")
|
||||
await asyncio.sleep(poll_interval)
|
||||
self.redis_client.delete(f"{self.redis_prefix}auth_url:{self.extracted_state}")
|
||||
self.redis_client.delete(f"{self.redis_prefix}state:{self.extracted_state}")
|
||||
raise Exception("OAuth timeout: no code received within 5 minutes")
|
||||
|
||||
|
||||
class NonInteractiveOAuth(DocsGPTOAuth):
|
||||
"""OAuth provider that fails fast on 401 instead of starting interactive auth.
|
||||
|
||||
Used during query execution to prevent the streaming response from blocking
|
||||
while waiting for user authorization that will never come.
|
||||
"""
|
||||
|
||||
def __init__(self, **kwargs):
|
||||
kwargs.setdefault("task_id", None)
|
||||
kwargs["skip_redirect_validation"] = True
|
||||
super().__init__(**kwargs)
|
||||
|
||||
async def redirect_handler(self, authorization_url: str) -> None:
|
||||
raise Exception(
|
||||
"OAuth session expired — please re-authorize this MCP server in tool settings."
|
||||
)
|
||||
|
||||
async def callback_handler(self) -> tuple[str, str | None]:
|
||||
raise Exception(
|
||||
"OAuth session expired — please re-authorize this MCP server in tool settings."
|
||||
)
|
||||
|
||||
|
||||
class DBTokenStorage(TokenStorage):
|
||||
def __init__(
|
||||
self,
|
||||
server_url: str,
|
||||
user_id: str,
|
||||
db_client,
|
||||
expected_redirect_uri: Optional[str] = None,
|
||||
):
|
||||
self.server_url = server_url
|
||||
self.user_id = user_id
|
||||
self.db_client = db_client
|
||||
self.expected_redirect_uri = expected_redirect_uri
|
||||
self.collection = db_client["connector_sessions"]
|
||||
|
||||
@staticmethod
|
||||
def get_base_url(url: str) -> str:
|
||||
parsed = urlparse(url)
|
||||
return f"{parsed.scheme}://{parsed.netloc}"
|
||||
|
||||
def get_db_key(self) -> dict:
|
||||
return {
|
||||
"server_url": self.get_base_url(self.server_url),
|
||||
"user_id": self.user_id,
|
||||
}
|
||||
|
||||
async def get_tokens(self) -> OAuthToken | None:
|
||||
doc = await asyncio.to_thread(self.collection.find_one, self.get_db_key())
|
||||
if not doc or "tokens" not in doc:
|
||||
return None
|
||||
try:
|
||||
return OAuthToken.model_validate(doc["tokens"])
|
||||
except ValidationError as e:
|
||||
logger.error("Could not load tokens: %s", e)
|
||||
return None
|
||||
|
||||
async def set_tokens(self, tokens: OAuthToken) -> None:
|
||||
await asyncio.to_thread(
|
||||
self.collection.update_one,
|
||||
self.get_db_key(),
|
||||
{"$set": {"tokens": tokens.model_dump()}},
|
||||
True,
|
||||
)
|
||||
logger.info("Saved tokens for %s", self.get_base_url(self.server_url))
|
||||
|
||||
async def get_client_info(self) -> OAuthClientInformationFull | None:
|
||||
doc = await asyncio.to_thread(self.collection.find_one, self.get_db_key())
|
||||
if not doc or "client_info" not in doc:
|
||||
logger.debug(
|
||||
"No client_info in DB for %s", self.get_base_url(self.server_url)
|
||||
)
|
||||
return None
|
||||
try:
|
||||
client_info = OAuthClientInformationFull.model_validate(doc["client_info"])
|
||||
if self.expected_redirect_uri:
|
||||
stored_uris = [
|
||||
str(uri).rstrip("/") for uri in client_info.redirect_uris
|
||||
]
|
||||
expected_uri = self.expected_redirect_uri.rstrip("/")
|
||||
if expected_uri not in stored_uris:
|
||||
logger.warning(
|
||||
"Redirect URI mismatch for %s: expected=%s stored=%s — clearing.",
|
||||
self.get_base_url(self.server_url),
|
||||
expected_uri,
|
||||
stored_uris,
|
||||
)
|
||||
await asyncio.to_thread(
|
||||
self.collection.update_one,
|
||||
self.get_db_key(),
|
||||
{"$unset": {"client_info": "", "tokens": ""}},
|
||||
)
|
||||
return None
|
||||
return client_info
|
||||
except ValidationError as e:
|
||||
logger.error("Could not load client info: %s", e)
|
||||
return None
|
||||
|
||||
def _serialize_client_info(self, info: dict) -> dict:
|
||||
if "redirect_uris" in info and isinstance(info["redirect_uris"], list):
|
||||
info["redirect_uris"] = [str(u) for u in info["redirect_uris"]]
|
||||
return info
|
||||
|
||||
async def set_client_info(self, client_info: OAuthClientInformationFull) -> None:
|
||||
serialized_info = self._serialize_client_info(client_info.model_dump())
|
||||
await asyncio.to_thread(
|
||||
self.collection.update_one,
|
||||
self.get_db_key(),
|
||||
{"$set": {"client_info": serialized_info}},
|
||||
True,
|
||||
)
|
||||
logger.info("Saved client info for %s", self.get_base_url(self.server_url))
|
||||
|
||||
async def clear(self) -> None:
|
||||
await asyncio.to_thread(self.collection.delete_one, self.get_db_key())
|
||||
logger.info("Cleared OAuth cache for %s", self.get_base_url(self.server_url))
|
||||
|
||||
@classmethod
|
||||
async def clear_all(cls, db_client) -> None:
|
||||
collection = db_client["connector_sessions"]
|
||||
await asyncio.to_thread(collection.delete_many, {})
|
||||
logger.info("Cleared all OAuth client cache data.")
|
||||
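The class above persists OAuth tokens and client registrations per (server base URL, user) pair in the connector_sessions collection. A minimal usage sketch follows; the MongoDB URI, database name, server URL and user id are placeholder assumptions, and it presumes OAuthToken exposes an access_token field as in the MCP auth models.

# Sketch only; DBTokenStorage is defined above in this module.
import asyncio
from pymongo import MongoClient

async def show_cached_tokens():
    db = MongoClient("mongodb://localhost:27017")["docsgpt"]  # assumed connection and db name
    storage = DBTokenStorage(
        server_url="https://mcp.example.com/sse",  # hypothetical MCP server URL
        user_id="user-123",                        # hypothetical user id
        db_client=db,
    )
    tokens = await storage.get_tokens()
    print(tokens.access_token if tokens else "no cached tokens")

asyncio.run(show_cached_tokens())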
|
||||
|
||||
class MCPOAuthManager:
|
||||
"""Manager for handling MCP OAuth callbacks."""
|
||||
|
||||
def __init__(self, redis_client: Redis | None, redis_prefix: str = "mcp_oauth:"):
|
||||
self.redis_client = redis_client
|
||||
self.redis_prefix = redis_prefix
|
||||
|
||||
def handle_oauth_callback(
|
||||
self, state: str, code: str, error: Optional[str] = None
|
||||
) -> bool:
|
||||
"""
|
||||
Handle OAuth callback from provider.
|
||||
|
||||
Args:
|
||||
state: The state parameter from OAuth callback
|
||||
code: The authorization code from OAuth callback
|
||||
error: Error message if OAuth failed
|
||||
|
||||
Returns:
|
||||
True if successful, False otherwise
|
||||
"""
|
||||
try:
|
||||
if not self.redis_client or not state:
|
||||
raise Exception("Redis client or state not provided")
|
||||
if error:
|
||||
error_key = f"{self.redis_prefix}error:{state}"
|
||||
self.redis_client.setex(error_key, 300, error)
|
||||
raise Exception(f"OAuth error received: {error}")
|
||||
code_key = f"{self.redis_prefix}code:{state}"
|
||||
self.redis_client.setex(code_key, 300, code)
|
||||
|
||||
state_key = f"{self.redis_prefix}state:{state}"
|
||||
self.redis_client.setex(state_key, 300, "completed")
|
||||
|
||||
return True
|
||||
except Exception as e:
|
||||
logger.error("Error handling OAuth callback: %s", e)
|
||||
return False
|
||||
|
||||
def get_oauth_status(self, task_id: str) -> Dict[str, Any]:
|
||||
"""Get current status of OAuth flow using provided task_id."""
|
||||
if not task_id:
|
||||
return {"status": "not_started", "message": "OAuth flow not started"}
|
||||
return mcp_oauth_status_task(task_id)
|
||||
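For context, a sketch of how a callback route might feed the provider redirect into MCPOAuthManager above. The Redis connection settings and the on_provider_redirect wrapper are illustrative assumptions, not part of this diff; the real HTTP route lives elsewhere in the application and assumes a reachable Redis instance.

from redis import Redis

redis_client = Redis(host="localhost", port=6379)  # assumed Redis settings
manager = MCPOAuthManager(redis_client)

def on_provider_redirect(query_params: dict) -> str:
    # query_params would carry state/code/error from the OAuth provider's redirect request
    ok = manager.handle_oauth_callback(
        state=query_params.get("state", ""),
        code=query_params.get("code", ""),
        error=query_params.get("error"),
    )
    return "Authorization received." if ok else "OAuth callback failed."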
546
application/agents/tools/memory.py
Normal file
@@ -0,0 +1,546 @@
|
||||
from datetime import datetime
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict, List, Optional
|
||||
import re
|
||||
import uuid
|
||||
|
||||
from .base import Tool
|
||||
from application.core.mongo_db import MongoDB
|
||||
from application.core.settings import settings
|
||||
|
||||
|
||||
class MemoryTool(Tool):
|
||||
"""Memory
|
||||
|
||||
Stores and retrieves information across conversations through a memory file directory.
|
||||
"""
|
||||
|
||||
def __init__(self, tool_config: Optional[Dict[str, Any]] = None, user_id: Optional[str] = None) -> None:
|
||||
"""Initialize the tool.
|
||||
|
||||
Args:
|
||||
tool_config: Optional tool configuration. Should include:
|
||||
- tool_id: Unique identifier for this memory tool instance (from user_tools._id)
|
||||
This ensures each user's tool configuration has isolated memories
|
||||
user_id: The authenticated user's id (should come from decoded_token["sub"]).
|
||||
"""
|
||||
self.user_id: Optional[str] = user_id
|
||||
|
||||
# Get tool_id from configuration (passed from user_tools._id in production)
|
||||
# In production, tool_id is the MongoDB ObjectId string from user_tools collection
|
||||
if tool_config and "tool_id" in tool_config:
|
||||
self.tool_id = tool_config["tool_id"]
|
||||
elif user_id:
|
||||
# Fallback for backward compatibility or testing
|
||||
self.tool_id = f"default_{user_id}"
|
||||
else:
|
||||
# Last resort fallback (shouldn't happen in normal use)
|
||||
self.tool_id = str(uuid.uuid4())
|
||||
|
||||
db = MongoDB.get_client()[settings.MONGO_DB_NAME]
|
||||
self.collection = db["memories"]
|
||||
|
||||
# -----------------------------
|
||||
# Action implementations
|
||||
# -----------------------------
|
||||
def execute_action(self, action_name: str, **kwargs: Any) -> str:
|
||||
"""Execute an action by name.
|
||||
|
||||
Args:
|
||||
action_name: One of view, create, str_replace, insert, delete, rename.
|
||||
**kwargs: Parameters for the action.
|
||||
|
||||
Returns:
|
||||
A human-readable string result.
|
||||
"""
|
||||
if not self.user_id:
|
||||
return "Error: MemoryTool requires a valid user_id."
|
||||
|
||||
if action_name == "view":
|
||||
return self._view(
|
||||
kwargs.get("path", "/"),
|
||||
kwargs.get("view_range")
|
||||
)
|
||||
|
||||
if action_name == "create":
|
||||
return self._create(
|
||||
kwargs.get("path", ""),
|
||||
kwargs.get("file_text", "")
|
||||
)
|
||||
|
||||
if action_name == "str_replace":
|
||||
return self._str_replace(
|
||||
kwargs.get("path", ""),
|
||||
kwargs.get("old_str", ""),
|
||||
kwargs.get("new_str", "")
|
||||
)
|
||||
|
||||
if action_name == "insert":
|
||||
return self._insert(
|
||||
kwargs.get("path", ""),
|
||||
kwargs.get("insert_line", 1),
|
||||
kwargs.get("insert_text", "")
|
||||
)
|
||||
|
||||
if action_name == "delete":
|
||||
return self._delete(kwargs.get("path", ""))
|
||||
|
||||
if action_name == "rename":
|
||||
return self._rename(
|
||||
kwargs.get("old_path", ""),
|
||||
kwargs.get("new_path", "")
|
||||
)
|
||||
|
||||
return f"Unknown action: {action_name}"
|
||||
|
||||
def get_actions_metadata(self) -> List[Dict[str, Any]]:
|
||||
"""Return JSON metadata describing supported actions for tool schemas."""
|
||||
return [
|
||||
{
|
||||
"name": "view",
|
||||
"description": "Shows directory contents or file contents with optional line ranges.",
|
||||
"parameters": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"path": {
|
||||
"type": "string",
|
||||
"description": "Path to file or directory (e.g., /notes.txt or /project/ or /)."
|
||||
},
|
||||
"view_range": {
|
||||
"type": "array",
|
||||
"items": {"type": "integer"},
|
||||
"description": "Optional [start_line, end_line] to view specific lines (1-indexed)."
|
||||
}
|
||||
},
|
||||
"required": ["path"]
|
||||
},
|
||||
},
|
||||
{
|
||||
"name": "create",
|
||||
"description": "Create or overwrite a file.",
|
||||
"parameters": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"path": {
|
||||
"type": "string",
|
||||
"description": "File path to create (e.g., /notes.txt or /project/task.txt)."
|
||||
},
|
||||
"file_text": {
|
||||
"type": "string",
|
||||
"description": "Content to write to the file."
|
||||
}
|
||||
},
|
||||
"required": ["path", "file_text"]
|
||||
},
|
||||
},
|
||||
{
|
||||
"name": "str_replace",
|
||||
"description": "Replace text in a file.",
|
||||
"parameters": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"path": {
|
||||
"type": "string",
|
||||
"description": "File path (e.g., /notes.txt)."
|
||||
},
|
||||
"old_str": {
|
||||
"type": "string",
|
||||
"description": "String to find."
|
||||
},
|
||||
"new_str": {
|
||||
"type": "string",
|
||||
"description": "String to replace with."
|
||||
}
|
||||
},
|
||||
"required": ["path", "old_str", "new_str"]
|
||||
},
|
||||
},
|
||||
{
|
||||
"name": "insert",
|
||||
"description": "Insert text at a specific line in a file.",
|
||||
"parameters": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"path": {
|
||||
"type": "string",
|
||||
"description": "File path (e.g., /notes.txt)."
|
||||
},
|
||||
"insert_line": {
|
||||
"type": "integer",
|
||||
"description": "Line number to insert at (1-indexed)."
|
||||
},
|
||||
"insert_text": {
|
||||
"type": "string",
|
||||
"description": "Text to insert."
|
||||
}
|
||||
},
|
||||
"required": ["path", "insert_line", "insert_text"]
|
||||
},
|
||||
},
|
||||
{
|
||||
"name": "delete",
|
||||
"description": "Delete a file or directory.",
|
||||
"parameters": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"path": {
|
||||
"type": "string",
|
||||
"description": "Path to delete (e.g., /notes.txt or /project/)."
|
||||
}
|
||||
},
|
||||
"required": ["path"]
|
||||
},
|
||||
},
|
||||
{
|
||||
"name": "rename",
|
||||
"description": "Rename or move a file/directory.",
|
||||
"parameters": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"old_path": {
|
||||
"type": "string",
|
||||
"description": "Current path (e.g., /old.txt)."
|
||||
},
|
||||
"new_path": {
|
||||
"type": "string",
|
||||
"description": "New path (e.g., /new.txt)."
|
||||
}
|
||||
},
|
||||
"required": ["old_path", "new_path"]
|
||||
},
|
||||
},
|
||||
]
|
||||
|
||||
def get_config_requirements(self) -> Dict[str, Any]:
|
||||
"""Return configuration requirements."""
|
||||
return {}
|
||||
|
||||
# -----------------------------
|
||||
# Path validation
|
||||
# -----------------------------
|
||||
def _validate_path(self, path: str) -> Optional[str]:
|
||||
"""Validate and normalize path.
|
||||
|
||||
Args:
|
||||
path: User-provided path.
|
||||
|
||||
Returns:
|
||||
Normalized path or None if invalid.
|
||||
"""
|
||||
if not path:
|
||||
return None
|
||||
|
||||
# Remove any leading/trailing whitespace
|
||||
path = path.strip()
|
||||
|
||||
# Preserve whether path ends with / (indicates directory)
|
||||
is_directory = path.endswith("/")
|
||||
|
||||
# Ensure path starts with / for consistency
|
||||
if not path.startswith("/"):
|
||||
path = "/" + path
|
||||
|
||||
# Check for directory traversal patterns
|
||||
if ".." in path or path.count("//") > 0:
|
||||
return None
|
||||
|
||||
# Normalize the path
|
||||
try:
|
||||
# Convert to Path object and resolve to canonical form
|
||||
normalized = str(Path(path).as_posix())
|
||||
|
||||
# Ensure it still starts with /
|
||||
if not normalized.startswith("/"):
|
||||
return None
|
||||
|
||||
# Preserve trailing slash for directories
|
||||
if is_directory and not normalized.endswith("/") and normalized != "/":
|
||||
normalized = normalized + "/"
|
||||
|
||||
return normalized
|
||||
except Exception:
|
||||
return None
|
||||
|
||||
# -----------------------------
|
||||
# Internal helpers
|
||||
# -----------------------------
|
||||
def _view(self, path: str, view_range: Optional[List[int]] = None) -> str:
|
||||
"""View directory contents or file contents."""
|
||||
validated_path = self._validate_path(path)
|
||||
if not validated_path:
|
||||
return "Error: Invalid path."
|
||||
|
||||
# Check if viewing directory (ends with / or is root)
|
||||
if validated_path == "/" or validated_path.endswith("/"):
|
||||
return self._view_directory(validated_path)
|
||||
|
||||
# Otherwise view file
|
||||
return self._view_file(validated_path, view_range)
|
||||
|
||||
def _view_directory(self, path: str) -> str:
|
||||
"""List files in a directory."""
|
||||
# Ensure path ends with / for proper prefix matching
|
||||
search_path = path if path.endswith("/") else path + "/"
|
||||
|
||||
# Find all files that start with this directory path
|
||||
query = {
|
||||
"user_id": self.user_id,
|
||||
"tool_id": self.tool_id,
|
||||
"path": {"$regex": f"^{re.escape(search_path)}"}
|
||||
}
|
||||
|
||||
docs = list(self.collection.find(query, {"path": 1}))
|
||||
|
||||
if not docs:
|
||||
return f"Directory: {path}\n(empty)"
|
||||
|
||||
# Extract filenames relative to the directory
|
||||
files = []
|
||||
for doc in docs:
|
||||
file_path = doc["path"]
|
||||
# Remove the directory prefix
|
||||
if file_path.startswith(search_path):
|
||||
relative = file_path[len(search_path):]
|
||||
if relative:
|
||||
files.append(relative)
|
||||
|
||||
files.sort()
|
||||
file_list = "\n".join(f"- {f}" for f in files)
|
||||
return f"Directory: {path}\n{file_list}"
|
||||
|
||||
def _view_file(self, path: str, view_range: Optional[List[int]] = None) -> str:
|
||||
"""View file contents with optional line range."""
|
||||
doc = self.collection.find_one({"user_id": self.user_id, "tool_id": self.tool_id, "path": path})
|
||||
|
||||
if not doc or not doc.get("content"):
|
||||
return f"Error: File not found: {path}"
|
||||
|
||||
content = str(doc["content"])
|
||||
|
||||
# Apply view_range if specified
|
||||
if view_range and len(view_range) == 2:
|
||||
lines = content.split("\n")
|
||||
start, end = view_range
|
||||
# Convert to 0-indexed
|
||||
start_idx = max(0, start - 1)
|
||||
end_idx = min(len(lines), end)
|
||||
|
||||
if start_idx >= len(lines):
|
||||
return f"Error: Line range out of bounds. File has {len(lines)} lines."
|
||||
|
||||
selected_lines = lines[start_idx:end_idx]
|
||||
# Add line numbers (enumerate with 1-based start)
|
||||
numbered_lines = [f"{i}: {line}" for i, line in enumerate(selected_lines, start=start)]
|
||||
return "\n".join(numbered_lines)
|
||||
|
||||
return content
|
||||
|
||||
def _create(self, path: str, file_text: str) -> str:
|
||||
"""Create or overwrite a file."""
|
||||
validated_path = self._validate_path(path)
|
||||
if not validated_path:
|
||||
return "Error: Invalid path."
|
||||
|
||||
if validated_path == "/" or validated_path.endswith("/"):
|
||||
return "Error: Cannot create a file at directory path."
|
||||
|
||||
self.collection.update_one(
|
||||
{"user_id": self.user_id, "tool_id": self.tool_id, "path": validated_path},
|
||||
{
|
||||
"$set": {
|
||||
"content": file_text,
|
||||
"updated_at": datetime.now()
|
||||
}
|
||||
},
|
||||
upsert=True
|
||||
)
|
||||
|
||||
return f"File created: {validated_path}"
|
||||
|
||||
def _str_replace(self, path: str, old_str: str, new_str: str) -> str:
|
||||
"""Replace text in a file."""
|
||||
validated_path = self._validate_path(path)
|
||||
if not validated_path:
|
||||
return "Error: Invalid path."
|
||||
|
||||
if not old_str:
|
||||
return "Error: old_str is required."
|
||||
|
||||
doc = self.collection.find_one({"user_id": self.user_id, "tool_id": self.tool_id, "path": validated_path})
|
||||
|
||||
if not doc or not doc.get("content"):
|
||||
return f"Error: File not found: {validated_path}"
|
||||
|
||||
current_content = str(doc["content"])
|
||||
|
||||
# Check if old_str exists (case-insensitive)
|
||||
if old_str.lower() not in current_content.lower():
|
||||
return f"Error: String '{old_str}' not found in file."
|
||||
|
||||
# Replace the string (case-insensitive)
|
||||
import re as regex_module
|
||||
updated_content = regex_module.sub(regex_module.escape(old_str), new_str, current_content, flags=regex_module.IGNORECASE)
|
||||
|
||||
self.collection.update_one(
|
||||
{"user_id": self.user_id, "tool_id": self.tool_id, "path": validated_path},
|
||||
{
|
||||
"$set": {
|
||||
"content": updated_content,
|
||||
"updated_at": datetime.now()
|
||||
}
|
||||
}
|
||||
)
|
||||
|
||||
return f"File updated: {validated_path}"
|
||||
|
||||
def _insert(self, path: str, insert_line: int, insert_text: str) -> str:
|
||||
"""Insert text at a specific line."""
|
||||
validated_path = self._validate_path(path)
|
||||
if not validated_path:
|
||||
return "Error: Invalid path."
|
||||
|
||||
if not insert_text:
|
||||
return "Error: insert_text is required."
|
||||
|
||||
doc = self.collection.find_one({"user_id": self.user_id, "tool_id": self.tool_id, "path": validated_path})
|
||||
|
||||
if not doc or not doc.get("content"):
|
||||
return f"Error: File not found: {validated_path}"
|
||||
|
||||
current_content = str(doc["content"])
|
||||
lines = current_content.split("\n")
|
||||
|
||||
# Convert to 0-indexed
|
||||
index = insert_line - 1
|
||||
if index < 0 or index > len(lines):
|
||||
return f"Error: Invalid line number. File has {len(lines)} lines."
|
||||
|
||||
lines.insert(index, insert_text)
|
||||
updated_content = "\n".join(lines)
|
||||
|
||||
self.collection.update_one(
|
||||
{"user_id": self.user_id, "tool_id": self.tool_id, "path": validated_path},
|
||||
{
|
||||
"$set": {
|
||||
"content": updated_content,
|
||||
"updated_at": datetime.now()
|
||||
}
|
||||
}
|
||||
)
|
||||
|
||||
return f"Text inserted at line {insert_line} in {validated_path}"
|
||||
|
||||
def _delete(self, path: str) -> str:
|
||||
"""Delete a file or directory."""
|
||||
validated_path = self._validate_path(path)
|
||||
if not validated_path:
|
||||
return "Error: Invalid path."
|
||||
|
||||
if validated_path == "/":
|
||||
# Delete all files for this user and tool
|
||||
result = self.collection.delete_many({"user_id": self.user_id, "tool_id": self.tool_id})
|
||||
return f"Deleted {result.deleted_count} file(s) from memory."
|
||||
|
||||
# Check if it's a directory (ends with /)
|
||||
if validated_path.endswith("/"):
|
||||
# Delete all files in directory
|
||||
result = self.collection.delete_many({
|
||||
"user_id": self.user_id,
|
||||
"tool_id": self.tool_id,
|
||||
"path": {"$regex": f"^{re.escape(validated_path)}"}
|
||||
})
|
||||
return f"Deleted directory and {result.deleted_count} file(s)."
|
||||
|
||||
# Try to delete as directory first (without trailing slash)
|
||||
# Check if any files start with this path + /
|
||||
search_path = validated_path + "/"
|
||||
directory_result = self.collection.delete_many({
|
||||
"user_id": self.user_id,
|
||||
"tool_id": self.tool_id,
|
||||
"path": {"$regex": f"^{re.escape(search_path)}"}
|
||||
})
|
||||
|
||||
if directory_result.deleted_count > 0:
|
||||
return f"Deleted directory and {directory_result.deleted_count} file(s)."
|
||||
|
||||
# Delete single file
|
||||
result = self.collection.delete_one({
|
||||
"user_id": self.user_id,
|
||||
"tool_id": self.tool_id,
|
||||
"path": validated_path
|
||||
})
|
||||
|
||||
if result.deleted_count:
|
||||
return f"Deleted: {validated_path}"
|
||||
return f"Error: File not found: {validated_path}"
|
||||
|
||||
def _rename(self, old_path: str, new_path: str) -> str:
|
||||
"""Rename or move a file/directory."""
|
||||
validated_old = self._validate_path(old_path)
|
||||
validated_new = self._validate_path(new_path)
|
||||
|
||||
if not validated_old or not validated_new:
|
||||
return "Error: Invalid path."
|
||||
|
||||
if validated_old == "/" or validated_new == "/":
|
||||
return "Error: Cannot rename root directory."
|
||||
|
||||
# Check if renaming a directory
|
||||
if validated_old.endswith("/"):
|
||||
# Ensure validated_new also ends with / for proper path replacement
|
||||
if not validated_new.endswith("/"):
|
||||
validated_new = validated_new + "/"
|
||||
|
||||
# Find all files in the old directory
|
||||
docs = list(self.collection.find({
|
||||
"user_id": self.user_id,
|
||||
"tool_id": self.tool_id,
|
||||
"path": {"$regex": f"^{re.escape(validated_old)}"}
|
||||
}))
|
||||
|
||||
if not docs:
|
||||
return f"Error: Directory not found: {validated_old}"
|
||||
|
||||
# Update paths for all files
|
||||
for doc in docs:
|
||||
old_file_path = doc["path"]
|
||||
new_file_path = old_file_path.replace(validated_old, validated_new, 1)
|
||||
|
||||
self.collection.update_one(
|
||||
{"_id": doc["_id"]},
|
||||
{"$set": {"path": new_file_path, "updated_at": datetime.now()}}
|
||||
)
|
||||
|
||||
return f"Renamed directory: {validated_old} -> {validated_new} ({len(docs)} files)"
|
||||
|
||||
# Rename single file
|
||||
doc = self.collection.find_one({
|
||||
"user_id": self.user_id,
|
||||
"tool_id": self.tool_id,
|
||||
"path": validated_old
|
||||
})
|
||||
|
||||
if not doc:
|
||||
return f"Error: File not found: {validated_old}"
|
||||
|
||||
# Check if new path already exists
|
||||
existing = self.collection.find_one({
|
||||
"user_id": self.user_id,
|
||||
"tool_id": self.tool_id,
|
||||
"path": validated_new
|
||||
})
|
||||
|
||||
if existing:
|
||||
return f"Error: File already exists at {validated_new}"
|
||||
|
||||
# Delete the old document and create a new one with the new path
|
||||
self.collection.delete_one({"user_id": self.user_id, "tool_id": self.tool_id, "path": validated_old})
|
||||
self.collection.insert_one({
|
||||
"user_id": self.user_id,
|
||||
"tool_id": self.tool_id,
|
||||
"path": validated_new,
|
||||
"content": doc.get("content", ""),
|
||||
"updated_at": datetime.now()
|
||||
})
|
||||
|
||||
return f"Renamed: {validated_old} -> {validated_new}"
|
||||
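A short usage sketch of the MemoryTool actions above; the tool_id and user_id values are placeholders, and it assumes a reachable MongoDB configured through application.core.settings.

from application.agents.tools.memory import MemoryTool

memory = MemoryTool(tool_config={"tool_id": "demo-tool-id"}, user_id="user-123")  # placeholder ids

print(memory.execute_action("create", path="/project/notes.txt", file_text="line one\nline two"))
print(memory.execute_action("view", path="/project/"))  # directory listing
print(memory.execute_action("str_replace", path="/project/notes.txt",
                            old_str="line one", new_str="LINE ONE"))
print(memory.execute_action("view", path="/project/notes.txt", view_range=[1, 2]))
print(memory.execute_action("rename", old_path="/project/notes.txt", new_path="/project/todo.txt"))
print(memory.execute_action("delete", path="/project/"))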
223
application/agents/tools/notes.py
Normal file
@@ -0,0 +1,223 @@
|
||||
from datetime import datetime
|
||||
from typing import Any, Dict, List, Optional
|
||||
import uuid
|
||||
|
||||
from .base import Tool
|
||||
from application.core.mongo_db import MongoDB
|
||||
from application.core.settings import settings
|
||||
|
||||
|
||||
class NotesTool(Tool):
|
||||
"""Notepad
|
||||
|
||||
Single note. Supports viewing, overwriting, string replacement.
|
||||
"""
|
||||
|
||||
def __init__(self, tool_config: Optional[Dict[str, Any]] = None, user_id: Optional[str] = None) -> None:
|
||||
"""Initialize the tool.
|
||||
|
||||
Args:
|
||||
tool_config: Optional tool configuration. Should include:
|
||||
- tool_id: Unique identifier for this notes tool instance (from user_tools._id)
|
||||
This ensures each user's tool configuration has isolated notes
|
||||
user_id: The authenticated user's id (should come from decoded_token["sub"]).
|
||||
"""
|
||||
self.user_id: Optional[str] = user_id
|
||||
|
||||
# Get tool_id from configuration (passed from user_tools._id in production)
|
||||
# In production, tool_id is the MongoDB ObjectId string from user_tools collection
|
||||
if tool_config and "tool_id" in tool_config:
|
||||
self.tool_id = tool_config["tool_id"]
|
||||
elif user_id:
|
||||
# Fallback for backward compatibility or testing
|
||||
self.tool_id = f"default_{user_id}"
|
||||
else:
|
||||
# Last resort fallback (shouldn't happen in normal use)
|
||||
self.tool_id = str(uuid.uuid4())
|
||||
|
||||
db = MongoDB.get_client()[settings.MONGO_DB_NAME]
|
||||
self.collection = db["notes"]
|
||||
|
||||
self._last_artifact_id: Optional[str] = None
|
||||
|
||||
# -----------------------------
|
||||
# Action implementations
|
||||
# -----------------------------
|
||||
def execute_action(self, action_name: str, **kwargs: Any) -> str:
|
||||
"""Execute an action by name.
|
||||
|
||||
Args:
|
||||
action_name: One of view, overwrite, str_replace, insert, delete.
|
||||
**kwargs: Parameters for the action.
|
||||
|
||||
Returns:
|
||||
A human-readable string result.
|
||||
"""
|
||||
if not self.user_id:
|
||||
return "Error: NotesTool requires a valid user_id."
|
||||
|
||||
self._last_artifact_id = None
|
||||
|
||||
if action_name == "view":
|
||||
return self._get_note()
|
||||
|
||||
if action_name == "overwrite":
|
||||
return self._overwrite_note(kwargs.get("text", ""))
|
||||
|
||||
if action_name == "str_replace":
|
||||
return self._str_replace(kwargs.get("old_str", ""), kwargs.get("new_str", ""))
|
||||
|
||||
if action_name == "insert":
|
||||
return self._insert(kwargs.get("line_number", 1), kwargs.get("text", ""))
|
||||
|
||||
if action_name == "delete":
|
||||
return self._delete_note()
|
||||
|
||||
return f"Unknown action: {action_name}"
|
||||
|
||||
def get_actions_metadata(self) -> List[Dict[str, Any]]:
|
||||
"""Return JSON metadata describing supported actions for tool schemas."""
|
||||
return [
|
||||
{
|
||||
"name": "view",
|
||||
"description": "Retrieve the user's note.",
|
||||
"parameters": {"type": "object", "properties": {}},
|
||||
},
|
||||
{
|
||||
"name": "overwrite",
|
||||
"description": "Replace the entire note content (creates if doesn't exist).",
|
||||
"parameters": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"text": {"type": "string", "description": "New note content."}
|
||||
},
|
||||
"required": ["text"],
|
||||
},
|
||||
},
|
||||
{
|
||||
"name": "str_replace",
|
||||
"description": "Replace occurrences of old_str with new_str in the note.",
|
||||
"parameters": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"old_str": {"type": "string", "description": "String to find."},
|
||||
"new_str": {"type": "string", "description": "String to replace with."}
|
||||
},
|
||||
"required": ["old_str", "new_str"],
|
||||
},
|
||||
},
|
||||
{
|
||||
"name": "insert",
|
||||
"description": "Insert text at the specified line number (1-indexed).",
|
||||
"parameters": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"line_number": {"type": "integer", "description": "Line number to insert at (1-indexed)."},
|
||||
"text": {"type": "string", "description": "Text to insert."}
|
||||
},
|
||||
"required": ["line_number", "text"],
|
||||
},
|
||||
},
|
||||
{
|
||||
"name": "delete",
|
||||
"description": "Delete the user's note.",
|
||||
"parameters": {"type": "object", "properties": {}},
|
||||
},
|
||||
]
|
||||
|
||||
def get_config_requirements(self) -> Dict[str, Any]:
|
||||
"""Return configuration requirements (none for now)."""
|
||||
return {}
|
||||
|
||||
def get_artifact_id(self, action_name: str, **kwargs: Any) -> Optional[str]:
|
||||
return self._last_artifact_id
|
||||
|
||||
# -----------------------------
|
||||
# Internal helpers (single-note)
|
||||
# -----------------------------
|
||||
def _get_note(self) -> str:
|
||||
doc = self.collection.find_one({"user_id": self.user_id, "tool_id": self.tool_id})
|
||||
if not doc or not doc.get("note"):
|
||||
return "No note found."
|
||||
if doc.get("_id") is not None:
|
||||
self._last_artifact_id = str(doc.get("_id"))
|
||||
return str(doc["note"])
|
||||
|
||||
def _overwrite_note(self, content: str) -> str:
|
||||
content = (content or "").strip()
|
||||
if not content:
|
||||
return "Note content required."
|
||||
result = self.collection.find_one_and_update(
|
||||
{"user_id": self.user_id, "tool_id": self.tool_id},
|
||||
{"$set": {"note": content, "updated_at": datetime.utcnow()}},
|
||||
upsert=True,
|
||||
return_document=True,
|
||||
)
|
||||
if result and result.get("_id") is not None:
|
||||
self._last_artifact_id = str(result.get("_id"))
|
||||
return "Note saved."
|
||||
|
||||
def _str_replace(self, old_str: str, new_str: str) -> str:
|
||||
if not old_str:
|
||||
return "old_str is required."
|
||||
|
||||
doc = self.collection.find_one({"user_id": self.user_id, "tool_id": self.tool_id})
|
||||
if not doc or not doc.get("note"):
|
||||
return "No note found."
|
||||
|
||||
current_note = str(doc["note"])
|
||||
|
||||
# Case-insensitive search
|
||||
if old_str.lower() not in current_note.lower():
|
||||
return f"String '{old_str}' not found in note."
|
||||
|
||||
# Case-insensitive replacement
|
||||
import re
|
||||
updated_note = re.sub(re.escape(old_str), new_str, current_note, flags=re.IGNORECASE)
|
||||
|
||||
result = self.collection.find_one_and_update(
|
||||
{"user_id": self.user_id, "tool_id": self.tool_id},
|
||||
{"$set": {"note": updated_note, "updated_at": datetime.utcnow()}},
|
||||
return_document=True,
|
||||
)
|
||||
if result and result.get("_id") is not None:
|
||||
self._last_artifact_id = str(result.get("_id"))
|
||||
return "Note updated."
|
||||
|
||||
def _insert(self, line_number: int, text: str) -> str:
|
||||
if not text:
|
||||
return "Text is required."
|
||||
|
||||
doc = self.collection.find_one({"user_id": self.user_id, "tool_id": self.tool_id})
|
||||
if not doc or not doc.get("note"):
|
||||
return "No note found."
|
||||
|
||||
current_note = str(doc["note"])
|
||||
lines = current_note.split("\n")
|
||||
|
||||
# Convert to 0-indexed and validate
|
||||
index = line_number - 1
|
||||
if index < 0 or index > len(lines):
|
||||
return f"Invalid line number. Note has {len(lines)} lines."
|
||||
|
||||
lines.insert(index, text)
|
||||
updated_note = "\n".join(lines)
|
||||
|
||||
result = self.collection.find_one_and_update(
|
||||
{"user_id": self.user_id, "tool_id": self.tool_id},
|
||||
{"$set": {"note": updated_note, "updated_at": datetime.utcnow()}},
|
||||
return_document=True,
|
||||
)
|
||||
if result and result.get("_id") is not None:
|
||||
self._last_artifact_id = str(result.get("_id"))
|
||||
return "Text inserted."
|
||||
|
||||
def _delete_note(self) -> str:
|
||||
doc = self.collection.find_one_and_delete(
|
||||
{"user_id": self.user_id, "tool_id": self.tool_id}
|
||||
)
|
||||
if not doc:
|
||||
return "No note found to delete."
|
||||
if doc.get("_id") is not None:
|
||||
self._last_artifact_id = str(doc.get("_id"))
|
||||
return "Note deleted."
|
||||
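A usage sketch for the single-note tool above, with placeholder ids and the same MongoDB assumption as the memory tool.

from application.agents.tools.notes import NotesTool

notes = NotesTool(tool_config={"tool_id": "demo-tool-id"}, user_id="user-123")  # placeholder ids

print(notes.execute_action("overwrite", text="buy milk\ncall bob"))
print(notes.execute_action("insert", line_number=2, text="water plants"))
print(notes.execute_action("str_replace", old_str="bob", new_str="alice"))
print(notes.execute_action("view"))
print(notes.execute_action("delete"))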
128
application/agents/tools/ntfy.py
Normal file
@@ -0,0 +1,128 @@
|
||||
import requests
|
||||
from application.agents.tools.base import Tool
|
||||
|
||||
class NtfyTool(Tool):
|
||||
"""
|
||||
Ntfy Tool
|
||||
A tool for sending notifications to ntfy topics on a specified server.
|
||||
"""
|
||||
|
||||
def __init__(self, config):
|
||||
"""
|
||||
Initialize the NtfyTool with configuration.
|
||||
|
||||
Args:
|
||||
config (dict): Configuration dictionary containing the access token.
|
||||
"""
|
||||
self.config = config
|
||||
self.token = config.get("token", "")
|
||||
|
||||
def execute_action(self, action_name, **kwargs):
|
||||
"""
|
||||
Execute the specified action with given parameters.
|
||||
|
||||
Args:
|
||||
action_name (str): Name of the action to execute.
|
||||
**kwargs: Parameters for the action, including server_url.
|
||||
|
||||
Returns:
|
||||
dict: Result of the action with status code and message.
|
||||
|
||||
Raises:
|
||||
ValueError: If the action name is unknown.
|
||||
"""
|
||||
actions = {
|
||||
"ntfy_send_message": self._send_message,
|
||||
}
|
||||
if action_name in actions:
|
||||
return actions[action_name](**kwargs)
|
||||
else:
|
||||
raise ValueError(f"Unknown action: {action_name}")
|
||||
|
||||
def _send_message(self, server_url, message, topic, title=None, priority=None):
|
||||
"""
|
||||
Send a message to an ntfy topic on the specified server.
|
||||
|
||||
Args:
|
||||
server_url (str): Base URL of the ntfy server (e.g., https://ntfy.sh).
|
||||
message (str): The message text to send.
|
||||
topic (str): The topic to send the message to.
|
||||
title (str, optional): Title of the notification.
|
||||
priority (int, optional): Priority of the notification (1-5).
|
||||
|
||||
Returns:
|
||||
dict: Response with status code and a confirmation message.
|
||||
|
||||
Raises:
|
||||
ValueError: If priority is not an integer between 1 and 5.
|
||||
"""
|
||||
url = f"{server_url.rstrip('/')}/{topic}"
|
||||
headers = {}
|
||||
if title:
|
||||
headers["X-Title"] = title
|
||||
if priority:
|
||||
try:
|
||||
priority = int(priority)
|
||||
except (ValueError, TypeError):
|
||||
raise ValueError("Priority must be convertible to an integer")
|
||||
if priority < 1 or priority > 5:
|
||||
raise ValueError("Priority must be an integer between 1 and 5")
|
||||
headers["X-Priority"] = str(priority)
|
||||
if self.token:
|
||||
headers["Authorization"] = f"Basic {self.token}"
|
||||
data = message.encode("utf-8")
|
||||
response = requests.post(url, headers=headers, data=data)
|
||||
return {"status_code": response.status_code, "message": "Message sent"}
|
||||
|
||||
def get_actions_metadata(self):
|
||||
"""
|
||||
Provide metadata about available actions.
|
||||
|
||||
Returns:
|
||||
list: List of dictionaries describing each action.
|
||||
"""
|
||||
return [
|
||||
{
|
||||
"name": "ntfy_send_message",
|
||||
"description": "Send a notification to an ntfy topic",
|
||||
"parameters": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"server_url": {
|
||||
"type": "string",
|
||||
"description": "Base URL of the ntfy server",
|
||||
},
|
||||
"message": {
|
||||
"type": "string",
|
||||
"description": "Text to send in the notification",
|
||||
},
|
||||
"topic": {
|
||||
"type": "string",
|
||||
"description": "Topic to send the notification to",
|
||||
},
|
||||
"title": {
|
||||
"type": "string",
|
||||
"description": "Title of the notification (optional)",
|
||||
},
|
||||
"priority": {
|
||||
"type": "integer",
|
||||
"description": "Priority of the notification (1-5, optional)",
|
||||
},
|
||||
},
|
||||
"required": ["server_url", "message", "topic"],
|
||||
"additionalProperties": False,
|
||||
},
|
||||
},
|
||||
]
|
||||
|
||||
def get_config_requirements(self):
|
||||
return {
|
||||
"token": {
|
||||
"type": "string",
|
||||
"label": "Access Token",
|
||||
"description": "Ntfy access token for authentication",
|
||||
"required": True,
|
||||
"secret": True,
|
||||
"order": 1,
|
||||
},
|
||||
}
|
||||
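An illustrative call to the ntfy tool above; the access token and topic are placeholders, and ntfy.sh is simply the public server used as an example.

from application.agents.tools.ntfy import NtfyTool

tool = NtfyTool({"token": "tk_example"})  # placeholder access token
result = tool.execute_action(
    "ntfy_send_message",
    server_url="https://ntfy.sh",
    topic="docsgpt-demo",
    message="Build finished",
    title="CI",
    priority=4,
)
print(result)  # {'status_code': ..., 'message': 'Message sent'}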
179
application/agents/tools/postgres.py
Normal file
@@ -0,0 +1,179 @@
|
||||
import logging
|
||||
|
||||
import psycopg2
|
||||
|
||||
from application.agents.tools.base import Tool
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class PostgresTool(Tool):
|
||||
"""
|
||||
PostgreSQL Database Tool
|
||||
A tool for connecting to a PostgreSQL database using a connection string,
|
||||
executing SQL queries, and retrieving schema information.
|
||||
"""
|
||||
|
||||
def __init__(self, config):
|
||||
self.config = config
|
||||
self.connection_string = config.get("token", "")
|
||||
|
||||
def execute_action(self, action_name, **kwargs):
|
||||
actions = {
|
||||
"postgres_execute_sql": self._execute_sql,
|
||||
"postgres_get_schema": self._get_schema,
|
||||
}
|
||||
if action_name not in actions:
|
||||
raise ValueError(f"Unknown action: {action_name}")
|
||||
return actions[action_name](**kwargs)
|
||||
|
||||
def _execute_sql(self, sql_query):
|
||||
"""
|
||||
Executes an SQL query against the PostgreSQL database using a connection string.
|
||||
"""
|
||||
conn = None
|
||||
try:
|
||||
conn = psycopg2.connect(self.connection_string)
|
||||
cur = conn.cursor()
|
||||
cur.execute(sql_query)
|
||||
conn.commit()
|
||||
|
||||
if sql_query.strip().lower().startswith("select"):
|
||||
column_names = (
|
||||
[desc[0] for desc in cur.description] if cur.description else []
|
||||
)
|
||||
results = []
|
||||
rows = cur.fetchall()
|
||||
for row in rows:
|
||||
results.append(dict(zip(column_names, row)))
|
||||
response_data = {"data": results, "column_names": column_names}
|
||||
else:
|
||||
row_count = cur.rowcount
|
||||
response_data = {
|
||||
"message": f"Query executed successfully, {row_count} rows affected."
|
||||
}
|
||||
|
||||
cur.close()
|
||||
return {
|
||||
"status_code": 200,
|
||||
"message": "SQL query executed successfully.",
|
||||
"response_data": response_data,
|
||||
}
|
||||
|
||||
except psycopg2.Error as e:
|
||||
error_message = f"Database error: {e}"
|
||||
logger.error("PostgreSQL execute_sql error: %s", e)
|
||||
return {
|
||||
"status_code": 500,
|
||||
"message": "Failed to execute SQL query.",
|
||||
"error": error_message,
|
||||
}
|
||||
finally:
|
||||
if conn:
|
||||
conn.close()
|
||||
|
||||
def _get_schema(self, db_name):
|
||||
"""
|
||||
Retrieves the schema of the PostgreSQL database using a connection string.
|
||||
"""
|
||||
conn = None
|
||||
try:
|
||||
conn = psycopg2.connect(self.connection_string)
|
||||
cur = conn.cursor()
|
||||
|
||||
cur.execute(
|
||||
"""
|
||||
SELECT
|
||||
table_name,
|
||||
column_name,
|
||||
data_type,
|
||||
column_default,
|
||||
is_nullable
|
||||
FROM
|
||||
information_schema.columns
|
||||
WHERE
|
||||
table_schema = 'public'
|
||||
ORDER BY
|
||||
table_name,
|
||||
ordinal_position;
|
||||
"""
|
||||
)
|
||||
|
||||
schema_data = {}
|
||||
for row in cur.fetchall():
|
||||
table_name, column_name, data_type, column_default, is_nullable = row
|
||||
if table_name not in schema_data:
|
||||
schema_data[table_name] = []
|
||||
schema_data[table_name].append(
|
||||
{
|
||||
"column_name": column_name,
|
||||
"data_type": data_type,
|
||||
"column_default": column_default,
|
||||
"is_nullable": is_nullable,
|
||||
}
|
||||
)
|
||||
|
||||
cur.close()
|
||||
return {
|
||||
"status_code": 200,
|
||||
"message": "Database schema retrieved successfully.",
|
||||
"schema": schema_data,
|
||||
}
|
||||
|
||||
except psycopg2.Error as e:
|
||||
error_message = f"Database error: {e}"
|
||||
logger.error("PostgreSQL get_schema error: %s", e)
|
||||
return {
|
||||
"status_code": 500,
|
||||
"message": "Failed to retrieve database schema.",
|
||||
"error": error_message,
|
||||
}
|
||||
finally:
|
||||
if conn:
|
||||
conn.close()
|
||||
|
||||
def get_actions_metadata(self):
|
||||
return [
|
||||
{
|
||||
"name": "postgres_execute_sql",
|
||||
"description": "Execute an SQL query against the PostgreSQL database and return the results. Use this tool to interact with the database, e.g., retrieve specific data or perform updates. Only SELECT queries will return data, other queries will return execution status.",
|
||||
"parameters": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"sql_query": {
|
||||
"type": "string",
|
||||
"description": "The SQL query to execute.",
|
||||
},
|
||||
},
|
||||
"required": ["sql_query"],
|
||||
"additionalProperties": False,
|
||||
},
|
||||
},
|
||||
{
|
||||
"name": "postgres_get_schema",
|
||||
"description": "Retrieve the schema of the PostgreSQL database, including tables and their columns. Use this to understand the database structure before executing queries. db_name is 'default' if not provided.",
|
||||
"parameters": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"db_name": {
|
||||
"type": "string",
|
||||
"description": "The name of the database to retrieve the schema for.",
|
||||
},
|
||||
},
|
||||
"required": ["db_name"],
|
||||
"additionalProperties": False,
|
||||
},
|
||||
},
|
||||
]
|
||||
|
||||
def get_config_requirements(self):
|
||||
return {
|
||||
"token": {
|
||||
"type": "string",
|
||||
"label": "Connection String",
|
||||
"description": "PostgreSQL database connection string",
|
||||
"required": True,
|
||||
"secret": True,
|
||||
"order": 1,
|
||||
},
|
||||
}
|
||||
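A sketch of calling the Postgres tool above; the connection string is a placeholder DSN and the query is deliberately trivial.

from application.agents.tools.postgres import PostgresTool

tool = PostgresTool({"token": "postgresql://user:pass@localhost:5432/appdb"})  # placeholder DSN

schema = tool.execute_action("postgres_get_schema", db_name="default")
result = tool.execute_action("postgres_execute_sql", sql_query="SELECT 1 AS ok;")
print(schema["status_code"], result.get("response_data"))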
84
application/agents/tools/read_webpage.py
Normal file
@@ -0,0 +1,84 @@
|
||||
import requests
|
||||
from markdownify import markdownify
|
||||
from application.agents.tools.base import Tool
|
||||
from application.core.url_validation import validate_url, SSRFError
|
||||
|
||||
class ReadWebpageTool(Tool):
|
||||
"""
|
||||
Read Webpage (browser)
|
||||
A tool to fetch the HTML content of a URL and convert it to Markdown.
|
||||
"""
|
||||
|
||||
def __init__(self, config=None):
|
||||
"""
|
||||
Initializes the tool.
|
||||
:param config: Optional configuration dictionary. Not used by this tool.
|
||||
"""
|
||||
self.config = config
|
||||
|
||||
def execute_action(self, action_name: str, **kwargs) -> str:
|
||||
"""
|
||||
Executes the specified action. For this tool, the only action is 'read_webpage'.
|
||||
|
||||
:param action_name: The name of the action to execute. Should be 'read_webpage'.
|
||||
:param kwargs: Keyword arguments, must include 'url'.
|
||||
:return: The Markdown content of the webpage or an error message.
|
||||
"""
|
||||
if action_name != "read_webpage":
|
||||
return f"Error: Unknown action '{action_name}'. This tool only supports 'read_webpage'."
|
||||
|
||||
url = kwargs.get("url")
|
||||
if not url:
|
||||
return "Error: URL parameter is missing."
|
||||
|
||||
# Validate URL to prevent SSRF attacks
|
||||
try:
|
||||
url = validate_url(url)
|
||||
except SSRFError as e:
|
||||
return f"Error: URL validation failed - {e}"
|
||||
|
||||
try:
|
||||
response = requests.get(url, timeout=10, headers={'User-Agent': 'DocsGPT-Agent/1.0'})
|
||||
response.raise_for_status() # Raise an exception for HTTP errors (4xx or 5xx)
|
||||
|
||||
html_content = response.text
|
||||
#soup = BeautifulSoup(html_content, 'html.parser')
|
||||
|
||||
|
||||
markdown_content = markdownify(html_content, heading_style="ATX", newline_style="BACKSLASH")
|
||||
|
||||
return markdown_content
|
||||
|
||||
except requests.exceptions.RequestException as e:
|
||||
return f"Error fetching URL {url}: {e}"
|
||||
except Exception as e:
|
||||
return f"Error processing URL {url}: {e}"
|
||||
|
||||
def get_actions_metadata(self):
|
||||
"""
|
||||
Returns metadata for the actions supported by this tool.
|
||||
"""
|
||||
return [
|
||||
{
|
||||
"name": "read_webpage",
|
||||
"description": "Fetches the HTML content of a given URL and returns it as clean Markdown text. Input must be a valid URL.",
|
||||
"parameters": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"url": {
|
||||
"type": "string",
|
||||
"description": "The fully qualified URL of the webpage to read (e.g., 'https://www.example.com').",
|
||||
}
|
||||
},
|
||||
"required": ["url"],
|
||||
"additionalProperties": False,
|
||||
},
|
||||
}
|
||||
]
|
||||
|
||||
def get_config_requirements(self):
|
||||
"""
|
||||
Returns a dictionary describing the configuration requirements for the tool.
|
||||
This tool does not require any specific configuration.
|
||||
"""
|
||||
return {}
|
||||
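A minimal sketch of the webpage reader above; example.com is just a stand-in URL.

from application.agents.tools.read_webpage import ReadWebpageTool

tool = ReadWebpageTool()
markdown = tool.execute_action("read_webpage", url="https://www.example.com")
print(markdown[:500])  # first 500 characters of the converted Markdown (or an error string)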
342
application/agents/tools/spec_parser.py
Normal file
@@ -0,0 +1,342 @@
|
||||
"""
|
||||
API Specification Parser
|
||||
|
||||
Parses OpenAPI 3.x and Swagger 2.0 specifications and converts them
|
||||
to API Tool action definitions for use in DocsGPT.
|
||||
"""
|
||||
|
||||
import json
|
||||
import logging
|
||||
import re
|
||||
from typing import Any, Dict, List, Optional, Tuple
|
||||
|
||||
import yaml
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
SUPPORTED_METHODS = frozenset(
|
||||
{"get", "post", "put", "delete", "patch", "head", "options"}
|
||||
)
|
||||
|
||||
|
||||
def parse_spec(spec_content: str) -> Tuple[Dict[str, Any], List[Dict[str, Any]]]:
|
||||
"""
|
||||
Parse an API specification and convert operations to action definitions.
|
||||
|
||||
Supports OpenAPI 3.x and Swagger 2.0 formats in JSON or YAML.
|
||||
|
||||
Args:
|
||||
spec_content: Raw specification content as string
|
||||
|
||||
Returns:
|
||||
Tuple of (metadata dict, list of action dicts)
|
||||
|
||||
Raises:
|
||||
ValueError: If the spec is invalid or uses an unsupported format
|
||||
"""
|
||||
spec = _load_spec(spec_content)
|
||||
_validate_spec(spec)
|
||||
|
||||
is_swagger = "swagger" in spec
|
||||
metadata = _extract_metadata(spec, is_swagger)
|
||||
actions = _extract_actions(spec, is_swagger)
|
||||
|
||||
return metadata, actions
|
||||
|
||||
|
||||
def _load_spec(content: str) -> Dict[str, Any]:
|
||||
"""Parse spec content from JSON or YAML string."""
|
||||
content = content.strip()
|
||||
if not content:
|
||||
raise ValueError("Empty specification content")
|
||||
try:
|
||||
if content.startswith("{"):
|
||||
return json.loads(content)
|
||||
return yaml.safe_load(content)
|
||||
except json.JSONDecodeError as e:
|
||||
raise ValueError(f"Invalid JSON format: {e.msg}")
|
||||
except yaml.YAMLError as e:
|
||||
raise ValueError(f"Invalid YAML format: {e}")
|
||||
|
||||
|
||||
def _validate_spec(spec: Dict[str, Any]) -> None:
|
||||
"""Validate spec version and required fields."""
|
||||
if not isinstance(spec, dict):
|
||||
raise ValueError("Specification must be a valid object")
|
||||
openapi_version = spec.get("openapi", "")
|
||||
swagger_version = spec.get("swagger", "")
|
||||
|
||||
if not (openapi_version.startswith("3.") or swagger_version == "2.0"):
|
||||
raise ValueError(
|
||||
"Unsupported specification version. Expected OpenAPI 3.x or Swagger 2.0"
|
||||
)
|
||||
if "paths" not in spec or not spec["paths"]:
|
||||
raise ValueError("No API paths defined in the specification")
|
||||
|
||||
|
||||
def _extract_metadata(spec: Dict[str, Any], is_swagger: bool) -> Dict[str, Any]:
|
||||
"""Extract API metadata from specification."""
|
||||
info = spec.get("info", {})
|
||||
base_url = _get_base_url(spec, is_swagger)
|
||||
|
||||
return {
|
||||
"title": info.get("title", "Untitled API"),
|
||||
"description": (info.get("description", "") or "")[:500],
|
||||
"version": info.get("version", ""),
|
||||
"base_url": base_url,
|
||||
}
|
||||
|
||||
|
||||
def _get_base_url(spec: Dict[str, Any], is_swagger: bool) -> str:
|
||||
"""Extract base URL from spec (handles both OpenAPI 3.x and Swagger 2.0)."""
|
||||
if is_swagger:
|
||||
schemes = spec.get("schemes", ["https"])
|
||||
host = spec.get("host", "")
|
||||
base_path = spec.get("basePath", "")
|
||||
if host:
|
||||
scheme = schemes[0] if schemes else "https"
|
||||
return f"{scheme}://{host}{base_path}".rstrip("/")
|
||||
return ""
|
||||
servers = spec.get("servers", [])
|
||||
if servers and isinstance(servers, list) and servers[0].get("url"):
|
||||
return servers[0]["url"].rstrip("/")
|
||||
return ""
|
||||
|
||||
|
||||
def _extract_actions(spec: Dict[str, Any], is_swagger: bool) -> List[Dict[str, Any]]:
|
||||
"""Extract all API operations as action definitions."""
|
||||
actions = []
|
||||
paths = spec.get("paths", {})
|
||||
base_url = _get_base_url(spec, is_swagger)
|
||||
|
||||
components = spec.get("components", {})
|
||||
definitions = spec.get("definitions", {})
|
||||
|
||||
for path, path_item in paths.items():
|
||||
if not isinstance(path_item, dict):
|
||||
continue
|
||||
path_params = path_item.get("parameters", [])
|
||||
|
||||
for method in SUPPORTED_METHODS:
|
||||
operation = path_item.get(method)
|
||||
if not isinstance(operation, dict):
|
||||
continue
|
||||
try:
|
||||
action = _build_action(
|
||||
path=path,
|
||||
method=method,
|
||||
operation=operation,
|
||||
path_params=path_params,
|
||||
base_url=base_url,
|
||||
components=components,
|
||||
definitions=definitions,
|
||||
is_swagger=is_swagger,
|
||||
)
|
||||
actions.append(action)
|
||||
except Exception as e:
|
||||
logger.warning(
|
||||
f"Failed to parse operation {method.upper()} {path}: {e}"
|
||||
)
|
||||
continue
|
||||
return actions
|
||||
|
||||
|
||||
def _build_action(
|
||||
path: str,
|
||||
method: str,
|
||||
operation: Dict[str, Any],
|
||||
path_params: List[Dict],
|
||||
base_url: str,
|
||||
components: Dict[str, Any],
|
||||
definitions: Dict[str, Any],
|
||||
is_swagger: bool,
|
||||
) -> Dict[str, Any]:
|
||||
"""Build a single action from an API operation."""
|
||||
action_name = _generate_action_name(operation, method, path)
|
||||
full_url = f"{base_url}{path}" if base_url else path
|
||||
|
||||
all_params = path_params + operation.get("parameters", [])
|
||||
query_params, headers = _categorize_parameters(all_params, components, definitions)
|
||||
|
||||
body, body_content_type = _extract_request_body(
|
||||
operation, components, definitions, is_swagger
|
||||
)
|
||||
|
||||
description = operation.get("summary", "") or operation.get("description", "")
|
||||
|
||||
return {
|
||||
"name": action_name,
|
||||
"url": full_url,
|
||||
"method": method.upper(),
|
||||
"description": (description or "")[:500],
|
||||
"query_params": {"type": "object", "properties": query_params},
|
||||
"headers": {"type": "object", "properties": headers},
|
||||
"body": {"type": "object", "properties": body},
|
||||
"body_content_type": body_content_type,
|
||||
"active": True,
|
||||
}
|
||||
|
||||
|
||||
def _generate_action_name(operation: Dict[str, Any], method: str, path: str) -> str:
|
||||
"""Generate a valid action name from operationId or method+path."""
|
||||
if operation.get("operationId"):
|
||||
name = operation["operationId"]
|
||||
else:
|
||||
path_slug = re.sub(r"[{}]", "", path)
|
||||
path_slug = re.sub(r"[^a-zA-Z0-9]", "_", path_slug)
|
||||
path_slug = re.sub(r"_+", "_", path_slug).strip("_")
|
||||
name = f"{method}_{path_slug}"
|
||||
name = re.sub(r"[^a-zA-Z0-9_-]", "_", name)
|
||||
return name[:64]
|
||||
|
||||
|
||||
def _categorize_parameters(
|
||||
parameters: List[Dict],
|
||||
components: Dict[str, Any],
|
||||
definitions: Dict[str, Any],
|
||||
) -> Tuple[Dict, Dict]:
|
||||
"""Categorize parameters into query params and headers."""
|
||||
query_params = {}
|
||||
headers = {}
|
||||
|
||||
for param in parameters:
|
||||
resolved = _resolve_ref(param, components, definitions)
|
||||
if not resolved or "name" not in resolved:
|
||||
continue
|
||||
location = resolved.get("in", "query")
|
||||
prop = _param_to_property(resolved)
|
||||
|
||||
if location in ("query", "path"):
|
||||
query_params[resolved["name"]] = prop
|
||||
elif location == "header":
|
||||
headers[resolved["name"]] = prop
|
||||
return query_params, headers
|
||||
|
||||
|
||||
def _param_to_property(param: Dict) -> Dict[str, Any]:
|
||||
"""Convert an API parameter to an action property definition."""
|
||||
schema = param.get("schema", {})
|
||||
param_type = schema.get("type", param.get("type", "string"))
|
||||
|
||||
mapped_type = "integer" if param_type in ("integer", "number") else "string"
|
||||
|
||||
return {
|
||||
"type": mapped_type,
|
||||
"description": (param.get("description", "") or "")[:200],
|
||||
"value": "",
|
||||
"filled_by_llm": param.get("required", False),
|
||||
"required": param.get("required", False),
|
||||
}
|
||||
|
||||
|
||||
def _extract_request_body(
|
||||
operation: Dict[str, Any],
|
||||
components: Dict[str, Any],
|
||||
definitions: Dict[str, Any],
|
||||
is_swagger: bool,
|
||||
) -> Tuple[Dict, str]:
|
||||
"""Extract request body schema and content type."""
|
||||
content_types = [
|
||||
"application/json",
|
||||
"application/x-www-form-urlencoded",
|
||||
"multipart/form-data",
|
||||
"text/plain",
|
||||
"application/xml",
|
||||
]
|
||||
|
||||
if is_swagger:
|
||||
consumes = operation.get("consumes", [])
|
||||
body_param = next(
|
||||
(p for p in operation.get("parameters", []) if p.get("in") == "body"), None
|
||||
)
|
||||
if not body_param:
|
||||
return {}, "application/json"
|
||||
selected_type = consumes[0] if consumes else "application/json"
|
||||
schema = body_param.get("schema", {})
|
||||
else:
|
||||
request_body = operation.get("requestBody", {})
|
||||
if not request_body:
|
||||
return {}, "application/json"
|
||||
request_body = _resolve_ref(request_body, components, definitions)
|
||||
content = request_body.get("content", {})
|
||||
|
||||
selected_type = "application/json"
|
||||
schema = {}
|
||||
|
||||
for ct in content_types:
|
||||
if ct in content:
|
||||
selected_type = ct
|
||||
schema = content[ct].get("schema", {})
|
||||
break
|
||||
if not schema and content:
|
||||
first_type = next(iter(content))
|
||||
selected_type = first_type
|
||||
schema = content[first_type].get("schema", {})
|
||||
properties = _schema_to_properties(schema, components, definitions)
|
||||
return properties, selected_type
|
||||
|
||||
|
||||
def _schema_to_properties(
|
||||
schema: Dict,
|
||||
components: Dict[str, Any],
|
||||
definitions: Dict[str, Any],
|
||||
depth: int = 0,
|
||||
) -> Dict[str, Any]:
|
||||
"""Convert schema to action body properties (limited depth to prevent recursion)."""
|
||||
if depth > 3:
|
||||
return {}
|
||||
schema = _resolve_ref(schema, components, definitions)
|
||||
if not schema or not isinstance(schema, dict):
|
||||
return {}
|
||||
properties = {}
|
||||
schema_type = schema.get("type", "object")
|
||||
|
||||
if schema_type == "object":
|
||||
required_fields = set(schema.get("required", []))
|
||||
for prop_name, prop_schema in schema.get("properties", {}).items():
|
||||
resolved = _resolve_ref(prop_schema, components, definitions)
|
||||
if not isinstance(resolved, dict):
|
||||
continue
|
||||
prop_type = resolved.get("type", "string")
|
||||
mapped_type = "integer" if prop_type in ("integer", "number") else "string"
|
||||
|
||||
properties[prop_name] = {
|
||||
"type": mapped_type,
|
||||
"description": (resolved.get("description", "") or "")[:200],
|
||||
"value": "",
|
||||
"filled_by_llm": prop_name in required_fields,
|
||||
"required": prop_name in required_fields,
|
||||
}
|
||||
return properties
|
||||
|
||||
|
||||
def _resolve_ref(
|
||||
obj: Any,
|
||||
components: Dict[str, Any],
|
||||
definitions: Dict[str, Any],
|
||||
) -> Optional[Dict]:
|
||||
"""Resolve $ref references in the specification."""
|
||||
if not isinstance(obj, dict):
|
||||
return None
|
||||
if "$ref" not in obj:
|
||||
return obj
|
||||
ref_path = obj["$ref"]
|
||||
|
||||
if ref_path.startswith("#/components/"):
|
||||
parts = ref_path.replace("#/components/", "").split("/")
|
||||
return _traverse_path(components, parts)
|
||||
elif ref_path.startswith("#/definitions/"):
|
||||
parts = ref_path.replace("#/definitions/", "").split("/")
|
||||
return _traverse_path(definitions, parts)
|
||||
logger.debug(f"Unsupported ref path: {ref_path}")
|
||||
return None
|
||||
|
||||
|
||||
def _traverse_path(obj: Dict, parts: List[str]) -> Optional[Dict]:
|
||||
"""Traverse a nested dictionary using path parts."""
|
||||
try:
|
||||
for part in parts:
|
||||
obj = obj[part]
|
||||
return obj if isinstance(obj, dict) else None
|
||||
except (KeyError, TypeError):
|
||||
return None
|
||||
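A small end-to-end sketch of the parser above, using a hand-written OpenAPI 3.0 snippet; the API title, server URL and operation are made up for illustration.

from application.agents.tools.spec_parser import parse_spec

SPEC = """
openapi: "3.0.0"
info:
  title: Demo API
  version: "1.0"
servers:
  - url: https://api.example.com/v1
paths:
  /items/{id}:
    get:
      operationId: getItem
      summary: Fetch a single item
      parameters:
        - name: id
          in: path
          required: true
          schema:
            type: string
"""

metadata, actions = parse_spec(SPEC)
print(metadata["base_url"])                                         # https://api.example.com/v1
print(actions[0]["name"], actions[0]["method"], actions[0]["url"])  # getItem GET .../items/{id}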
96
application/agents/tools/telegram.py
Normal file
@@ -0,0 +1,96 @@
|
||||
import logging
|
||||
|
||||
import requests
|
||||
|
||||
from application.agents.tools.base import Tool
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class TelegramTool(Tool):
|
||||
"""
|
||||
Telegram Bot
|
||||
A flexible Telegram tool for performing various actions (e.g., sending messages, images).
|
||||
Requires a bot token and chat ID for configuration.
|
||||
"""
|
||||
|
||||
def __init__(self, config):
|
||||
self.config = config
|
||||
self.token = config.get("token", "")
|
||||
|
||||
def execute_action(self, action_name, **kwargs):
|
||||
actions = {
|
||||
"telegram_send_message": self._send_message,
|
||||
"telegram_send_image": self._send_image,
|
||||
}
|
||||
if action_name not in actions:
|
||||
raise ValueError(f"Unknown action: {action_name}")
|
||||
return actions[action_name](**kwargs)
|
||||
|
||||
def _send_message(self, text, chat_id):
|
||||
logger.debug("Sending Telegram message to chat_id=%s", chat_id)
|
||||
url = f"https://api.telegram.org/bot{self.token}/sendMessage"
|
||||
payload = {"chat_id": chat_id, "text": text}
|
||||
response = requests.post(url, data=payload)
|
||||
return {"status_code": response.status_code, "message": "Message sent"}
|
||||
|
||||
def _send_image(self, image_url, chat_id):
|
||||
logger.debug("Sending Telegram image to chat_id=%s", chat_id)
|
||||
url = f"https://api.telegram.org/bot{self.token}/sendPhoto"
|
||||
payload = {"chat_id": chat_id, "photo": image_url}
|
||||
response = requests.post(url, data=payload)
|
||||
return {"status_code": response.status_code, "message": "Image sent"}
|
||||
|
||||
def get_actions_metadata(self):
|
||||
return [
|
||||
{
|
||||
"name": "telegram_send_message",
|
||||
"description": "Send a notification to Telegram chat",
|
||||
"parameters": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"text": {
|
||||
"type": "string",
|
||||
"description": "Text to send in the notification",
|
||||
},
|
||||
"chat_id": {
|
||||
"type": "string",
|
||||
"description": "Chat ID to send the notification to",
|
||||
},
|
||||
},
|
||||
"required": ["text"],
|
||||
"additionalProperties": False,
|
||||
},
|
||||
},
|
||||
{
|
||||
"name": "telegram_send_image",
|
||||
"description": "Send an image to the Telegram chat",
|
||||
"parameters": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"image_url": {
|
||||
"type": "string",
|
||||
"description": "URL of the image to send",
|
||||
},
|
||||
"chat_id": {
|
||||
"type": "string",
|
||||
"description": "Chat ID to send the image to",
|
||||
},
|
||||
},
|
||||
"required": ["image_url"],
|
||||
"additionalProperties": False,
|
||||
},
|
||||
},
|
||||
]
|
||||
|
||||
def get_config_requirements(self):
|
||||
return {
|
||||
"token": {
|
||||
"type": "string",
|
||||
"label": "Bot Token",
|
||||
"description": "Telegram bot token for authentication",
|
||||
"required": True,
|
||||
"secret": True,
|
||||
"order": 1,
|
||||
},
|
||||
}
|
||||
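A usage sketch for the tool above; the token and chat id below are placeholder values, not real credentials.

tool = TelegramTool({"token": "123456:ABC-DEF"})  # placeholder token
result = tool.execute_action(
    "telegram_send_message", text="Build finished", chat_id="-1001234567890"
)
# On success the Telegram API answers 200 and result is {"status_code": 200, "message": "Message sent"}.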
70
application/agents/tools/think.py
Normal file
@@ -0,0 +1,70 @@
|
||||
from application.agents.tools.base import Tool
|
||||
|
||||
|
||||
THINK_TOOL_ID = "think"
|
||||
|
||||
THINK_TOOL_ENTRY = {
|
||||
"name": "think",
|
||||
"actions": [
|
||||
{
|
||||
"name": "reason",
|
||||
"description": (
|
||||
"Use this tool to think through your reasoning step by step "
|
||||
"before deciding on your next action. Always reason before "
|
||||
"searching or answering."
|
||||
),
|
||||
"active": True,
|
||||
"parameters": {
|
||||
"properties": {
|
||||
"reasoning": {
|
||||
"type": "string",
|
||||
"description": "Your step-by-step reasoning and analysis",
|
||||
"filled_by_llm": True,
|
||||
"required": True,
|
||||
}
|
||||
}
|
||||
},
|
||||
}
|
||||
],
|
||||
}
|
||||
|
||||
|
||||
class ThinkTool(Tool):
|
||||
"""Pseudo-tool that captures chain-of-thought reasoning.
|
||||
|
||||
Returns a short acknowledgment so the LLM can continue.
|
||||
The reasoning content is captured in tool_call data for transparency.
|
||||
"""
|
||||
|
||||
internal = True
|
||||
|
||||
def __init__(self, config=None):
|
||||
pass
|
||||
|
||||
def execute_action(self, action_name: str, **kwargs):
|
||||
return "Continue."
|
||||
|
||||
def get_actions_metadata(self):
|
||||
return [
|
||||
{
|
||||
"name": "reason",
|
||||
"description": (
|
||||
"Use this tool to think through your reasoning step by step "
|
||||
"before deciding on your next action. Always reason before "
|
||||
"searching or answering."
|
||||
),
|
||||
"parameters": {
|
||||
"properties": {
|
||||
"reasoning": {
|
||||
"type": "string",
|
||||
"description": "Your step-by-step reasoning and analysis",
|
||||
"filled_by_llm": True,
|
||||
"required": True,
|
||||
}
|
||||
}
|
||||
},
|
||||
}
|
||||
]
|
||||
|
||||
def get_config_requirements(self):
|
||||
return {}
|
||||
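A quick sketch of the think tool's behaviour: whatever the model passes as reasoning, the return value is always the same acknowledgment.

tool = ThinkTool()
ack = tool.execute_action("reason", reasoning="Check the docs index before searching.")
# ack == "Continue."  (the reasoning text itself only surfaces in the tool_call data)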
333
application/agents/tools/todo_list.py
Normal file
@@ -0,0 +1,333 @@
|
||||
from datetime import datetime
|
||||
from typing import Any, Dict, List, Optional
|
||||
import uuid
|
||||
|
||||
from .base import Tool
|
||||
from application.core.mongo_db import MongoDB
|
||||
from application.core.settings import settings
|
||||
|
||||
|
||||
class TodoListTool(Tool):
|
||||
"""Todo List
|
||||
|
||||
Manages todo items for users. Supports creating, viewing, updating, and deleting todos.
|
||||
"""
|
||||
|
||||
def __init__(self, tool_config: Optional[Dict[str, Any]] = None, user_id: Optional[str] = None) -> None:
|
||||
"""Initialize the tool.
|
||||
|
||||
Args:
|
||||
tool_config: Optional tool configuration. Should include:
|
||||
- tool_id: Unique identifier for this todo list tool instance (from user_tools._id)
|
||||
This ensures each user's tool configuration has isolated todos
|
||||
user_id: The authenticated user's id (should come from decoded_token["sub"]).
|
||||
"""
|
||||
self.user_id: Optional[str] = user_id
|
||||
|
||||
# Get tool_id from configuration (passed from user_tools._id in production)
|
||||
# In production, tool_id is the MongoDB ObjectId string from user_tools collection
|
||||
if tool_config and "tool_id" in tool_config:
|
||||
self.tool_id = tool_config["tool_id"]
|
||||
elif user_id:
|
||||
# Fallback for backward compatibility or testing
|
||||
self.tool_id = f"default_{user_id}"
|
||||
else:
|
||||
# Last resort fallback (shouldn't happen in normal use)
|
||||
self.tool_id = str(uuid.uuid4())
|
||||
|
||||
db = MongoDB.get_client()[settings.MONGO_DB_NAME]
|
||||
self.collection = db["todos"]
|
||||
|
||||
self._last_artifact_id: Optional[str] = None
|
||||
|
||||
# -----------------------------
|
||||
# Action implementations
|
||||
# -----------------------------
|
||||
def execute_action(self, action_name: str, **kwargs: Any) -> str:
|
||||
"""Execute an action by name.
|
||||
|
||||
Args:
|
||||
action_name: One of list, create, get, update, complete, delete.
|
||||
**kwargs: Parameters for the action.
|
||||
|
||||
Returns:
|
||||
A human-readable string result.
|
||||
"""
|
||||
if not self.user_id:
|
||||
return "Error: TodoListTool requires a valid user_id."
|
||||
|
||||
self._last_artifact_id = None
|
||||
|
||||
if action_name == "list":
|
||||
return self._list()
|
||||
|
||||
if action_name == "create":
|
||||
return self._create(kwargs.get("title", ""))
|
||||
|
||||
if action_name == "get":
|
||||
return self._get(kwargs.get("todo_id"))
|
||||
|
||||
if action_name == "update":
|
||||
return self._update(
|
||||
kwargs.get("todo_id"),
|
||||
kwargs.get("title", "")
|
||||
)
|
||||
|
||||
if action_name == "complete":
|
||||
return self._complete(kwargs.get("todo_id"))
|
||||
|
||||
if action_name == "delete":
|
||||
return self._delete(kwargs.get("todo_id"))
|
||||
|
||||
return f"Unknown action: {action_name}"
|
||||
|
||||
def get_actions_metadata(self) -> List[Dict[str, Any]]:
|
||||
"""Return JSON metadata describing supported actions for tool schemas."""
|
||||
return [
|
||||
{
|
||||
"name": "list",
|
||||
"description": "List all todos for the user.",
|
||||
"parameters": {"type": "object", "properties": {}},
|
||||
},
|
||||
{
|
||||
"name": "create",
|
||||
"description": "Create a new todo item.",
|
||||
"parameters": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"title": {
|
||||
"type": "string",
|
||||
"description": "Title of the todo item."
|
||||
}
|
||||
},
|
||||
"required": ["title"],
|
||||
},
|
||||
},
|
||||
{
|
||||
"name": "get",
|
||||
"description": "Get a specific todo by ID.",
|
||||
"parameters": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"todo_id": {
|
||||
"type": "integer",
|
||||
"description": "The ID of the todo to retrieve."
|
||||
}
|
||||
},
|
||||
"required": ["todo_id"],
|
||||
},
|
||||
},
|
||||
{
|
||||
"name": "update",
|
||||
"description": "Update a todo's title by ID.",
|
||||
"parameters": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"todo_id": {
|
||||
"type": "integer",
|
||||
"description": "The ID of the todo to update."
|
||||
},
|
||||
"title": {
|
||||
"type": "string",
|
||||
"description": "The new title for the todo."
|
||||
}
|
||||
},
|
||||
"required": ["todo_id", "title"],
|
||||
},
|
||||
},
|
||||
{
|
||||
"name": "complete",
|
||||
"description": "Mark a todo as completed.",
|
||||
"parameters": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"todo_id": {
|
||||
"type": "integer",
|
||||
"description": "The ID of the todo to mark as completed."
|
||||
}
|
||||
},
|
||||
"required": ["todo_id"],
|
||||
},
|
||||
},
|
||||
{
|
||||
"name": "delete",
|
||||
"description": "Delete a specific todo by ID.",
|
||||
"parameters": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"todo_id": {
|
||||
"type": "integer",
|
||||
"description": "The ID of the todo to delete."
|
||||
}
|
||||
},
|
||||
"required": ["todo_id"],
|
||||
},
|
||||
},
|
||||
]
|
||||
|
||||
def get_config_requirements(self) -> Dict[str, Any]:
|
||||
"""Return configuration requirements."""
|
||||
return {}
|
||||
|
||||
def get_artifact_id(self, action_name: str, **kwargs: Any) -> Optional[str]:
|
||||
return self._last_artifact_id
|
||||
|
||||
# -----------------------------
|
||||
# Internal helpers
|
||||
# -----------------------------
|
||||
def _coerce_todo_id(self, value: Optional[Any]) -> Optional[int]:
|
||||
"""Convert todo identifiers to sequential integers."""
|
||||
if value is None:
|
||||
return None
|
||||
|
||||
if isinstance(value, int):
|
||||
return value if value > 0 else None
|
||||
|
||||
if isinstance(value, str):
|
||||
stripped = value.strip()
|
||||
if stripped.isdigit():
|
||||
numeric_value = int(stripped)
|
||||
return numeric_value if numeric_value > 0 else None
|
||||
|
||||
return None
|
||||
|
||||
def _get_next_todo_id(self) -> int:
|
||||
"""Get the next sequential todo_id for this user and tool.
|
||||
|
||||
Returns a simple integer (1, 2, 3, ...) scoped to this user/tool.
|
||||
Assumes small lists (at most 5-10 todos), so a full scan is negligible.
|
||||
"""
|
||||
query = {"user_id": self.user_id, "tool_id": self.tool_id}
|
||||
todos = list(self.collection.find(query, {"todo_id": 1}))
|
||||
|
||||
# Find the maximum todo_id
|
||||
max_id = 0
|
||||
for todo in todos:
|
||||
todo_id = self._coerce_todo_id(todo.get("todo_id"))
|
||||
if todo_id is not None:
|
||||
max_id = max(max_id, todo_id)
|
||||
|
||||
return max_id + 1
|
||||
|
||||
def _list(self) -> str:
|
||||
"""List all todos for the user."""
|
||||
query = {"user_id": self.user_id, "tool_id": self.tool_id}
|
||||
todos = list(self.collection.find(query))
|
||||
|
||||
if not todos:
|
||||
return "No todos found."
|
||||
|
||||
result_lines = ["Todos:"]
|
||||
for doc in todos:
|
||||
todo_id = doc.get("todo_id")
|
||||
title = doc.get("title", "Untitled")
|
||||
status = doc.get("status", "open")
|
||||
|
||||
line = f"[{todo_id}] {title} ({status})"
|
||||
result_lines.append(line)
|
||||
|
||||
return "\n".join(result_lines)
|
||||
|
||||
def _create(self, title: str) -> str:
|
||||
"""Create a new todo item."""
|
||||
title = (title or "").strip()
|
||||
if not title:
|
||||
return "Error: Title is required."
|
||||
|
||||
now = datetime.now()
|
||||
todo_id = self._get_next_todo_id()
|
||||
|
||||
doc = {
|
||||
"todo_id": todo_id,
|
||||
"user_id": self.user_id,
|
||||
"tool_id": self.tool_id,
|
||||
"title": title,
|
||||
"status": "open",
|
||||
"created_at": now,
|
||||
"updated_at": now,
|
||||
}
|
||||
insert_result = self.collection.insert_one(doc)
|
||||
inserted_id = getattr(insert_result, "inserted_id", None) or doc.get("_id")
|
||||
if inserted_id is not None:
|
||||
self._last_artifact_id = str(inserted_id)
|
||||
return f"Todo created with ID {todo_id}: {title}"
|
||||
|
||||
def _get(self, todo_id: Optional[Any]) -> str:
|
||||
"""Get a specific todo by ID."""
|
||||
parsed_todo_id = self._coerce_todo_id(todo_id)
|
||||
if parsed_todo_id is None:
|
||||
return "Error: todo_id must be a positive integer."
|
||||
|
||||
query = {"user_id": self.user_id, "tool_id": self.tool_id, "todo_id": parsed_todo_id}
|
||||
doc = self.collection.find_one(query)
|
||||
|
||||
if not doc:
|
||||
return f"Error: Todo with ID {parsed_todo_id} not found."
|
||||
|
||||
if doc.get("_id") is not None:
|
||||
self._last_artifact_id = str(doc.get("_id"))
|
||||
|
||||
title = doc.get("title", "Untitled")
|
||||
status = doc.get("status", "open")
|
||||
|
||||
result = f"Todo [{parsed_todo_id}]:\nTitle: {title}\nStatus: {status}"
|
||||
|
||||
return result
|
||||
|
||||
def _update(self, todo_id: Optional[Any], title: str) -> str:
|
||||
"""Update a todo's title by ID."""
|
||||
parsed_todo_id = self._coerce_todo_id(todo_id)
|
||||
if parsed_todo_id is None:
|
||||
return "Error: todo_id must be a positive integer."
|
||||
|
||||
title = (title or "").strip()
|
||||
if not title:
|
||||
return "Error: Title is required."
|
||||
|
||||
query = {"user_id": self.user_id, "tool_id": self.tool_id, "todo_id": parsed_todo_id}
|
||||
doc = self.collection.find_one_and_update(
|
||||
query,
|
||||
{"$set": {"title": title, "updated_at": datetime.now()}},
|
||||
)
|
||||
if not doc:
|
||||
return f"Error: Todo with ID {parsed_todo_id} not found."
|
||||
|
||||
if doc.get("_id") is not None:
|
||||
self._last_artifact_id = str(doc.get("_id"))
|
||||
|
||||
return f"Todo {parsed_todo_id} updated to: {title}"
|
||||
|
||||
def _complete(self, todo_id: Optional[Any]) -> str:
|
||||
"""Mark a todo as completed."""
|
||||
parsed_todo_id = self._coerce_todo_id(todo_id)
|
||||
if parsed_todo_id is None:
|
||||
return "Error: todo_id must be a positive integer."
|
||||
|
||||
query = {"user_id": self.user_id, "tool_id": self.tool_id, "todo_id": parsed_todo_id}
|
||||
doc = self.collection.find_one_and_update(
|
||||
query,
|
||||
{"$set": {"status": "completed", "updated_at": datetime.now()}},
|
||||
)
|
||||
if not doc:
|
||||
return f"Error: Todo with ID {parsed_todo_id} not found."
|
||||
|
||||
if doc.get("_id") is not None:
|
||||
self._last_artifact_id = str(doc.get("_id"))
|
||||
|
||||
return f"Todo {parsed_todo_id} marked as completed."
|
||||
|
||||
def _delete(self, todo_id: Optional[Any]) -> str:
|
||||
"""Delete a specific todo by ID."""
|
||||
parsed_todo_id = self._coerce_todo_id(todo_id)
|
||||
if parsed_todo_id is None:
|
||||
return "Error: todo_id must be a positive integer."
|
||||
|
||||
query = {"user_id": self.user_id, "tool_id": self.tool_id, "todo_id": parsed_todo_id}
|
||||
doc = self.collection.find_one_and_delete(query)
|
||||
if not doc:
|
||||
return f"Error: Todo with ID {parsed_todo_id} not found."
|
||||
|
||||
if doc.get("_id") is not None:
|
||||
self._last_artifact_id = str(doc.get("_id"))
|
||||
|
||||
return f"Todo {parsed_todo_id} deleted."
|
||||
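A usage sketch assuming a reachable MongoDB configured via settings; the tool_id and user_id values are placeholders.

todos = TodoListTool(tool_config={"tool_id": "demo-tool"}, user_id="user-123")
print(todos.execute_action("create", title="Write release notes"))  # Todo created with ID 1: ...
print(todos.execute_action("complete", todo_id=1))                  # Todo 1 marked as completed.
print(todos.execute_action("list"))                                 # [1] Write release notes (completed)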
86
application/agents/tools/tool_action_parser.py
Normal file
@@ -0,0 +1,86 @@
|
||||
import json
|
||||
import logging
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class ToolActionParser:
|
||||
def __init__(self, llm_type, name_mapping=None):
|
||||
self.llm_type = llm_type
|
||||
self.name_mapping = name_mapping
|
||||
self.parsers = {
|
||||
"OpenAILLM": self._parse_openai_llm,
|
||||
"GoogleLLM": self._parse_google_llm,
|
||||
}
|
||||
|
||||
def parse_args(self, call):
|
||||
parser = self.parsers.get(self.llm_type, self._parse_openai_llm)
|
||||
return parser(call)
|
||||
|
||||
def _resolve_via_mapping(self, call_name):
|
||||
"""Look up (tool_id, action_name) from the name mapping if available."""
|
||||
if self.name_mapping and call_name in self.name_mapping:
|
||||
return self.name_mapping[call_name]
|
||||
return None
|
||||
|
||||
def _parse_openai_llm(self, call):
|
||||
try:
|
||||
call_args = json.loads(call.arguments)
|
||||
|
||||
resolved = self._resolve_via_mapping(call.name)
|
||||
if resolved:
|
||||
return resolved[0], resolved[1], call_args
|
||||
|
||||
# Fallback: legacy split on "_" for backward compatibility
|
||||
tool_parts = call.name.split("_")
|
||||
|
||||
if len(tool_parts) < 2:
|
||||
logger.warning(
|
||||
f"Invalid tool name format: {call.name}. "
|
||||
"Could not resolve via mapping or legacy parsing."
|
||||
)
|
||||
return None, None, None
|
||||
|
||||
tool_id = tool_parts[-1]
|
||||
action_name = "_".join(tool_parts[:-1])
|
||||
|
||||
if not tool_id.isdigit():
|
||||
logger.warning(
|
||||
f"Tool ID '{tool_id}' is not numerical. This might be a hallucinated tool call."
|
||||
)
|
||||
|
||||
except (AttributeError, TypeError, json.JSONDecodeError) as e:
|
||||
logger.error(f"Error parsing OpenAI LLM call: {e}")
|
||||
return None, None, None
|
||||
return tool_id, action_name, call_args
|
||||
|
||||
def _parse_google_llm(self, call):
|
||||
try:
|
||||
call_args = call.arguments
|
||||
|
||||
resolved = self._resolve_via_mapping(call.name)
|
||||
if resolved:
|
||||
return resolved[0], resolved[1], call_args
|
||||
|
||||
# Fallback: legacy split on "_" for backward compatibility
|
||||
tool_parts = call.name.split("_")
|
||||
|
||||
if len(tool_parts) < 2:
|
||||
logger.warning(
|
||||
f"Invalid tool name format: {call.name}. "
|
||||
"Could not resolve via mapping or legacy parsing."
|
||||
)
|
||||
return None, None, None
|
||||
|
||||
tool_id = tool_parts[-1]
|
||||
action_name = "_".join(tool_parts[:-1])
|
||||
|
||||
if not tool_id.isdigit():
|
||||
logger.warning(
|
||||
f"Tool ID '{tool_id}' is not numerical. This might be a hallucinated tool call."
|
||||
)
|
||||
|
||||
except (AttributeError, TypeError) as e:
|
||||
logger.error(f"Error parsing Google LLM call: {e}")
|
||||
return None, None, None
|
||||
return tool_id, action_name, call_args
|
||||
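A sketch of the two resolution paths; the mapping key "telegram_send_message_12" and the ids are placeholders, and SimpleNamespace stands in for the LLM call object, which only needs name and arguments attributes.

from types import SimpleNamespace

parser = ToolActionParser(
    "OpenAILLM", name_mapping={"telegram_send_message_12": ("12", "telegram_send_message")}
)
call = SimpleNamespace(name="telegram_send_message_12", arguments='{"text": "hi"}')
parser.parse_args(call)  # -> ("12", "telegram_send_message", {"text": "hi"})
# Without a mapping entry, the legacy path splits on "_" and treats the last part as the tool id.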
49
application/agents/tools/tool_manager.py
Normal file
@@ -0,0 +1,49 @@
|
||||
import importlib
|
||||
import inspect
|
||||
import os
|
||||
import pkgutil
|
||||
|
||||
from application.agents.tools.base import Tool
|
||||
|
||||
|
||||
class ToolManager:
|
||||
def __init__(self, config):
|
||||
self.config = config
|
||||
self.tools = {}
|
||||
self.load_tools()
|
||||
|
||||
def load_tools(self):
|
||||
tools_dir = os.path.join(os.path.dirname(__file__))
|
||||
for finder, name, ispkg in pkgutil.iter_modules([tools_dir]):
|
||||
if name == "base" or name.startswith("__"):
|
||||
continue
|
||||
module = importlib.import_module(f"application.agents.tools.{name}")
|
||||
for member_name, obj in inspect.getmembers(module, inspect.isclass):
|
||||
if issubclass(obj, Tool) and obj is not Tool and not obj.internal:
|
||||
tool_config = self.config.get(name, {})
|
||||
self.tools[name] = obj(tool_config)
|
||||
|
||||
def load_tool(self, tool_name, tool_config, user_id=None):
|
||||
self.config[tool_name] = tool_config
|
||||
module = importlib.import_module(f"application.agents.tools.{tool_name}")
|
||||
for member_name, obj in inspect.getmembers(module, inspect.isclass):
|
||||
if issubclass(obj, Tool) and obj is not Tool:
|
||||
if tool_name in {"mcp_tool", "notes", "memory", "todo_list"} and user_id:
|
||||
return obj(tool_config, user_id)
|
||||
else:
|
||||
return obj(tool_config)
|
||||
|
||||
def execute_action(self, tool_name, action_name, user_id=None, **kwargs):
|
||||
if tool_name not in self.tools:
|
||||
raise ValueError(f"Tool '{tool_name}' not loaded")
|
||||
if tool_name in {"mcp_tool", "memory", "todo_list", "notes"} and user_id:
|
||||
tool_config = self.config.get(tool_name, {})
|
||||
tool = self.load_tool(tool_name, tool_config, user_id)
|
||||
return tool.execute_action(action_name, **kwargs)
|
||||
return self.tools[tool_name].execute_action(action_name, **kwargs)
|
||||
|
||||
def get_all_actions_metadata(self):
|
||||
metadata = []
|
||||
for tool in self.tools.values():
|
||||
metadata.extend(tool.get_actions_metadata())
|
||||
return metadata
|
||||
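A sketch of loading a single tool on demand; it assumes the bundled tool modules import cleanly and their backing services (e.g., MongoDB) are reachable, and the token value is a placeholder.

manager = ToolManager(config={})
telegram = manager.load_tool("telegram", {"token": "123456:ABC-DEF"})
print(telegram.get_actions_metadata()[0]["name"])  # telegram_send_message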
231
application/agents/workflow_agent.py
Normal file
@@ -0,0 +1,231 @@
|
||||
import logging
|
||||
from datetime import datetime, timezone
|
||||
from typing import Any, Dict, Generator, Optional
|
||||
|
||||
from application.agents.base import BaseAgent
|
||||
from application.agents.workflows.schemas import (
|
||||
ExecutionStatus,
|
||||
Workflow,
|
||||
WorkflowEdge,
|
||||
WorkflowGraph,
|
||||
WorkflowNode,
|
||||
WorkflowRun,
|
||||
)
|
||||
from application.agents.workflows.workflow_engine import WorkflowEngine
|
||||
from application.core.mongo_db import MongoDB
|
||||
from application.core.settings import settings
|
||||
from application.logging import log_activity, LogContext
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class WorkflowAgent(BaseAgent):
|
||||
"""A specialized agent that executes predefined workflows."""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
*args,
|
||||
workflow_id: Optional[str] = None,
|
||||
workflow: Optional[Dict[str, Any]] = None,
|
||||
workflow_owner: Optional[str] = None,
|
||||
**kwargs,
|
||||
):
|
||||
super().__init__(*args, **kwargs)
|
||||
self.workflow_id = workflow_id
|
||||
self.workflow_owner = workflow_owner
|
||||
self._workflow_data = workflow
|
||||
self._engine: Optional[WorkflowEngine] = None
|
||||
|
||||
@log_activity()
|
||||
def gen(
|
||||
self, query: str, log_context: LogContext = None
|
||||
) -> Generator[Dict[str, str], None, None]:
|
||||
yield from self._gen_inner(query, log_context)
|
||||
|
||||
def _gen_inner(
|
||||
self, query: str, log_context: LogContext
|
||||
) -> Generator[Dict[str, str], None, None]:
|
||||
graph = self._load_workflow_graph()
|
||||
if not graph:
|
||||
yield {"type": "error", "error": "Failed to load workflow configuration."}
|
||||
return
|
||||
self._engine = WorkflowEngine(graph, self)
|
||||
yield from self._engine.execute({}, query)
|
||||
self._save_workflow_run(query)
|
||||
|
||||
def _load_workflow_graph(self) -> Optional[WorkflowGraph]:
|
||||
if self._workflow_data:
|
||||
return self._parse_embedded_workflow()
|
||||
if self.workflow_id:
|
||||
return self._load_from_database()
|
||||
return None
|
||||
|
||||
def _parse_embedded_workflow(self) -> Optional[WorkflowGraph]:
|
||||
try:
|
||||
nodes_data = self._workflow_data.get("nodes", [])
|
||||
edges_data = self._workflow_data.get("edges", [])
|
||||
|
||||
workflow = Workflow(
|
||||
name=self._workflow_data.get("name", "Embedded Workflow"),
|
||||
description=self._workflow_data.get("description"),
|
||||
)
|
||||
|
||||
nodes = []
|
||||
for n in nodes_data:
|
||||
node_config = n.get("data", {})
|
||||
nodes.append(
|
||||
WorkflowNode(
|
||||
id=n["id"],
|
||||
workflow_id=self.workflow_id or "embedded",
|
||||
type=n["type"],
|
||||
title=n.get("title", "Node"),
|
||||
description=n.get("description"),
|
||||
position=n.get("position", {"x": 0, "y": 0}),
|
||||
config=node_config,
|
||||
)
|
||||
)
|
||||
edges = []
|
||||
for e in edges_data:
|
||||
edges.append(
|
||||
WorkflowEdge(
|
||||
id=e["id"],
|
||||
workflow_id=self.workflow_id or "embedded",
|
||||
source=e.get("source") or e.get("source_id"),
|
||||
target=e.get("target") or e.get("target_id"),
|
||||
sourceHandle=e.get("sourceHandle") or e.get("source_handle"),
|
||||
targetHandle=e.get("targetHandle") or e.get("target_handle"),
|
||||
)
|
||||
)
|
||||
return WorkflowGraph(workflow=workflow, nodes=nodes, edges=edges)
|
||||
except Exception as e:
|
||||
logger.error(f"Invalid embedded workflow: {e}")
|
||||
return None
|
||||
|
||||
def _load_from_database(self) -> Optional[WorkflowGraph]:
|
||||
try:
|
||||
from bson.objectid import ObjectId
|
||||
|
||||
if not self.workflow_id or not ObjectId.is_valid(self.workflow_id):
|
||||
logger.error(f"Invalid workflow ID: {self.workflow_id}")
|
||||
return None
|
||||
owner_id = self.workflow_owner
|
||||
if not owner_id and isinstance(self.decoded_token, dict):
|
||||
owner_id = self.decoded_token.get("sub")
|
||||
if not owner_id:
|
||||
logger.error(
|
||||
f"Workflow owner not available for workflow load: {self.workflow_id}"
|
||||
)
|
||||
return None
|
||||
|
||||
mongo = MongoDB.get_client()
|
||||
db = mongo[settings.MONGO_DB_NAME]
|
||||
|
||||
workflows_coll = db["workflows"]
|
||||
workflow_nodes_coll = db["workflow_nodes"]
|
||||
workflow_edges_coll = db["workflow_edges"]
|
||||
|
||||
workflow_doc = workflows_coll.find_one(
|
||||
{"_id": ObjectId(self.workflow_id), "user": owner_id}
|
||||
)
|
||||
if not workflow_doc:
|
||||
logger.error(
|
||||
f"Workflow {self.workflow_id} not found or inaccessible for user {owner_id}"
|
||||
)
|
||||
return None
|
||||
workflow = Workflow(**workflow_doc)
|
||||
graph_version = workflow_doc.get("current_graph_version", 1)
|
||||
try:
|
||||
graph_version = int(graph_version)
|
||||
if graph_version <= 0:
|
||||
graph_version = 1
|
||||
except (ValueError, TypeError):
|
||||
graph_version = 1
|
||||
|
||||
nodes_docs = list(
|
||||
workflow_nodes_coll.find(
|
||||
{"workflow_id": self.workflow_id, "graph_version": graph_version}
|
||||
)
|
||||
)
|
||||
if not nodes_docs and graph_version == 1:
|
||||
nodes_docs = list(
|
||||
workflow_nodes_coll.find(
|
||||
{
|
||||
"workflow_id": self.workflow_id,
|
||||
"graph_version": {"$exists": False},
|
||||
}
|
||||
)
|
||||
)
|
||||
nodes = [WorkflowNode(**doc) for doc in nodes_docs]
|
||||
|
||||
edges_docs = list(
|
||||
workflow_edges_coll.find(
|
||||
{"workflow_id": self.workflow_id, "graph_version": graph_version}
|
||||
)
|
||||
)
|
||||
if not edges_docs and graph_version == 1:
|
||||
edges_docs = list(
|
||||
workflow_edges_coll.find(
|
||||
{
|
||||
"workflow_id": self.workflow_id,
|
||||
"graph_version": {"$exists": False},
|
||||
}
|
||||
)
|
||||
)
|
||||
edges = [WorkflowEdge(**doc) for doc in edges_docs]
|
||||
|
||||
return WorkflowGraph(workflow=workflow, nodes=nodes, edges=edges)
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to load workflow from database: {e}")
|
||||
return None
|
||||
|
||||
def _save_workflow_run(self, query: str) -> None:
|
||||
if not self._engine:
|
||||
return
|
||||
try:
|
||||
mongo = MongoDB.get_client()
|
||||
db = mongo[settings.MONGO_DB_NAME]
|
||||
workflow_runs_coll = db["workflow_runs"]
|
||||
|
||||
run = WorkflowRun(
|
||||
workflow_id=self.workflow_id or "unknown",
|
||||
status=self._determine_run_status(),
|
||||
inputs={"query": query},
|
||||
outputs=self._serialize_state(self._engine.state),
|
||||
steps=self._engine.get_execution_summary(),
|
||||
created_at=datetime.now(timezone.utc),
|
||||
completed_at=datetime.now(timezone.utc),
|
||||
)
|
||||
|
||||
workflow_runs_coll.insert_one(run.to_mongo_doc())
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to save workflow run: {e}")
|
||||
|
||||
def _determine_run_status(self) -> ExecutionStatus:
|
||||
if not self._engine or not self._engine.execution_log:
|
||||
return ExecutionStatus.COMPLETED
|
||||
for log in self._engine.execution_log:
|
||||
if log.get("status") == ExecutionStatus.FAILED.value:
|
||||
return ExecutionStatus.FAILED
|
||||
return ExecutionStatus.COMPLETED
|
||||
|
||||
def _serialize_state(self, state: Dict[str, Any]) -> Dict[str, Any]:
|
||||
serialized: Dict[str, Any] = {}
|
||||
for key, value in state.items():
|
||||
serialized[key] = self._serialize_state_value(value)
|
||||
return serialized
|
||||
|
||||
def _serialize_state_value(self, value: Any) -> Any:
|
||||
if isinstance(value, dict):
|
||||
return {
|
||||
str(dict_key): self._serialize_state_value(dict_value)
|
||||
for dict_key, dict_value in value.items()
|
||||
}
|
||||
if isinstance(value, list):
|
||||
return [self._serialize_state_value(item) for item in value]
|
||||
if isinstance(value, tuple):
|
||||
return [self._serialize_state_value(item) for item in value]
|
||||
if isinstance(value, datetime):
|
||||
return value.isoformat()
|
||||
if isinstance(value, (str, int, float, bool, type(None))):
|
||||
return value
|
||||
return str(value)
|
||||
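The embedded-workflow path expects plain dicts in the shape sketched below (ids, titles, and the output text are illustrative only); each node's "data" becomes the node config, and edges may use either source/target or source_id/target_id keys.

embedded_workflow = {
    "name": "Two-step demo",
    "nodes": [
        {"id": "n1", "type": "start", "title": "Start"},
        {"id": "n2", "type": "end", "title": "End", "data": {"output_template": "Workflow finished."}},
    ],
    "edges": [{"id": "e1", "source": "n1", "target": "n2"}],
}
# Passed as WorkflowAgent(..., workflow=embedded_workflow), _parse_embedded_workflow turns this
# into a WorkflowGraph without touching the database.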
64
application/agents/workflows/cel_evaluator.py
Normal file
@@ -0,0 +1,64 @@
|
||||
from typing import Any, Dict
|
||||
|
||||
import celpy
|
||||
import celpy.celtypes
|
||||
|
||||
|
||||
class CelEvaluationError(Exception):
|
||||
pass
|
||||
|
||||
|
||||
def _convert_value(value: Any) -> Any:
|
||||
if isinstance(value, bool):
|
||||
return celpy.celtypes.BoolType(value)
|
||||
if isinstance(value, int):
|
||||
return celpy.celtypes.IntType(value)
|
||||
if isinstance(value, float):
|
||||
return celpy.celtypes.DoubleType(value)
|
||||
if isinstance(value, str):
|
||||
return celpy.celtypes.StringType(value)
|
||||
if isinstance(value, list):
|
||||
return celpy.celtypes.ListType([_convert_value(item) for item in value])
|
||||
if isinstance(value, dict):
|
||||
return celpy.celtypes.MapType(
|
||||
{celpy.celtypes.StringType(k): _convert_value(v) for k, v in value.items()}
|
||||
)
|
||||
if value is None:
|
||||
return celpy.celtypes.BoolType(False)
|
||||
return celpy.celtypes.StringType(str(value))
|
||||
|
||||
|
||||
def build_activation(state: Dict[str, Any]) -> Dict[str, Any]:
|
||||
return {k: _convert_value(v) for k, v in state.items()}
|
||||
|
||||
|
||||
def evaluate_cel(expression: str, state: Dict[str, Any]) -> Any:
|
||||
if not expression or not expression.strip():
|
||||
raise CelEvaluationError("Empty expression")
|
||||
try:
|
||||
env = celpy.Environment()
|
||||
ast = env.compile(expression)
|
||||
program = env.program(ast)
|
||||
activation = build_activation(state)
|
||||
result = program.evaluate(activation)
|
||||
except celpy.CELEvalError as exc:
|
||||
raise CelEvaluationError(f"CEL evaluation error: {exc}") from exc
|
||||
except Exception as exc:
|
||||
raise CelEvaluationError(f"CEL error: {exc}") from exc
|
||||
return cel_to_python(result)
|
||||
|
||||
|
||||
def cel_to_python(value: Any) -> Any:
|
||||
if isinstance(value, celpy.celtypes.BoolType):
|
||||
return bool(value)
|
||||
if isinstance(value, celpy.celtypes.IntType):
|
||||
return int(value)
|
||||
if isinstance(value, celpy.celtypes.DoubleType):
|
||||
return float(value)
|
||||
if isinstance(value, celpy.celtypes.StringType):
|
||||
return str(value)
|
||||
if isinstance(value, celpy.celtypes.ListType):
|
||||
return [cel_to_python(item) for item in value]
|
||||
if isinstance(value, celpy.celtypes.MapType):
|
||||
return {str(k): cel_to_python(v) for k, v in value.items()}
|
||||
return value
|
||||
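A small sketch of the evaluator above, assuming cel-python accepts standard CEL syntax for these expressions.

state = {"score": 7, "category": "docs"}
evaluate_cel("score > 5 && category == 'docs'", state)  # -> True
evaluate_cel("score * 2", state)                        # -> 14
evaluate_cel("", state)                                 # raises CelEvaluationError("Empty expression")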
104
application/agents/workflows/node_agent.py
Normal file
@@ -0,0 +1,104 @@
|
||||
"""Workflow Node Agents - defines specialized agents for workflow nodes."""
|
||||
|
||||
from typing import Any, Dict, List, Optional, Type
|
||||
|
||||
from application.agents.agentic_agent import AgenticAgent
|
||||
from application.agents.base import BaseAgent
|
||||
from application.agents.classic_agent import ClassicAgent
|
||||
from application.agents.research_agent import ResearchAgent
|
||||
from application.agents.workflows.schemas import AgentType
|
||||
|
||||
|
||||
class ToolFilterMixin:
|
||||
"""Mixin that filters fetched tools to only those specified in tool_ids."""
|
||||
|
||||
_allowed_tool_ids: List[str]
|
||||
|
||||
def _get_user_tools(self, user: str = "local") -> Dict[str, Dict[str, Any]]:
|
||||
all_tools = super()._get_user_tools(user)
|
||||
if not self._allowed_tool_ids:
|
||||
return {}
|
||||
filtered_tools = {
|
||||
tool_id: tool
|
||||
for tool_id, tool in all_tools.items()
|
||||
if str(tool.get("_id", "")) in self._allowed_tool_ids
|
||||
}
|
||||
return filtered_tools
|
||||
|
||||
def _get_tools(self, api_key: str = None) -> Dict[str, Dict[str, Any]]:
|
||||
all_tools = super()._get_tools(api_key)
|
||||
if not self._allowed_tool_ids:
|
||||
return {}
|
||||
filtered_tools = {
|
||||
tool_id: tool
|
||||
for tool_id, tool in all_tools.items()
|
||||
if str(tool.get("_id", "")) in self._allowed_tool_ids
|
||||
}
|
||||
return filtered_tools
|
||||
|
||||
|
||||
class _WorkflowNodeMixin:
|
||||
"""Common __init__ for all workflow node agents."""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
endpoint: str,
|
||||
llm_name: str,
|
||||
model_id: str,
|
||||
api_key: str,
|
||||
tool_ids: Optional[List[str]] = None,
|
||||
**kwargs,
|
||||
):
|
||||
super().__init__(
|
||||
endpoint=endpoint,
|
||||
llm_name=llm_name,
|
||||
model_id=model_id,
|
||||
api_key=api_key,
|
||||
**kwargs,
|
||||
)
|
||||
self._allowed_tool_ids = tool_ids or []
|
||||
|
||||
|
||||
class WorkflowNodeClassicAgent(ToolFilterMixin, _WorkflowNodeMixin, ClassicAgent):
|
||||
pass
|
||||
|
||||
|
||||
class WorkflowNodeAgenticAgent(ToolFilterMixin, _WorkflowNodeMixin, AgenticAgent):
|
||||
pass
|
||||
|
||||
|
||||
class WorkflowNodeResearchAgent(ToolFilterMixin, _WorkflowNodeMixin, ResearchAgent):
|
||||
pass
|
||||
|
||||
|
||||
class WorkflowNodeAgentFactory:
|
||||
|
||||
_agents: Dict[AgentType, Type[BaseAgent]] = {
|
||||
AgentType.CLASSIC: WorkflowNodeClassicAgent,
|
||||
AgentType.REACT: WorkflowNodeClassicAgent, # backwards compat
|
||||
AgentType.AGENTIC: WorkflowNodeAgenticAgent,
|
||||
AgentType.RESEARCH: WorkflowNodeResearchAgent,
|
||||
}
|
||||
|
||||
@classmethod
|
||||
def create(
|
||||
cls,
|
||||
agent_type: AgentType,
|
||||
endpoint: str,
|
||||
llm_name: str,
|
||||
model_id: str,
|
||||
api_key: str,
|
||||
tool_ids: Optional[List[str]] = None,
|
||||
**kwargs,
|
||||
) -> BaseAgent:
|
||||
agent_class = cls._agents.get(agent_type)
|
||||
if not agent_class:
|
||||
raise ValueError(f"Unsupported agent type: {agent_type}")
|
||||
return agent_class(
|
||||
endpoint=endpoint,
|
||||
llm_name=llm_name,
|
||||
model_id=model_id,
|
||||
api_key=api_key,
|
||||
tool_ids=tool_ids,
|
||||
**kwargs,
|
||||
)
|
||||
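A sketch mirroring how the workflow engine (workflow_engine.py below) builds a node agent; every value here is a placeholder, and additional kwargs such as chat_history and decoded_token are forwarded in the same way by the engine.

agent = WorkflowNodeAgentFactory.create(
    agent_type=AgentType.CLASSIC,
    endpoint="https://example.invalid/v1",    # placeholder endpoint
    llm_name="openai",
    model_id="gpt-4o-mini",                   # placeholder model id
    api_key="placeholder-key",
    tool_ids=["66b1f2c3d4e5f6a7b8c9d0e1"],    # only these user tools stay visible to the node
    prompt="You are a helpful assistant.",
)
# An unsupported agent_type raises ValueError inside WorkflowNodeAgentFactory.create.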
237
application/agents/workflows/schemas.py
Normal file
@@ -0,0 +1,237 @@
|
||||
from datetime import datetime, timezone
|
||||
from enum import Enum
|
||||
from typing import Any, Dict, List, Literal, Optional, Union
|
||||
|
||||
from bson import ObjectId
|
||||
from pydantic import BaseModel, ConfigDict, Field, field_validator
|
||||
|
||||
|
||||
class NodeType(str, Enum):
|
||||
START = "start"
|
||||
END = "end"
|
||||
AGENT = "agent"
|
||||
NOTE = "note"
|
||||
STATE = "state"
|
||||
CONDITION = "condition"
|
||||
|
||||
|
||||
class AgentType(str, Enum):
|
||||
CLASSIC = "classic"
|
||||
REACT = "react"
|
||||
AGENTIC = "agentic"
|
||||
RESEARCH = "research"
|
||||
|
||||
|
||||
class ExecutionStatus(str, Enum):
|
||||
PENDING = "pending"
|
||||
RUNNING = "running"
|
||||
COMPLETED = "completed"
|
||||
FAILED = "failed"
|
||||
|
||||
|
||||
class Position(BaseModel):
|
||||
model_config = ConfigDict(extra="forbid")
|
||||
x: float = 0.0
|
||||
y: float = 0.0
|
||||
|
||||
|
||||
class AgentNodeConfig(BaseModel):
|
||||
model_config = ConfigDict(extra="allow")
|
||||
agent_type: AgentType = AgentType.CLASSIC
|
||||
llm_name: Optional[str] = None
|
||||
system_prompt: str = "You are a helpful assistant."
|
||||
prompt_template: str = ""
|
||||
output_variable: Optional[str] = None
|
||||
stream_to_user: bool = True
|
||||
tools: List[str] = Field(default_factory=list)
|
||||
sources: List[str] = Field(default_factory=list)
|
||||
chunks: str = "2"
|
||||
retriever: str = ""
|
||||
model_id: Optional[str] = None
|
||||
json_schema: Optional[Dict[str, Any]] = None
|
||||
|
||||
|
||||
class ConditionCase(BaseModel):
|
||||
model_config = ConfigDict(extra="forbid", populate_by_name=True)
|
||||
name: Optional[str] = None
|
||||
expression: str = ""
|
||||
source_handle: str = Field(..., alias="sourceHandle")
|
||||
|
||||
|
||||
class ConditionNodeConfig(BaseModel):
|
||||
model_config = ConfigDict(extra="allow")
|
||||
mode: Literal["simple", "advanced"] = "simple"
|
||||
cases: List[ConditionCase] = Field(default_factory=list)
|
||||
|
||||
|
||||
class StateOperation(BaseModel):
|
||||
model_config = ConfigDict(extra="forbid")
|
||||
expression: str = ""
|
||||
target_variable: str = ""
|
||||
|
||||
|
||||
class WorkflowEdgeCreate(BaseModel):
|
||||
model_config = ConfigDict(populate_by_name=True)
|
||||
id: str
|
||||
workflow_id: str
|
||||
source_id: str = Field(..., alias="source")
|
||||
target_id: str = Field(..., alias="target")
|
||||
source_handle: Optional[str] = Field(None, alias="sourceHandle")
|
||||
target_handle: Optional[str] = Field(None, alias="targetHandle")
|
||||
|
||||
|
||||
class WorkflowEdge(WorkflowEdgeCreate):
|
||||
mongo_id: Optional[str] = Field(None, alias="_id")
|
||||
|
||||
@field_validator("mongo_id", mode="before")
|
||||
@classmethod
|
||||
def convert_objectid(cls, v: Any) -> Optional[str]:
|
||||
if isinstance(v, ObjectId):
|
||||
return str(v)
|
||||
return v
|
||||
|
||||
def to_mongo_doc(self) -> Dict[str, Any]:
|
||||
return {
|
||||
"id": self.id,
|
||||
"workflow_id": self.workflow_id,
|
||||
"source_id": self.source_id,
|
||||
"target_id": self.target_id,
|
||||
"source_handle": self.source_handle,
|
||||
"target_handle": self.target_handle,
|
||||
}
|
||||
|
||||
|
||||
class WorkflowNodeCreate(BaseModel):
|
||||
model_config = ConfigDict(extra="allow")
|
||||
id: str
|
||||
workflow_id: str
|
||||
type: NodeType
|
||||
title: str = "Node"
|
||||
description: Optional[str] = None
|
||||
position: Position = Field(default_factory=Position)
|
||||
config: Dict[str, Any] = Field(default_factory=dict)
|
||||
|
||||
@field_validator("position", mode="before")
|
||||
@classmethod
|
||||
def parse_position(cls, v: Union[Dict[str, float], Position]) -> Position:
|
||||
if isinstance(v, dict):
|
||||
return Position(**v)
|
||||
return v
|
||||
|
||||
|
||||
class WorkflowNode(WorkflowNodeCreate):
|
||||
mongo_id: Optional[str] = Field(None, alias="_id")
|
||||
|
||||
@field_validator("mongo_id", mode="before")
|
||||
@classmethod
|
||||
def convert_objectid(cls, v: Any) -> Optional[str]:
|
||||
if isinstance(v, ObjectId):
|
||||
return str(v)
|
||||
return v
|
||||
|
||||
def to_mongo_doc(self) -> Dict[str, Any]:
|
||||
return {
|
||||
"id": self.id,
|
||||
"workflow_id": self.workflow_id,
|
||||
"type": self.type.value,
|
||||
"title": self.title,
|
||||
"description": self.description,
|
||||
"position": self.position.model_dump(),
|
||||
"config": self.config,
|
||||
}
|
||||
|
||||
|
||||
class WorkflowCreate(BaseModel):
|
||||
model_config = ConfigDict(extra="allow")
|
||||
name: str = "New Workflow"
|
||||
description: Optional[str] = None
|
||||
user: Optional[str] = None
|
||||
|
||||
|
||||
class Workflow(WorkflowCreate):
|
||||
id: Optional[str] = Field(None, alias="_id")
|
||||
created_at: datetime = Field(default_factory=lambda: datetime.now(timezone.utc))
|
||||
updated_at: datetime = Field(default_factory=lambda: datetime.now(timezone.utc))
|
||||
|
||||
@field_validator("id", mode="before")
|
||||
@classmethod
|
||||
def convert_objectid(cls, v: Any) -> Optional[str]:
|
||||
if isinstance(v, ObjectId):
|
||||
return str(v)
|
||||
return v
|
||||
|
||||
def to_mongo_doc(self) -> Dict[str, Any]:
|
||||
return {
|
||||
"name": self.name,
|
||||
"description": self.description,
|
||||
"user": self.user,
|
||||
"created_at": self.created_at,
|
||||
"updated_at": self.updated_at,
|
||||
}
|
||||
|
||||
|
||||
class WorkflowGraph(BaseModel):
|
||||
workflow: Workflow
|
||||
nodes: List[WorkflowNode] = Field(default_factory=list)
|
||||
edges: List[WorkflowEdge] = Field(default_factory=list)
|
||||
|
||||
def get_node_by_id(self, node_id: str) -> Optional[WorkflowNode]:
|
||||
for node in self.nodes:
|
||||
if node.id == node_id:
|
||||
return node
|
||||
return None
|
||||
|
||||
def get_start_node(self) -> Optional[WorkflowNode]:
|
||||
for node in self.nodes:
|
||||
if node.type == NodeType.START:
|
||||
return node
|
||||
return None
|
||||
|
||||
def get_outgoing_edges(self, node_id: str) -> List[WorkflowEdge]:
|
||||
return [edge for edge in self.edges if edge.source_id == node_id]
|
||||
|
||||
|
||||
class NodeExecutionLog(BaseModel):
|
||||
model_config = ConfigDict(extra="forbid")
|
||||
node_id: str
|
||||
node_type: str
|
||||
status: ExecutionStatus
|
||||
started_at: datetime
|
||||
completed_at: Optional[datetime] = None
|
||||
error: Optional[str] = None
|
||||
state_snapshot: Dict[str, Any] = Field(default_factory=dict)
|
||||
|
||||
|
||||
class WorkflowRunCreate(BaseModel):
|
||||
workflow_id: str
|
||||
inputs: Dict[str, str] = Field(default_factory=dict)
|
||||
|
||||
|
||||
class WorkflowRun(BaseModel):
|
||||
model_config = ConfigDict(extra="allow")
|
||||
id: Optional[str] = Field(None, alias="_id")
|
||||
workflow_id: str
|
||||
status: ExecutionStatus = ExecutionStatus.PENDING
|
||||
inputs: Dict[str, str] = Field(default_factory=dict)
|
||||
outputs: Dict[str, Any] = Field(default_factory=dict)
|
||||
steps: List[NodeExecutionLog] = Field(default_factory=list)
|
||||
created_at: datetime = Field(default_factory=lambda: datetime.now(timezone.utc))
|
||||
completed_at: Optional[datetime] = None
|
||||
|
||||
@field_validator("id", mode="before")
|
||||
@classmethod
|
||||
def convert_objectid(cls, v: Any) -> Optional[str]:
|
||||
if isinstance(v, ObjectId):
|
||||
return str(v)
|
||||
return v
|
||||
|
||||
def to_mongo_doc(self) -> Dict[str, Any]:
|
||||
return {
|
||||
"workflow_id": self.workflow_id,
|
||||
"status": self.status.value,
|
||||
"inputs": self.inputs,
|
||||
"outputs": self.outputs,
|
||||
"steps": [step.model_dump() for step in self.steps],
|
||||
"created_at": self.created_at,
|
||||
"completed_at": self.completed_at,
|
||||
}
|
||||
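A sketch of assembling a graph from the models above; the ids are placeholders. WorkflowEdge accepts either the aliased keys used by the frontend (source, target, sourceHandle, targetHandle) or the snake_case field names, thanks to populate_by_name.

wf = Workflow(name="Demo", user="local")
start = WorkflowNode(id="n1", workflow_id="demo", type=NodeType.START, title="Start")
end = WorkflowNode(id="n2", workflow_id="demo", type=NodeType.END, title="End")
edge = WorkflowEdge(id="e1", workflow_id="demo", source="n1", target="n2")
graph = WorkflowGraph(workflow=wf, nodes=[start, end], edges=[edge])
graph.get_start_node().id                    # "n1"
graph.get_outgoing_edges("n1")[0].target_id  # "n2"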
470
application/agents/workflows/workflow_engine.py
Normal file
@@ -0,0 +1,470 @@
|
||||
import json
|
||||
import logging
|
||||
from datetime import datetime, timezone
|
||||
from typing import Any, Dict, Generator, List, Optional, TYPE_CHECKING
|
||||
|
||||
from application.agents.workflows.cel_evaluator import CelEvaluationError, evaluate_cel
|
||||
from application.agents.workflows.node_agent import WorkflowNodeAgentFactory
|
||||
from application.agents.workflows.schemas import (
|
||||
AgentNodeConfig,
|
||||
AgentType,
|
||||
ConditionNodeConfig,
|
||||
ExecutionStatus,
|
||||
NodeExecutionLog,
|
||||
NodeType,
|
||||
WorkflowGraph,
|
||||
WorkflowNode,
|
||||
)
|
||||
from application.core.json_schema_utils import (
|
||||
JsonSchemaValidationError,
|
||||
normalize_json_schema_payload,
|
||||
)
|
||||
from application.error import sanitize_api_error
|
||||
from application.templates.namespaces import NamespaceManager
|
||||
from application.templates.template_engine import TemplateEngine, TemplateRenderError
|
||||
|
||||
try:
|
||||
import jsonschema
|
||||
except ImportError: # pragma: no cover - optional dependency in some deployments.
|
||||
jsonschema = None
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from application.agents.base import BaseAgent
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
StateValue = Any
|
||||
WorkflowState = Dict[str, StateValue]
|
||||
TEMPLATE_RESERVED_NAMESPACES = {"agent", "system", "source", "tools", "passthrough"}
|
||||
|
||||
|
||||
class WorkflowEngine:
|
||||
MAX_EXECUTION_STEPS = 50
|
||||
|
||||
def __init__(self, graph: WorkflowGraph, agent: "BaseAgent"):
|
||||
self.graph = graph
|
||||
self.agent = agent
|
||||
self.state: WorkflowState = {}
|
||||
self.execution_log: List[Dict[str, Any]] = []
|
||||
self._condition_result: Optional[str] = None
|
||||
self._template_engine = TemplateEngine()
|
||||
self._namespace_manager = NamespaceManager()
|
||||
|
||||
def execute(
|
||||
self, initial_inputs: WorkflowState, query: str
|
||||
) -> Generator[Dict[str, str], None, None]:
|
||||
self._initialize_state(initial_inputs, query)
|
||||
|
||||
start_node = self.graph.get_start_node()
|
||||
if not start_node:
|
||||
yield {"type": "error", "error": "No start node found in workflow."}
|
||||
return
|
||||
current_node_id: Optional[str] = start_node.id
|
||||
steps = 0
|
||||
|
||||
while current_node_id and steps < self.MAX_EXECUTION_STEPS:
|
||||
node = self.graph.get_node_by_id(current_node_id)
|
||||
if not node:
|
||||
yield {"type": "error", "error": f"Node {current_node_id} not found."}
|
||||
break
|
||||
log_entry = self._create_log_entry(node)
|
||||
|
||||
yield {
|
||||
"type": "workflow_step",
|
||||
"node_id": node.id,
|
||||
"node_type": node.type.value,
|
||||
"node_title": node.title,
|
||||
"status": "running",
|
||||
}
|
||||
|
||||
try:
|
||||
yield from self._execute_node(node)
|
||||
log_entry["status"] = ExecutionStatus.COMPLETED.value
|
||||
log_entry["completed_at"] = datetime.now(timezone.utc)
|
||||
|
||||
output_key = f"node_{node.id}_output"
|
||||
node_output = self.state.get(output_key)
|
||||
|
||||
yield {
|
||||
"type": "workflow_step",
|
||||
"node_id": node.id,
|
||||
"node_type": node.type.value,
|
||||
"node_title": node.title,
|
||||
"status": "completed",
|
||||
"state_snapshot": dict(self.state),
|
||||
"output": node_output,
|
||||
}
|
||||
except Exception as e:
|
||||
logger.error(f"Error executing node {node.id}: {e}", exc_info=True)
|
||||
log_entry["status"] = ExecutionStatus.FAILED.value
|
||||
log_entry["error"] = str(e)
|
||||
log_entry["completed_at"] = datetime.now(timezone.utc)
|
||||
log_entry["state_snapshot"] = dict(self.state)
|
||||
self.execution_log.append(log_entry)
|
||||
|
||||
user_friendly_error = sanitize_api_error(e)
|
||||
yield {
|
||||
"type": "workflow_step",
|
||||
"node_id": node.id,
|
||||
"node_type": node.type.value,
|
||||
"node_title": node.title,
|
||||
"status": "failed",
|
||||
"state_snapshot": dict(self.state),
|
||||
"error": user_friendly_error,
|
||||
}
|
||||
yield {"type": "error", "error": user_friendly_error}
|
||||
break
|
||||
log_entry["state_snapshot"] = dict(self.state)
|
||||
self.execution_log.append(log_entry)
|
||||
|
||||
if node.type == NodeType.END:
|
||||
break
|
||||
current_node_id = self._get_next_node_id(current_node_id)
|
||||
if current_node_id is None and node.type != NodeType.END:
|
||||
logger.warning(
|
||||
f"Branch ended at node '{node.title}' ({node.id}) without reaching an end node"
|
||||
)
|
||||
steps += 1
|
||||
if steps >= self.MAX_EXECUTION_STEPS:
|
||||
logger.warning(
|
||||
f"Workflow reached max steps limit ({self.MAX_EXECUTION_STEPS})"
|
||||
)
|
||||
|
||||
def _initialize_state(self, initial_inputs: WorkflowState, query: str) -> None:
|
||||
self.state.update(initial_inputs)
|
||||
self.state["query"] = query
|
||||
self.state["chat_history"] = str(self.agent.chat_history)
|
||||
|
||||
def _create_log_entry(self, node: WorkflowNode) -> Dict[str, Any]:
|
||||
return {
|
||||
"node_id": node.id,
|
||||
"node_type": node.type.value,
|
||||
"started_at": datetime.now(timezone.utc),
|
||||
"completed_at": None,
|
||||
"status": ExecutionStatus.RUNNING.value,
|
||||
"error": None,
|
||||
"state_snapshot": {},
|
||||
}
|
||||
|
||||
def _get_next_node_id(self, current_node_id: str) -> Optional[str]:
|
||||
node = self.graph.get_node_by_id(current_node_id)
|
||||
edges = self.graph.get_outgoing_edges(current_node_id)
|
||||
if not edges:
|
||||
return None
|
||||
|
||||
if node and node.type == NodeType.CONDITION and self._condition_result:
|
||||
target_handle = self._condition_result
|
||||
self._condition_result = None
|
||||
for edge in edges:
|
||||
if edge.source_handle == target_handle:
|
||||
return edge.target_id
|
||||
return None
|
||||
|
||||
return edges[0].target_id
|
||||
|
||||
def _execute_node(
|
||||
self, node: WorkflowNode
|
||||
) -> Generator[Dict[str, str], None, None]:
|
||||
logger.info(f"Executing node {node.id} ({node.type.value})")
|
||||
|
||||
node_handlers = {
|
||||
NodeType.START: self._execute_start_node,
|
||||
NodeType.NOTE: self._execute_note_node,
|
||||
NodeType.AGENT: self._execute_agent_node,
|
||||
NodeType.STATE: self._execute_state_node,
|
||||
NodeType.CONDITION: self._execute_condition_node,
|
||||
NodeType.END: self._execute_end_node,
|
||||
}
|
||||
|
||||
handler = node_handlers.get(node.type)
|
||||
if handler:
|
||||
yield from handler(node)
|
||||
|
||||
def _execute_start_node(
|
||||
self, node: WorkflowNode
|
||||
) -> Generator[Dict[str, str], None, None]:
|
||||
yield from ()
|
||||
|
||||
def _execute_note_node(
|
||||
self, node: WorkflowNode
|
||||
) -> Generator[Dict[str, str], None, None]:
|
||||
yield from ()
|
||||
|
||||
def _execute_agent_node(
|
||||
self, node: WorkflowNode
|
||||
) -> Generator[Dict[str, str], None, None]:
|
||||
from application.core.model_utils import (
|
||||
get_api_key_for_provider,
|
||||
get_model_capabilities,
|
||||
get_provider_from_model_id,
|
||||
)
|
||||
|
||||
node_config = AgentNodeConfig(**node.config.get("config", node.config))
|
||||
|
||||
if node_config.prompt_template:
|
||||
formatted_prompt = self._format_template(node_config.prompt_template)
|
||||
else:
|
||||
formatted_prompt = self.state.get("query", "")
|
||||
node_json_schema = self._normalize_node_json_schema(
|
||||
node_config.json_schema, node.title
|
||||
)
|
||||
node_model_id = node_config.model_id or self.agent.model_id
|
||||
node_llm_name = (
|
||||
node_config.llm_name
|
||||
or get_provider_from_model_id(node_model_id or "")
|
||||
or self.agent.llm_name
|
||||
)
|
||||
node_api_key = get_api_key_for_provider(node_llm_name) or self.agent.api_key
|
||||
|
||||
if node_json_schema and node_model_id:
|
||||
model_capabilities = get_model_capabilities(node_model_id)
|
||||
if model_capabilities and not model_capabilities.get(
|
||||
"supports_structured_output", False
|
||||
):
|
||||
raise ValueError(
|
||||
f'Model "{node_model_id}" does not support structured output for node "{node.title}"'
|
||||
)
|
||||
|
||||
factory_kwargs = {
|
||||
"agent_type": node_config.agent_type,
|
||||
"endpoint": self.agent.endpoint,
|
||||
"llm_name": node_llm_name,
|
||||
"model_id": node_model_id,
|
||||
"api_key": node_api_key,
|
||||
"tool_ids": node_config.tools,
|
||||
"prompt": node_config.system_prompt,
|
||||
"chat_history": self.agent.chat_history,
|
||||
"decoded_token": self.agent.decoded_token,
|
||||
"json_schema": node_json_schema,
|
||||
}
|
||||
|
||||
# Agentic/research agents need retriever_config for on-demand search
|
||||
if node_config.agent_type in (AgentType.AGENTIC, AgentType.RESEARCH):
|
||||
factory_kwargs["retriever_config"] = {
|
||||
"source": {"active_docs": node_config.sources} if node_config.sources else {},
|
||||
"retriever_name": node_config.retriever or "classic",
|
||||
"chunks": int(node_config.chunks) if node_config.chunks else 2,
|
||||
"model_id": node_model_id,
|
||||
"llm_name": node_llm_name,
|
||||
"api_key": node_api_key,
|
||||
"decoded_token": self.agent.decoded_token,
|
||||
}
|
||||
|
||||
node_agent = WorkflowNodeAgentFactory.create(**factory_kwargs)
|
||||
|
||||
full_response_parts: List[str] = []
|
||||
structured_response_parts: List[str] = []
|
||||
has_structured_response = False
|
||||
first_chunk = True
|
||||
for event in node_agent.gen(formatted_prompt):
|
||||
if "answer" in event:
|
||||
chunk = str(event["answer"])
|
||||
full_response_parts.append(chunk)
|
||||
if event.get("structured"):
|
||||
has_structured_response = True
|
||||
structured_response_parts.append(chunk)
|
||||
if node_config.stream_to_user:
|
||||
if first_chunk and hasattr(self, "_has_streamed"):
|
||||
yield {"answer": "\n\n"}
|
||||
first_chunk = False
|
||||
yield event
|
||||
|
||||
if node_config.stream_to_user:
|
||||
self._has_streamed = True
|
||||
|
||||
full_response = "".join(full_response_parts).strip()
|
||||
output_value: Any = full_response
|
||||
if has_structured_response:
|
||||
structured_response = "".join(structured_response_parts).strip()
|
||||
response_to_parse = structured_response or full_response
|
||||
parsed_success, parsed_structured = self._parse_structured_output(
|
||||
response_to_parse
|
||||
)
|
||||
output_value = parsed_structured if parsed_success else response_to_parse
|
||||
if node_json_schema:
|
||||
self._validate_structured_output(node_json_schema, output_value)
|
||||
elif node_json_schema:
|
||||
parsed_success, parsed_structured = self._parse_structured_output(
|
||||
full_response
|
||||
)
|
||||
if not parsed_success:
|
||||
raise ValueError(
|
||||
"Structured output was expected but response was not valid JSON"
|
||||
)
|
||||
output_value = parsed_structured
|
||||
self._validate_structured_output(node_json_schema, output_value)
|
||||
|
||||
default_output_key = f"node_{node.id}_output"
|
||||
self.state[default_output_key] = output_value
|
||||
|
||||
if node_config.output_variable:
|
||||
self.state[node_config.output_variable] = output_value
|
||||
|
||||
def _execute_state_node(
|
||||
self, node: WorkflowNode
|
||||
) -> Generator[Dict[str, str], None, None]:
|
||||
config = node.config.get("config", node.config)
|
||||
for op in config.get("operations", []):
|
||||
expression = op.get("expression", "")
|
||||
target_variable = op.get("target_variable", "")
|
||||
if expression and target_variable:
|
||||
self.state[target_variable] = evaluate_cel(expression, self.state)
|
||||
yield from ()
|
||||
|
||||
def _execute_condition_node(
|
||||
self, node: WorkflowNode
|
||||
) -> Generator[Dict[str, str], None, None]:
|
||||
config = ConditionNodeConfig(**node.config.get("config", node.config))
|
||||
matched_handle = None
|
||||
|
||||
for case in config.cases:
|
||||
if not case.expression.strip():
|
||||
continue
|
||||
try:
|
||||
if evaluate_cel(case.expression, self.state):
|
||||
matched_handle = case.source_handle
|
||||
break
|
||||
except CelEvaluationError:
|
||||
continue
|
||||
|
||||
self._condition_result = matched_handle or "else"
|
||||
yield from ()
|
||||
|
||||
def _execute_end_node(
|
||||
self, node: WorkflowNode
|
||||
) -> Generator[Dict[str, str], None, None]:
|
||||
config = node.config.get("config", node.config)
|
||||
output_template = str(config.get("output_template", ""))
|
||||
if output_template:
|
||||
formatted_output = self._format_template(output_template)
|
||||
yield {"answer": formatted_output}
|
||||
|
||||
def _parse_structured_output(self, raw_response: str) -> tuple[bool, Optional[Any]]:
|
||||
normalized_response = raw_response.strip()
|
||||
if not normalized_response:
|
||||
return False, None
|
||||
|
||||
try:
|
||||
return True, json.loads(normalized_response)
|
||||
except json.JSONDecodeError:
|
||||
logger.warning(
|
||||
"Workflow agent returned structured output that was not valid JSON"
|
||||
)
|
||||
return False, None
|
||||
|
||||
def _normalize_node_json_schema(
|
||||
self, schema: Optional[Dict[str, Any]], node_title: str
|
||||
) -> Optional[Dict[str, Any]]:
|
||||
if schema is None:
|
||||
return None
|
||||
try:
|
||||
return normalize_json_schema_payload(schema)
|
||||
except JsonSchemaValidationError as exc:
|
||||
raise ValueError(
|
||||
f'Invalid JSON schema for node "{node_title}": {exc}'
|
||||
) from exc
|
||||
|
||||
def _validate_structured_output(self, schema: Dict[str, Any], output_value: Any) -> None:
|
||||
if jsonschema is None:
|
||||
logger.warning(
|
||||
"jsonschema package is not available, skipping structured output validation"
|
||||
)
|
||||
return
|
||||
|
||||
try:
|
||||
normalized_schema = normalize_json_schema_payload(schema)
|
||||
except JsonSchemaValidationError as exc:
|
||||
raise ValueError(f"Invalid JSON schema: {exc}") from exc
|
||||
|
||||
try:
|
||||
jsonschema.validate(instance=output_value, schema=normalized_schema)
|
||||
except jsonschema.exceptions.ValidationError as exc:
|
||||
raise ValueError(f"Structured output did not match schema: {exc.message}") from exc
|
||||
except jsonschema.exceptions.SchemaError as exc:
|
||||
raise ValueError(f"Invalid JSON schema: {exc.message}") from exc
|
||||
|
||||
def _format_template(self, template: str) -> str:
|
||||
context = self._build_template_context()
|
||||
try:
|
||||
return self._template_engine.render(template, context)
|
||||
except TemplateRenderError as e:
|
||||
logger.warning(
|
||||
"Workflow template rendering failed, using raw template: %s", str(e)
|
||||
)
|
||||
return template
|
||||
|
||||
def _build_template_context(self) -> Dict[str, Any]:
|
||||
docs, docs_together = self._get_source_template_data()
|
||||
passthrough_data = (
|
||||
self.state.get("passthrough")
|
||||
if isinstance(self.state.get("passthrough"), dict)
|
||||
else None
|
||||
)
|
||||
tools_data = (
|
||||
self.state.get("tools") if isinstance(self.state.get("tools"), dict) else None
|
||||
)
|
||||
|
||||
context = self._namespace_manager.build_context(
|
||||
user_id=getattr(self.agent, "user", None),
|
||||
request_id=getattr(self.agent, "request_id", None),
|
||||
passthrough_data=passthrough_data,
|
||||
docs=docs,
|
||||
docs_together=docs_together,
|
||||
tools_data=tools_data,
|
||||
)
|
||||
|
||||
agent_context: Dict[str, Any] = {}
|
||||
for key, value in self.state.items():
|
||||
if not isinstance(key, str):
|
||||
continue
|
||||
normalized_key = key.strip()
|
||||
if not normalized_key:
|
||||
continue
|
||||
agent_context[normalized_key] = value
|
||||
|
||||
context["agent"] = agent_context
|
||||
|
||||
# Keep legacy top-level variables working while namespaced variables are adopted.
|
||||
for key, value in agent_context.items():
|
||||
if key in TEMPLATE_RESERVED_NAMESPACES:
|
||||
context[f"agent_{key}"] = value
|
||||
continue
|
||||
if key not in context:
|
||||
context[key] = value
|
||||
|
||||
return context
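# Illustration (not part of the module): a minimal sketch of the context shape this builder
# produces. The state values and the reserved-name set used below are assumptions.
state = {"step_1": "summary text", "docs": ["chunk"]}      # pretend workflow state
context = {"agent": dict(state)}
for key, value in state.items():
    if key in {"docs"}:                                    # stand-in for TEMPLATE_RESERVED_NAMESPACES
        context[f"agent_{key}"] = value                    # reserved name gets an agent_ prefix
    elif key not in context:
        context[key] = value                               # legacy top-level alias
assert context["step_1"] == "summary text" and context["agent_docs"] == ["chunk"]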
|
||||
|
||||
def _get_source_template_data(self) -> tuple[Optional[List[Dict[str, Any]]], Optional[str]]:
|
||||
docs = getattr(self.agent, "retrieved_docs", None)
|
||||
if not isinstance(docs, list) or len(docs) == 0:
|
||||
return None, None
|
||||
|
||||
docs_together_parts: List[str] = []
|
||||
for doc in docs:
|
||||
if not isinstance(doc, dict):
|
||||
continue
|
||||
text = doc.get("text")
|
||||
if not isinstance(text, str):
|
||||
continue
|
||||
|
||||
filename = doc.get("filename") or doc.get("title") or doc.get("source")
|
||||
if isinstance(filename, str) and filename.strip():
|
||||
docs_together_parts.append(f"{filename}\n{text}")
|
||||
else:
|
||||
docs_together_parts.append(text)
|
||||
|
||||
docs_together = "\n\n".join(docs_together_parts) if docs_together_parts else None
|
||||
return docs, docs_together
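# Illustration (not part of the module): the joined docs_together string this helper returns.
# The document dicts are made up.
example_docs = [
    {"filename": "install.md", "text": "Run docker compose up."},
    {"text": "Chunk without a filename."},
]
parts = [f"{d['filename']}\n{d['text']}" if d.get("filename") else d["text"] for d in example_docs]
docs_together_example = "\n\n".join(parts)
# -> "install.md\nRun docker compose up.\n\nChunk without a filename."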
|
||||
|
||||
def get_execution_summary(self) -> List[NodeExecutionLog]:
|
||||
return [
|
||||
NodeExecutionLog(
|
||||
node_id=log["node_id"],
|
||||
node_type=log["node_type"],
|
||||
status=ExecutionStatus(log["status"]),
|
||||
started_at=log["started_at"],
|
||||
completed_at=log.get("completed_at"),
|
||||
error=log.get("error"),
|
||||
state_snapshot=log.get("state_snapshot", {}),
|
||||
)
|
||||
for log in self.execution_log
|
||||
]
|
||||
@@ -0,0 +1,7 @@
from flask_restx import Api

api = Api(
    version="1.0",
    title="DocsGPT API",
    description="API for DocsGPT",
)

@@ -0,0 +1,21 @@
from flask import Blueprint

from application.api import api
from application.api.answer.routes.answer import AnswerResource
from application.api.answer.routes.base import answer_ns
from application.api.answer.routes.search import SearchResource
from application.api.answer.routes.stream import StreamResource


answer = Blueprint("answer", __name__)

api.add_namespace(answer_ns)


def init_answer_routes():
    api.add_resource(StreamResource, "/stream")
    api.add_resource(AnswerResource, "/api/answer")
    api.add_resource(SearchResource, "/api/search")


init_answer_routes()

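For orientation, a hedged sketch of how this blueprint and the shared flask_restx Api object might be attached to a Flask application. The blueprint's import path and the wiring shown here are assumptions, not the repository's actual setup.

from flask import Flask

from application.api import api
from application.api.answer.routes import answer  # import path assumed

app = Flask(__name__)
app.register_blueprint(answer)
api.init_app(app)  # flask_restx allows binding the Api to the app after creation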
@@ -1,374 +0,0 @@
|
||||
import asyncio
|
||||
import os
|
||||
from flask import Blueprint, request, Response
|
||||
import json
|
||||
import datetime
|
||||
import logging
|
||||
import traceback
|
||||
|
||||
from pymongo import MongoClient
|
||||
from bson.objectid import ObjectId
|
||||
from transformers import GPT2TokenizerFast
|
||||
|
||||
|
||||
|
||||
from application.core.settings import settings
|
||||
from application.vectorstore.vector_creator import VectorCreator
|
||||
from application.llm.llm_creator import LLMCreator
|
||||
from application.error import bad_request
|
||||
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
mongo = MongoClient(settings.MONGO_URI)
|
||||
db = mongo["docsgpt"]
|
||||
conversations_collection = db["conversations"]
|
||||
vectors_collection = db["vectors"]
|
||||
prompts_collection = db["prompts"]
|
||||
answer = Blueprint('answer', __name__)
|
||||
|
||||
if settings.LLM_NAME == "gpt4":
|
||||
gpt_model = 'gpt-4'
|
||||
elif settings.LLM_NAME == "anthropic":
|
||||
gpt_model = 'claude-2'
|
||||
else:
|
||||
gpt_model = 'gpt-3.5-turbo'
|
||||
|
||||
# load the prompts
|
||||
current_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
||||
with open(os.path.join(current_dir, "prompts", "chat_combine_default.txt"), "r") as f:
|
||||
chat_combine_template = f.read()
|
||||
|
||||
with open(os.path.join(current_dir, "prompts", "chat_reduce_prompt.txt"), "r") as f:
|
||||
chat_reduce_template = f.read()
|
||||
|
||||
with open(os.path.join(current_dir, "prompts", "chat_combine_creative.txt"), "r") as f:
|
||||
chat_combine_creative = f.read()
|
||||
|
||||
with open(os.path.join(current_dir, "prompts", "chat_combine_strict.txt"), "r") as f:
|
||||
chat_combine_strict = f.read()
|
||||
|
||||
api_key_set = settings.API_KEY is not None
|
||||
embeddings_key_set = settings.EMBEDDINGS_KEY is not None
|
||||
|
||||
|
||||
async def async_generate(chain, question, chat_history):
|
||||
result = await chain.arun({"question": question, "chat_history": chat_history})
|
||||
return result
|
||||
|
||||
|
||||
def count_tokens(string):
|
||||
tokenizer = GPT2TokenizerFast.from_pretrained('gpt2')
|
||||
return len(tokenizer(string)['input_ids'])
|
||||
|
||||
|
||||
def run_async_chain(chain, question, chat_history):
|
||||
loop = asyncio.new_event_loop()
|
||||
asyncio.set_event_loop(loop)
|
||||
result = {}
|
||||
try:
|
||||
answer = loop.run_until_complete(async_generate(chain, question, chat_history))
|
||||
finally:
|
||||
loop.close()
|
||||
result["answer"] = answer
|
||||
return result
|
||||
|
||||
|
||||
def get_vectorstore(data):
|
||||
if "active_docs" in data:
|
||||
if data["active_docs"].split("/")[0] == "default":
|
||||
vectorstore = ""
|
||||
elif data["active_docs"].split("/")[0] == "local":
|
||||
vectorstore = "indexes/" + data["active_docs"]
|
||||
else:
|
||||
vectorstore = "vectors/" + data["active_docs"]
|
||||
if data["active_docs"] == "default":
|
||||
vectorstore = ""
|
||||
else:
|
||||
vectorstore = ""
|
||||
vectorstore = os.path.join("application", vectorstore)
|
||||
return vectorstore
|
||||
|
||||
|
||||
def is_azure_configured():
|
||||
return settings.OPENAI_API_BASE and settings.OPENAI_API_VERSION and settings.AZURE_DEPLOYMENT_NAME
|
||||
|
||||
|
||||
def complete_stream(question, docsearch, chat_history, api_key, prompt_id, conversation_id):
|
||||
llm = LLMCreator.create_llm(settings.LLM_NAME, api_key=api_key)
|
||||
|
||||
if prompt_id == 'default':
|
||||
prompt = chat_combine_template
|
||||
elif prompt_id == 'creative':
|
||||
prompt = chat_combine_creative
|
||||
elif prompt_id == 'strict':
|
||||
prompt = chat_combine_strict
|
||||
else:
|
||||
prompt = prompts_collection.find_one({"_id": ObjectId(prompt_id)})["content"]
|
||||
|
||||
docs = docsearch.search(question, k=2)
|
||||
if settings.LLM_NAME == "llama.cpp":
|
||||
docs = [docs[0]]
|
||||
# join all page_content together with a newline
|
||||
docs_together = "\n".join([doc.page_content for doc in docs])
|
||||
p_chat_combine = prompt.replace("{summaries}", docs_together)
|
||||
messages_combine = [{"role": "system", "content": p_chat_combine}]
|
||||
source_log_docs = []
|
||||
for doc in docs:
|
||||
if doc.metadata:
|
||||
source_log_docs.append({"title": doc.metadata['title'].split('/')[-1], "text": doc.page_content})
|
||||
else:
|
||||
source_log_docs.append({"title": doc.page_content, "text": doc.page_content})
|
||||
|
||||
if len(chat_history) > 1:
|
||||
tokens_current_history = 0
|
||||
# count tokens in history
|
||||
chat_history.reverse()
|
||||
for i in chat_history:
|
||||
if "prompt" in i and "response" in i:
|
||||
tokens_batch = count_tokens(i["prompt"]) + count_tokens(i["response"])
|
||||
if tokens_current_history + tokens_batch < settings.TOKENS_MAX_HISTORY:
|
||||
tokens_current_history += tokens_batch
|
||||
messages_combine.append({"role": "user", "content": i["prompt"]})
|
||||
messages_combine.append({"role": "system", "content": i["response"]})
|
||||
messages_combine.append({"role": "user", "content": question})
|
||||
|
||||
response_full = ""
|
||||
completion = llm.gen_stream(model=gpt_model, engine=settings.AZURE_DEPLOYMENT_NAME,
|
||||
messages=messages_combine)
|
||||
for line in completion:
|
||||
data = json.dumps({"answer": str(line)})
|
||||
response_full += str(line)
|
||||
yield f"data: {data}\n\n"
|
||||
|
||||
# save conversation to database
|
||||
if conversation_id is not None:
|
||||
conversations_collection.update_one(
|
||||
{"_id": ObjectId(conversation_id)},
|
||||
{"$push": {"queries": {"prompt": question, "response": response_full, "sources": source_log_docs}}},
|
||||
)
|
||||
|
||||
else:
|
||||
# create new conversation
|
||||
# generate summary
|
||||
messages_summary = [{"role": "assistant", "content": "Summarise following conversation in no more than 3 "
|
||||
"words, respond ONLY with the summary, use the same "
|
||||
"language as the system \n\nUser: " + question + "\n\n" +
|
||||
"AI: " +
|
||||
response_full},
|
||||
{"role": "user", "content": "Summarise following conversation in no more than 3 words, "
|
||||
"respond ONLY with the summary, use the same language as the "
|
||||
"system"}]
|
||||
|
||||
completion = llm.gen(model=gpt_model, engine=settings.AZURE_DEPLOYMENT_NAME,
|
||||
messages=messages_summary, max_tokens=30)
|
||||
conversation_id = conversations_collection.insert_one(
|
||||
{"user": "local",
|
||||
"date": datetime.datetime.utcnow(),
|
||||
"name": completion,
|
||||
"queries": [{"prompt": question, "response": response_full, "sources": source_log_docs}]}
|
||||
).inserted_id
|
||||
|
||||
# send data.type = "end" to indicate that the stream has ended as json
|
||||
data = json.dumps({"type": "id", "id": str(conversation_id)})
|
||||
yield f"data: {data}\n\n"
|
||||
data = json.dumps({"type": "end"})
|
||||
yield f"data: {data}\n\n"
|
||||
|
||||
|
||||
@answer.route("/stream", methods=["POST"])
|
||||
def stream():
|
||||
data = request.get_json()
|
||||
# get parameter from url question
|
||||
question = data["question"]
|
||||
history = data["history"]
|
||||
# history to json object from string
|
||||
history = json.loads(history)
|
||||
conversation_id = data["conversation_id"]
|
||||
if 'prompt_id' in data:
|
||||
prompt_id = data["prompt_id"]
|
||||
else:
|
||||
prompt_id = 'default'
|
||||
|
||||
# check if active_docs is set
|
||||
|
||||
if not api_key_set:
|
||||
api_key = data["api_key"]
|
||||
else:
|
||||
api_key = settings.API_KEY
|
||||
if not embeddings_key_set:
|
||||
embeddings_key = data["embeddings_key"]
|
||||
else:
|
||||
embeddings_key = settings.EMBEDDINGS_KEY
|
||||
if "active_docs" in data:
|
||||
vectorstore = get_vectorstore({"active_docs": data["active_docs"]})
|
||||
else:
|
||||
vectorstore = ""
|
||||
docsearch = VectorCreator.create_vectorstore(settings.VECTOR_STORE, vectorstore, embeddings_key)
|
||||
|
||||
return Response(
|
||||
complete_stream(question, docsearch,
|
||||
chat_history=history, api_key=api_key,
|
||||
prompt_id=prompt_id,
|
||||
conversation_id=conversation_id), mimetype="text/event-stream"
|
||||
)
|
||||
|
||||
|
||||
@answer.route("/api/answer", methods=["POST"])
|
||||
def api_answer():
|
||||
data = request.get_json()
|
||||
question = data["question"]
|
||||
history = data["history"]
|
||||
if "conversation_id" not in data:
|
||||
conversation_id = None
|
||||
else:
|
||||
conversation_id = data["conversation_id"]
|
||||
print("-" * 5)
|
||||
if not api_key_set:
|
||||
api_key = data["api_key"]
|
||||
else:
|
||||
api_key = settings.API_KEY
|
||||
if not embeddings_key_set:
|
||||
embeddings_key = data["embeddings_key"]
|
||||
else:
|
||||
embeddings_key = settings.EMBEDDINGS_KEY
|
||||
if 'prompt_id' in data:
|
||||
prompt_id = data["prompt_id"]
|
||||
else:
|
||||
prompt_id = 'default'
|
||||
|
||||
if prompt_id == 'default':
|
||||
prompt = chat_combine_template
|
||||
elif prompt_id == 'creative':
|
||||
prompt = chat_combine_creative
|
||||
elif prompt_id == 'strict':
|
||||
prompt = chat_combine_strict
|
||||
else:
|
||||
prompt = prompts_collection.find_one({"_id": ObjectId(prompt_id)})["content"]
|
||||
|
||||
# use try and except to check for exception
|
||||
try:
|
||||
# check if the vectorstore is set
|
||||
vectorstore = get_vectorstore(data)
|
||||
# loading the index and the store and the prompt template
|
||||
# Note if you have used other embeddings than OpenAI, you need to change the embeddings
|
||||
docsearch = VectorCreator.create_vectorstore(settings.VECTOR_STORE, vectorstore, embeddings_key)
|
||||
|
||||
|
||||
llm = LLMCreator.create_llm(settings.LLM_NAME, api_key=api_key)
|
||||
|
||||
|
||||
|
||||
docs = docsearch.search(question, k=2)
|
||||
# join all page_content together with a newline
|
||||
docs_together = "\n".join([doc.page_content for doc in docs])
|
||||
p_chat_combine = prompt.replace("{summaries}", docs_together)
|
||||
messages_combine = [{"role": "system", "content": p_chat_combine}]
|
||||
source_log_docs = []
|
||||
for doc in docs:
|
||||
if doc.metadata:
|
||||
source_log_docs.append({"title": doc.metadata['title'].split('/')[-1], "text": doc.page_content})
|
||||
else:
|
||||
source_log_docs.append({"title": doc.page_content, "text": doc.page_content})
|
||||
# join all page_content together with a newline
|
||||
|
||||
|
||||
if len(history) > 1:
|
||||
tokens_current_history = 0
|
||||
# count tokens in history
|
||||
history.reverse()
|
||||
for i in history:
|
||||
if "prompt" in i and "response" in i:
|
||||
tokens_batch = count_tokens(i["prompt"]) + count_tokens(i["response"])
|
||||
if tokens_current_history + tokens_batch < settings.TOKENS_MAX_HISTORY:
|
||||
tokens_current_history += tokens_batch
|
||||
messages_combine.append({"role": "user", "content": i["prompt"]})
|
||||
messages_combine.append({"role": "system", "content": i["response"]})
|
||||
messages_combine.append({"role": "user", "content": question})
|
||||
|
||||
|
||||
completion = llm.gen(model=gpt_model, engine=settings.AZURE_DEPLOYMENT_NAME,
|
||||
messages=messages_combine)
|
||||
|
||||
|
||||
result = {"answer": completion, "sources": source_log_docs}
|
||||
logger.debug(result)
|
||||
|
||||
# generate conversationId
|
||||
if conversation_id is not None:
|
||||
conversations_collection.update_one(
|
||||
{"_id": ObjectId(conversation_id)},
|
||||
{"$push": {"queries": {"prompt": question,
|
||||
"response": result["answer"], "sources": result['sources']}}},
|
||||
)
|
||||
|
||||
else:
|
||||
# create new conversation
|
||||
# generate summary
|
||||
messages_summary = [
|
||||
{"role": "assistant", "content": "Summarise following conversation in no more than 3 words, "
|
||||
"respond ONLY with the summary, use the same language as the system \n\n"
|
||||
"User: " + question + "\n\n" + "AI: " + result["answer"]},
|
||||
{"role": "user", "content": "Summarise following conversation in no more than 3 words, "
|
||||
"respond ONLY with the summary, use the same language as the system"}
|
||||
]
|
||||
|
||||
completion = llm.gen(
|
||||
model=gpt_model,
|
||||
engine=settings.AZURE_DEPLOYMENT_NAME,
|
||||
messages=messages_summary,
|
||||
max_tokens=30
|
||||
)
|
||||
conversation_id = conversations_collection.insert_one(
|
||||
{"user": "local",
|
||||
"date": datetime.datetime.utcnow(),
|
||||
"name": completion,
|
||||
"queries": [{"prompt": question, "response": result["answer"], "sources": source_log_docs}]}
|
||||
).inserted_id
|
||||
|
||||
result["conversation_id"] = str(conversation_id)
|
||||
|
||||
# mock result
|
||||
# result = {
|
||||
# "answer": "The answer is 42",
|
||||
# "sources": ["https://en.wikipedia.org/wiki/42_(number)", "https://en.wikipedia.org/wiki/42_(number)"]
|
||||
# }
|
||||
return result
|
||||
except Exception as e:
|
||||
# print whole traceback
|
||||
traceback.print_exc()
|
||||
print(str(e))
|
||||
return bad_request(500, str(e))
|
||||
|
||||
|
||||
@answer.route("/api/search", methods=["POST"])
|
||||
def api_search():
|
||||
data = request.get_json()
|
||||
# get parameter from url question
|
||||
question = data["question"]
|
||||
|
||||
if not embeddings_key_set:
|
||||
if "embeddings_key" in data:
|
||||
embeddings_key = data["embeddings_key"]
|
||||
else:
|
||||
embeddings_key = settings.EMBEDDINGS_KEY
|
||||
else:
|
||||
embeddings_key = settings.EMBEDDINGS_KEY
|
||||
if "active_docs" in data:
|
||||
vectorstore = get_vectorstore({"active_docs": data["active_docs"]})
|
||||
else:
|
||||
vectorstore = ""
|
||||
docsearch = VectorCreator.create_vectorstore(settings.VECTOR_STORE, vectorstore, embeddings_key)
|
||||
|
||||
docs = docsearch.search(question, k=2)
|
||||
|
||||
source_log_docs = []
|
||||
for doc in docs:
|
||||
if doc.metadata:
|
||||
source_log_docs.append({"title": doc.metadata['title'].split('/')[-1], "text": doc.page_content})
|
||||
else:
|
||||
source_log_docs.append({"title": doc.page_content, "text": doc.page_content})
|
||||
#yield f"data:{data}\n\n"
|
||||
return source_log_docs
|
||||
|
||||
application/api/answer/routes/answer.py (new file, 153 lines)
@@ -0,0 +1,153 @@
|
||||
import logging
|
||||
import traceback
|
||||
|
||||
from flask import make_response, request
|
||||
from flask_restx import fields, Resource
|
||||
|
||||
from application.api import api
|
||||
|
||||
from application.api.answer.routes.base import answer_ns, BaseAnswerResource
|
||||
|
||||
from application.api.answer.services.stream_processor import StreamProcessor
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@answer_ns.route("/api/answer")
|
||||
class AnswerResource(Resource, BaseAnswerResource):
|
||||
def __init__(self, *args, **kwargs):
|
||||
Resource.__init__(self, *args, **kwargs)
|
||||
BaseAnswerResource.__init__(self)
|
||||
|
||||
answer_model = answer_ns.model(
|
||||
"AnswerModel",
|
||||
{
|
||||
"question": fields.String(
|
||||
required=True, description="Question to be asked"
|
||||
),
|
||||
"history": fields.List(
|
||||
fields.String,
|
||||
required=False,
|
||||
description="Conversation history (only for new conversations)",
|
||||
),
|
||||
"conversation_id": fields.String(
|
||||
required=False,
|
||||
description="Existing conversation ID (loads history)",
|
||||
),
|
||||
"prompt_id": fields.String(
|
||||
required=False, default="default", description="Prompt ID"
|
||||
),
|
||||
"chunks": fields.Integer(
|
||||
required=False, default=2, description="Number of chunks"
|
||||
),
|
||||
"retriever": fields.String(required=False, description="Retriever type"),
|
||||
"api_key": fields.String(required=False, description="API key"),
|
||||
"agent_id": fields.String(required=False, description="Agent ID"),
|
||||
"active_docs": fields.String(
|
||||
required=False, description="Active documents"
|
||||
),
|
||||
"isNoneDoc": fields.Boolean(
|
||||
required=False, description="Flag indicating if no document is used"
|
||||
),
|
||||
"save_conversation": fields.Boolean(
|
||||
required=False,
|
||||
default=True,
|
||||
description="Whether to save the conversation",
|
||||
),
|
||||
"model_id": fields.String(
|
||||
required=False,
|
||||
description="Model ID to use for this request",
|
||||
),
|
||||
"passthrough": fields.Raw(
|
||||
required=False,
|
||||
description="Dynamic parameters to inject into prompt template",
|
||||
),
|
||||
},
|
||||
)
|
||||
|
||||
@api.expect(answer_model)
|
||||
@api.doc(description="Provide a response based on the question and retriever")
|
||||
def post(self):
|
||||
data = request.get_json()
|
||||
if error := self.validate_request(data):
|
||||
return error
|
||||
decoded_token = getattr(request, "decoded_token", None)
|
||||
processor = StreamProcessor(data, decoded_token)
|
||||
try:
|
||||
# ---- Continuation mode ----
|
||||
if data.get("tool_actions"):
|
||||
(
|
||||
agent,
|
||||
messages,
|
||||
tools_dict,
|
||||
pending_tool_calls,
|
||||
tool_actions,
|
||||
) = processor.resume_from_tool_actions(
|
||||
data["tool_actions"], data["conversation_id"]
|
||||
)
|
||||
if not processor.decoded_token:
|
||||
return make_response({"error": "Unauthorized"}, 401)
|
||||
if error := self.check_usage(processor.agent_config):
|
||||
return error
|
||||
stream = self.complete_stream(
|
||||
question="",
|
||||
agent=agent,
|
||||
conversation_id=processor.conversation_id,
|
||||
user_api_key=processor.agent_config.get("user_api_key"),
|
||||
decoded_token=processor.decoded_token,
|
||||
agent_id=processor.agent_id,
|
||||
model_id=processor.model_id,
|
||||
_continuation={
|
||||
"messages": messages,
|
||||
"tools_dict": tools_dict,
|
||||
"pending_tool_calls": pending_tool_calls,
|
||||
"tool_actions": tool_actions,
|
||||
},
|
||||
)
|
||||
else:
|
||||
# ---- Normal mode ----
|
||||
agent = processor.build_agent(data.get("question", ""))
|
||||
if not processor.decoded_token:
|
||||
return make_response({"error": "Unauthorized"}, 401)
|
||||
|
||||
if error := self.check_usage(processor.agent_config):
|
||||
return error
|
||||
|
||||
stream = self.complete_stream(
|
||||
question=data["question"],
|
||||
agent=agent,
|
||||
conversation_id=processor.conversation_id,
|
||||
user_api_key=processor.agent_config.get("user_api_key"),
|
||||
decoded_token=processor.decoded_token,
|
||||
isNoneDoc=data.get("isNoneDoc"),
|
||||
index=None,
|
||||
should_save_conversation=data.get("save_conversation", True),
|
||||
agent_id=processor.agent_id,
|
||||
is_shared_usage=processor.is_shared_usage,
|
||||
shared_token=processor.shared_token,
|
||||
model_id=processor.model_id,
|
||||
)
|
||||
|
||||
stream_result = self.process_response_stream(stream)
|
||||
|
||||
if stream_result["error"]:
|
||||
return make_response({"error": stream_result["error"]}, 400)
|
||||
|
||||
result = {
|
||||
"conversation_id": stream_result["conversation_id"],
|
||||
"answer": stream_result["answer"],
|
||||
"sources": stream_result["sources"],
|
||||
"tool_calls": stream_result["tool_calls"],
|
||||
"thought": stream_result["thought"],
|
||||
}
|
||||
|
||||
extra_info = stream_result.get("extra")
|
||||
if extra_info:
|
||||
result.update(extra_info)
|
||||
except Exception as e:
|
||||
logger.error(
|
||||
f"/api/answer - error: {str(e)} - traceback: {traceback.format_exc()}",
|
||||
extra={"error": str(e), "traceback": traceback.format_exc()},
|
||||
)
|
||||
return make_response({"error": "An error occurred processing your request"}, 500)
|
||||
return make_response(result, 200)
|
||||
application/api/answer/routes/base.py (new file, 628 lines)
@@ -0,0 +1,628 @@
|
||||
import datetime
|
||||
import json
|
||||
import logging
|
||||
from typing import Any, Dict, Generator, List, Optional
|
||||
|
||||
from flask import jsonify, make_response, Response
|
||||
from flask_restx import Namespace
|
||||
|
||||
from application.api.answer.services.continuation_service import ContinuationService
|
||||
from application.api.answer.services.conversation_service import ConversationService
|
||||
from application.core.model_utils import (
|
||||
get_api_key_for_provider,
|
||||
get_default_model_id,
|
||||
get_provider_from_model_id,
|
||||
)
|
||||
|
||||
from application.core.mongo_db import MongoDB
|
||||
from application.core.settings import settings
|
||||
from application.error import sanitize_api_error
|
||||
from application.llm.llm_creator import LLMCreator
|
||||
from application.utils import check_required_fields
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
answer_ns = Namespace("answer", description="Answer related operations", path="/")
|
||||
|
||||
|
||||
class BaseAnswerResource:
|
||||
"""Shared base class for answer endpoints"""
|
||||
|
||||
def __init__(self):
|
||||
mongo = MongoDB.get_client()
|
||||
db = mongo[settings.MONGO_DB_NAME]
|
||||
self.db = db
|
||||
self.user_logs_collection = db["user_logs"]
|
||||
self.default_model_id = get_default_model_id()
|
||||
self.conversation_service = ConversationService()
|
||||
|
||||
def validate_request(
|
||||
self, data: Dict[str, Any], require_conversation_id: bool = False
|
||||
) -> Optional[Response]:
|
||||
"""Common request validation.
|
||||
|
||||
Continuation requests (``tool_actions`` present) require
|
||||
``conversation_id`` but not ``question``.
|
||||
"""
|
||||
if data.get("tool_actions"):
|
||||
# Continuation mode — question is not required
|
||||
if missing := check_required_fields(data, ["conversation_id"]):
|
||||
return missing
|
||||
return None
|
||||
required_fields = ["question"]
|
||||
if require_conversation_id:
|
||||
required_fields.append("conversation_id")
|
||||
if missing_fields := check_required_fields(data, required_fields):
|
||||
return missing_fields
|
||||
return None
|
||||
|
||||
@staticmethod
|
||||
def _prepare_tool_calls_for_logging(
|
||||
tool_calls: Optional[List[Dict[str, Any]]], max_chars: int = 10000
|
||||
) -> List[Dict[str, Any]]:
|
||||
if not tool_calls:
|
||||
return []
|
||||
|
||||
prepared = []
|
||||
for tool_call in tool_calls:
|
||||
if not isinstance(tool_call, dict):
|
||||
prepared.append({"result": str(tool_call)[:max_chars]})
|
||||
continue
|
||||
|
||||
item = dict(tool_call)
|
||||
for key in ("result", "result_full"):
|
||||
value = item.get(key)
|
||||
if isinstance(value, str) and len(value) > max_chars:
|
||||
item[key] = value[:max_chars]
|
||||
prepared.append(item)
|
||||
return prepared
|
||||
|
||||
def check_usage(self, agent_config: Dict) -> Optional[Response]:
|
||||
"""Check if there is a usage limit and if it is exceeded
|
||||
|
||||
Args:
|
||||
agent_config: The config dict of agent instance
|
||||
|
||||
Returns:
|
||||
None, or a Response if either limit is exceeded.
|
||||
|
||||
"""
|
||||
api_key = agent_config.get("user_api_key")
|
||||
if not api_key:
|
||||
return None
|
||||
agents_collection = self.db["agents"]
|
||||
agent = agents_collection.find_one({"key": api_key})
|
||||
|
||||
if not agent:
|
||||
return make_response(
|
||||
jsonify({"success": False, "message": "Invalid API key."}), 401
|
||||
)
|
||||
limited_token_mode_raw = agent.get("limited_token_mode", False)
|
||||
limited_request_mode_raw = agent.get("limited_request_mode", False)
|
||||
|
||||
limited_token_mode = (
|
||||
limited_token_mode_raw
|
||||
if isinstance(limited_token_mode_raw, bool)
|
||||
else limited_token_mode_raw == "True"
|
||||
)
|
||||
limited_request_mode = (
|
||||
limited_request_mode_raw
|
||||
if isinstance(limited_request_mode_raw, bool)
|
||||
else limited_request_mode_raw == "True"
|
||||
)
|
||||
|
||||
token_limit = int(
|
||||
agent.get("token_limit", settings.DEFAULT_AGENT_LIMITS["token_limit"])
|
||||
)
|
||||
request_limit = int(
|
||||
agent.get("request_limit", settings.DEFAULT_AGENT_LIMITS["request_limit"])
|
||||
)
|
||||
|
||||
token_usage_collection = self.db["token_usage"]
|
||||
|
||||
end_date = datetime.datetime.now()
|
||||
start_date = end_date - datetime.timedelta(hours=24)
|
||||
|
||||
match_query = {
|
||||
"timestamp": {"$gte": start_date, "$lte": end_date},
|
||||
"api_key": api_key,
|
||||
}
|
||||
|
||||
if limited_token_mode:
|
||||
token_pipeline = [
|
||||
{"$match": match_query},
|
||||
{
|
||||
"$group": {
|
||||
"_id": None,
|
||||
"total_tokens": {
|
||||
"$sum": {"$add": ["$prompt_tokens", "$generated_tokens"]}
|
||||
},
|
||||
}
|
||||
},
|
||||
]
|
||||
token_result = list(token_usage_collection.aggregate(token_pipeline))
|
||||
daily_token_usage = token_result[0]["total_tokens"] if token_result else 0
|
||||
else:
|
||||
daily_token_usage = 0
|
||||
if limited_request_mode:
|
||||
daily_request_usage = token_usage_collection.count_documents(match_query)
|
||||
else:
|
||||
daily_request_usage = 0
|
||||
if not limited_token_mode and not limited_request_mode:
|
||||
return None
|
||||
token_exceeded = (
|
||||
limited_token_mode and token_limit > 0 and daily_token_usage >= token_limit
|
||||
)
|
||||
request_exceeded = (
|
||||
limited_request_mode
|
||||
and request_limit > 0
|
||||
and daily_request_usage >= request_limit
|
||||
)
|
||||
|
||||
if token_exceeded or request_exceeded:
|
||||
return make_response(
|
||||
jsonify(
|
||||
{
|
||||
"success": False,
|
||||
"message": "Exceeding usage limit, please try again later.",
|
||||
}
|
||||
),
|
||||
429,
|
||||
)
|
||||
return None
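# Illustration (not part of the module): a hypothetical agent document showing the limit
# fields this check reads. Values are made up; the string form "True" is tolerated for
# legacy records, as handled above.
example_agent = {
    "key": "agent-api-key",
    "limited_token_mode": True,
    "limited_request_mode": False,
    "token_limit": 50000,    # prompt + generated tokens allowed in the rolling 24h window
    "request_limit": 100,    # requests allowed in the rolling 24h window
}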
|
||||
|
||||
def complete_stream(
|
||||
self,
|
||||
question: str,
|
||||
agent: Any,
|
||||
conversation_id: Optional[str],
|
||||
user_api_key: Optional[str],
|
||||
decoded_token: Dict[str, Any],
|
||||
isNoneDoc: bool = False,
|
||||
index: Optional[int] = None,
|
||||
should_save_conversation: bool = True,
|
||||
attachment_ids: Optional[List[str]] = None,
|
||||
agent_id: Optional[str] = None,
|
||||
is_shared_usage: bool = False,
|
||||
shared_token: Optional[str] = None,
|
||||
model_id: Optional[str] = None,
|
||||
_continuation: Optional[Dict] = None,
|
||||
) -> Generator[str, None, None]:
|
||||
"""
|
||||
Generator function that streams the complete conversation response.
|
||||
|
||||
Args:
|
||||
question: The user's question
|
||||
agent: The agent instance
|
||||
|
||||
conversation_id: Existing conversation ID
|
||||
user_api_key: User's API key if any
|
||||
decoded_token: Decoded JWT token
|
||||
isNoneDoc: Flag for document-less responses
|
||||
index: Index of message to update
|
||||
should_save_conversation: Whether to persist the conversation
|
||||
attachment_ids: List of attachment IDs
|
||||
agent_id: ID of agent used
|
||||
is_shared_usage: Flag for shared agent usage
|
||||
shared_token: Token for shared agent
|
||||
model_id: Model ID used for the request
|
||||
|
||||
|
||||
Yields:
|
||||
Server-sent event strings
|
||||
"""
|
||||
try:
|
||||
response_full, thought, source_log_docs, tool_calls = "", "", [], []
|
||||
is_structured = False
|
||||
schema_info = None
|
||||
structured_chunks = []
|
||||
query_metadata = {}
|
||||
paused = False
|
||||
|
||||
if _continuation:
|
||||
gen_iter = agent.gen_continuation(
|
||||
messages=_continuation["messages"],
|
||||
tools_dict=_continuation["tools_dict"],
|
||||
pending_tool_calls=_continuation["pending_tool_calls"],
|
||||
tool_actions=_continuation["tool_actions"],
|
||||
)
|
||||
else:
|
||||
gen_iter = agent.gen(query=question)
|
||||
|
||||
for line in gen_iter:
|
||||
if "metadata" in line:
|
||||
query_metadata.update(line["metadata"])
|
||||
elif "answer" in line:
|
||||
response_full += str(line["answer"])
|
||||
if line.get("structured"):
|
||||
is_structured = True
|
||||
schema_info = line.get("schema")
|
||||
structured_chunks.append(line["answer"])
|
||||
else:
|
||||
data = json.dumps({"type": "answer", "answer": line["answer"]})
|
||||
yield f"data: {data}\n\n"
|
||||
elif "sources" in line:
|
||||
truncated_sources = []
|
||||
source_log_docs = line["sources"]
|
||||
for source in line["sources"]:
|
||||
truncated_source = source.copy()
|
||||
if "text" in truncated_source:
|
||||
truncated_source["text"] = (
|
||||
truncated_source["text"][:100].strip() + "..."
|
||||
)
|
||||
truncated_sources.append(truncated_source)
|
||||
if truncated_sources:
|
||||
data = json.dumps(
|
||||
{"type": "source", "source": truncated_sources}
|
||||
)
|
||||
yield f"data: {data}\n\n"
|
||||
elif "tool_calls" in line:
|
||||
tool_calls = line["tool_calls"]
|
||||
data = json.dumps({"type": "tool_calls", "tool_calls": tool_calls})
|
||||
yield f"data: {data}\n\n"
|
||||
elif "thought" in line:
|
||||
thought += line["thought"]
|
||||
data = json.dumps({"type": "thought", "thought": line["thought"]})
|
||||
yield f"data: {data}\n\n"
|
||||
elif "type" in line:
|
||||
if line.get("type") == "tool_calls_pending":
|
||||
# Save continuation state and end the stream
|
||||
paused = True
|
||||
data = json.dumps(line)
|
||||
yield f"data: {data}\n\n"
|
||||
elif line.get("type") == "error":
|
||||
sanitized_error = {
|
||||
"type": "error",
|
||||
"error": sanitize_api_error(line.get("error", "An error occurred"))
|
||||
}
|
||||
data = json.dumps(sanitized_error)
|
||||
yield f"data: {data}\n\n"
|
||||
else:
|
||||
data = json.dumps(line)
|
||||
yield f"data: {data}\n\n"
|
||||
if is_structured and structured_chunks:
|
||||
structured_data = {
|
||||
"type": "structured_answer",
|
||||
"answer": response_full,
|
||||
"structured": True,
|
||||
"schema": schema_info,
|
||||
}
|
||||
data = json.dumps(structured_data)
|
||||
yield f"data: {data}\n\n"
|
||||
|
||||
# ---- Paused: save continuation state and end stream early ----
|
||||
if paused:
|
||||
continuation = getattr(agent, "_pending_continuation", None)
|
||||
if continuation:
|
||||
# Ensure we have a conversation_id — create a partial
|
||||
# conversation if this is the first turn.
|
||||
if not conversation_id and should_save_conversation:
|
||||
try:
|
||||
provider = (
|
||||
get_provider_from_model_id(model_id)
|
||||
if model_id
|
||||
else settings.LLM_PROVIDER
|
||||
)
|
||||
sys_api_key = get_api_key_for_provider(
|
||||
provider or settings.LLM_PROVIDER
|
||||
)
|
||||
llm = LLMCreator.create_llm(
|
||||
provider or settings.LLM_PROVIDER,
|
||||
api_key=sys_api_key,
|
||||
user_api_key=user_api_key,
|
||||
decoded_token=decoded_token,
|
||||
model_id=model_id,
|
||||
agent_id=agent_id,
|
||||
)
|
||||
conversation_id = (
|
||||
self.conversation_service.save_conversation(
|
||||
None,
|
||||
question,
|
||||
response_full,
|
||||
thought,
|
||||
source_log_docs,
|
||||
tool_calls,
|
||||
llm,
|
||||
model_id or self.default_model_id,
|
||||
decoded_token,
|
||||
api_key=user_api_key,
|
||||
agent_id=agent_id,
|
||||
is_shared_usage=is_shared_usage,
|
||||
shared_token=shared_token,
|
||||
)
|
||||
)
|
||||
except Exception as e:
|
||||
logger.error(
|
||||
f"Failed to create conversation for continuation: {e}",
|
||||
exc_info=True,
|
||||
)
|
||||
|
||||
if conversation_id:
|
||||
try:
|
||||
cont_service = ContinuationService()
|
||||
cont_service.save_state(
|
||||
conversation_id=str(conversation_id),
|
||||
user=decoded_token.get("sub", "local"),
|
||||
messages=continuation["messages"],
|
||||
pending_tool_calls=continuation["pending_tool_calls"],
|
||||
tools_dict=continuation["tools_dict"],
|
||||
tool_schemas=getattr(agent, "tools", []),
|
||||
agent_config={
|
||||
"model_id": model_id or self.default_model_id,
|
||||
"llm_name": getattr(agent, "llm_name", settings.LLM_PROVIDER),
|
||||
"api_key": getattr(agent, "api_key", None),
|
||||
"user_api_key": user_api_key,
|
||||
"agent_id": agent_id,
|
||||
"agent_type": agent.__class__.__name__,
|
||||
"prompt": getattr(agent, "prompt", ""),
|
||||
"json_schema": getattr(agent, "json_schema", None),
|
||||
"retriever_config": getattr(agent, "retriever_config", None),
|
||||
},
|
||||
client_tools=getattr(
|
||||
agent.tool_executor, "client_tools", None
|
||||
),
|
||||
)
|
||||
except Exception as e:
|
||||
logger.error(
|
||||
f"Failed to save continuation state: {str(e)}",
|
||||
exc_info=True,
|
||||
)
|
||||
|
||||
id_data = {"type": "id", "id": str(conversation_id)}
|
||||
data = json.dumps(id_data)
|
||||
yield f"data: {data}\n\n"
|
||||
|
||||
data = json.dumps({"type": "end"})
|
||||
yield f"data: {data}\n\n"
|
||||
return
|
||||
|
||||
if isNoneDoc:
|
||||
for doc in source_log_docs:
|
||||
doc["source"] = "None"
|
||||
provider = (
|
||||
get_provider_from_model_id(model_id)
|
||||
if model_id
|
||||
else settings.LLM_PROVIDER
|
||||
)
|
||||
system_api_key = get_api_key_for_provider(provider or settings.LLM_PROVIDER)
|
||||
|
||||
llm = LLMCreator.create_llm(
|
||||
provider or settings.LLM_PROVIDER,
|
||||
api_key=system_api_key,
|
||||
user_api_key=user_api_key,
|
||||
decoded_token=decoded_token,
|
||||
model_id=model_id,
|
||||
agent_id=agent_id,
|
||||
)
|
||||
|
||||
if should_save_conversation:
|
||||
conversation_id = self.conversation_service.save_conversation(
|
||||
conversation_id,
|
||||
question,
|
||||
response_full,
|
||||
thought,
|
||||
source_log_docs,
|
||||
tool_calls,
|
||||
llm,
|
||||
model_id or self.default_model_id,
|
||||
decoded_token,
|
||||
index=index,
|
||||
api_key=user_api_key,
|
||||
agent_id=agent_id,
|
||||
is_shared_usage=is_shared_usage,
|
||||
shared_token=shared_token,
|
||||
attachment_ids=attachment_ids,
|
||||
metadata=query_metadata if query_metadata else None,
|
||||
)
|
||||
# Persist compression metadata/summary if it exists and wasn't saved mid-execution
|
||||
compression_meta = getattr(agent, "compression_metadata", None)
|
||||
compression_saved = getattr(agent, "compression_saved", False)
|
||||
if conversation_id and compression_meta and not compression_saved:
|
||||
try:
|
||||
self.conversation_service.update_compression_metadata(
|
||||
conversation_id, compression_meta
|
||||
)
|
||||
self.conversation_service.append_compression_message(
|
||||
conversation_id, compression_meta
|
||||
)
|
||||
agent.compression_saved = True
|
||||
logger.info(
|
||||
f"Persisted compression metadata for conversation {conversation_id}"
|
||||
)
|
||||
except Exception as e:
|
||||
logger.error(
|
||||
f"Failed to persist compression metadata: {str(e)}",
|
||||
exc_info=True,
|
||||
)
|
||||
else:
|
||||
conversation_id = None
|
||||
id_data = {"type": "id", "id": str(conversation_id)}
|
||||
data = json.dumps(id_data)
|
||||
yield f"data: {data}\n\n"
|
||||
|
||||
tool_calls_for_logging = self._prepare_tool_calls_for_logging(
|
||||
getattr(agent, "tool_calls", tool_calls) or tool_calls
|
||||
)
|
||||
|
||||
log_data = {
|
||||
"action": "stream_answer",
|
||||
"level": "info",
|
||||
"user": decoded_token.get("sub"),
|
||||
"api_key": user_api_key,
|
||||
"agent_id": agent_id,
|
||||
"question": question,
|
||||
"response": response_full,
|
||||
"sources": source_log_docs,
|
||||
"tool_calls": tool_calls_for_logging,
|
||||
"attachments": attachment_ids,
|
||||
"timestamp": datetime.datetime.now(datetime.timezone.utc),
|
||||
}
|
||||
if is_structured:
|
||||
log_data["structured_output"] = True
|
||||
if schema_info:
|
||||
log_data["schema"] = schema_info
|
||||
# Clean up text fields to be no longer than 10000 characters
|
||||
|
||||
for key, value in log_data.items():
|
||||
if isinstance(value, str) and len(value) > 10000:
|
||||
log_data[key] = value[:10000]
|
||||
self.user_logs_collection.insert_one(log_data)
|
||||
|
||||
data = json.dumps({"type": "end"})
|
||||
yield f"data: {data}\n\n"
|
||||
except GeneratorExit:
|
||||
logger.info(f"Stream aborted by client for question: {question[:50]}... ")
|
||||
# Save partial response
|
||||
|
||||
if should_save_conversation and response_full:
|
||||
try:
|
||||
if isNoneDoc:
|
||||
for doc in source_log_docs:
|
||||
doc["source"] = "None"
|
||||
llm = LLMCreator.create_llm(
|
||||
settings.LLM_PROVIDER,
|
||||
api_key=settings.API_KEY,
|
||||
user_api_key=user_api_key,
|
||||
decoded_token=decoded_token,
|
||||
agent_id=agent_id,
|
||||
)
|
||||
self.conversation_service.save_conversation(
|
||||
conversation_id,
|
||||
question,
|
||||
response_full,
|
||||
thought,
|
||||
source_log_docs,
|
||||
tool_calls,
|
||||
llm,
|
||||
model_id or self.default_model_id,
|
||||
decoded_token,
|
||||
index=index,
|
||||
api_key=user_api_key,
|
||||
agent_id=agent_id,
|
||||
is_shared_usage=is_shared_usage,
|
||||
shared_token=shared_token,
|
||||
attachment_ids=attachment_ids,
|
||||
metadata=query_metadata if query_metadata else None,
|
||||
)
|
||||
compression_meta = getattr(agent, "compression_metadata", None)
|
||||
compression_saved = getattr(agent, "compression_saved", False)
|
||||
if conversation_id and compression_meta and not compression_saved:
|
||||
try:
|
||||
self.conversation_service.update_compression_metadata(
|
||||
conversation_id, compression_meta
|
||||
)
|
||||
self.conversation_service.append_compression_message(
|
||||
conversation_id, compression_meta
|
||||
)
|
||||
agent.compression_saved = True
|
||||
logger.info(
|
||||
f"Persisted compression metadata for conversation {conversation_id} (partial stream)"
|
||||
)
|
||||
except Exception as e:
|
||||
logger.error(
|
||||
f"Failed to persist compression metadata (partial stream): {str(e)}",
|
||||
exc_info=True,
|
||||
)
|
||||
except Exception as e:
|
||||
logger.error(
|
||||
f"Error saving partial response: {str(e)}", exc_info=True
|
||||
)
|
||||
raise
|
||||
except Exception as e:
|
||||
logger.error(f"Error in stream: {str(e)}", exc_info=True)
|
||||
data = json.dumps(
|
||||
{
|
||||
"type": "error",
|
||||
"error": "Please try again later. We apologize for any inconvenience.",
|
||||
}
|
||||
)
|
||||
yield f"data: {data}\n\n"
|
||||
return
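# Illustration (not part of the module): a hedged client-side sketch of consuming the
# server-sent events this generator yields. Host, port and authentication setup are
# assumptions; any streaming HTTP client works the same way.
import json
import requests

resp = requests.post(
    "http://localhost:7091/stream",
    json={"question": "What is DocsGPT?"},
    stream=True,
)
for raw in resp.iter_lines():
    if not raw or not raw.startswith(b"data: "):
        continue
    event = json.loads(raw[len(b"data: "):])
    if event["type"] == "answer":
        print(event["answer"], end="")
    elif event["type"] == "end":
        break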
|
||||
|
||||
def process_response_stream(self, stream) -> Dict[str, Any]:
|
||||
"""Process the stream response for non-streaming endpoint.
|
||||
|
||||
Returns:
|
||||
Dict with keys: conversation_id, answer, sources, tool_calls,
|
||||
thought, error, and optional extra.
|
||||
"""
|
||||
conversation_id = ""
|
||||
response_full = ""
|
||||
source_log_docs = []
|
||||
tool_calls = []
|
||||
thought = ""
|
||||
stream_ended = False
|
||||
is_structured = False
|
||||
schema_info = None
|
||||
pending_tool_calls = None
|
||||
|
||||
for line in stream:
|
||||
try:
|
||||
event_data = line.replace("data: ", "").strip()
|
||||
event = json.loads(event_data)
|
||||
|
||||
if event["type"] == "id":
|
||||
conversation_id = event["id"]
|
||||
elif event["type"] == "answer":
|
||||
response_full += event["answer"]
|
||||
elif event["type"] == "structured_answer":
|
||||
response_full = event["answer"]
|
||||
is_structured = True
|
||||
schema_info = event.get("schema")
|
||||
elif event["type"] == "source":
|
||||
source_log_docs = event["source"]
|
||||
elif event["type"] == "tool_calls":
|
||||
tool_calls = event["tool_calls"]
|
||||
elif event["type"] == "tool_calls_pending":
|
||||
pending_tool_calls = event.get("data", {}).get(
|
||||
"pending_tool_calls", []
|
||||
)
|
||||
elif event["type"] == "thought":
|
||||
thought = event["thought"]
|
||||
elif event["type"] == "error":
|
||||
logger.error(f"Error from stream: {event['error']}")
|
||||
return {
|
||||
"conversation_id": None,
|
||||
"answer": None,
|
||||
"sources": None,
|
||||
"tool_calls": None,
|
||||
"thought": None,
|
||||
"error": event["error"],
|
||||
}
|
||||
elif event["type"] == "end":
|
||||
stream_ended = True
|
||||
except (json.JSONDecodeError, KeyError) as e:
|
||||
logger.warning(f"Error parsing stream event: {e}, line: {line}")
|
||||
continue
|
||||
if not stream_ended:
|
||||
logger.error("Stream ended unexpectedly without an 'end' event.")
|
||||
return {
|
||||
"conversation_id": None,
|
||||
"answer": None,
|
||||
"sources": None,
|
||||
"tool_calls": None,
|
||||
"thought": None,
|
||||
"error": "Stream ended unexpectedly",
|
||||
}
|
||||
|
||||
result: Dict[str, Any] = {
|
||||
"conversation_id": conversation_id,
|
||||
"answer": response_full,
|
||||
"sources": source_log_docs,
|
||||
"tool_calls": tool_calls,
|
||||
"thought": thought,
|
||||
"error": None,
|
||||
}
|
||||
|
||||
if pending_tool_calls is not None:
|
||||
result["extra"] = {"pending_tool_calls": pending_tool_calls}
|
||||
|
||||
if is_structured:
|
||||
result["extra"] = {"structured": True, "schema": schema_info}
|
||||
|
||||
return result
|
||||
|
||||
def error_stream_generate(self, err_response):
|
||||
data = json.dumps({"type": "error", "error": err_response})
|
||||
yield f"data: {data}\n\n"
|
||||
application/api/answer/routes/search.py (new file, 186 lines)
@@ -0,0 +1,186 @@
|
||||
import logging
|
||||
from typing import Any, Dict, List
|
||||
|
||||
from flask import make_response, request
|
||||
from flask_restx import fields, Resource
|
||||
|
||||
from bson.dbref import DBRef
|
||||
|
||||
from application.api.answer.routes.base import answer_ns
|
||||
from application.core.mongo_db import MongoDB
|
||||
from application.core.settings import settings
|
||||
from application.vectorstore.vector_creator import VectorCreator
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@answer_ns.route("/api/search")
|
||||
class SearchResource(Resource):
|
||||
"""Fast search endpoint for retrieving relevant documents"""
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
super().__init__(*args, **kwargs)
|
||||
mongo = MongoDB.get_client()
|
||||
self.db = mongo[settings.MONGO_DB_NAME]
|
||||
self.agents_collection = self.db["agents"]
|
||||
|
||||
search_model = answer_ns.model(
|
||||
"SearchModel",
|
||||
{
|
||||
"question": fields.String(
|
||||
required=True, description="Search query"
|
||||
),
|
||||
"api_key": fields.String(
|
||||
required=True, description="API key for authentication"
|
||||
),
|
||||
"chunks": fields.Integer(
|
||||
required=False, default=5, description="Number of results to return"
|
||||
),
|
||||
},
|
||||
)
|
||||
|
||||
def _get_sources_from_api_key(self, api_key: str) -> List[str]:
|
||||
"""Get source IDs connected to the API key/agent.
|
||||
|
||||
"""
|
||||
agent_data = self.agents_collection.find_one({"key": api_key})
|
||||
if not agent_data:
|
||||
return []
|
||||
|
||||
source_ids = []
|
||||
|
||||
# Handle multiple sources (only if non-empty)
|
||||
sources = agent_data.get("sources", [])
|
||||
if sources and isinstance(sources, list) and len(sources) > 0:
|
||||
for source_ref in sources:
|
||||
# Skip "default" - it's a placeholder, not an actual vectorstore
|
||||
if source_ref == "default":
|
||||
continue
|
||||
elif isinstance(source_ref, DBRef):
|
||||
source_doc = self.db.dereference(source_ref)
|
||||
if source_doc:
|
||||
source_ids.append(str(source_doc["_id"]))
|
||||
|
||||
# Handle single source (legacy) - check if sources was empty or didn't yield results
|
||||
if not source_ids:
|
||||
source = agent_data.get("source")
|
||||
if isinstance(source, DBRef):
|
||||
source_doc = self.db.dereference(source)
|
||||
if source_doc:
|
||||
source_ids.append(str(source_doc["_id"]))
|
||||
# Skip "default" - it's a placeholder, not an actual vectorstore
|
||||
elif source and source != "default":
|
||||
source_ids.append(source)
|
||||
|
||||
return source_ids
|
||||
|
||||
def _search_vectorstores(
|
||||
self, query: str, source_ids: List[str], chunks: int
|
||||
) -> List[Dict[str, Any]]:
|
||||
"""Search across vectorstores and return results"""
|
||||
if not source_ids:
|
||||
return []
|
||||
|
||||
results = []
|
||||
chunks_per_source = max(1, chunks // len(source_ids))
|
||||
seen_texts = set()
|
||||
|
||||
for source_id in source_ids:
|
||||
if not source_id or not source_id.strip():
|
||||
continue
|
||||
|
||||
try:
|
||||
docsearch = VectorCreator.create_vectorstore(
|
||||
settings.VECTOR_STORE, source_id, settings.EMBEDDINGS_KEY
|
||||
)
|
||||
docs = docsearch.search(query, k=chunks_per_source * 2)
|
||||
|
||||
for doc in docs:
|
||||
if len(results) >= chunks:
|
||||
break
|
||||
|
||||
if hasattr(doc, "page_content") and hasattr(doc, "metadata"):
|
||||
page_content = doc.page_content
|
||||
metadata = doc.metadata
|
||||
else:
|
||||
page_content = doc.get("text", doc.get("page_content", ""))
|
||||
metadata = doc.get("metadata", {})
|
||||
|
||||
# Skip duplicates
|
||||
text_hash = hash(page_content[:200])
|
||||
if text_hash in seen_texts:
|
||||
continue
|
||||
seen_texts.add(text_hash)
|
||||
|
||||
title = metadata.get(
|
||||
"title", metadata.get("post_title", "")
|
||||
)
|
||||
if not isinstance(title, str):
|
||||
title = str(title) if title else ""
|
||||
|
||||
# Clean up title
|
||||
if title:
|
||||
title = title.split("/")[-1]
|
||||
else:
|
||||
# Use filename or first part of content as title
|
||||
title = metadata.get("filename", page_content[:50] + "...")
|
||||
|
||||
source = metadata.get("source", source_id)
|
||||
|
||||
results.append({
|
||||
"text": page_content,
|
||||
"title": title,
|
||||
"source": source,
|
||||
})
|
||||
|
||||
if len(results) >= chunks:
|
||||
break
|
||||
|
||||
except Exception as e:
|
||||
logger.error(
|
||||
f"Error searching vectorstore {source_id}: {e}",
|
||||
exc_info=True,
|
||||
)
|
||||
continue
|
||||
|
||||
return results[:chunks]
|
||||
|
||||
@answer_ns.expect(search_model)
|
||||
@answer_ns.doc(description="Search for relevant documents based on query")
|
||||
def post(self):
|
||||
data = request.get_json()
|
||||
|
||||
question = data.get("question")
|
||||
api_key = data.get("api_key")
|
||||
chunks = data.get("chunks", 5)
|
||||
|
||||
if not question:
|
||||
return make_response({"error": "question is required"}, 400)
|
||||
|
||||
if not api_key:
|
||||
return make_response({"error": "api_key is required"}, 400)
|
||||
|
||||
# Validate API key
|
||||
agent = self.agents_collection.find_one({"key": api_key})
|
||||
if not agent:
|
||||
return make_response({"error": "Invalid API key"}, 401)
|
||||
|
||||
try:
|
||||
# Get sources connected to this API key
|
||||
source_ids = self._get_sources_from_api_key(api_key)
|
||||
|
||||
if not source_ids:
|
||||
return make_response([], 200)
|
||||
|
||||
# Perform search
|
||||
results = self._search_vectorstores(question, source_ids, chunks)
|
||||
|
||||
return make_response(results, 200)
|
||||
|
||||
except Exception as e:
|
||||
logger.error(
|
||||
f"/api/search - error: {str(e)}",
|
||||
extra={"error": str(e)},
|
||||
exc_info=True,
|
||||
)
|
||||
return make_response({"error": "Search failed"}, 500)
|
||||
application/api/answer/routes/stream.py (new file, 171 lines)
@@ -0,0 +1,171 @@
|
||||
import logging
|
||||
import traceback
|
||||
|
||||
from flask import request, Response
|
||||
from flask_restx import fields, Resource
|
||||
|
||||
from application.api import api
|
||||
|
||||
from application.api.answer.routes.base import answer_ns, BaseAnswerResource
|
||||
|
||||
from application.api.answer.services.stream_processor import StreamProcessor
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@answer_ns.route("/stream")
|
||||
class StreamResource(Resource, BaseAnswerResource):
|
||||
def __init__(self, *args, **kwargs):
|
||||
Resource.__init__(self, *args, **kwargs)
|
||||
BaseAnswerResource.__init__(self)
|
||||
|
||||
stream_model = answer_ns.model(
|
||||
"StreamModel",
|
||||
{
|
||||
"question": fields.String(
|
||||
required=True, description="Question to be asked"
|
||||
),
|
||||
"history": fields.List(
|
||||
fields.String,
|
||||
required=False,
|
||||
description="Conversation history (only for new conversations)",
|
||||
),
|
||||
"conversation_id": fields.String(
|
||||
required=False,
|
||||
description="Existing conversation ID (loads history)",
|
||||
),
|
||||
"prompt_id": fields.String(
|
||||
required=False, default="default", description="Prompt ID"
|
||||
),
|
||||
"chunks": fields.Integer(
|
||||
required=False, default=2, description="Number of chunks"
|
||||
),
|
||||
"retriever": fields.String(required=False, description="Retriever type"),
|
||||
"api_key": fields.String(required=False, description="API key"),
|
||||
"agent_id": fields.String(required=False, description="Agent ID"),
|
||||
"active_docs": fields.String(
|
||||
required=False, description="Active documents"
|
||||
),
|
||||
"isNoneDoc": fields.Boolean(
|
||||
required=False, description="Flag indicating if no document is used"
|
||||
),
|
||||
"index": fields.Integer(
|
||||
required=False, description="Index of the query to update"
|
||||
),
|
||||
"save_conversation": fields.Boolean(
|
||||
required=False,
|
||||
default=True,
|
||||
description="Whether to save the conversation",
|
||||
),
|
||||
"model_id": fields.String(
|
||||
required=False,
|
||||
description="Model ID to use for this request",
|
||||
),
|
||||
"attachments": fields.List(
|
||||
fields.String, required=False, description="List of attachment IDs"
|
||||
),
|
||||
"passthrough": fields.Raw(
|
||||
required=False,
|
||||
description="Dynamic parameters to inject into prompt template",
|
||||
),
|
||||
},
|
||||
)
|
||||
|
||||
@api.expect(stream_model)
|
||||
@api.doc(description="Stream a response based on the question and retriever")
|
||||
def post(self):
|
||||
data = request.get_json()
|
||||
if error := self.validate_request(data, "index" in data):
|
||||
return error
|
||||
decoded_token = getattr(request, "decoded_token", None)
|
||||
processor = StreamProcessor(data, decoded_token)
|
||||
|
||||
try:
|
||||
# ---- Continuation mode ----
|
||||
if data.get("tool_actions"):
|
||||
(
|
||||
agent,
|
||||
messages,
|
||||
tools_dict,
|
||||
pending_tool_calls,
|
||||
tool_actions,
|
||||
) = processor.resume_from_tool_actions(
|
||||
data["tool_actions"], data["conversation_id"]
|
||||
)
|
||||
if not processor.decoded_token:
|
||||
return Response(
|
||||
self.error_stream_generate("Unauthorized"),
|
||||
status=401,
|
||||
mimetype="text/event-stream",
|
||||
)
|
||||
if error := self.check_usage(processor.agent_config):
|
||||
return error
|
||||
return Response(
|
||||
self.complete_stream(
|
||||
question="",
|
||||
agent=agent,
|
||||
conversation_id=processor.conversation_id,
|
||||
user_api_key=processor.agent_config.get("user_api_key"),
|
||||
decoded_token=processor.decoded_token,
|
||||
agent_id=processor.agent_id,
|
||||
model_id=processor.model_id,
|
||||
_continuation={
|
||||
"messages": messages,
|
||||
"tools_dict": tools_dict,
|
||||
"pending_tool_calls": pending_tool_calls,
|
||||
"tool_actions": tool_actions,
|
||||
},
|
||||
),
|
||||
mimetype="text/event-stream",
|
||||
)
|
||||
|
||||
# ---- Normal mode ----
|
||||
agent = processor.build_agent(data["question"])
|
||||
if not processor.decoded_token:
|
||||
return Response(
|
||||
self.error_stream_generate("Unauthorized"),
|
||||
status=401,
|
||||
mimetype="text/event-stream",
|
||||
)
|
||||
|
||||
if error := self.check_usage(processor.agent_config):
|
||||
return error
|
||||
return Response(
|
||||
self.complete_stream(
|
||||
question=data["question"],
|
||||
agent=agent,
|
||||
conversation_id=processor.conversation_id,
|
||||
user_api_key=processor.agent_config.get("user_api_key"),
|
||||
decoded_token=processor.decoded_token,
|
||||
isNoneDoc=data.get("isNoneDoc"),
|
||||
index=data.get("index"),
|
||||
should_save_conversation=data.get("save_conversation", True),
|
||||
attachment_ids=data.get("attachments", []),
|
||||
agent_id=processor.agent_id,
|
||||
is_shared_usage=processor.is_shared_usage,
|
||||
shared_token=processor.shared_token,
|
||||
model_id=processor.model_id,
|
||||
),
|
||||
mimetype="text/event-stream",
|
||||
)
|
||||
except ValueError as e:
|
||||
message = "Malformed request body"
|
||||
logger.error(
|
||||
f"/stream - error: {message} - specific error: {str(e)} - traceback: {traceback.format_exc()}",
|
||||
extra={"error": str(e), "traceback": traceback.format_exc()},
|
||||
)
|
||||
return Response(
|
||||
self.error_stream_generate(message),
|
||||
status=400,
|
||||
mimetype="text/event-stream",
|
||||
)
|
||||
except Exception as e:
|
||||
logger.error(
|
||||
f"/stream - error: {str(e)} - traceback: {traceback.format_exc()}",
|
||||
extra={"error": str(e), "traceback": traceback.format_exc()},
|
||||
)
|
||||
return Response(
|
||||
self.error_stream_generate("Unknown error occurred"),
|
||||
status=400,
|
||||
mimetype="text/event-stream",
|
||||
)
|
||||
application/api/answer/services/compression/__init__.py (new file, 20 lines)
@@ -0,0 +1,20 @@
|
||||
"""
|
||||
Compression module for managing conversation context compression.
|
||||
|
||||
"""
|
||||
|
||||
from application.api.answer.services.compression.orchestrator import (
|
||||
CompressionOrchestrator,
|
||||
)
|
||||
from application.api.answer.services.compression.service import CompressionService
|
||||
from application.api.answer.services.compression.types import (
|
||||
CompressionResult,
|
||||
CompressionMetadata,
|
||||
)
|
||||
|
||||
__all__ = [
|
||||
"CompressionOrchestrator",
|
||||
"CompressionService",
|
||||
"CompressionResult",
|
||||
"CompressionMetadata",
|
||||
]
|
||||
application/api/answer/services/compression/message_builder.py (new file, 249 lines)
@@ -0,0 +1,249 @@
|
||||
"""Message reconstruction utilities for compression."""
|
||||
|
||||
import json
|
||||
import logging
|
||||
import uuid
|
||||
from typing import Dict, List, Optional
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class MessageBuilder:
|
||||
"""Builds message arrays from compressed context."""
|
||||
|
||||
@staticmethod
|
||||
def build_from_compressed_context(
|
||||
system_prompt: str,
|
||||
compressed_summary: Optional[str],
|
||||
recent_queries: List[Dict],
|
||||
include_tool_calls: bool = False,
|
||||
context_type: str = "pre_request",
|
||||
) -> List[Dict]:
|
||||
"""
|
||||
Build messages from compressed context.
|
||||
|
||||
Args:
|
||||
system_prompt: Original system prompt
|
||||
compressed_summary: Compressed summary (if any)
|
||||
recent_queries: Recent uncompressed queries
|
||||
include_tool_calls: Whether to include tool calls from history
|
||||
context_type: Type of context ('pre_request' or 'mid_execution')
|
||||
|
||||
Returns:
|
||||
List of message dicts ready for LLM
|
||||
"""
|
||||
# Append compression summary to system prompt if present
|
||||
if compressed_summary:
|
||||
system_prompt = MessageBuilder._append_compression_context(
|
||||
system_prompt, compressed_summary, context_type
|
||||
)
|
||||
|
||||
messages = [{"role": "system", "content": system_prompt}]
|
||||
|
||||
# Add recent history
|
||||
for query in recent_queries:
|
||||
if "prompt" in query and "response" in query:
|
||||
messages.append({"role": "user", "content": query["prompt"]})
|
||||
messages.append({"role": "assistant", "content": query["response"]})
|
||||
|
||||
# Add tool calls from history if present
|
||||
if include_tool_calls and "tool_calls" in query:
|
||||
for tool_call in query["tool_calls"]:
|
||||
call_id = tool_call.get("call_id") or str(uuid.uuid4())
|
||||
args = tool_call.get("arguments")
|
||||
args_str = (
|
||||
json.dumps(args)
|
||||
if isinstance(args, dict)
|
||||
else (args or "{}")
|
||||
)
|
||||
messages.append({
|
||||
"role": "assistant",
|
||||
"content": None,
|
||||
"tool_calls": [{
|
||||
"id": call_id,
|
||||
"type": "function",
|
||||
"function": {
|
||||
"name": tool_call.get("action_name", ""),
|
||||
"arguments": args_str,
|
||||
},
|
||||
}],
|
||||
})
|
||||
result = tool_call.get("result")
|
||||
result_str = (
|
||||
json.dumps(result)
|
||||
if not isinstance(result, str)
|
||||
else (result or "")
|
||||
)
|
||||
messages.append({
|
||||
"role": "tool",
|
||||
"tool_call_id": call_id,
|
||||
"content": result_str,
|
||||
})
|
||||
|
||||
# If no recent queries (everything was compressed), add a continuation user message
|
||||
if len(recent_queries) == 0 and compressed_summary:
|
||||
messages.append({
|
||||
"role": "user",
|
||||
"content": "Please continue with the remaining tasks based on the context above."
|
||||
})
|
||||
logger.info("Added continuation user message to maintain proper turn-taking after full compression")
|
||||
|
||||
return messages
|
||||
|
||||
@staticmethod
|
||||
def _append_compression_context(
|
||||
system_prompt: str, compressed_summary: str, context_type: str = "pre_request"
|
||||
) -> str:
|
||||
"""
|
||||
Append compression context to system prompt.
|
||||
|
||||
Args:
|
||||
system_prompt: Original system prompt
|
||||
compressed_summary: Summary to append
|
||||
context_type: Type of compression context
|
||||
|
||||
Returns:
|
||||
Updated system prompt
|
||||
"""
|
||||
# Remove existing compression context if present
|
||||
if "This session is being continued" in system_prompt or "Context window limit reached" in system_prompt:
|
||||
parts = system_prompt.split("\n\n---\n\n")
|
||||
system_prompt = parts[0]
|
||||
|
||||
# Build appropriate context message based on type
|
||||
if context_type == "mid_execution":
|
||||
context_message = (
|
||||
"\n\n---\n\n"
|
||||
"Context window limit reached during execution. "
|
||||
"Previous conversation has been compressed to fit within limits. "
|
||||
"The conversation is summarized below:\n\n"
|
||||
f"{compressed_summary}"
|
||||
)
|
||||
else: # pre_request
|
||||
context_message = (
|
||||
"\n\n---\n\n"
|
||||
"This session is being continued from a previous conversation that "
|
||||
"has been compressed to fit within context limits. "
|
||||
"The conversation is summarized below:\n\n"
|
||||
f"{compressed_summary}"
|
||||
)
|
||||
|
||||
return system_prompt + context_message
|
||||
|
||||
@staticmethod
|
||||
def rebuild_messages_after_compression(
|
||||
messages: List[Dict],
|
||||
compressed_summary: Optional[str],
|
||||
recent_queries: List[Dict],
|
||||
include_current_execution: bool = False,
|
||||
include_tool_calls: bool = False,
|
||||
) -> Optional[List[Dict]]:
|
||||
"""
|
||||
Rebuild the message list after compression so tool execution can continue.
|
||||
|
||||
Args:
|
||||
messages: Original message list
|
||||
compressed_summary: Compressed summary
|
||||
recent_queries: Recent uncompressed queries
|
||||
include_current_execution: Whether to preserve current execution messages
|
||||
include_tool_calls: Whether to include tool calls from history
|
||||
|
||||
Returns:
|
||||
Rebuilt message list or None if failed
|
||||
"""
|
||||
# Find the system message
|
||||
system_message = next(
|
||||
(msg for msg in messages if msg.get("role") == "system"), None
|
||||
)
|
||||
if not system_message:
|
||||
logger.warning("No system message found in messages list")
|
||||
return None
|
||||
|
||||
# Update system message with compressed summary
|
||||
if compressed_summary:
|
||||
content = system_message.get("content", "")
|
||||
system_message["content"] = MessageBuilder._append_compression_context(
|
||||
content, compressed_summary, "mid_execution"
|
||||
)
|
||||
logger.info(
|
||||
"Appended compression summary to system prompt (truncated): %s",
|
||||
(
|
||||
compressed_summary[:500] + "..."
|
||||
if len(compressed_summary) > 500
|
||||
else compressed_summary
|
||||
),
|
||||
)
|
||||
|
||||
rebuilt_messages = [system_message]
|
||||
|
||||
# Add recent history from compressed context
|
||||
for query in recent_queries:
|
||||
if "prompt" in query and "response" in query:
|
||||
rebuilt_messages.append({"role": "user", "content": query["prompt"]})
|
||||
rebuilt_messages.append(
|
||||
{"role": "assistant", "content": query["response"]}
|
||||
)
|
||||
|
||||
# Add tool calls from history if present
|
||||
if include_tool_calls and "tool_calls" in query:
|
||||
for tool_call in query["tool_calls"]:
|
||||
call_id = tool_call.get("call_id") or str(uuid.uuid4())
|
||||
args = tool_call.get("arguments")
|
||||
args_str = (
|
||||
json.dumps(args)
|
||||
if isinstance(args, dict)
|
||||
else (args or "{}")
|
||||
)
|
||||
rebuilt_messages.append({
|
||||
"role": "assistant",
|
||||
"content": None,
|
||||
"tool_calls": [{
|
||||
"id": call_id,
|
||||
"type": "function",
|
||||
"function": {
|
||||
"name": tool_call.get("action_name", ""),
|
||||
"arguments": args_str,
|
||||
},
|
||||
}],
|
||||
})
|
||||
result = tool_call.get("result")
|
||||
result_str = (
|
||||
json.dumps(result)
|
||||
if not isinstance(result, str)
|
||||
else (result or "")
|
||||
)
|
||||
rebuilt_messages.append({
|
||||
"role": "tool",
|
||||
"tool_call_id": call_id,
|
||||
"content": result_str,
|
||||
})
|
||||
|
||||
# If no recent queries (everything was compressed), add a continuation user message
|
||||
if len(recent_queries) == 0 and compressed_summary:
|
||||
rebuilt_messages.append({
|
||||
"role": "user",
|
||||
"content": "Please continue with the remaining tasks based on the context above."
|
||||
})
|
||||
logger.info("Added continuation user message to maintain proper turn-taking after full compression")
|
||||
|
||||
if include_current_execution:
|
||||
# Preserve any messages that were added during the current execution cycle
|
||||
recent_msg_count = 1 # system message
|
||||
for query in recent_queries:
|
||||
if "prompt" in query and "response" in query:
|
||||
recent_msg_count += 2
|
||||
if "tool_calls" in query:
|
||||
recent_msg_count += len(query["tool_calls"]) * 2
|
||||
|
||||
if len(messages) > recent_msg_count:
|
||||
current_execution_messages = messages[recent_msg_count:]
|
||||
rebuilt_messages.extend(current_execution_messages)
|
||||
logger.info(
|
||||
f"Preserved {len(current_execution_messages)} messages from current execution cycle"
|
||||
)
|
||||
|
||||
logger.info(
|
||||
f"Messages rebuilt: {len(messages)} → {len(rebuilt_messages)} messages. "
|
||||
f"Ready to continue tool execution."
|
||||
)
|
||||
return rebuilt_messages
|
||||
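A minimal sketch of how the builder above might be invoked; the query values are invented for illustration, and the resulting list follows the system/user/assistant ordering described in the docstring:

    recent = [{"prompt": "What changed?", "response": "Two files were edited."}]
    messages = MessageBuilder.build_from_compressed_context(
        system_prompt="You are a helpful assistant.",
        compressed_summary="Earlier turns: the user reviewed the repo layout.",
        recent_queries=recent,
        include_tool_calls=False,
        context_type="pre_request",
    )
    # messages[0] is the system prompt with the summary appended after "---",
    # followed by one user/assistant pair taken from `recent`.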
application/api/answer/services/compression/orchestrator.py (new file, 233 lines)
@@ -0,0 +1,233 @@
|
||||
"""High-level compression orchestration."""
|
||||
|
||||
import logging
|
||||
from typing import Any, Dict, Optional
|
||||
|
||||
from application.api.answer.services.compression.service import CompressionService
|
||||
from application.api.answer.services.compression.threshold_checker import (
|
||||
CompressionThresholdChecker,
|
||||
)
|
||||
from application.api.answer.services.compression.types import CompressionResult
|
||||
from application.api.answer.services.conversation_service import ConversationService
|
||||
from application.core.model_utils import (
|
||||
get_api_key_for_provider,
|
||||
get_provider_from_model_id,
|
||||
)
|
||||
from application.core.settings import settings
|
||||
from application.llm.llm_creator import LLMCreator
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class CompressionOrchestrator:
|
||||
"""
|
||||
Facade for compression operations.
|
||||
|
||||
Coordinates between all compression components and provides
|
||||
a simple interface for callers.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
conversation_service: ConversationService,
|
||||
threshold_checker: Optional[CompressionThresholdChecker] = None,
|
||||
):
|
||||
"""
|
||||
Initialize orchestrator.
|
||||
|
||||
Args:
|
||||
conversation_service: Service for DB operations
|
||||
threshold_checker: Custom threshold checker (optional)
|
||||
"""
|
||||
self.conversation_service = conversation_service
|
||||
self.threshold_checker = threshold_checker or CompressionThresholdChecker()
|
||||
|
||||
def compress_if_needed(
|
||||
self,
|
||||
conversation_id: str,
|
||||
user_id: str,
|
||||
model_id: str,
|
||||
decoded_token: Dict[str, Any],
|
||||
current_query_tokens: int = 500,
|
||||
) -> CompressionResult:
|
||||
"""
|
||||
Check if compression is needed and perform it if so.
|
||||
|
||||
This is the main entry point for compression operations.
|
||||
|
||||
Args:
|
||||
conversation_id: Conversation ID
|
||||
user_id: User ID
|
||||
model_id: Model being used for conversation
|
||||
decoded_token: User's decoded JWT token
|
||||
current_query_tokens: Estimated tokens for current query
|
||||
|
||||
Returns:
|
||||
CompressionResult with summary and recent queries
|
||||
"""
|
||||
try:
|
||||
# Load conversation
|
||||
conversation = self.conversation_service.get_conversation(
|
||||
conversation_id, user_id
|
||||
)
|
||||
|
||||
if not conversation:
|
||||
logger.warning(
|
||||
f"Conversation {conversation_id} not found for user {user_id}"
|
||||
)
|
||||
return CompressionResult.failure("Conversation not found")
|
||||
|
||||
# Check if compression is needed
|
||||
if not self.threshold_checker.should_compress(
|
||||
conversation, model_id, current_query_tokens
|
||||
):
|
||||
# No compression needed, return full history
|
||||
queries = conversation.get("queries", [])
|
||||
return CompressionResult.success_no_compression(queries)
|
||||
|
||||
# Perform compression
|
||||
return self._perform_compression(
|
||||
conversation_id, conversation, model_id, decoded_token
|
||||
)
|
||||
|
||||
except Exception as e:
|
||||
logger.error(
|
||||
f"Error in compress_if_needed: {str(e)}", exc_info=True
|
||||
)
|
||||
return CompressionResult.failure(str(e))
|
||||
|
||||
def _perform_compression(
|
||||
self,
|
||||
conversation_id: str,
|
||||
conversation: Dict[str, Any],
|
||||
model_id: str,
|
||||
decoded_token: Dict[str, Any],
|
||||
) -> CompressionResult:
|
||||
"""
|
||||
Perform the actual compression operation.
|
||||
|
||||
Args:
|
||||
conversation_id: Conversation ID
|
||||
conversation: Conversation document
|
||||
model_id: Model ID for conversation
|
||||
decoded_token: User token
|
||||
|
||||
Returns:
|
||||
CompressionResult
|
||||
"""
|
||||
try:
|
||||
# Determine which model to use for compression
|
||||
compression_model = (
|
||||
settings.COMPRESSION_MODEL_OVERRIDE
|
||||
if settings.COMPRESSION_MODEL_OVERRIDE
|
||||
else model_id
|
||||
)
|
||||
|
||||
# Get provider and API key for compression model
|
||||
provider = get_provider_from_model_id(compression_model)
|
||||
api_key = get_api_key_for_provider(provider)
|
||||
|
||||
# Create compression LLM
|
||||
compression_llm = LLMCreator.create_llm(
|
||||
provider,
|
||||
api_key=api_key,
|
||||
user_api_key=None,
|
||||
decoded_token=decoded_token,
|
||||
model_id=compression_model,
|
||||
agent_id=conversation.get("agent_id"),
|
||||
)
|
||||
|
||||
# Create compression service with DB update capability
|
||||
compression_service = CompressionService(
|
||||
llm=compression_llm,
|
||||
model_id=compression_model,
|
||||
conversation_service=self.conversation_service,
|
||||
)
|
||||
|
||||
# Compress all queries up to the latest
|
||||
queries_count = len(conversation.get("queries", []))
|
||||
compress_up_to = queries_count - 1
|
||||
|
||||
if compress_up_to < 0:
|
||||
logger.warning("No queries to compress")
|
||||
return CompressionResult.success_no_compression([])
|
||||
|
||||
logger.info(
|
||||
f"Initiating compression for conversation {conversation_id}: "
|
||||
f"compressing all {queries_count} queries (0-{compress_up_to})"
|
||||
)
|
||||
|
||||
# Perform compression and save to DB
|
||||
metadata = compression_service.compress_and_save(
|
||||
conversation_id, conversation, compress_up_to
|
||||
)
|
||||
|
||||
logger.info(
|
||||
f"Compression successful - ratio: {metadata.compression_ratio:.1f}x, "
|
||||
f"saved {metadata.original_token_count - metadata.compressed_token_count} tokens"
|
||||
)
|
||||
|
||||
# Reload conversation with updated metadata
|
||||
conversation = self.conversation_service.get_conversation(
|
||||
conversation_id, user_id=decoded_token.get("sub")
|
||||
)
|
||||
|
||||
# Get compressed context
|
||||
compressed_summary, recent_queries = (
|
||||
compression_service.get_compressed_context(conversation)
|
||||
)
|
||||
|
||||
return CompressionResult.success_with_compression(
|
||||
compressed_summary, recent_queries, metadata
|
||||
)
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error performing compression: {str(e)}", exc_info=True)
|
||||
return CompressionResult.failure(str(e))
|
||||
|
||||
def compress_mid_execution(
|
||||
self,
|
||||
conversation_id: str,
|
||||
user_id: str,
|
||||
model_id: str,
|
||||
decoded_token: Dict[str, Any],
|
||||
current_conversation: Optional[Dict[str, Any]] = None,
|
||||
) -> CompressionResult:
|
||||
"""
|
||||
Perform compression during tool execution.
|
||||
|
||||
Args:
|
||||
conversation_id: Conversation ID
|
||||
user_id: User ID
|
||||
model_id: Model ID
|
||||
decoded_token: User token
|
||||
current_conversation: Pre-loaded conversation (optional)
|
||||
|
||||
Returns:
|
||||
CompressionResult
|
||||
"""
|
||||
try:
|
||||
# Load conversation if not provided
|
||||
if current_conversation:
|
||||
conversation = current_conversation
|
||||
else:
|
||||
conversation = self.conversation_service.get_conversation(
|
||||
conversation_id, user_id
|
||||
)
|
||||
|
||||
if not conversation:
|
||||
logger.warning(
|
||||
f"Could not load conversation {conversation_id} for mid-execution compression"
|
||||
)
|
||||
return CompressionResult.failure("Conversation not found")
|
||||
|
||||
# Perform compression
|
||||
return self._perform_compression(
|
||||
conversation_id, conversation, model_id, decoded_token
|
||||
)
|
||||
|
||||
except Exception as e:
|
||||
logger.error(
|
||||
f"Error in mid-execution compression: {str(e)}", exc_info=True
|
||||
)
|
||||
return CompressionResult.failure(str(e))
|
||||
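A hedged usage sketch of the orchestrator's main entry point; the IDs, model name, and token estimate below are placeholders rather than values from this diff:

    orchestrator = CompressionOrchestrator(ConversationService())
    result = orchestrator.compress_if_needed(
        conversation_id="665f0c...",            # placeholder conversation ID
        user_id="user-123",                     # placeholder user
        model_id="gpt-4o",                      # placeholder model
        decoded_token={"sub": "user-123"},      # minimal JWT payload
        current_query_tokens=500,
    )
    if not result.success:
        history = None, []                      # fall back; inspect result.error
    elif result.compression_performed:
        history = result.compressed_summary, result.recent_queries
    else:
        history = None, result.recent_queries   # full history, no summary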
application/api/answer/services/compression/prompt_builder.py (new file, 149 lines)
@@ -0,0 +1,149 @@
|
||||
"""Compression prompt building logic."""
|
||||
|
||||
import logging
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict, List, Optional
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class CompressionPromptBuilder:
|
||||
"""Builds prompts for LLM compression calls."""
|
||||
|
||||
def __init__(self, version: str = "v1.0"):
|
||||
"""
|
||||
Initialize prompt builder.
|
||||
|
||||
Args:
|
||||
version: Prompt template version to use
|
||||
"""
|
||||
self.version = version
|
||||
self.system_prompt = self._load_prompt(version)
|
||||
|
||||
def _load_prompt(self, version: str) -> str:
|
||||
"""
|
||||
Load prompt template from file.
|
||||
|
||||
Args:
|
||||
version: Version string (e.g., 'v1.0')
|
||||
|
||||
Returns:
|
||||
Prompt template content
|
||||
|
||||
Raises:
|
||||
FileNotFoundError: If prompt template file doesn't exist
|
||||
"""
|
||||
current_dir = Path(__file__).resolve().parents[4]
|
||||
prompt_path = current_dir / "prompts" / "compression" / f"{version}.txt"
|
||||
|
||||
try:
|
||||
with open(prompt_path, "r") as f:
|
||||
return f.read()
|
||||
except FileNotFoundError:
|
||||
logger.error(f"Compression prompt template not found: {prompt_path}")
|
||||
raise FileNotFoundError(
|
||||
f"Compression prompt template '{version}' not found at {prompt_path}. "
|
||||
f"Please ensure the template file exists."
|
||||
)
|
||||
|
||||
def build_prompt(
|
||||
self,
|
||||
queries: List[Dict[str, Any]],
|
||||
existing_compressions: Optional[List[Dict[str, Any]]] = None,
|
||||
) -> List[Dict[str, str]]:
|
||||
"""
|
||||
Build messages for compression LLM call.
|
||||
|
||||
Args:
|
||||
queries: List of query objects to compress
|
||||
existing_compressions: List of previous compression points
|
||||
|
||||
Returns:
|
||||
List of message dicts for LLM
|
||||
"""
|
||||
# Build conversation text
|
||||
conversation_text = self._format_conversation(queries)
|
||||
|
||||
# Add existing compression context if present
|
||||
existing_compression_context = ""
|
||||
if existing_compressions and len(existing_compressions) > 0:
|
||||
existing_compression_context = (
|
||||
"\n\nIMPORTANT: This conversation has been compressed before. "
|
||||
"Previous compression summaries:\n\n"
|
||||
)
|
||||
for i, comp in enumerate(existing_compressions):
|
||||
existing_compression_context += (
|
||||
f"--- Compression {i + 1} (up to message {comp.get('query_index', 'unknown')}) ---\n"
|
||||
f"{comp.get('compressed_summary', '')}\n\n"
|
||||
)
|
||||
existing_compression_context += (
|
||||
"Your task is to create a NEW summary that incorporates the context from "
|
||||
"previous compressions AND the new messages below. The final summary should "
|
||||
"be comprehensive and include all important information from both previous "
|
||||
"compressions and new messages.\n\n"
|
||||
)
|
||||
|
||||
user_prompt = (
|
||||
f"{existing_compression_context}"
|
||||
f"Here is the conversation to summarize:\n\n"
|
||||
f"{conversation_text}"
|
||||
)
|
||||
|
||||
messages = [
|
||||
{"role": "system", "content": self.system_prompt},
|
||||
{"role": "user", "content": user_prompt},
|
||||
]
|
||||
|
||||
return messages
|
||||
|
||||
def _format_conversation(self, queries: List[Dict[str, Any]]) -> str:
|
||||
"""
|
||||
Format conversation queries into readable text for compression.
|
||||
|
||||
Args:
|
||||
queries: List of query objects
|
||||
|
||||
Returns:
|
||||
Formatted conversation text
|
||||
"""
|
||||
conversation_lines = []
|
||||
|
||||
for i, query in enumerate(queries):
|
||||
conversation_lines.append(f"--- Message {i + 1} ---")
|
||||
conversation_lines.append(f"User: {query.get('prompt', '')}")
|
||||
|
||||
# Add tool calls if present
|
||||
tool_calls = query.get("tool_calls", [])
|
||||
if tool_calls:
|
||||
conversation_lines.append("\nTool Calls:")
|
||||
for tc in tool_calls:
|
||||
tool_name = tc.get("tool_name", "unknown")
|
||||
action_name = tc.get("action_name", "unknown")
|
||||
arguments = tc.get("arguments", {})
|
||||
result = tc.get("result", "")
|
||||
if result is None:
|
||||
result = ""
|
||||
status = tc.get("status", "unknown")
|
||||
|
||||
# Include full tool result for complete compression context
|
||||
conversation_lines.append(
|
||||
f" - {tool_name}.{action_name}({arguments}) "
|
||||
f"[{status}] → {result}"
|
||||
)
|
||||
|
||||
# Add agent thought if present
|
||||
thought = query.get("thought", "")
|
||||
if thought:
|
||||
conversation_lines.append(f"\nAgent Thought: {thought}")
|
||||
|
||||
# Add assistant response
|
||||
conversation_lines.append(f"\nAssistant: {query.get('response', '')}")
|
||||
|
||||
# Add sources if present
|
||||
sources = query.get("sources", [])
|
||||
if sources:
|
||||
conversation_lines.append(f"\nSources Used: {len(sources)} documents")
|
||||
|
||||
conversation_lines.append("") # Empty line between messages
|
||||
|
||||
return "\n".join(conversation_lines)
|
||||
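A small sketch of how the prompt builder might be driven; it assumes a v1.0 template file exists under prompts/compression/, and the single query is made up:

    builder = CompressionPromptBuilder(version="v1.0")
    messages = builder.build_prompt(
        queries=[{"prompt": "Summarise the repo", "response": "It is a RAG app."}],
        existing_compressions=None,
    )
    # messages[0]: the compression system prompt loaded from prompts/compression/v1.0.txt
    # messages[1]: the "--- Message 1 ---" transcript built by _format_conversation()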
application/api/answer/services/compression/service.py (new file, 306 lines)
@@ -0,0 +1,306 @@
|
||||
"""Core compression service with simplified responsibilities."""
|
||||
|
||||
import logging
|
||||
import re
|
||||
from datetime import datetime, timezone
|
||||
from typing import Any, Dict, List, Optional
|
||||
|
||||
from application.api.answer.services.compression.prompt_builder import (
|
||||
CompressionPromptBuilder,
|
||||
)
|
||||
from application.api.answer.services.compression.token_counter import TokenCounter
|
||||
from application.api.answer.services.compression.types import (
|
||||
CompressionMetadata,
|
||||
)
|
||||
from application.core.settings import settings
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class CompressionService:
|
||||
"""
|
||||
Service for compressing conversation history.
|
||||
|
||||
Handles DB updates.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
llm,
|
||||
model_id: str,
|
||||
conversation_service=None,
|
||||
prompt_builder: Optional[CompressionPromptBuilder] = None,
|
||||
):
|
||||
"""
|
||||
Initialize compression service.
|
||||
|
||||
Args:
|
||||
llm: LLM instance to use for compression
|
||||
model_id: Model ID for compression
|
||||
conversation_service: Service for DB operations (optional, for DB updates)
|
||||
prompt_builder: Custom prompt builder (optional)
|
||||
"""
|
||||
self.llm = llm
|
||||
self.model_id = model_id
|
||||
self.conversation_service = conversation_service
|
||||
self.prompt_builder = prompt_builder or CompressionPromptBuilder(
|
||||
version=settings.COMPRESSION_PROMPT_VERSION
|
||||
)
|
||||
|
||||
def compress_conversation(
|
||||
self,
|
||||
conversation: Dict[str, Any],
|
||||
compress_up_to_index: int,
|
||||
) -> CompressionMetadata:
|
||||
"""
|
||||
Compress conversation history up to specified index.
|
||||
|
||||
Args:
|
||||
conversation: Full conversation document
|
||||
compress_up_to_index: Last query index to include in compression
|
||||
|
||||
Returns:
|
||||
CompressionMetadata with compression details
|
||||
|
||||
Raises:
|
||||
ValueError: If compress_up_to_index is invalid
|
||||
"""
|
||||
try:
|
||||
queries = conversation.get("queries", [])
|
||||
|
||||
if compress_up_to_index < 0 or compress_up_to_index >= len(queries):
|
||||
raise ValueError(
|
||||
f"Invalid compress_up_to_index: {compress_up_to_index} "
|
||||
f"(conversation has {len(queries)} queries)"
|
||||
)
|
||||
|
||||
# Get queries to compress
|
||||
queries_to_compress = queries[: compress_up_to_index + 1]
|
||||
|
||||
# Check if there are existing compressions
|
||||
existing_compressions = conversation.get("compression_metadata", {}).get(
|
||||
"compression_points", []
|
||||
)
|
||||
|
||||
if existing_compressions:
|
||||
logger.info(
|
||||
f"Found {len(existing_compressions)} previous compression(s) - "
|
||||
f"will incorporate into new summary"
|
||||
)
|
||||
|
||||
# Calculate original token count
|
||||
original_tokens = TokenCounter.count_query_tokens(queries_to_compress)
|
||||
|
||||
# Log tool call stats
|
||||
self._log_tool_call_stats(queries_to_compress)
|
||||
|
||||
# Build compression prompt
|
||||
messages = self.prompt_builder.build_prompt(
|
||||
queries_to_compress, existing_compressions
|
||||
)
|
||||
|
||||
# Call LLM to generate compression
|
||||
logger.info(
|
||||
f"Starting compression: {len(queries_to_compress)} queries "
|
||||
f"(messages 0-{compress_up_to_index}, {original_tokens} tokens) "
|
||||
f"using model {self.model_id}"
|
||||
)
|
||||
|
||||
response = self.llm.gen(
|
||||
model=self.model_id, messages=messages, max_tokens=4000
|
||||
)
|
||||
|
||||
# Extract summary from response
|
||||
compressed_summary = self._extract_summary(response)
|
||||
|
||||
# Calculate compressed token count
|
||||
compressed_tokens = TokenCounter.count_message_tokens(
|
||||
[{"content": compressed_summary}]
|
||||
)
|
||||
|
||||
# Calculate compression ratio
|
||||
compression_ratio = (
|
||||
original_tokens / compressed_tokens if compressed_tokens > 0 else 0
|
||||
)
|
||||
|
||||
logger.info(
|
||||
f"Compression complete: {original_tokens} → {compressed_tokens} tokens "
|
||||
f"({compression_ratio:.1f}x compression)"
|
||||
)
|
||||
|
||||
# Build compression metadata
|
||||
compression_metadata = CompressionMetadata(
|
||||
timestamp=datetime.now(timezone.utc),
|
||||
query_index=compress_up_to_index,
|
||||
compressed_summary=compressed_summary,
|
||||
original_token_count=original_tokens,
|
||||
compressed_token_count=compressed_tokens,
|
||||
compression_ratio=compression_ratio,
|
||||
model_used=self.model_id,
|
||||
compression_prompt_version=self.prompt_builder.version,
|
||||
)
|
||||
|
||||
return compression_metadata
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error compressing conversation: {str(e)}", exc_info=True)
|
||||
raise
|
||||
|
||||
def compress_and_save(
|
||||
self,
|
||||
conversation_id: str,
|
||||
conversation: Dict[str, Any],
|
||||
compress_up_to_index: int,
|
||||
) -> CompressionMetadata:
|
||||
"""
|
||||
Compress conversation and save to database.
|
||||
|
||||
Args:
|
||||
conversation_id: Conversation ID
|
||||
conversation: Full conversation document
|
||||
compress_up_to_index: Last query index to include
|
||||
|
||||
Returns:
|
||||
CompressionMetadata
|
||||
|
||||
Raises:
|
||||
ValueError: If conversation_service not provided or invalid index
|
||||
"""
|
||||
if not self.conversation_service:
|
||||
raise ValueError(
|
||||
"conversation_service required for compress_and_save operation"
|
||||
)
|
||||
|
||||
# Perform compression
|
||||
metadata = self.compress_conversation(conversation, compress_up_to_index)
|
||||
|
||||
# Save to database
|
||||
self.conversation_service.update_compression_metadata(
|
||||
conversation_id, metadata.to_dict()
|
||||
)
|
||||
|
||||
logger.info(f"Compression metadata saved to database for {conversation_id}")
|
||||
|
||||
return metadata
|
||||
|
||||
def get_compressed_context(
|
||||
self, conversation: Dict[str, Any]
|
||||
) -> tuple[Optional[str], List[Dict[str, Any]]]:
|
||||
"""
|
||||
Get compressed summary + recent uncompressed messages.
|
||||
|
||||
Args:
|
||||
conversation: Full conversation document
|
||||
|
||||
Returns:
|
||||
(compressed_summary, recent_messages)
|
||||
"""
|
||||
try:
|
||||
compression_metadata = conversation.get("compression_metadata", {})
|
||||
|
||||
if not compression_metadata.get("is_compressed"):
|
||||
logger.debug("No compression metadata found - using full history")
|
||||
queries = conversation.get("queries", [])
|
||||
if queries is None:
|
||||
logger.error("Conversation queries is None - returning empty list")
|
||||
return None, []
|
||||
return None, queries
|
||||
|
||||
compression_points = compression_metadata.get("compression_points", [])
|
||||
|
||||
if not compression_points:
|
||||
logger.debug("No compression points found - using full history")
|
||||
queries = conversation.get("queries", [])
|
||||
if queries is None:
|
||||
logger.error("Conversation queries is None - returning empty list")
|
||||
return None, []
|
||||
return None, queries
|
||||
|
||||
# Get the most recent compression point
|
||||
latest_compression = compression_points[-1]
|
||||
compressed_summary = latest_compression.get("compressed_summary")
|
||||
last_compressed_index = latest_compression.get("query_index")
|
||||
compressed_tokens = latest_compression.get("compressed_token_count", 0)
|
||||
original_tokens = latest_compression.get("original_token_count", 0)
|
||||
|
||||
# Get only messages after compression point
|
||||
queries = conversation.get("queries", [])
|
||||
total_queries = len(queries)
|
||||
recent_queries = queries[last_compressed_index + 1 :]
|
||||
|
||||
logger.info(
|
||||
f"Using compressed context: summary ({compressed_tokens} tokens, "
|
||||
f"compressed from {original_tokens}) + {len(recent_queries)} recent messages "
|
||||
f"(messages {last_compressed_index + 1}-{total_queries - 1})"
|
||||
)
|
||||
|
||||
return compressed_summary, recent_queries
|
||||
|
||||
except Exception as e:
|
||||
logger.error(
|
||||
f"Error getting compressed context: {str(e)}", exc_info=True
|
||||
)
|
||||
queries = conversation.get("queries", [])
|
||||
if queries is None:
|
||||
return None, []
|
||||
return None, queries
|
||||
|
||||
def _extract_summary(self, llm_response: str) -> str:
|
||||
"""
|
||||
Extract clean summary from LLM response.
|
||||
|
||||
Args:
|
||||
llm_response: Raw LLM response
|
||||
|
||||
Returns:
|
||||
Cleaned summary text
|
||||
"""
|
||||
try:
|
||||
# Try to extract content within <summary> tags
|
||||
summary_match = re.search(
|
||||
r"<summary>(.*?)</summary>", llm_response, re.DOTALL
|
||||
)
|
||||
|
||||
if summary_match:
|
||||
summary = summary_match.group(1).strip()
|
||||
else:
|
||||
# If no summary tags, remove analysis tags and use the rest
|
||||
summary = re.sub(
|
||||
r"<analysis>.*?</analysis>", "", llm_response, flags=re.DOTALL
|
||||
).strip()
|
||||
|
||||
return summary
|
||||
|
||||
except Exception as e:
|
||||
logger.warning(f"Error extracting summary: {str(e)}, using full response")
|
||||
return llm_response
|
||||
|
||||
def _log_tool_call_stats(self, queries: List[Dict[str, Any]]) -> None:
|
||||
"""Log statistics about tool calls in queries."""
|
||||
total_tool_calls = 0
|
||||
total_tool_result_chars = 0
|
||||
tool_call_breakdown = {}
|
||||
|
||||
for q in queries:
|
||||
for tc in q.get("tool_calls", []):
|
||||
total_tool_calls += 1
|
||||
tool_name = tc.get("tool_name", "unknown")
|
||||
action_name = tc.get("action_name", "unknown")
|
||||
key = f"{tool_name}.{action_name}"
|
||||
tool_call_breakdown[key] = tool_call_breakdown.get(key, 0) + 1
|
||||
|
||||
# Track total tool result size
|
||||
result = tc.get("result", "")
|
||||
if result:
|
||||
total_tool_result_chars += len(str(result))
|
||||
|
||||
if total_tool_calls > 0:
|
||||
tool_breakdown_str = ", ".join(
|
||||
f"{tool}({count})"
|
||||
for tool, count in sorted(tool_call_breakdown.items())
|
||||
)
|
||||
tool_result_kb = total_tool_result_chars / 1024
|
||||
logger.info(
|
||||
f"Tool call breakdown: {tool_breakdown_str} "
|
||||
f"(total result size: {tool_result_kb:.1f} KB, {total_tool_result_chars:,} chars)"
|
||||
)
|
||||
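A minimal, hedged sketch of the compress-and-save flow; `llm`, `conversation_id`, and `conversation` are assumed to exist already, and the model name is illustrative:

    service = CompressionService(
        llm=llm,                                  # any LLMCreator-produced client
        model_id="gpt-4o-mini",                   # illustrative
        conversation_service=ConversationService(),
    )
    last_index = len(conversation.get("queries", [])) - 1
    metadata = service.compress_and_save(conversation_id, conversation, last_index)
    # Reload the conversation before reading the compressed context back,
    # so the new compression point is visible.
    summary, recent = service.get_compressed_context(conversation)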
application/api/answer/services/compression/threshold_checker.py (new file, 103 lines)
@@ -0,0 +1,103 @@
"""Compression threshold checking logic."""

import logging
from typing import Any, Dict

from application.core.model_utils import get_token_limit
from application.core.settings import settings
from application.api.answer.services.compression.token_counter import TokenCounter

logger = logging.getLogger(__name__)


class CompressionThresholdChecker:
    """Determines if compression is needed based on token thresholds."""

    def __init__(self, threshold_percentage: float = None):
        """
        Initialize threshold checker.

        Args:
            threshold_percentage: Percentage of context to use as threshold
                (defaults to settings.COMPRESSION_THRESHOLD_PERCENTAGE)
        """
        self.threshold_percentage = (
            threshold_percentage or settings.COMPRESSION_THRESHOLD_PERCENTAGE
        )

    def should_compress(
        self,
        conversation: Dict[str, Any],
        model_id: str,
        current_query_tokens: int = 500,
    ) -> bool:
        """
        Determine if compression is needed.

        Args:
            conversation: Full conversation document
            model_id: Target model for this request
            current_query_tokens: Estimated tokens for current query

        Returns:
            True if tokens >= threshold% of context window
        """
        try:
            # Calculate total tokens in conversation
            total_tokens = TokenCounter.count_conversation_tokens(conversation)
            total_tokens += current_query_tokens

            # Get context window limit for model
            context_limit = get_token_limit(model_id)

            # Calculate threshold
            threshold = int(context_limit * self.threshold_percentage)

            compression_needed = total_tokens >= threshold
            percentage_used = (total_tokens / context_limit) * 100

            if compression_needed:
                logger.warning(
                    f"COMPRESSION TRIGGERED: {total_tokens} tokens / {context_limit} limit "
                    f"({percentage_used:.1f}% used, threshold: {self.threshold_percentage * 100:.0f}%)"
                )
            else:
                logger.info(
                    f"Compression check: {total_tokens}/{context_limit} tokens "
                    f"({percentage_used:.1f}% used, threshold: {self.threshold_percentage * 100:.0f}%) - No compression needed"
                )

            return compression_needed

        except Exception as e:
            logger.error(f"Error checking compression need: {str(e)}", exc_info=True)
            return False

    def check_message_tokens(self, messages: list, model_id: str) -> bool:
        """
        Check if message list exceeds threshold.

        Args:
            messages: List of message dicts
            model_id: Target model

        Returns:
            True if at or above threshold
        """
        try:
            current_tokens = TokenCounter.count_message_tokens(messages)
            context_limit = get_token_limit(model_id)
            threshold = int(context_limit * self.threshold_percentage)

            if current_tokens >= threshold:
                logger.warning(
                    f"Message context limit approaching: {current_tokens}/{context_limit} tokens "
                    f"({(current_tokens/context_limit)*100:.1f}%)"
                )
                return True

            return False

        except Exception as e:
            logger.error(f"Error checking message tokens: {str(e)}", exc_info=True)
            return False
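To make the threshold arithmetic concrete, a worked example with illustrative numbers (in practice the percentage comes from settings.COMPRESSION_THRESHOLD_PERCENTAGE and the limit from get_token_limit):

    checker = CompressionThresholdChecker(threshold_percentage=0.8)  # illustrative value
    # For a hypothetical 128,000-token context window:
    #   threshold = int(128_000 * 0.8) = 102_400
    # A conversation already holding 101,900 tokens plus the default
    # 500-token estimate for the current query reaches exactly 102,400,
    # so should_compress(...) returns True.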
application/api/answer/services/compression/token_counter.py (new file, 103 lines)
@@ -0,0 +1,103 @@
"""Token counting utilities for compression."""

import logging
from typing import Any, Dict, List

from application.utils import num_tokens_from_string
from application.core.settings import settings

logger = logging.getLogger(__name__)


class TokenCounter:
    """Centralized token counting for conversations and messages."""

    @staticmethod
    def count_message_tokens(messages: List[Dict]) -> int:
        """
        Calculate total tokens in a list of messages.

        Args:
            messages: List of message dicts with 'content' field

        Returns:
            Total token count
        """
        total_tokens = 0
        for message in messages:
            content = message.get("content", "")
            if isinstance(content, str):
                total_tokens += num_tokens_from_string(content)
            elif isinstance(content, list):
                # Handle structured content (tool calls, etc.)
                for item in content:
                    if isinstance(item, dict):
                        total_tokens += num_tokens_from_string(str(item))
        return total_tokens

    @staticmethod
    def count_query_tokens(
        queries: List[Dict[str, Any]], include_tool_calls: bool = True
    ) -> int:
        """
        Count tokens across multiple query objects.

        Args:
            queries: List of query objects from conversation
            include_tool_calls: Whether to count tool call tokens

        Returns:
            Total token count
        """
        total_tokens = 0

        for query in queries:
            # Count prompt and response tokens
            if "prompt" in query:
                total_tokens += num_tokens_from_string(query["prompt"])
            if "response" in query:
                total_tokens += num_tokens_from_string(query["response"])
            if "thought" in query:
                total_tokens += num_tokens_from_string(query.get("thought", ""))

            # Count tool call tokens
            if include_tool_calls and "tool_calls" in query:
                for tool_call in query["tool_calls"]:
                    tool_call_string = (
                        f"Tool: {tool_call.get('tool_name')} | "
                        f"Action: {tool_call.get('action_name')} | "
                        f"Args: {tool_call.get('arguments')} | "
                        f"Response: {tool_call.get('result')}"
                    )
                    total_tokens += num_tokens_from_string(tool_call_string)

        return total_tokens

    @staticmethod
    def count_conversation_tokens(
        conversation: Dict[str, Any], include_system_prompt: bool = False
    ) -> int:
        """
        Calculate total tokens in a conversation.

        Args:
            conversation: Conversation document
            include_system_prompt: Whether to include system prompt in count

        Returns:
            Total token count
        """
        try:
            queries = conversation.get("queries", [])
            total_tokens = TokenCounter.count_query_tokens(queries)

            # Add system prompt tokens if requested
            if include_system_prompt:
                # Rough estimate for system prompt
                total_tokens += settings.RESERVED_TOKENS.get("system_prompt", 500)

            return total_tokens

        except Exception as e:
            logger.error(f"Error calculating conversation tokens: {str(e)}")
            return 0
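A short sketch of counting one query with made-up content; the tool-call text is folded into the count because include_tool_calls defaults to True:

    queries = [{
        "prompt": "List the open PRs",
        "response": "There are three open PRs.",
        "tool_calls": [{
            "tool_name": "github",              # illustrative tool
            "action_name": "list_prs",
            "arguments": {"state": "open"},
            "result": "[...]",
        }],
    }]
    total = TokenCounter.count_query_tokens(queries)  # prompt + response + tool call string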
application/api/answer/services/compression/types.py (new file, 83 lines)
@@ -0,0 +1,83 @@
"""Type definitions for compression module."""

from dataclasses import dataclass, field
from datetime import datetime
from typing import Any, Dict, List, Optional


@dataclass
class CompressionMetadata:
    """Metadata about a compression operation."""

    timestamp: datetime
    query_index: int
    compressed_summary: str
    original_token_count: int
    compressed_token_count: int
    compression_ratio: float
    model_used: str
    compression_prompt_version: str

    def to_dict(self) -> Dict[str, Any]:
        """Convert to dictionary for DB storage."""
        return {
            "timestamp": self.timestamp,
            "query_index": self.query_index,
            "compressed_summary": self.compressed_summary,
            "original_token_count": self.original_token_count,
            "compressed_token_count": self.compressed_token_count,
            "compression_ratio": self.compression_ratio,
            "model_used": self.model_used,
            "compression_prompt_version": self.compression_prompt_version,
        }


@dataclass
class CompressionResult:
    """Result of a compression operation."""

    success: bool
    compressed_summary: Optional[str] = None
    recent_queries: List[Dict[str, Any]] = field(default_factory=list)
    metadata: Optional[CompressionMetadata] = None
    error: Optional[str] = None
    compression_performed: bool = False

    @classmethod
    def success_with_compression(
        cls, summary: str, queries: List[Dict], metadata: CompressionMetadata
    ) -> "CompressionResult":
        """Create a successful result with compression."""
        return cls(
            success=True,
            compressed_summary=summary,
            recent_queries=queries,
            metadata=metadata,
            compression_performed=True,
        )

    @classmethod
    def success_no_compression(cls, queries: List[Dict]) -> "CompressionResult":
        """Create a successful result without compression needed."""
        return cls(
            success=True,
            recent_queries=queries,
            compression_performed=False,
        )

    @classmethod
    def failure(cls, error: str) -> "CompressionResult":
        """Create a failure result."""
        return cls(success=False, error=error, compression_performed=False)

    def as_history(self) -> List[Dict[str, str]]:
        """
        Convert recent queries to history format.

        Returns:
            List of prompt/response dicts
        """
        return [
            {"prompt": q["prompt"], "response": q["response"]}
            for q in self.recent_queries
        ]
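A couple of hedged construction examples for the result type defined above (the query content is invented):

    ok = CompressionResult.success_no_compression(
        queries=[{"prompt": "hi", "response": "hello"}]
    )
    failed = CompressionResult.failure("Conversation not found")
    assert ok.success and not ok.compression_performed
    assert not failed.success and failed.error == "Conversation not found"
    history = ok.as_history()  # [{"prompt": "hi", "response": "hello"}]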
application/api/answer/services/continuation_service.py (new file, 141 lines)
@@ -0,0 +1,141 @@
|
||||
"""Service for saving and restoring tool-call continuation state.
|
||||
|
||||
When a stream pauses (tool needs approval or client-side execution),
|
||||
the full execution state is persisted to MongoDB so the client can
|
||||
resume later by sending tool_actions.
|
||||
"""
|
||||
|
||||
import datetime
|
||||
import logging
|
||||
from typing import Any, Dict, List, Optional
|
||||
|
||||
from bson import ObjectId
|
||||
|
||||
from application.core.mongo_db import MongoDB
|
||||
from application.core.settings import settings
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# TTL for pending states — auto-cleaned after this period
|
||||
PENDING_STATE_TTL_SECONDS = 30 * 60 # 30 minutes
|
||||
|
||||
|
||||
def _make_serializable(obj: Any) -> Any:
|
||||
"""Recursively convert MongoDB ObjectIds and other non-JSON types."""
|
||||
if isinstance(obj, ObjectId):
|
||||
return str(obj)
|
||||
if isinstance(obj, dict):
|
||||
return {str(k): _make_serializable(v) for k, v in obj.items()}
|
||||
if isinstance(obj, list):
|
||||
return [_make_serializable(v) for v in obj]
|
||||
if isinstance(obj, bytes):
|
||||
return obj.decode("utf-8", errors="replace")
|
||||
return obj
|
||||
|
||||
|
||||
class ContinuationService:
|
||||
"""Manages pending tool-call state in MongoDB."""
|
||||
|
||||
def __init__(self):
|
||||
mongo = MongoDB.get_client()
|
||||
db = mongo[settings.MONGO_DB_NAME]
|
||||
self.collection = db["pending_tool_state"]
|
||||
self._ensure_indexes()
|
||||
|
||||
def _ensure_indexes(self):
|
||||
try:
|
||||
self.collection.create_index(
|
||||
"expires_at", expireAfterSeconds=0
|
||||
)
|
||||
self.collection.create_index(
|
||||
[("conversation_id", 1), ("user", 1)], unique=True
|
||||
)
|
||||
except Exception:
|
||||
# Indexes may already exist or mongomock doesn't support TTL
|
||||
pass
|
||||
|
||||
def save_state(
|
||||
self,
|
||||
conversation_id: str,
|
||||
user: str,
|
||||
messages: List[Dict],
|
||||
pending_tool_calls: List[Dict],
|
||||
tools_dict: Dict,
|
||||
tool_schemas: List[Dict],
|
||||
agent_config: Dict,
|
||||
client_tools: Optional[List[Dict]] = None,
|
||||
) -> str:
|
||||
"""Save execution state for later continuation.
|
||||
|
||||
Args:
|
||||
conversation_id: The conversation this state belongs to.
|
||||
user: Owner user ID.
|
||||
messages: Full messages array at the pause point.
|
||||
pending_tool_calls: Tool calls awaiting client action.
|
||||
tools_dict: Serializable tools configuration dict.
|
||||
tool_schemas: LLM-formatted tool schemas (agent.tools).
|
||||
agent_config: Config needed to recreate the agent on resume.
|
||||
client_tools: Client-provided tool schemas for client-side execution.
|
||||
|
||||
Returns:
|
||||
The string ID of the saved state document.
|
||||
"""
|
||||
now = datetime.datetime.now(datetime.timezone.utc)
|
||||
expires_at = now + datetime.timedelta(seconds=PENDING_STATE_TTL_SECONDS)
|
||||
|
||||
doc = {
|
||||
"conversation_id": conversation_id,
|
||||
"user": user,
|
||||
"messages": _make_serializable(messages),
|
||||
"pending_tool_calls": _make_serializable(pending_tool_calls),
|
||||
"tools_dict": _make_serializable(tools_dict),
|
||||
"tool_schemas": _make_serializable(tool_schemas),
|
||||
"agent_config": _make_serializable(agent_config),
|
||||
"client_tools": _make_serializable(client_tools) if client_tools else None,
|
||||
"created_at": now,
|
||||
"expires_at": expires_at,
|
||||
}
|
||||
|
||||
# Upsert — only one pending state per conversation per user
|
||||
result = self.collection.replace_one(
|
||||
{"conversation_id": conversation_id, "user": user},
|
||||
doc,
|
||||
upsert=True,
|
||||
)
|
||||
state_id = str(result.upserted_id) if result.upserted_id else conversation_id
|
||||
logger.info(
|
||||
f"Saved continuation state for conversation {conversation_id} "
|
||||
f"with {len(pending_tool_calls)} pending tool call(s)"
|
||||
)
|
||||
return state_id
|
||||
|
||||
def load_state(
|
||||
self, conversation_id: str, user: str
|
||||
) -> Optional[Dict[str, Any]]:
|
||||
"""Load pending continuation state.
|
||||
|
||||
Returns:
|
||||
The state dict, or None if no pending state exists.
|
||||
"""
|
||||
doc = self.collection.find_one(
|
||||
{"conversation_id": conversation_id, "user": user}
|
||||
)
|
||||
if not doc:
|
||||
return None
|
||||
doc["_id"] = str(doc["_id"])
|
||||
return doc
|
||||
|
||||
def delete_state(self, conversation_id: str, user: str) -> bool:
|
||||
"""Delete pending state after successful resumption.
|
||||
|
||||
Returns:
|
||||
True if a document was deleted.
|
||||
"""
|
||||
result = self.collection.delete_one(
|
||||
{"conversation_id": conversation_id, "user": user}
|
||||
)
|
||||
if result.deleted_count:
|
||||
logger.info(
|
||||
f"Deleted continuation state for conversation {conversation_id}"
|
||||
)
|
||||
return result.deleted_count > 0
|
||||
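A minimal pause/resume sketch; the identifiers are placeholders, and the messages and pending lists are assumed to come from the paused agent run:

    svc = ContinuationService()
    svc.save_state(
        conversation_id="665f0c...",   # placeholder
        user="user-123",               # placeholder
        messages=messages,             # messages at the pause point
        pending_tool_calls=pending,    # calls awaiting approval or client execution
        tools_dict={},
        tool_schemas=[],
        agent_config={},
    )
    state = svc.load_state("665f0c...", "user-123")
    if state:
        # ... recreate the agent from state["agent_config"], resume, then clean up ...
        svc.delete_state("665f0c...", "user-123")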
application/api/answer/services/conversation_service.py (new file, 296 lines)
@@ -0,0 +1,296 @@
|
||||
import logging
|
||||
from datetime import datetime, timezone
|
||||
from typing import Any, Dict, List, Optional
|
||||
|
||||
from application.core.mongo_db import MongoDB
|
||||
|
||||
from application.core.settings import settings
|
||||
from bson import ObjectId
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class ConversationService:
|
||||
def __init__(self):
|
||||
mongo = MongoDB.get_client()
|
||||
db = mongo[settings.MONGO_DB_NAME]
|
||||
self.conversations_collection = db["conversations"]
|
||||
self.agents_collection = db["agents"]
|
||||
|
||||
def get_conversation(
|
||||
self, conversation_id: str, user_id: str
|
||||
) -> Optional[Dict[str, Any]]:
|
||||
"""Retrieve a conversation with proper access control"""
|
||||
if not conversation_id or not user_id:
|
||||
return None
|
||||
try:
|
||||
conversation = self.conversations_collection.find_one(
|
||||
{
|
||||
"_id": ObjectId(conversation_id),
|
||||
"$or": [{"user": user_id}, {"shared_with": user_id}],
|
||||
}
|
||||
)
|
||||
|
||||
if not conversation:
|
||||
logger.warning(
|
||||
f"Conversation not found or unauthorized - ID: {conversation_id}, User: {user_id}"
|
||||
)
|
||||
return None
|
||||
conversation["_id"] = str(conversation["_id"])
|
||||
return conversation
|
||||
except Exception as e:
|
||||
logger.error(f"Error fetching conversation: {str(e)}", exc_info=True)
|
||||
return None
|
||||
|
||||
def save_conversation(
|
||||
self,
|
||||
conversation_id: Optional[str],
|
||||
question: str,
|
||||
response: str,
|
||||
thought: str,
|
||||
sources: List[Dict[str, Any]],
|
||||
tool_calls: List[Dict[str, Any]],
|
||||
llm: Any,
|
||||
model_id: str,
|
||||
decoded_token: Dict[str, Any],
|
||||
index: Optional[int] = None,
|
||||
api_key: Optional[str] = None,
|
||||
agent_id: Optional[str] = None,
|
||||
is_shared_usage: bool = False,
|
||||
shared_token: Optional[str] = None,
|
||||
attachment_ids: Optional[List[str]] = None,
|
||||
metadata: Optional[Dict[str, Any]] = None,
|
||||
) -> str:
|
||||
"""Save or update a conversation in the database"""
|
||||
if decoded_token is None:
|
||||
raise ValueError("Invalid or missing authentication token")
|
||||
user_id = decoded_token.get("sub")
|
||||
if not user_id:
|
||||
raise ValueError("User ID not found in token")
|
||||
current_time = datetime.now(timezone.utc)
|
||||
|
||||
# clean up in sources array such that we save max 1k characters for text part
|
||||
for source in sources:
|
||||
if "text" in source and isinstance(source["text"], str):
|
||||
source["text"] = source["text"][:1000]
|
||||
|
||||
if conversation_id is not None and index is not None:
|
||||
# Update existing conversation with new query
|
||||
|
||||
result = self.conversations_collection.update_one(
|
||||
{
|
||||
"_id": ObjectId(conversation_id),
|
||||
"user": user_id,
|
||||
f"queries.{index}": {"$exists": True},
|
||||
},
|
||||
{
|
||||
"$set": {
|
||||
f"queries.{index}.prompt": question,
|
||||
f"queries.{index}.response": response,
|
||||
f"queries.{index}.thought": thought,
|
||||
f"queries.{index}.sources": sources,
|
||||
f"queries.{index}.tool_calls": tool_calls,
|
||||
f"queries.{index}.timestamp": current_time,
|
||||
f"queries.{index}.attachments": attachment_ids,
|
||||
f"queries.{index}.model_id": model_id,
|
||||
**(
|
||||
{f"queries.{index}.metadata": metadata}
|
||||
if metadata
|
||||
else {}
|
||||
),
|
||||
}
|
||||
},
|
||||
)
|
||||
|
||||
if result.matched_count == 0:
|
||||
raise ValueError("Conversation not found or unauthorized")
|
||||
self.conversations_collection.update_one(
|
||||
{
|
||||
"_id": ObjectId(conversation_id),
|
||||
"user": user_id,
|
||||
f"queries.{index}": {"$exists": True},
|
||||
},
|
||||
{"$push": {"queries": {"$each": [], "$slice": index + 1}}},
|
||||
)
|
||||
return conversation_id
|
||||
elif conversation_id:
|
||||
# Append new message to existing conversation
|
||||
|
||||
result = self.conversations_collection.update_one(
|
||||
{"_id": ObjectId(conversation_id), "user": user_id},
|
||||
{
|
||||
"$push": {
|
||||
"queries": {
|
||||
"prompt": question,
|
||||
"response": response,
|
||||
"thought": thought,
|
||||
"sources": sources,
|
||||
"tool_calls": tool_calls,
|
||||
"timestamp": current_time,
|
||||
"attachments": attachment_ids,
|
||||
"model_id": model_id,
|
||||
**({"metadata": metadata} if metadata else {}),
|
||||
}
|
||||
}
|
||||
},
|
||||
)
|
||||
|
||||
if result.matched_count == 0:
|
||||
raise ValueError("Conversation not found or unauthorized")
|
||||
return conversation_id
|
||||
else:
|
||||
# Create new conversation
|
||||
|
||||
messages_summary = [
|
||||
{
|
||||
"role": "system",
|
||||
"content": "You are a helpful assistant that creates concise conversation titles. "
|
||||
"Summarize conversations in 3 words or less using the same language as the user.",
|
||||
},
|
||||
{
|
||||
"role": "user",
|
||||
"content": "Summarise following conversation in no more than 3 words, "
|
||||
"respond ONLY with the summary, use the same language as the "
|
||||
"user query \n\nUser: " + question + "\n\n" + "AI: " + response,
|
||||
},
|
||||
]
|
||||
|
||||
completion = llm.gen(
|
||||
model=model_id, messages=messages_summary, max_tokens=500
|
||||
)
|
||||
|
||||
if not completion or not completion.strip():
|
||||
completion = question[:50] if question else "New Conversation"
|
||||
|
||||
query_doc = {
|
||||
"prompt": question,
|
||||
"response": response,
|
||||
"thought": thought,
|
||||
"sources": sources,
|
||||
"tool_calls": tool_calls,
|
||||
"timestamp": current_time,
|
||||
"attachments": attachment_ids,
|
||||
"model_id": model_id,
|
||||
}
|
||||
if metadata:
|
||||
query_doc["metadata"] = metadata
|
||||
|
||||
conversation_data = {
|
||||
"user": user_id,
|
||||
"date": current_time,
|
||||
"name": completion,
|
||||
"queries": [query_doc],
|
||||
}
|
||||
|
||||
if api_key:
|
||||
if agent_id:
|
||||
conversation_data["agent_id"] = agent_id
|
||||
if is_shared_usage:
|
||||
conversation_data["is_shared_usage"] = is_shared_usage
|
||||
conversation_data["shared_token"] = shared_token
|
||||
agent = self.agents_collection.find_one({"key": api_key})
|
||||
if agent:
|
||||
conversation_data["api_key"] = agent["key"]
|
||||
result = self.conversations_collection.insert_one(conversation_data)
|
||||
return str(result.inserted_id)
|
||||
|
||||
def update_compression_metadata(
|
||||
self, conversation_id: str, compression_metadata: Dict[str, Any]
|
||||
) -> None:
|
||||
"""
|
||||
Update conversation with compression metadata.
|
||||
|
||||
Uses $push with $slice to keep only the most recent compression points,
|
||||
preventing unbounded array growth. Since each compression incorporates
|
||||
previous compressions, older points become redundant.
|
||||
|
||||
Args:
|
||||
conversation_id: Conversation ID
|
||||
compression_metadata: Compression point data
|
||||
"""
|
||||
try:
|
||||
self.conversations_collection.update_one(
|
||||
{"_id": ObjectId(conversation_id)},
|
||||
{
|
||||
"$set": {
|
||||
"compression_metadata.is_compressed": True,
|
||||
"compression_metadata.last_compression_at": compression_metadata.get(
|
||||
"timestamp"
|
||||
),
|
||||
},
|
||||
"$push": {
|
||||
"compression_metadata.compression_points": {
|
||||
"$each": [compression_metadata],
|
||||
"$slice": -settings.COMPRESSION_MAX_HISTORY_POINTS,
|
||||
}
|
||||
},
|
||||
},
|
||||
)
|
||||
logger.info(
|
||||
f"Updated compression metadata for conversation {conversation_id}"
|
||||
)
|
||||
except Exception as e:
|
||||
logger.error(
|
||||
f"Error updating compression metadata: {str(e)}", exc_info=True
|
||||
)
|
||||
raise
|
||||
|
||||
def append_compression_message(
|
||||
self, conversation_id: str, compression_metadata: Dict[str, Any]
|
||||
) -> None:
|
||||
"""
|
||||
Append a synthetic compression summary entry into the conversation history.
|
||||
This makes the summary visible in the DB alongside normal queries.
|
||||
"""
|
||||
try:
|
||||
summary = compression_metadata.get("compressed_summary", "")
|
||||
if not summary:
|
||||
return
|
||||
timestamp = compression_metadata.get("timestamp", datetime.now(timezone.utc))
|
||||
|
||||
self.conversations_collection.update_one(
|
||||
{"_id": ObjectId(conversation_id)},
|
||||
{
|
||||
"$push": {
|
||||
"queries": {
|
||||
"prompt": "[Context Compression Summary]",
|
||||
"response": summary,
|
||||
"thought": "",
|
||||
"sources": [],
|
||||
"tool_calls": [],
|
||||
"timestamp": timestamp,
|
||||
"attachments": [],
|
||||
"model_id": compression_metadata.get("model_used"),
|
||||
}
|
||||
}
|
||||
},
|
||||
)
|
||||
logger.info(f"Appended compression summary to conversation {conversation_id}")
|
||||
except Exception as e:
|
||||
logger.error(
|
||||
f"Error appending compression summary: {str(e)}", exc_info=True
|
||||
)
|
||||
|
||||
def get_compression_metadata(
|
||||
self, conversation_id: str
|
||||
) -> Optional[Dict[str, Any]]:
|
||||
"""
|
||||
Get compression metadata for a conversation.
|
||||
|
||||
Args:
|
||||
conversation_id: Conversation ID
|
||||
|
||||
Returns:
|
||||
Compression metadata dict or None
|
||||
"""
|
||||
try:
|
||||
conversation = self.conversations_collection.find_one(
|
||||
{"_id": ObjectId(conversation_id)}, {"compression_metadata": 1}
|
||||
)
|
||||
return conversation.get("compression_metadata") if conversation else None
|
||||
except Exception as e:
|
||||
logger.error(
|
||||
f"Error getting compression metadata: {str(e)}", exc_info=True
|
||||
)
|
||||
return None
|
||||
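For reference, an illustrative shape of the compression_metadata sub-document that update_compression_metadata() maintains (field values are examples, not taken from a real conversation):

    # {
    #   "compression_metadata": {
    #     "is_compressed": True,
    #     "last_compression_at": <datetime>,
    #     "compression_points": [            # capped at COMPRESSION_MAX_HISTORY_POINTS
    #       {"query_index": 7,
    #        "compressed_summary": "...",
    #        "original_token_count": 42000,
    #        "compressed_token_count": 1800,
    #        ...}
    #     ]
    #   }
    # }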
application/api/answer/services/prompt_renderer.py (new file, 97 lines)
@@ -0,0 +1,97 @@
|
||||
import logging
from typing import Any, Dict, Optional

from application.templates.namespaces import NamespaceManager
from application.templates.template_engine import TemplateEngine, TemplateRenderError

logger = logging.getLogger(__name__)


class PromptRenderer:
    """Service for rendering prompts with dynamic context using namespaces"""

    def __init__(self):
        self.template_engine = TemplateEngine()
        self.namespace_manager = NamespaceManager()

    def render_prompt(
        self,
        prompt_content: str,
        user_id: Optional[str] = None,
        request_id: Optional[str] = None,
        passthrough_data: Optional[Dict[str, Any]] = None,
        docs: Optional[list] = None,
        docs_together: Optional[str] = None,
        tools_data: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> str:
        """
        Render prompt with full context from all namespaces.

        Args:
            prompt_content: Raw prompt template string
            user_id: Current user identifier
            request_id: Unique request identifier
            passthrough_data: Parameters from web request
            docs: RAG retrieved documents
            docs_together: Concatenated document content
            tools_data: Pre-fetched tool results organized by tool name
            **kwargs: Additional parameters for namespace builders

        Returns:
            Rendered prompt string with all variables substituted

        Raises:
            TemplateRenderError: If template rendering fails
        """
        if not prompt_content:
            return ""

        uses_template = self._uses_template_syntax(prompt_content)

        if not uses_template:
            return self._apply_legacy_substitutions(prompt_content, docs_together)

        try:
            context = self.namespace_manager.build_context(
                user_id=user_id,
                request_id=request_id,
                passthrough_data=passthrough_data,
                docs=docs,
                docs_together=docs_together,
                tools_data=tools_data,
                **kwargs,
            )

            return self.template_engine.render(prompt_content, context)
        except TemplateRenderError:
            raise
        except Exception as e:
            error_msg = f"Prompt rendering failed: {str(e)}"
            logger.error(error_msg)
            raise TemplateRenderError(error_msg) from e

    def _uses_template_syntax(self, prompt_content: str) -> bool:
        """Check if prompt uses Jinja2 template syntax"""
        return "{{" in prompt_content and "}}" in prompt_content

    def _apply_legacy_substitutions(
        self, prompt_content: str, docs_together: Optional[str] = None
    ) -> str:
        """
        Apply backward-compatible substitutions for old prompt format.

        Handles legacy {summaries} and {query} placeholders during transition period.
        """
        if docs_together:
            prompt_content = prompt_content.replace("{summaries}", docs_together)
        return prompt_content

    def validate_template(self, prompt_content: str) -> bool:
        """Validate prompt template syntax"""
        return self.template_engine.validate_template(prompt_content)

    def extract_variables(self, prompt_content: str) -> set[str]:
        """Extract all variable names from prompt template"""
        return self.template_engine.extract_variables(prompt_content)
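A short usage sketch of PromptRenderer. The {summaries} legacy path is taken directly from this file; the template variable names such as user.id and docs.together are assumptions about the namespace layout, which lives in NamespaceManager and is not shown in this diff.

renderer = PromptRenderer()

# Legacy prompt: no {{ }} syntax, so only {summaries} is replaced.
legacy = renderer.render_prompt(
    "Answer using these notes:\n{summaries}",
    docs_together="Note A\nNote B",
)

# Templated prompt: rendered through NamespaceManager + TemplateEngine.
templated = renderer.render_prompt(
    "Hello {{ user.id }}, context:\n{{ docs.together }}",  # variable names are illustrative assumptions
    user_id="u-123",
    docs_together="Note A\nNote B",
)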
application/api/answer/services/stream_processor.py (new file, 1072 lines)
File diff suppressed because it is too large
application/api/connector/routes.py (new file, 545 lines)
@@ -0,0 +1,545 @@
import base64
|
||||
import datetime
|
||||
import html
|
||||
import json
|
||||
import uuid
|
||||
from urllib.parse import urlencode
|
||||
|
||||
|
||||
from bson.objectid import ObjectId
|
||||
from flask import (
|
||||
Blueprint,
|
||||
current_app,
|
||||
jsonify,
|
||||
make_response,
|
||||
request
|
||||
)
|
||||
from flask_restx import fields, Namespace, Resource
|
||||
|
||||
|
||||
from application.api.user.tasks import (
|
||||
ingest_connector_task,
|
||||
)
|
||||
from application.core.mongo_db import MongoDB
|
||||
from application.core.settings import settings
|
||||
from application.api import api
|
||||
|
||||
|
||||
from application.parser.connectors.connector_creator import ConnectorCreator
|
||||
|
||||
|
||||
mongo = MongoDB.get_client()
|
||||
db = mongo[settings.MONGO_DB_NAME]
|
||||
sources_collection = db["sources"]
|
||||
sessions_collection = db["connector_sessions"]
|
||||
|
||||
connector = Blueprint("connector", __name__)
|
||||
connectors_ns = Namespace("connectors", description="Connector operations", path="/")
|
||||
api.add_namespace(connectors_ns)
|
||||
|
||||
# Fixed callback status path to prevent open redirect
|
||||
CALLBACK_STATUS_PATH = "/api/connectors/callback-status"
|
||||
|
||||
|
||||
def build_callback_redirect(params: dict) -> str:
|
||||
"""Build a safe redirect URL to the callback status page.
|
||||
|
||||
Uses a fixed path and properly URL-encodes all parameters
|
||||
to prevent URL injection and open redirect vulnerabilities.
|
||||
"""
|
||||
return f"{CALLBACK_STATUS_PATH}?{urlencode(params)}"
|
||||
|
||||
|
||||
|
||||
@connectors_ns.route("/api/connectors/auth")
|
||||
class ConnectorAuth(Resource):
|
||||
@api.doc(description="Get connector OAuth authorization URL", params={"provider": "Connector provider (e.g., google_drive)"})
|
||||
def get(self):
|
||||
try:
|
||||
provider = request.args.get('provider') or request.args.get('source')
|
||||
if not provider:
|
||||
return make_response(jsonify({"success": False, "error": "Missing provider"}), 400)
|
||||
|
||||
if not ConnectorCreator.is_supported(provider):
|
||||
return make_response(jsonify({"success": False, "error": f"Unsupported provider: {provider}"}), 400)
|
||||
|
||||
decoded_token = request.decoded_token
|
||||
if not decoded_token:
|
||||
return make_response(jsonify({"success": False, "error": "Unauthorized"}), 401)
|
||||
user_id = decoded_token.get('sub')
|
||||
|
||||
now = datetime.datetime.now(datetime.timezone.utc)
|
||||
result = sessions_collection.insert_one({
|
||||
"provider": provider,
|
||||
"user": user_id,
|
||||
"status": "pending",
|
||||
"created_at": now
|
||||
})
|
||||
state_dict = {
|
||||
"provider": provider,
|
||||
"object_id": str(result.inserted_id)
|
||||
}
|
||||
state = base64.urlsafe_b64encode(json.dumps(state_dict).encode()).decode()
|
||||
|
||||
auth = ConnectorCreator.create_auth(provider)
|
||||
authorization_url = auth.get_authorization_url(state=state)
|
||||
return make_response(jsonify({
|
||||
"success": True,
|
||||
"authorization_url": authorization_url,
|
||||
"state": state
|
||||
}), 200)
|
||||
except Exception as e:
|
||||
current_app.logger.error(f"Error generating connector auth URL: {e}", exc_info=True)
|
||||
return make_response(jsonify({"success": False, "error": "Failed to generate authorization URL"}), 500)
|
||||
|
||||
|
||||
@connectors_ns.route("/api/connectors/callback")
|
||||
class ConnectorsCallback(Resource):
|
||||
@api.doc(description="Handle OAuth callback for external connectors")
|
||||
def get(self):
|
||||
"""Handle OAuth callback for external connectors"""
|
||||
try:
|
||||
from application.parser.connectors.connector_creator import ConnectorCreator
|
||||
from flask import request, redirect
|
||||
|
||||
authorization_code = request.args.get('code')
|
||||
state = request.args.get('state')
|
||||
error = request.args.get('error')
|
||||
|
||||
state_dict = json.loads(base64.urlsafe_b64decode(state.encode()).decode())
|
||||
provider = state_dict.get("provider")
|
||||
state_object_id = state_dict.get("object_id")
|
||||
|
||||
# Validate provider
|
||||
if not provider or not isinstance(provider, str) or not ConnectorCreator.is_supported(provider):
|
||||
return redirect(build_callback_redirect({
|
||||
"status": "error",
|
||||
"message": "Invalid provider"
|
||||
}))
|
||||
|
||||
if error:
|
||||
if error == "access_denied":
|
||||
return redirect(build_callback_redirect({
|
||||
"status": "cancelled",
|
||||
"message": "Authentication was cancelled. You can try again if you'd like to connect your account.",
|
||||
"provider": provider
|
||||
}))
|
||||
else:
|
||||
current_app.logger.warning(f"OAuth error in callback: {error}")
|
||||
return redirect(build_callback_redirect({
|
||||
"status": "error",
|
||||
"message": "Authentication failed. Please try again and make sure to grant all requested permissions.",
|
||||
"provider": provider
|
||||
}))
|
||||
|
||||
if not authorization_code:
|
||||
return redirect(build_callback_redirect({
|
||||
"status": "error",
|
||||
"message": "Authentication failed. Please try again and make sure to grant all requested permissions.",
|
||||
"provider": provider
|
||||
}))
|
||||
|
||||
try:
|
||||
auth = ConnectorCreator.create_auth(provider)
|
||||
token_info = auth.exchange_code_for_tokens(authorization_code)
|
||||
|
||||
session_token = str(uuid.uuid4())
|
||||
|
||||
try:
|
||||
if provider == "google_drive":
|
||||
credentials = auth.create_credentials_from_token_info(token_info)
|
||||
service = auth.build_drive_service(credentials)
|
||||
user_info = service.about().get(fields="user").execute()
|
||||
user_email = user_info.get('user', {}).get('emailAddress', 'Connected User')
|
||||
else:
|
||||
user_email = token_info.get('user_info', {}).get('email', 'Connected User')
|
||||
|
||||
except Exception as e:
|
||||
current_app.logger.warning(f"Could not get user info: {e}")
|
||||
user_email = 'Connected User'
|
||||
|
||||
sanitized_token_info = auth.sanitize_token_info(token_info)
|
||||
|
||||
sessions_collection.find_one_and_update(
|
||||
{"_id": ObjectId(state_object_id), "provider": provider},
|
||||
{
|
||||
"$set": {
|
||||
"session_token": session_token,
|
||||
"token_info": sanitized_token_info,
|
||||
"user_email": user_email,
|
||||
"status": "authorized"
|
||||
}
|
||||
}
|
||||
)
|
||||
|
||||
# Redirect to success page with session token and user email
|
||||
return redirect(build_callback_redirect({
|
||||
"status": "success",
|
||||
"message": "Authentication successful",
|
||||
"provider": provider,
|
||||
"session_token": session_token,
|
||||
"user_email": user_email
|
||||
}))
|
||||
|
||||
except Exception as e:
|
||||
current_app.logger.error(f"Error exchanging code for tokens: {str(e)}", exc_info=True)
|
||||
return redirect(build_callback_redirect({
|
||||
"status": "error",
|
||||
"message": "Authentication failed. Please try again and make sure to grant all requested permissions.",
|
||||
"provider": provider
|
||||
}))
|
||||
|
||||
except Exception as e:
|
||||
current_app.logger.error(f"Error handling connector callback: {e}")
|
||||
return redirect(build_callback_redirect({
|
||||
"status": "error",
|
||||
"message": "Authentication failed. Please try again and make sure to grant all requested permissions."
|
||||
}))
|
||||
|
||||
|
||||
@connectors_ns.route("/api/connectors/files")
|
||||
class ConnectorFiles(Resource):
|
||||
@api.expect(api.model("ConnectorFilesModel", {
|
||||
"provider": fields.String(required=True),
|
||||
"session_token": fields.String(required=True),
|
||||
"folder_id": fields.String(required=False),
|
||||
"limit": fields.Integer(required=False),
|
||||
"page_token": fields.String(required=False),
|
||||
"search_query": fields.String(required=False),
|
||||
}))
|
||||
@api.doc(description="List files from a connector provider (supports pagination and search)")
|
||||
def post(self):
|
||||
try:
|
||||
data = request.get_json()
|
||||
provider = data.get('provider')
|
||||
session_token = data.get('session_token')
|
||||
limit = data.get('limit', 10)
|
||||
|
||||
if not provider or not session_token:
|
||||
return make_response(jsonify({"success": False, "error": "provider and session_token are required"}), 400)
|
||||
|
||||
decoded_token = request.decoded_token
|
||||
if not decoded_token:
|
||||
return make_response(jsonify({"success": False, "error": "Unauthorized"}), 401)
|
||||
user = decoded_token.get('sub')
|
||||
session = sessions_collection.find_one({"session_token": session_token, "user": user})
|
||||
if not session:
|
||||
return make_response(jsonify({"success": False, "error": "Invalid or unauthorized session"}), 401)
|
||||
|
||||
loader = ConnectorCreator.create_connector(provider, session_token)
|
||||
|
||||
generic_keys = {'provider', 'session_token'}
|
||||
input_config = {
|
||||
k: v for k, v in data.items() if k not in generic_keys
|
||||
}
|
||||
input_config['list_only'] = True
|
||||
|
||||
documents = loader.load_data(input_config)
|
||||
|
||||
files = []
|
||||
for doc in documents[:limit]:
|
||||
metadata = doc.extra_info
|
||||
modified_time = metadata.get('modified_time')
|
||||
if modified_time:
|
||||
date_part = modified_time.split('T')[0]
|
||||
time_part = modified_time.split('T')[1].split('.')[0].split('Z')[0]
|
||||
formatted_time = f"{date_part} {time_part}"
|
||||
else:
|
||||
formatted_time = None
|
||||
|
||||
files.append({
|
||||
'id': doc.doc_id,
|
||||
'name': metadata.get('file_name', 'Unknown File'),
|
||||
'type': metadata.get('mime_type', 'unknown'),
|
||||
'size': metadata.get('size', None),
|
||||
'modifiedTime': formatted_time,
|
||||
'isFolder': metadata.get('is_folder', False)
|
||||
})
|
||||
|
||||
next_token = getattr(loader, 'next_page_token', None)
|
||||
has_more = bool(next_token)
|
||||
|
||||
return make_response(jsonify({
|
||||
"success": True,
|
||||
"files": files,
|
||||
"total": len(files),
|
||||
"next_page_token": next_token,
|
||||
"has_more": has_more
|
||||
}), 200)
|
||||
except Exception as e:
|
||||
current_app.logger.error(f"Error loading connector files: {e}", exc_info=True)
|
||||
return make_response(jsonify({"success": False, "error": "Failed to load files"}), 500)
|
||||
|
||||
|
||||
@connectors_ns.route("/api/connectors/validate-session")
|
||||
class ConnectorValidateSession(Resource):
|
||||
@api.expect(api.model("ConnectorValidateSessionModel", {"provider": fields.String(required=True), "session_token": fields.String(required=True)}))
|
||||
@api.doc(description="Validate connector session token and return user info and access token")
|
||||
def post(self):
|
||||
try:
|
||||
data = request.get_json()
|
||||
provider = data.get('provider')
|
||||
session_token = data.get('session_token')
|
||||
if not provider or not session_token:
|
||||
return make_response(jsonify({"success": False, "error": "provider and session_token are required"}), 400)
|
||||
|
||||
decoded_token = request.decoded_token
|
||||
if not decoded_token:
|
||||
return make_response(jsonify({"success": False, "error": "Unauthorized"}), 401)
|
||||
user = decoded_token.get('sub')
|
||||
|
||||
session = sessions_collection.find_one({"session_token": session_token, "user": user})
|
||||
if not session or "token_info" not in session:
|
||||
return make_response(jsonify({"success": False, "error": "Invalid or expired session"}), 401)
|
||||
|
||||
token_info = session["token_info"]
|
||||
auth = ConnectorCreator.create_auth(provider)
|
||||
is_expired = auth.is_token_expired(token_info)
|
||||
|
||||
if is_expired and token_info.get('refresh_token'):
|
||||
try:
|
||||
refreshed_token_info = auth.refresh_access_token(token_info.get('refresh_token'))
|
||||
sanitized_token_info = auth.sanitize_token_info(refreshed_token_info)
|
||||
sessions_collection.update_one(
|
||||
{"session_token": session_token},
|
||||
{"$set": {"token_info": sanitized_token_info}}
|
||||
)
|
||||
token_info = sanitized_token_info
|
||||
is_expired = False
|
||||
except Exception as refresh_error:
|
||||
current_app.logger.error(f"Failed to refresh token: {refresh_error}")
|
||||
|
||||
if is_expired:
|
||||
return make_response(jsonify({
|
||||
"success": False,
|
||||
"expired": True,
|
||||
"error": "Session token has expired. Please reconnect."
|
||||
}), 401)
|
||||
|
||||
_base_fields = {"access_token", "refresh_token", "token_uri", "expiry"}
|
||||
provider_extras = {k: v for k, v in token_info.items() if k not in _base_fields}
|
||||
|
||||
response_data = {
|
||||
"success": True,
|
||||
"expired": False,
|
||||
"user_email": session.get('user_email', 'Connected User'),
|
||||
"access_token": token_info.get('access_token'),
|
||||
**provider_extras,
|
||||
}
|
||||
|
||||
return make_response(jsonify(response_data), 200)
|
||||
except Exception as e:
|
||||
current_app.logger.error(f"Error validating connector session: {e}", exc_info=True)
|
||||
return make_response(jsonify({"success": False, "error": "Failed to validate session"}), 500)
|
||||
|
||||
|
||||
@connectors_ns.route("/api/connectors/disconnect")
|
||||
class ConnectorDisconnect(Resource):
|
||||
@api.expect(api.model("ConnectorDisconnectModel", {"provider": fields.String(required=True), "session_token": fields.String(required=False)}))
|
||||
@api.doc(description="Disconnect a connector session")
|
||||
def post(self):
|
||||
try:
|
||||
data = request.get_json()
|
||||
provider = data.get('provider')
|
||||
session_token = data.get('session_token')
|
||||
if not provider:
|
||||
return make_response(jsonify({"success": False, "error": "provider is required"}), 400)
|
||||
|
||||
|
||||
if session_token:
|
||||
sessions_collection.delete_one({"session_token": session_token})
|
||||
|
||||
return make_response(jsonify({"success": True}), 200)
|
||||
except Exception as e:
|
||||
current_app.logger.error(f"Error disconnecting connector session: {e}", exc_info=True)
|
||||
return make_response(jsonify({"success": False, "error": "Failed to disconnect session"}), 500)
|
||||
|
||||
|
||||
@connectors_ns.route("/api/connectors/sync")
|
||||
class ConnectorSync(Resource):
|
||||
@api.expect(
|
||||
api.model(
|
||||
"ConnectorSyncModel",
|
||||
{
|
||||
"source_id": fields.String(required=True, description="Source ID to sync"),
|
||||
"session_token": fields.String(required=True, description="Authentication token")
|
||||
},
|
||||
)
|
||||
)
|
||||
@api.doc(description="Sync connector source to check for modifications")
|
||||
def post(self):
|
||||
decoded_token = request.decoded_token
|
||||
if not decoded_token:
|
||||
return make_response(jsonify({"success": False}), 401)
|
||||
|
||||
try:
|
||||
data = request.get_json()
|
||||
source_id = data.get('source_id')
|
||||
session_token = data.get('session_token')
|
||||
|
||||
if not all([source_id, session_token]):
|
||||
return make_response(
|
||||
jsonify({
|
||||
"success": False,
|
||||
"error": "source_id and session_token are required"
|
||||
}),
|
||||
400
|
||||
)
|
||||
source = sources_collection.find_one({"_id": ObjectId(source_id)})
|
||||
if not source:
|
||||
return make_response(
|
||||
jsonify({
|
||||
"success": False,
|
||||
"error": "Source not found"
|
||||
}),
|
||||
404
|
||||
)
|
||||
|
||||
if source.get('user') != decoded_token.get('sub'):
|
||||
return make_response(
|
||||
jsonify({
|
||||
"success": False,
|
||||
"error": "Unauthorized access to source"
|
||||
}),
|
||||
403
|
||||
)
|
||||
|
||||
remote_data = {}
|
||||
try:
|
||||
if source.get('remote_data'):
|
||||
remote_data = json.loads(source.get('remote_data'))
|
||||
except json.JSONDecodeError:
|
||||
current_app.logger.error(f"Invalid remote_data format for source {source_id}")
|
||||
remote_data = {}
|
||||
|
||||
source_type = remote_data.get('provider')
|
||||
if not source_type:
|
||||
return make_response(
|
||||
jsonify({
|
||||
"success": False,
|
||||
"error": "Source provider not found in remote_data"
|
||||
}),
|
||||
400
|
||||
)
|
||||
|
||||
# Extract configuration from remote_data
|
||||
file_ids = remote_data.get('file_ids', [])
|
||||
folder_ids = remote_data.get('folder_ids', [])
|
||||
recursive = remote_data.get('recursive', True)
|
||||
|
||||
# Start the sync task
|
||||
task = ingest_connector_task.delay(
|
||||
job_name=source.get('name'),
|
||||
user=decoded_token.get('sub'),
|
||||
source_type=source_type,
|
||||
session_token=session_token,
|
||||
file_ids=file_ids,
|
||||
folder_ids=folder_ids,
|
||||
recursive=recursive,
|
||||
retriever=source.get('retriever', 'classic'),
|
||||
operation_mode="sync",
|
||||
doc_id=source_id,
|
||||
sync_frequency=source.get('sync_frequency', 'never')
|
||||
)
|
||||
|
||||
return make_response(
|
||||
jsonify({
|
||||
"success": True,
|
||||
"task_id": task.id
|
||||
}),
|
||||
200
|
||||
)
|
||||
|
||||
except Exception as err:
|
||||
current_app.logger.error(
|
||||
f"Error syncing connector source: {err}",
|
||||
exc_info=True
|
||||
)
|
||||
return make_response(
|
||||
jsonify({
|
||||
"success": False,
|
||||
"error": "Failed to sync connector source"
|
||||
}),
|
||||
400
|
||||
)
|
||||
|
||||
|
||||
@connectors_ns.route("/api/connectors/callback-status")
|
||||
class ConnectorCallbackStatus(Resource):
|
||||
@api.doc(description="Return HTML page with connector authentication status")
|
||||
def get(self):
|
||||
"""Return HTML page with connector authentication status"""
|
||||
try:
|
||||
# Validate and sanitize status to a known value
|
||||
status_raw = request.args.get('status', 'error')
|
||||
status = status_raw if status_raw in ('success', 'error', 'cancelled') else 'error'
|
||||
|
||||
# Escape all user-controlled values for HTML context
|
||||
message = html.escape(request.args.get('message', ''))
|
||||
provider_raw = request.args.get('provider', 'connector')
|
||||
provider = html.escape(provider_raw.replace('_', ' ').title())
|
||||
session_token = request.args.get('session_token', '')
|
||||
user_email = html.escape(request.args.get('user_email', ''))
|
||||
|
||||
def safe_js_string(value: str) -> str:
|
||||
"""Safely encode a string for embedding in inline JavaScript."""
|
||||
js_encoded = json.dumps(value)
|
||||
return js_encoded.replace('</', '<\\/').replace('<!--', '<\\!--')
|
||||
|
||||
js_status = safe_js_string(status)
|
||||
js_session_token = safe_js_string(session_token)
|
||||
js_user_email = safe_js_string(user_email)
|
||||
js_provider_type = safe_js_string(provider_raw)
|
||||
|
||||
html_content = f"""
|
||||
<!DOCTYPE html>
|
||||
<html>
|
||||
<head>
|
||||
<title>{provider} Authentication</title>
|
||||
<style>
|
||||
body {{ font-family: Arial, sans-serif; text-align: center; padding: 40px; }}
|
||||
.container {{ max-width: 600px; margin: 0 auto; }}
|
||||
.success {{ color: #4CAF50; }}
|
||||
.error {{ color: #F44336; }}
|
||||
.cancelled {{ color: #FF9800; }}
|
||||
</style>
|
||||
<script>
|
||||
window.onload = function() {{
|
||||
const status = {js_status};
|
||||
const sessionToken = {js_session_token};
|
||||
const userEmail = {js_user_email};
|
||||
const providerType = {js_provider_type};
|
||||
|
||||
if (status === "success" && window.opener) {{
|
||||
window.opener.postMessage({{
|
||||
type: providerType + '_auth_success',
|
||||
session_token: sessionToken,
|
||||
user_email: userEmail
|
||||
}}, '*');
|
||||
|
||||
setTimeout(() => window.close(), 3000);
|
||||
}} else if (status === "cancelled" || status === "error") {{
|
||||
setTimeout(() => window.close(), 3000);
|
||||
}}
|
||||
}};
|
||||
</script>
|
||||
</head>
|
||||
<body>
|
||||
<div class="container">
|
||||
<h2>{provider} Authentication</h2>
|
||||
<div class="{status}">
|
||||
<p>{message}</p>
|
||||
{f'<p>Connected as: {user_email}</p>' if status == 'success' else ''}
|
||||
</div>
|
||||
<p><small>You can close this window. {f"Your {provider} is now connected and ready to use." if status == 'success' else "Feel free to close this window."}</small></p>
|
||||
</div>
|
||||
</body>
|
||||
</html>
|
||||
"""
|
||||
|
||||
return make_response(html_content, 200, {'Content-Type': 'text/html'})
|
||||
except Exception as e:
|
||||
current_app.logger.error(f"Error rendering callback status page: {e}")
|
||||
return make_response("Authentication error occurred", 500, {'Content-Type': 'text/html'})
|
||||
|
||||
|
||||
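For reference, a sketch of the state round-trip used by the auth and callback endpoints above: the state is a URL-safe base64 encoding of a small JSON object carrying the provider and the pending session's ObjectId. The concrete values below are illustrative.

import base64
import json

state_dict = {"provider": "google_drive", "object_id": "665f1c0b9d2e4a001f3b2a7c"}  # example values
state = base64.urlsafe_b64encode(json.dumps(state_dict).encode()).decode()

# The callback reverses the encoding to locate the pending connector session.
decoded = json.loads(base64.urlsafe_b64decode(state.encode()).decode())
assert decoded == state_dict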
application/api/internal/routes.py (145 lines changed, Normal file → Executable file)
@@ -1,20 +1,47 @@
|
||||
import os
|
||||
import datetime
|
||||
from flask import Blueprint, request, send_from_directory
|
||||
from pymongo import MongoClient
|
||||
import json
|
||||
from flask import Blueprint, request, send_from_directory, jsonify
|
||||
from werkzeug.utils import secure_filename
|
||||
|
||||
|
||||
from bson.objectid import ObjectId
|
||||
import logging
|
||||
from application.core.mongo_db import MongoDB
|
||||
from application.core.settings import settings
|
||||
mongo = MongoClient(settings.MONGO_URI)
|
||||
db = mongo["docsgpt"]
|
||||
from application.storage.storage_creator import StorageCreator
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
mongo = MongoDB.get_client()
|
||||
db = mongo[settings.MONGO_DB_NAME]
|
||||
conversations_collection = db["conversations"]
|
||||
vectors_collection = db["vectors"]
|
||||
sources_collection = db["sources"]
|
||||
|
||||
current_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
||||
current_dir = os.path.dirname(
|
||||
os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
|
||||
)
|
||||
|
||||
|
||||
internal = Blueprint("internal", __name__)
|
||||
|
||||
|
||||
@internal.before_request
|
||||
def verify_internal_key():
|
||||
"""Verify INTERNAL_KEY for all internal endpoint requests.
|
||||
|
||||
Deny by default: if INTERNAL_KEY is not configured, reject all requests.
|
||||
"""
|
||||
if not settings.INTERNAL_KEY:
|
||||
logger.warning(
|
||||
f"Internal API request rejected from {request.remote_addr}: "
|
||||
"INTERNAL_KEY is not configured"
|
||||
)
|
||||
return jsonify({"error": "Unauthorized", "message": "Internal API is not configured"}), 401
|
||||
internal_key = request.headers.get("X-Internal-Key")
|
||||
if not internal_key or internal_key != settings.INTERNAL_KEY:
|
||||
logger.warning(f"Unauthorized internal API access attempt from {request.remote_addr}")
|
||||
return jsonify({"error": "Unauthorized", "message": "Invalid or missing internal key"}), 401
|
||||
|
||||
|
||||
internal = Blueprint('internal', __name__)
|
||||
@internal.route("/api/download", methods=["get"])
|
||||
def download_file():
|
||||
user = secure_filename(request.args.get("user"))
|
||||
@@ -24,46 +51,106 @@ def download_file():
|
||||
return send_from_directory(save_dir, filename, as_attachment=True)
|
||||
|
||||
|
||||
|
||||
@internal.route("/api/upload_index", methods=["POST"])
|
||||
def upload_index_files():
|
||||
"""Upload two files(index.faiss, index.pkl) to the user's folder."""
|
||||
if "user" not in request.form:
|
||||
return {"status": "no user"}
|
||||
user = secure_filename(request.form["user"])
|
||||
user = request.form["user"]
|
||||
if "name" not in request.form:
|
||||
return {"status": "no name"}
|
||||
job_name = secure_filename(request.form["name"])
|
||||
save_dir = os.path.join(current_dir, "indexes", user, job_name)
|
||||
job_name = request.form["name"]
|
||||
tokens = request.form["tokens"]
|
||||
retriever = request.form["retriever"]
|
||||
id = request.form["id"]
|
||||
type = request.form["type"]
|
||||
remote_data = request.form["remote_data"] if "remote_data" in request.form else None
|
||||
sync_frequency = request.form["sync_frequency"] if "sync_frequency" in request.form else None
|
||||
|
||||
file_path = request.form.get("file_path")
|
||||
directory_structure = request.form.get("directory_structure")
|
||||
file_name_map = request.form.get("file_name_map")
|
||||
|
||||
if directory_structure:
|
||||
try:
|
||||
directory_structure = json.loads(directory_structure)
|
||||
except Exception:
|
||||
logger.error("Error parsing directory_structure")
|
||||
directory_structure = {}
|
||||
else:
|
||||
directory_structure = {}
|
||||
if file_name_map:
|
||||
try:
|
||||
file_name_map = json.loads(file_name_map)
|
||||
except Exception:
|
||||
logger.error("Error parsing file_name_map")
|
||||
file_name_map = None
|
||||
else:
|
||||
file_name_map = None
|
||||
|
||||
storage = StorageCreator.get_storage()
|
||||
index_base_path = f"indexes/{id}"
|
||||
|
||||
if settings.VECTOR_STORE == "faiss":
|
||||
if "file_faiss" not in request.files:
|
||||
print("No file part")
|
||||
logger.error("No file_faiss part")
|
||||
return {"status": "no file"}
|
||||
file_faiss = request.files["file_faiss"]
|
||||
if file_faiss.filename == "":
|
||||
return {"status": "no file name"}
|
||||
if "file_pkl" not in request.files:
|
||||
print("No file part")
|
||||
logger.error("No file_pkl part")
|
||||
return {"status": "no file"}
|
||||
file_pkl = request.files["file_pkl"]
|
||||
if file_pkl.filename == "":
|
||||
return {"status": "no file name"}
|
||||
# saves index files
|
||||
|
||||
if not os.path.exists(save_dir):
|
||||
os.makedirs(save_dir)
|
||||
file_faiss.save(os.path.join(save_dir, "index.faiss"))
|
||||
file_pkl.save(os.path.join(save_dir, "index.pkl"))
|
||||
# create entry in vectors_collection
|
||||
vectors_collection.insert_one(
|
||||
{
|
||||
|
||||
# Save index files to storage
|
||||
faiss_storage_path = f"{index_base_path}/index.faiss"
|
||||
pkl_storage_path = f"{index_base_path}/index.pkl"
|
||||
storage.save_file(file_faiss, faiss_storage_path)
|
||||
storage.save_file(file_pkl, pkl_storage_path)
|
||||
|
||||
|
||||
existing_entry = sources_collection.find_one({"_id": ObjectId(id)})
|
||||
if existing_entry:
|
||||
update_fields = {
|
||||
"user": user,
|
||||
"name": job_name,
|
||||
"language": job_name,
|
||||
"location": save_dir,
|
||||
"date": datetime.datetime.now().strftime("%d/%m/%Y %H:%M:%S"),
|
||||
"date": datetime.datetime.now(),
|
||||
"model": settings.EMBEDDINGS_NAME,
|
||||
"type": "local",
|
||||
"type": type,
|
||||
"tokens": tokens,
|
||||
"retriever": retriever,
|
||||
"remote_data": remote_data,
|
||||
"sync_frequency": sync_frequency,
|
||||
"file_path": file_path,
|
||||
"directory_structure": directory_structure,
|
||||
}
|
||||
)
|
||||
return {"status": "ok"}
|
||||
if file_name_map is not None:
|
||||
update_fields["file_name_map"] = file_name_map
|
||||
sources_collection.update_one(
|
||||
{"_id": ObjectId(id)},
|
||||
{"$set": update_fields},
|
||||
)
|
||||
else:
|
||||
insert_doc = {
|
||||
"_id": ObjectId(id),
|
||||
"user": user,
|
||||
"name": job_name,
|
||||
"language": job_name,
|
||||
"date": datetime.datetime.now(),
|
||||
"model": settings.EMBEDDINGS_NAME,
|
||||
"type": type,
|
||||
"tokens": tokens,
|
||||
"retriever": retriever,
|
||||
"remote_data": remote_data,
|
||||
"sync_frequency": sync_frequency,
|
||||
"file_path": file_path,
|
||||
"directory_structure": directory_structure,
|
||||
}
|
||||
if file_name_map is not None:
|
||||
insert_doc["file_name_map"] = file_name_map
|
||||
sources_collection.insert_one(insert_doc)
|
||||
return {"status": "ok"}
|
||||
|
||||
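A minimal client-side sketch of the deny-by-default check in verify_internal_key above: every request to an internal endpoint must carry the configured key in the X-Internal-Key header. The host, key value, query parameters, and use of the requests library are illustrative assumptions.

import requests

resp = requests.get(
    "http://localhost:7091/api/download",   # example internal endpoint and host
    params={"user": "local"},                # illustrative query parameters
    headers={"X-Internal-Key": "change-me"},  # must match settings.INTERNAL_KEY
)
# A missing or wrong key, or an unset INTERNAL_KEY, yields 401 Unauthorized.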
@@ -0,0 +1,5 @@
"""User API module - provides all user-related API endpoints"""

from .routes import user

__all__ = ["user"]
application/api/user/agents/__init__.py (new file, 8 lines)
@@ -0,0 +1,8 @@
"""Agents module."""

from .routes import agents_ns
from .sharing import agents_sharing_ns
from .webhooks import agents_webhooks_ns
from .folders import agents_folders_ns

__all__ = ["agents_ns", "agents_sharing_ns", "agents_webhooks_ns", "agents_folders_ns"]
application/api/user/agents/folders.py (new file, 266 lines)
@@ -0,0 +1,266 @@
|
||||
"""
|
||||
Agent folders management routes.
|
||||
Provides virtual folder organization for agents (Google Drive-like structure).
|
||||
"""
|
||||
|
||||
import datetime
|
||||
from bson.objectid import ObjectId
|
||||
from flask import current_app, jsonify, make_response, request
|
||||
from flask_restx import Namespace, Resource, fields
|
||||
|
||||
from application.api import api
|
||||
from application.api.user.base import (
|
||||
agent_folders_collection,
|
||||
agents_collection,
|
||||
)
|
||||
|
||||
agents_folders_ns = Namespace(
|
||||
"agents_folders", description="Agent folder management", path="/api/agents/folders"
|
||||
)
|
||||
|
||||
|
||||
def _folder_error_response(message: str, err: Exception):
|
||||
current_app.logger.error(f"{message}: {err}", exc_info=True)
|
||||
return make_response(jsonify({"success": False, "message": message}), 400)
|
||||
|
||||
|
||||
@agents_folders_ns.route("/")
|
||||
class AgentFolders(Resource):
|
||||
@api.doc(description="Get all folders for the user")
|
||||
def get(self):
|
||||
decoded_token = request.decoded_token
|
||||
if not decoded_token:
|
||||
return make_response(jsonify({"success": False}), 401)
|
||||
user = decoded_token.get("sub")
|
||||
try:
|
||||
folders = list(agent_folders_collection.find({"user": user}))
|
||||
result = [
|
||||
{
|
||||
"id": str(f["_id"]),
|
||||
"name": f["name"],
|
||||
"parent_id": f.get("parent_id"),
|
||||
"created_at": f.get("created_at", "").isoformat() if f.get("created_at") else None,
|
||||
"updated_at": f.get("updated_at", "").isoformat() if f.get("updated_at") else None,
|
||||
}
|
||||
for f in folders
|
||||
]
|
||||
return make_response(jsonify({"folders": result}), 200)
|
||||
except Exception as err:
|
||||
return _folder_error_response("Failed to fetch folders", err)
|
||||
|
||||
@api.doc(description="Create a new folder")
|
||||
@api.expect(
|
||||
api.model(
|
||||
"CreateFolder",
|
||||
{
|
||||
"name": fields.String(required=True, description="Folder name"),
|
||||
"parent_id": fields.String(required=False, description="Parent folder ID"),
|
||||
},
|
||||
)
|
||||
)
|
||||
def post(self):
|
||||
decoded_token = request.decoded_token
|
||||
if not decoded_token:
|
||||
return make_response(jsonify({"success": False}), 401)
|
||||
user = decoded_token.get("sub")
|
||||
data = request.get_json()
|
||||
if not data or not data.get("name"):
|
||||
return make_response(jsonify({"success": False, "message": "Folder name is required"}), 400)
|
||||
|
||||
parent_id = data.get("parent_id")
|
||||
if parent_id:
|
||||
parent = agent_folders_collection.find_one({"_id": ObjectId(parent_id), "user": user})
|
||||
if not parent:
|
||||
return make_response(jsonify({"success": False, "message": "Parent folder not found"}), 404)
|
||||
|
||||
try:
|
||||
now = datetime.datetime.now(datetime.timezone.utc)
|
||||
folder = {
|
||||
"user": user,
|
||||
"name": data["name"],
|
||||
"parent_id": parent_id,
|
||||
"created_at": now,
|
||||
"updated_at": now,
|
||||
}
|
||||
result = agent_folders_collection.insert_one(folder)
|
||||
return make_response(
|
||||
jsonify({"id": str(result.inserted_id), "name": data["name"], "parent_id": parent_id}),
|
||||
201,
|
||||
)
|
||||
except Exception as err:
|
||||
return _folder_error_response("Failed to create folder", err)
|
||||
|
||||
|
||||
@agents_folders_ns.route("/<string:folder_id>")
|
||||
class AgentFolder(Resource):
|
||||
@api.doc(description="Get a specific folder with its agents")
|
||||
def get(self, folder_id):
|
||||
decoded_token = request.decoded_token
|
||||
if not decoded_token:
|
||||
return make_response(jsonify({"success": False}), 401)
|
||||
user = decoded_token.get("sub")
|
||||
try:
|
||||
folder = agent_folders_collection.find_one({"_id": ObjectId(folder_id), "user": user})
|
||||
if not folder:
|
||||
return make_response(jsonify({"success": False, "message": "Folder not found"}), 404)
|
||||
|
||||
agents = list(agents_collection.find({"user": user, "folder_id": folder_id}))
|
||||
agents_list = [
|
||||
{"id": str(a["_id"]), "name": a["name"], "description": a.get("description", "")}
|
||||
for a in agents
|
||||
]
|
||||
subfolders = list(agent_folders_collection.find({"user": user, "parent_id": folder_id}))
|
||||
subfolders_list = [{"id": str(sf["_id"]), "name": sf["name"]} for sf in subfolders]
|
||||
|
||||
return make_response(
|
||||
jsonify({
|
||||
"id": str(folder["_id"]),
|
||||
"name": folder["name"],
|
||||
"parent_id": folder.get("parent_id"),
|
||||
"agents": agents_list,
|
||||
"subfolders": subfolders_list,
|
||||
}),
|
||||
200,
|
||||
)
|
||||
except Exception as err:
|
||||
return _folder_error_response("Failed to fetch folder", err)
|
||||
|
||||
@api.doc(description="Update a folder")
|
||||
def put(self, folder_id):
|
||||
decoded_token = request.decoded_token
|
||||
if not decoded_token:
|
||||
return make_response(jsonify({"success": False}), 401)
|
||||
user = decoded_token.get("sub")
|
||||
data = request.get_json()
|
||||
if not data:
|
||||
return make_response(jsonify({"success": False, "message": "No data provided"}), 400)
|
||||
|
||||
try:
|
||||
update_fields = {"updated_at": datetime.datetime.now(datetime.timezone.utc)}
|
||||
if "name" in data:
|
||||
update_fields["name"] = data["name"]
|
||||
if "parent_id" in data:
|
||||
if data["parent_id"] == folder_id:
|
||||
return make_response(jsonify({"success": False, "message": "Cannot set folder as its own parent"}), 400)
|
||||
update_fields["parent_id"] = data["parent_id"]
|
||||
|
||||
result = agent_folders_collection.update_one(
|
||||
{"_id": ObjectId(folder_id), "user": user}, {"$set": update_fields}
|
||||
)
|
||||
if result.matched_count == 0:
|
||||
return make_response(jsonify({"success": False, "message": "Folder not found"}), 404)
|
||||
return make_response(jsonify({"success": True}), 200)
|
||||
except Exception as err:
|
||||
return _folder_error_response("Failed to update folder", err)
|
||||
|
||||
@api.doc(description="Delete a folder")
|
||||
def delete(self, folder_id):
|
||||
decoded_token = request.decoded_token
|
||||
if not decoded_token:
|
||||
return make_response(jsonify({"success": False}), 401)
|
||||
user = decoded_token.get("sub")
|
||||
try:
|
||||
agents_collection.update_many(
|
||||
{"user": user, "folder_id": folder_id}, {"$unset": {"folder_id": ""}}
|
||||
)
|
||||
agent_folders_collection.update_many(
|
||||
{"user": user, "parent_id": folder_id}, {"$unset": {"parent_id": ""}}
|
||||
)
|
||||
result = agent_folders_collection.delete_one({"_id": ObjectId(folder_id), "user": user})
|
||||
if result.deleted_count == 0:
|
||||
return make_response(jsonify({"success": False, "message": "Folder not found"}), 404)
|
||||
return make_response(jsonify({"success": True}), 200)
|
||||
except Exception as err:
|
||||
return _folder_error_response("Failed to delete folder", err)
|
||||
|
||||
|
||||
@agents_folders_ns.route("/move_agent")
|
||||
class MoveAgentToFolder(Resource):
|
||||
@api.doc(description="Move an agent to a folder or remove from folder")
|
||||
@api.expect(
|
||||
api.model(
|
||||
"MoveAgent",
|
||||
{
|
||||
"agent_id": fields.String(required=True, description="Agent ID to move"),
|
||||
"folder_id": fields.String(required=False, description="Target folder ID (null to remove from folder)"),
|
||||
},
|
||||
)
|
||||
)
|
||||
def post(self):
|
||||
decoded_token = request.decoded_token
|
||||
if not decoded_token:
|
||||
return make_response(jsonify({"success": False}), 401)
|
||||
user = decoded_token.get("sub")
|
||||
data = request.get_json()
|
||||
if not data or not data.get("agent_id"):
|
||||
return make_response(jsonify({"success": False, "message": "Agent ID is required"}), 400)
|
||||
|
||||
agent_id = data["agent_id"]
|
||||
folder_id = data.get("folder_id")
|
||||
|
||||
try:
|
||||
agent = agents_collection.find_one({"_id": ObjectId(agent_id), "user": user})
|
||||
if not agent:
|
||||
return make_response(jsonify({"success": False, "message": "Agent not found"}), 404)
|
||||
|
||||
if folder_id:
|
||||
folder = agent_folders_collection.find_one({"_id": ObjectId(folder_id), "user": user})
|
||||
if not folder:
|
||||
return make_response(jsonify({"success": False, "message": "Folder not found"}), 404)
|
||||
agents_collection.update_one(
|
||||
{"_id": ObjectId(agent_id)}, {"$set": {"folder_id": folder_id}}
|
||||
)
|
||||
else:
|
||||
agents_collection.update_one(
|
||||
{"_id": ObjectId(agent_id)}, {"$unset": {"folder_id": ""}}
|
||||
)
|
||||
|
||||
return make_response(jsonify({"success": True}), 200)
|
||||
except Exception as err:
|
||||
return _folder_error_response("Failed to move agent", err)
|
||||
|
||||
|
||||
@agents_folders_ns.route("/bulk_move")
|
||||
class BulkMoveAgents(Resource):
|
||||
@api.doc(description="Move multiple agents to a folder")
|
||||
@api.expect(
|
||||
api.model(
|
||||
"BulkMoveAgents",
|
||||
{
|
||||
"agent_ids": fields.List(fields.String, required=True, description="List of agent IDs"),
|
||||
"folder_id": fields.String(required=False, description="Target folder ID"),
|
||||
},
|
||||
)
|
||||
)
|
||||
def post(self):
|
||||
decoded_token = request.decoded_token
|
||||
if not decoded_token:
|
||||
return make_response(jsonify({"success": False}), 401)
|
||||
user = decoded_token.get("sub")
|
||||
data = request.get_json()
|
||||
if not data or not data.get("agent_ids"):
|
||||
return make_response(jsonify({"success": False, "message": "Agent IDs are required"}), 400)
|
||||
|
||||
agent_ids = data["agent_ids"]
|
||||
folder_id = data.get("folder_id")
|
||||
|
||||
try:
|
||||
if folder_id:
|
||||
folder = agent_folders_collection.find_one({"_id": ObjectId(folder_id), "user": user})
|
||||
if not folder:
|
||||
return make_response(jsonify({"success": False, "message": "Folder not found"}), 404)
|
||||
|
||||
object_ids = [ObjectId(aid) for aid in agent_ids]
|
||||
if folder_id:
|
||||
agents_collection.update_many(
|
||||
{"_id": {"$in": object_ids}, "user": user},
|
||||
{"$set": {"folder_id": folder_id}},
|
||||
)
|
||||
else:
|
||||
agents_collection.update_many(
|
||||
{"_id": {"$in": object_ids}, "user": user},
|
||||
{"$unset": {"folder_id": ""}},
|
||||
)
|
||||
return make_response(jsonify({"success": True}), 200)
|
||||
except Exception as err:
|
||||
return _folder_error_response("Failed to move agents", err)
|
||||
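An illustrative call against the bulk_move endpoint above. The host, agent and folder IDs, and the Authorization header name are assumptions for the sketch; the payload keys come from the route's model.

import requests

payload = {
    "agent_ids": ["665f1c0b9d2e4a001f3b2a7c", "665f1c0b9d2e4a001f3b2a7d"],  # example IDs
    "folder_id": "665f1c0b9d2e4a001f3b2a7e",  # omit or set to None to clear the folder assignment
}
requests.post(
    "http://localhost:7091/api/agents/folders/bulk_move",  # host is illustrative
    json=payload,
    headers={"Authorization": "Bearer <jwt>"},  # auth header name is an assumption
)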
application/api/user/agents/routes.py (new file, 1441 lines)
File diff suppressed because it is too large

application/api/user/agents/sharing.py (new file, 263 lines)
@@ -0,0 +1,263 @@
|
||||
"""Agent management sharing functionality."""
|
||||
|
||||
import datetime
|
||||
import secrets
|
||||
|
||||
from bson import DBRef
|
||||
from bson.objectid import ObjectId
|
||||
from flask import current_app, jsonify, make_response, request
|
||||
from flask_restx import fields, Namespace, Resource
|
||||
|
||||
from application.api import api
|
||||
from application.core.settings import settings
|
||||
from application.api.user.base import (
|
||||
agents_collection,
|
||||
db,
|
||||
ensure_user_doc,
|
||||
resolve_tool_details,
|
||||
user_tools_collection,
|
||||
users_collection,
|
||||
)
|
||||
from application.utils import generate_image_url
|
||||
|
||||
agents_sharing_ns = Namespace(
|
||||
"agents", description="Agent management operations", path="/api"
|
||||
)
|
||||
|
||||
|
||||
@agents_sharing_ns.route("/shared_agent")
|
||||
class SharedAgent(Resource):
|
||||
@api.doc(
|
||||
params={
|
||||
"token": "Shared token of the agent",
|
||||
},
|
||||
description="Get a shared agent by token or ID",
|
||||
)
|
||||
def get(self):
|
||||
shared_token = request.args.get("token")
|
||||
|
||||
if not shared_token:
|
||||
return make_response(
|
||||
jsonify({"success": False, "message": "Token or ID is required"}), 400
|
||||
)
|
||||
try:
|
||||
query = {
|
||||
"shared_publicly": True,
|
||||
"shared_token": shared_token,
|
||||
}
|
||||
shared_agent = agents_collection.find_one(query)
|
||||
if not shared_agent:
|
||||
return make_response(
|
||||
jsonify({"success": False, "message": "Shared agent not found"}),
|
||||
404,
|
||||
)
|
||||
agent_id = str(shared_agent["_id"])
|
||||
data = {
|
||||
"id": agent_id,
|
||||
"user": shared_agent.get("user", ""),
|
||||
"name": shared_agent.get("name", ""),
|
||||
"image": (
|
||||
generate_image_url(shared_agent["image"])
|
||||
if shared_agent.get("image")
|
||||
else ""
|
||||
),
|
||||
"description": shared_agent.get("description", ""),
|
||||
"source": (
|
||||
str(source_doc["_id"])
|
||||
if isinstance(shared_agent.get("source"), DBRef)
|
||||
and (source_doc := db.dereference(shared_agent.get("source")))
|
||||
else ""
|
||||
),
|
||||
"chunks": shared_agent.get("chunks", "0"),
|
||||
"retriever": shared_agent.get("retriever", "classic"),
|
||||
"prompt_id": shared_agent.get("prompt_id", "default"),
|
||||
"tools": shared_agent.get("tools", []),
|
||||
"tool_details": resolve_tool_details(shared_agent.get("tools", [])),
|
||||
"agent_type": shared_agent.get("agent_type", ""),
|
||||
"status": shared_agent.get("status", ""),
|
||||
"json_schema": shared_agent.get("json_schema"),
|
||||
"limited_token_mode": shared_agent.get("limited_token_mode", False),
|
||||
"token_limit": shared_agent.get("token_limit", settings.DEFAULT_AGENT_LIMITS["token_limit"]),
|
||||
"limited_request_mode": shared_agent.get("limited_request_mode", False),
|
||||
"request_limit": shared_agent.get("request_limit", settings.DEFAULT_AGENT_LIMITS["request_limit"]),
|
||||
"created_at": shared_agent.get("createdAt", ""),
|
||||
"updated_at": shared_agent.get("updatedAt", ""),
|
||||
"shared": shared_agent.get("shared_publicly", False),
|
||||
"shared_token": shared_agent.get("shared_token", ""),
|
||||
"shared_metadata": shared_agent.get("shared_metadata", {}),
|
||||
}
|
||||
|
||||
if data["tools"]:
|
||||
enriched_tools = []
|
||||
for tool in data["tools"]:
|
||||
tool_data = user_tools_collection.find_one({"_id": ObjectId(tool)})
|
||||
if tool_data:
|
||||
enriched_tools.append(tool_data.get("name", ""))
|
||||
data["tools"] = enriched_tools
|
||||
decoded_token = getattr(request, "decoded_token", None)
|
||||
if decoded_token:
|
||||
user_id = decoded_token.get("sub")
|
||||
owner_id = shared_agent.get("user")
|
||||
|
||||
if user_id != owner_id:
|
||||
ensure_user_doc(user_id)
|
||||
users_collection.update_one(
|
||||
{"user_id": user_id},
|
||||
{"$addToSet": {"agent_preferences.shared_with_me": agent_id}},
|
||||
)
|
||||
return make_response(jsonify(data), 200)
|
||||
except Exception as err:
|
||||
current_app.logger.error(f"Error retrieving shared agent: {err}")
|
||||
return make_response(jsonify({"success": False}), 400)
|
||||
|
||||
|
||||
@agents_sharing_ns.route("/shared_agents")
|
||||
class SharedAgents(Resource):
|
||||
@api.doc(description="Get shared agents explicitly shared with the user")
|
||||
def get(self):
|
||||
try:
|
||||
decoded_token = request.decoded_token
|
||||
if not decoded_token:
|
||||
return make_response(jsonify({"success": False}), 401)
|
||||
user_id = decoded_token.get("sub")
|
||||
|
||||
user_doc = ensure_user_doc(user_id)
|
||||
shared_with_ids = user_doc.get("agent_preferences", {}).get(
|
||||
"shared_with_me", []
|
||||
)
|
||||
shared_object_ids = [ObjectId(id) for id in shared_with_ids]
|
||||
|
||||
shared_agents_cursor = agents_collection.find(
|
||||
{"_id": {"$in": shared_object_ids}, "shared_publicly": True}
|
||||
)
|
||||
shared_agents = list(shared_agents_cursor)
|
||||
|
||||
found_ids_set = {str(agent["_id"]) for agent in shared_agents}
|
||||
stale_ids = [id for id in shared_with_ids if id not in found_ids_set]
|
||||
if stale_ids:
|
||||
users_collection.update_one(
|
||||
{"user_id": user_id},
|
||||
{"$pullAll": {"agent_preferences.shared_with_me": stale_ids}},
|
||||
)
|
||||
pinned_ids = set(user_doc.get("agent_preferences", {}).get("pinned", []))
|
||||
|
||||
list_shared_agents = [
|
||||
{
|
||||
"id": str(agent["_id"]),
|
||||
"name": agent.get("name", ""),
|
||||
"description": agent.get("description", ""),
|
||||
"image": (
|
||||
generate_image_url(agent["image"]) if agent.get("image") else ""
|
||||
),
|
||||
"tools": agent.get("tools", []),
|
||||
"tool_details": resolve_tool_details(agent.get("tools", [])),
|
||||
"agent_type": agent.get("agent_type", ""),
|
||||
"status": agent.get("status", ""),
|
||||
"json_schema": agent.get("json_schema"),
|
||||
"limited_token_mode": agent.get("limited_token_mode", False),
|
||||
"token_limit": agent.get("token_limit", settings.DEFAULT_AGENT_LIMITS["token_limit"]),
|
||||
"limited_request_mode": agent.get("limited_request_mode", False),
|
||||
"request_limit": agent.get("request_limit", settings.DEFAULT_AGENT_LIMITS["request_limit"]),
|
||||
"created_at": agent.get("createdAt", ""),
|
||||
"updated_at": agent.get("updatedAt", ""),
|
||||
"pinned": str(agent["_id"]) in pinned_ids,
|
||||
"shared": agent.get("shared_publicly", False),
|
||||
"shared_token": agent.get("shared_token", ""),
|
||||
"shared_metadata": agent.get("shared_metadata", {}),
|
||||
}
|
||||
for agent in shared_agents
|
||||
]
|
||||
|
||||
return make_response(jsonify(list_shared_agents), 200)
|
||||
except Exception as err:
|
||||
current_app.logger.error(f"Error retrieving shared agents: {err}")
|
||||
return make_response(jsonify({"success": False}), 400)
|
||||
|
||||
|
||||
@agents_sharing_ns.route("/share_agent")
|
||||
class ShareAgent(Resource):
|
||||
@api.expect(
|
||||
api.model(
|
||||
"ShareAgentModel",
|
||||
{
|
||||
"id": fields.String(required=True, description="ID of the agent"),
|
||||
"shared": fields.Boolean(
|
||||
required=True, description="Share or unshare the agent"
|
||||
),
|
||||
"username": fields.String(
|
||||
required=False, description="Name of the user"
|
||||
),
|
||||
},
|
||||
)
|
||||
)
|
||||
@api.doc(description="Share or unshare an agent")
|
||||
def put(self):
|
||||
decoded_token = request.decoded_token
|
||||
if not decoded_token:
|
||||
return make_response(jsonify({"success": False}), 401)
|
||||
user = decoded_token.get("sub")
|
||||
|
||||
data = request.get_json()
|
||||
if not data:
|
||||
return make_response(
|
||||
jsonify({"success": False, "message": "Missing JSON body"}), 400
|
||||
)
|
||||
agent_id = data.get("id")
|
||||
shared = data.get("shared")
|
||||
username = data.get("username", "")
|
||||
|
||||
if not agent_id:
|
||||
return make_response(
|
||||
jsonify({"success": False, "message": "ID is required"}), 400
|
||||
)
|
||||
if shared is None:
|
||||
return make_response(
|
||||
jsonify(
|
||||
{
|
||||
"success": False,
|
||||
"message": "Shared parameter is required and must be true or false",
|
||||
}
|
||||
),
|
||||
400,
|
||||
)
|
||||
try:
|
||||
try:
|
||||
agent_oid = ObjectId(agent_id)
|
||||
except Exception:
|
||||
return make_response(
|
||||
jsonify({"success": False, "message": "Invalid agent ID"}), 400
|
||||
)
|
||||
agent = agents_collection.find_one({"_id": agent_oid, "user": user})
|
||||
if not agent:
|
||||
return make_response(
|
||||
jsonify({"success": False, "message": "Agent not found"}), 404
|
||||
)
|
||||
if shared:
|
||||
shared_metadata = {
|
||||
"shared_by": username,
|
||||
"shared_at": datetime.datetime.now(datetime.timezone.utc),
|
||||
}
|
||||
shared_token = secrets.token_urlsafe(32)
|
||||
agents_collection.update_one(
|
||||
{"_id": agent_oid, "user": user},
|
||||
{
|
||||
"$set": {
|
||||
"shared_publicly": shared,
|
||||
"shared_metadata": shared_metadata,
|
||||
"shared_token": shared_token,
|
||||
}
|
||||
},
|
||||
)
|
||||
else:
|
||||
# Combine $set and $unset into a single update document; passing them as
# separate positional arguments would be interpreted as the upsert flag.
agents_collection.update_one(
{"_id": agent_oid, "user": user},
{
"$set": {"shared_publicly": shared, "shared_token": None},
"$unset": {"shared_metadata": ""},
},
)
|
||||
except Exception as err:
|
||||
current_app.logger.error(f"Error sharing/unsharing agent: {err}", exc_info=True)
|
||||
return make_response(jsonify({"success": False, "error": "Failed to update agent sharing status"}), 400)
|
||||
shared_token = shared_token if shared else None
|
||||
return make_response(
|
||||
jsonify({"success": True, "shared_token": shared_token}), 200
|
||||
)
|
||||
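A small sketch of consuming the sharing endpoints above: share an agent, then fetch it anonymously by the returned token. The host, agent ID, username, and the Authorization header name are illustrative assumptions.

import requests

base = "http://localhost:7091"  # illustrative host
share = requests.put(
    f"{base}/api/share_agent",
    json={"id": "665f1c0b9d2e4a001f3b2a7c", "shared": True, "username": "alice"},  # example values
    headers={"Authorization": "Bearer <jwt>"},  # auth header name is an assumption
).json()

# Anyone holding the token can now read the publicly shared agent record.
agent = requests.get(f"{base}/api/shared_agent", params={"token": share["shared_token"]}).json()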
application/api/user/agents/webhooks.py (new file, 119 lines)
@@ -0,0 +1,119 @@
|
||||
"""Agent management webhook handlers."""
|
||||
|
||||
import secrets
|
||||
|
||||
from bson.objectid import ObjectId
|
||||
from flask import current_app, jsonify, make_response, request
|
||||
from flask_restx import Namespace, Resource
|
||||
|
||||
from application.api import api
|
||||
from application.api.user.base import agents_collection, require_agent
|
||||
from application.api.user.tasks import process_agent_webhook
|
||||
from application.core.settings import settings
|
||||
|
||||
|
||||
agents_webhooks_ns = Namespace(
|
||||
"agents", description="Agent management operations", path="/api"
|
||||
)
|
||||
|
||||
|
||||
@agents_webhooks_ns.route("/agent_webhook")
|
||||
class AgentWebhook(Resource):
|
||||
@api.doc(
|
||||
params={"id": "ID of the agent"},
|
||||
description="Generate webhook URL for the agent",
|
||||
)
|
||||
def get(self):
|
||||
decoded_token = request.decoded_token
|
||||
if not decoded_token:
|
||||
return make_response(jsonify({"success": False}), 401)
|
||||
user = decoded_token.get("sub")
|
||||
agent_id = request.args.get("id")
|
||||
if not agent_id:
|
||||
return make_response(
|
||||
jsonify({"success": False, "message": "ID is required"}), 400
|
||||
)
|
||||
try:
|
||||
agent = agents_collection.find_one(
|
||||
{"_id": ObjectId(agent_id), "user": user}
|
||||
)
|
||||
if not agent:
|
||||
return make_response(
|
||||
jsonify({"success": False, "message": "Agent not found"}), 404
|
||||
)
|
||||
webhook_token = agent.get("incoming_webhook_token")
|
||||
if not webhook_token:
|
||||
webhook_token = secrets.token_urlsafe(32)
|
||||
agents_collection.update_one(
|
||||
{"_id": ObjectId(agent_id), "user": user},
|
||||
{"$set": {"incoming_webhook_token": webhook_token}},
|
||||
)
|
||||
base_url = settings.API_URL.rstrip("/")
|
||||
full_webhook_url = f"{base_url}/api/webhooks/agents/{webhook_token}"
|
||||
except Exception as err:
|
||||
current_app.logger.error(
|
||||
f"Error generating webhook URL: {err}", exc_info=True
|
||||
)
|
||||
return make_response(
|
||||
jsonify({"success": False, "message": "Error generating webhook URL"}),
|
||||
400,
|
||||
)
|
||||
return make_response(
|
||||
jsonify({"success": True, "webhook_url": full_webhook_url}), 200
|
||||
)
|
||||
|
||||
|
||||
@agents_webhooks_ns.route("/webhooks/agents/<string:webhook_token>")
|
||||
class AgentWebhookListener(Resource):
|
||||
method_decorators = [require_agent]
|
||||
|
||||
def _enqueue_webhook_task(self, agent_id_str, payload, source_method):
|
||||
if not payload:
|
||||
current_app.logger.warning(
|
||||
f"Webhook ({source_method}) received for agent {agent_id_str} with empty payload."
|
||||
)
|
||||
current_app.logger.info(
|
||||
f"Incoming {source_method} webhook for agent {agent_id_str}. Enqueuing task with payload: {payload}"
|
||||
)
|
||||
|
||||
try:
|
||||
task = process_agent_webhook.delay(
|
||||
agent_id=agent_id_str,
|
||||
payload=payload,
|
||||
)
|
||||
current_app.logger.info(
|
||||
f"Task {task.id} enqueued for agent {agent_id_str} ({source_method})."
|
||||
)
|
||||
return make_response(jsonify({"success": True, "task_id": task.id}), 200)
|
||||
except Exception as err:
|
||||
current_app.logger.error(
|
||||
f"Error enqueuing webhook task ({source_method}) for agent {agent_id_str}: {err}",
|
||||
exc_info=True,
|
||||
)
|
||||
return make_response(
|
||||
jsonify({"success": False, "message": "Error processing webhook"}), 500
|
||||
)
|
||||
|
||||
@api.doc(
|
||||
description="Webhook listener for agent events (POST). Expects JSON payload, which is used to trigger processing.",
|
||||
)
|
||||
def post(self, webhook_token, agent, agent_id_str):
|
||||
payload = request.get_json()
|
||||
if payload is None:
|
||||
return make_response(
|
||||
jsonify(
|
||||
{
|
||||
"success": False,
|
||||
"message": "Invalid or missing JSON data in request body",
|
||||
}
|
||||
),
|
||||
400,
|
||||
)
|
||||
return self._enqueue_webhook_task(agent_id_str, payload, source_method="POST")
|
||||
|
||||
@api.doc(
|
||||
description="Webhook listener for agent events (GET). Uses URL query parameters as payload to trigger processing.",
|
||||
)
|
||||
def get(self, webhook_token, agent, agent_id_str):
|
||||
payload = request.args.to_dict(flat=True)
|
||||
return self._enqueue_webhook_task(agent_id_str, payload, source_method="GET")
|
||||
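A quick sketch of triggering the webhook listener above once a URL has been generated via /api/agent_webhook: a POST sends its JSON body as the task payload, while a GET uses its query parameters. The host, token, and payload contents are illustrative.

import requests

webhook_url = "http://localhost:7091/api/webhooks/agents/<token>"  # value returned by /api/agent_webhook

# POST: the JSON body becomes the payload handed to process_agent_webhook.
requests.post(webhook_url, json={"event": "ticket_created", "id": 42})  # example payload

# GET: the query parameters become the payload instead.
requests.get(webhook_url, params={"event": "ping"})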
Some files were not shown because too many files have changed in this diff