mirror of
https://github.com/jxxghp/MoviePilot.git
synced 2026-05-07 08:42:50 +08:00
Compare commits
2366 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
06197144c0 | ||
|
|
62541ffe43 | ||
|
|
c762628217 | ||
|
|
caf615f3bd | ||
|
|
27436757a0 | ||
|
|
924d54dfd3 | ||
|
|
39f9550f86 | ||
|
|
367ecafbbb | ||
|
|
10467244e0 | ||
|
|
cb6dcc6a2e | ||
|
|
43c421b0bb | ||
|
|
45d0891502 | ||
|
|
76c5f54465 | ||
|
|
bcf8116172 | ||
|
|
1f889596b7 | ||
|
|
04443fcfba | ||
|
|
5d7a7fd301 | ||
|
|
4d0a722b09 | ||
|
|
db6dc926cf | ||
|
|
4bb4f5aeb5 | ||
|
|
58e25fe900 | ||
|
|
03f6b9bc96 | ||
|
|
6fdda3a570 | ||
|
|
100eaec38f | ||
|
|
b129508304 | ||
|
|
53bf81aede | ||
|
|
afcc071d07 | ||
|
|
2ea617655c | ||
|
|
0583495548 | ||
|
|
516aea6312 | ||
|
|
2d412cae1c | ||
|
|
45f5326fb4 | ||
|
|
2ccea2da39 | ||
|
|
53f6897d62 | ||
|
|
28a2386f2f | ||
|
|
abda9d3212 | ||
|
|
34e7c4ac14 | ||
|
|
b228107a25 | ||
|
|
2375508616 | ||
|
|
baebd0ed1a | ||
|
|
6532c60a3c | ||
|
|
11478faff3 | ||
|
|
e9291cec6a | ||
|
|
7586a2cd42 | ||
|
|
ef5bd29759 | ||
|
|
7ab643d34a | ||
|
|
0b7505a604 | ||
|
|
460d716512 | ||
|
|
b6f0ef99ab | ||
|
|
af35101774 | ||
|
|
9ed5018cc2 | ||
|
|
7299733960 | ||
|
|
bd5c3d848c | ||
|
|
38c48fa4ce | ||
|
|
b7749c44fd | ||
|
|
e4a7333b79 | ||
|
|
4b27b7bc42 | ||
|
|
c91e87115a | ||
|
|
4a3cc5ee18 | ||
|
|
54d6c2ad4a | ||
|
|
090dcacd30 | ||
|
|
344280cd61 | ||
|
|
2c7fb5786c | ||
|
|
6b9790026c | ||
|
|
6c70531967 | ||
|
|
bcc321eb70 | ||
|
|
2ff1cd1045 | ||
|
|
7fc496cf5b | ||
|
|
8789f35228 | ||
|
|
d4dec90e2f | ||
|
|
5c1487a9a6 | ||
|
|
c5b716c231 | ||
|
|
483fe55372 | ||
|
|
5d588ee127 | ||
|
|
afcd895f52 | ||
|
|
1ded58adbb | ||
|
|
019a077407 | ||
|
|
0f190057d3 | ||
|
|
840c8f7298 | ||
|
|
6a6bcf59a0 | ||
|
|
323844b26d | ||
|
|
140d224a9a | ||
|
|
7bc032d17c | ||
|
|
2df476dbff | ||
|
|
bae086d8b8 | ||
|
|
221eb21694 | ||
|
|
4208c79d72 | ||
|
|
90245a13e1 | ||
|
|
b5979b9b09 | ||
|
|
0277288a41 | ||
|
|
79bfeaf2af | ||
|
|
4fe41ba5e9 | ||
|
|
14d6e2febc | ||
|
|
97c7e71207 | ||
|
|
8f29a218ea | ||
|
|
4fd5aa3eb6 | ||
|
|
bfc27d151c | ||
|
|
f2b56b8f40 | ||
|
|
a05ffc07d4 | ||
|
|
4a81417fb7 | ||
|
|
c7fa3dc863 | ||
|
|
28f9756dd6 | ||
|
|
4bffe2cff1 | ||
|
|
fca478f1d8 | ||
|
|
097dff13a3 | ||
|
|
460b386004 | ||
|
|
89bf89c02d | ||
|
|
cefb60ba2c | ||
|
|
8c78627647 | ||
|
|
51189210c2 | ||
|
|
38933d5882 | ||
|
|
4619fc4042 | ||
|
|
ee7ba28235 | ||
|
|
409abb66be | ||
|
|
8aa8b1897b | ||
|
|
8c256d91bd | ||
|
|
d1d3fc7f30 | ||
|
|
ae15eac0f8 | ||
|
|
1282ad5004 | ||
|
|
6f6fcc79f2 | ||
|
|
e5c64e73b5 | ||
|
|
93a19b467b | ||
|
|
4ba8d42272 | ||
|
|
32e247b4d5 | ||
|
|
1d0d09c909 | ||
|
|
b7ee6ca8c4 | ||
|
|
4a4d93e7f9 | ||
|
|
7b096c0a09 | ||
|
|
3a93efb082 | ||
|
|
73cdd297b1 | ||
|
|
83187ea17d | ||
|
|
6d8eed30ce | ||
|
|
6fa48afa34 | ||
|
|
115fb40772 | ||
|
|
10b0dbb5d3 | ||
|
|
4c32ad902b | ||
|
|
787db8f5ac | ||
|
|
df1b2067b6 | ||
|
|
f3d9f25d02 | ||
|
|
eea7e3b55f | ||
|
|
810cb0a203 | ||
|
|
e0e21e39a2 | ||
|
|
cc31c66b93 | ||
|
|
011535fbc3 | ||
|
|
77b95d11fb | ||
|
|
89f6164eba | ||
|
|
70350aa39f | ||
|
|
61a0a66c47 | ||
|
|
6fcc5c84a6 | ||
|
|
5995b3f3e8 | ||
|
|
60996be71b | ||
|
|
49b50e5975 | ||
|
|
262bd6808b | ||
|
|
e9c8db9950 | ||
|
|
02a98f832f | ||
|
|
9a2a241a30 | ||
|
|
04c2a1eb18 | ||
|
|
65a4b7438c | ||
|
|
13c3c082b8 | ||
|
|
bf127d6a70 | ||
|
|
117672384c | ||
|
|
2ae2ea8ef7 | ||
|
|
7a5e513f25 | ||
|
|
81828948dd | ||
|
|
eda73e14f7 | ||
|
|
6aec326d05 | ||
|
|
d36dd69ec3 | ||
|
|
1688063450 | ||
|
|
ae5207f0e4 | ||
|
|
f1f4743936 | ||
|
|
e09f9ad009 | ||
|
|
8d938c2273 | ||
|
|
e5f97cd299 | ||
|
|
9dababbcfd | ||
|
|
9d8bd5044b | ||
|
|
5d07381111 | ||
|
|
61c695b77d | ||
|
|
1ceb8891b0 | ||
|
|
2f53fd3108 | ||
|
|
bf2d2cbd03 | ||
|
|
cb323653b8 | ||
|
|
edf3946558 | ||
|
|
6c5fae56d9 | ||
|
|
a4f2c574b0 | ||
|
|
815d83bfb3 | ||
|
|
df3294c9d2 | ||
|
|
1af5f02832 | ||
|
|
217fcfd1b2 | ||
|
|
80825584ac | ||
|
|
10543eedd0 | ||
|
|
bf12a8679d | ||
|
|
8cd12ab584 | ||
|
|
351de8b4da | ||
|
|
75fca971d4 | ||
|
|
22f3244bf5 | ||
|
|
aafc4b3a39 | ||
|
|
18906e5ab2 | ||
|
|
9675d199f9 | ||
|
|
78e8faa203 | ||
|
|
d5ed9bc654 | ||
|
|
770065d9ed | ||
|
|
abc4154e2c | ||
|
|
fd6c9d5d34 | ||
|
|
dc428e7de0 | ||
|
|
0c51d79be7 | ||
|
|
1b489ba581 | ||
|
|
4d9f17b083 | ||
|
|
3c7cd2186f | ||
|
|
5acfd683b9 | ||
|
|
6b01901a4a | ||
|
|
1ca54afd6c | ||
|
|
9c75c2d22e | ||
|
|
79ec3ed2c3 | ||
|
|
7072d2cfe8 | ||
|
|
c0c08b0b84 | ||
|
|
01329195ee | ||
|
|
ad40b99313 | ||
|
|
1e338e48ab | ||
|
|
ac9c9598f4 | ||
|
|
02cb5dfc31 | ||
|
|
8109ffb445 | ||
|
|
0ecbcb89fa | ||
|
|
8f38c06424 | ||
|
|
902394f86e | ||
|
|
9fefd807f9 | ||
|
|
a8fb4a6d84 | ||
|
|
7806267e92 | ||
|
|
eb5e17a115 | ||
|
|
2ae98d628d | ||
|
|
8b9dc0e77f | ||
|
|
2f151cea64 | ||
|
|
b777e8cab1 | ||
|
|
663e37bd03 | ||
|
|
8960620883 | ||
|
|
5b892b3a63 | ||
|
|
974d5f2f49 | ||
|
|
f70881bb4f | ||
|
|
376c65335f | ||
|
|
d7a5c32b08 | ||
|
|
4cda182ccd | ||
|
|
60ac901c6c | ||
|
|
388afa8d3c | ||
|
|
ec0915e488 | ||
|
|
244112be5c | ||
|
|
1f526adbe7 | ||
|
|
c4cfd70f7c | ||
|
|
c9149d1761 | ||
|
|
c68450fc7f | ||
|
|
d9eb3295b0 | ||
|
|
5440dbae51 | ||
|
|
321bf94de8 | ||
|
|
84b938c0d2 | ||
|
|
fc47382938 | ||
|
|
2e034f7990 | ||
|
|
e61299f748 | ||
|
|
cbff2fed17 | ||
|
|
9c51f73a72 | ||
|
|
70109635c7 | ||
|
|
8999c3a855 | ||
|
|
7bd775130e | ||
|
|
4bba7dbe76 | ||
|
|
0cab21b83c | ||
|
|
ca9cbc1160 | ||
|
|
02439f55a9 | ||
|
|
2d358e376c | ||
|
|
b349aa2693 | ||
|
|
e3fee39043 | ||
|
|
a1a72df6c6 | ||
|
|
cdf40a7046 | ||
|
|
b9b19c9acc | ||
|
|
8c603baa43 | ||
|
|
a977948f2b | ||
|
|
f70eaf9363 | ||
|
|
bfea0174dd | ||
|
|
296d815e3e | ||
|
|
c3b7a50642 | ||
|
|
8e0a9f94f6 | ||
|
|
6806900436 | ||
|
|
a8ecdc8206 | ||
|
|
60e1e3c173 | ||
|
|
f859d99d91 | ||
|
|
31640b780c | ||
|
|
aaeb4d2634 | ||
|
|
75d4c0153c | ||
|
|
8d7ff2bd1d | ||
|
|
c3e96ae73f | ||
|
|
d8c86069f2 | ||
|
|
a25c709927 | ||
|
|
d7c62fb55a | ||
|
|
27cc559c86 | ||
|
|
e7d14691df | ||
|
|
20387a0085 | ||
|
|
740b0a1396 | ||
|
|
7d0c790185 | ||
|
|
a12147d0f5 | ||
|
|
213a298813 | ||
|
|
1acf78342c | ||
|
|
c85d3adb34 | ||
|
|
83bf59dd4d | ||
|
|
d5d6442e1d | ||
|
|
a1fa469026 | ||
|
|
4b4b808b76 | ||
|
|
a6f16dcf8f | ||
|
|
c822782910 | ||
|
|
e598d5edc4 | ||
|
|
d38b6dfc0a | ||
|
|
0a4091d93c | ||
|
|
0399ab73cf | ||
|
|
940cececf4 | ||
|
|
94c75eb1c7 | ||
|
|
de4dbf283b | ||
|
|
10807a6fb7 | ||
|
|
04b8475761 | ||
|
|
e6e50d7f0a | ||
|
|
94ed065344 | ||
|
|
d94b5962b4 | ||
|
|
dcca318733 | ||
|
|
4a789297fe | ||
|
|
1249929b6a | ||
|
|
864af45f85 | ||
|
|
bd68bcfd27 | ||
|
|
17373bc0fe | ||
|
|
4612d3cdde | ||
|
|
517300afe9 | ||
|
|
3c7fdfec3c | ||
|
|
cfc8d26558 | ||
|
|
1c16b8bfec | ||
|
|
aae50004b1 | ||
|
|
4fbd2a7612 | ||
|
|
cede1a1100 | ||
|
|
5d3511cbc2 | ||
|
|
a66e082a8c | ||
|
|
2406438d1b | ||
|
|
be42c78aca | ||
|
|
78b8b30351 | ||
|
|
80e35fa938 | ||
|
|
e82494c444 | ||
|
|
309b7b8a77 | ||
|
|
f2daa633b6 | ||
|
|
630d13ac52 | ||
|
|
40c79b249b | ||
|
|
6f4df912d8 | ||
|
|
5744228a9d | ||
|
|
8c46ece44a | ||
|
|
4cbf1a886e | ||
|
|
17519d5a96 | ||
|
|
faa046eea4 | ||
|
|
873e3832b6 | ||
|
|
d4a15d3b53 | ||
|
|
6ca6a94631 | ||
|
|
61fced0df3 | ||
|
|
b2f6ffddee | ||
|
|
c85805b15d | ||
|
|
a0838ed9cd | ||
|
|
63bbec5db4 | ||
|
|
4bc67dc816 | ||
|
|
9620a06552 | ||
|
|
9b00a5f3f1 | ||
|
|
faa77be843 | ||
|
|
28f158c479 | ||
|
|
90c3afcfa4 | ||
|
|
565e10b6a5 | ||
|
|
773ed5e6f7 | ||
|
|
8351312b2b | ||
|
|
41f53d39a0 | ||
|
|
4873ffda84 | ||
|
|
b79609bb8b | ||
|
|
bdcbb5cce6 | ||
|
|
d1503f9df3 | ||
|
|
210c3234d2 | ||
|
|
c13abfdd0d | ||
|
|
30b332ac7e | ||
|
|
7e9c489aeb | ||
|
|
5739ca7f97 | ||
|
|
e4451c7e6a | ||
|
|
5cded77387 | ||
|
|
ea4e0dd764 | ||
|
|
f105357f96 | ||
|
|
bc2302baeb | ||
|
|
afcdefbbf3 | ||
|
|
3ad8557065 | ||
|
|
e68d607c9b | ||
|
|
8e9cf67190 | ||
|
|
0cb6cd8761 | ||
|
|
17aa795b3e | ||
|
|
7d47096e6e | ||
|
|
48b59df11b | ||
|
|
a90a3b2445 | ||
|
|
d18b68d24a | ||
|
|
78c4ec8bfe | ||
|
|
b50a3b9aae | ||
|
|
4f3eaa12d5 | ||
|
|
cedb0f565c | ||
|
|
226432ec7f | ||
|
|
d93ab0143c | ||
|
|
3d32d66ab1 | ||
|
|
e814eed047 | ||
|
|
96395c1469 | ||
|
|
6065c29891 | ||
|
|
f38cb274e4 | ||
|
|
7bfee87cbf | ||
|
|
2ce2a3754c | ||
|
|
510476c214 | ||
|
|
6cd071c84b | ||
|
|
406e17b3fa | ||
|
|
dd184255ad | ||
|
|
77a0b38081 | ||
|
|
14c3d66ce6 | ||
|
|
858da38680 | ||
|
|
9f381b3c73 | ||
|
|
b8fc20b981 | ||
|
|
b89825525a | ||
|
|
e09cfc6704 | ||
|
|
0c9c303c60 | ||
|
|
3156b43739 | ||
|
|
591aa990a6 | ||
|
|
3be29f36a7 | ||
|
|
7638db4c3b | ||
|
|
0312a500a6 | ||
|
|
1a88b5355a | ||
|
|
3374773de5 | ||
|
|
872b5fe3da | ||
|
|
be15e9871c | ||
|
|
024a6a253b | ||
|
|
1af662df7b | ||
|
|
b4f64eb593 | ||
|
|
86aa86208c | ||
|
|
018e814615 | ||
|
|
e4d6e5cfc7 | ||
|
|
770cd77632 | ||
|
|
9f1692b33d | ||
|
|
6f63e0a5d7 | ||
|
|
6a90e2c796 | ||
|
|
23b90ff0f9 | ||
|
|
dc86af2fa4 | ||
|
|
425b822046 | ||
|
|
65c18b1d52 | ||
|
|
1bddf3daa7 | ||
|
|
600b6af876 | ||
|
|
4bdf16331d | ||
|
|
87cbda0528 | ||
|
|
9897941bf9 | ||
|
|
31938812d0 | ||
|
|
19d879d3f6 | ||
|
|
cc41036c63 | ||
|
|
a9f2b40529 | ||
|
|
86000ea19a | ||
|
|
0422c3b9e7 | ||
|
|
64c8bd5b5a | ||
|
|
a7eba2c5fc | ||
|
|
2b7753e43e | ||
|
|
47c1e5b5b8 | ||
|
|
14ee97def0 | ||
|
|
92e262f732 | ||
|
|
c46880b701 | ||
|
|
473e9b9300 | ||
|
|
28945ef153 | ||
|
|
b6b5d9f9c4 | ||
|
|
ba5de1ab31 | ||
|
|
002ebeaade | ||
|
|
894756000c | ||
|
|
cdb178c503 | ||
|
|
7c48cafc71 | ||
|
|
74d4592238 | ||
|
|
0044dd104e | ||
|
|
05041e2eae | ||
|
|
78908f216d | ||
|
|
efc68ae701 | ||
|
|
e9340a8b4b | ||
|
|
66e199d516 | ||
|
|
6151d8a787 | ||
|
|
296261da8a | ||
|
|
383371dd6f | ||
|
|
bb8c026bda | ||
|
|
344993dd6f | ||
|
|
ffb048c314 | ||
|
|
3eef9b8faa | ||
|
|
5704bb646b | ||
|
|
fbc684b3a7 | ||
|
|
6529b2a9c3 | ||
|
|
a1701e2edf | ||
|
|
eba6391de7 | ||
|
|
9f2c3c9688 | ||
|
|
57f5a19d0c | ||
|
|
c8d53c6964 | ||
|
|
643cda1abe | ||
|
|
03d118a73a | ||
|
|
51dd7f5c17 | ||
|
|
af7e1e7a3c | ||
|
|
ea5d855bc3 | ||
|
|
5f74367cd6 | ||
|
|
26e41e1c14 | ||
|
|
1bb2b50043 | ||
|
|
7bdb629f03 | ||
|
|
fd92f986da | ||
|
|
69a1207102 | ||
|
|
def652c768 | ||
|
|
c35faf5356 | ||
|
|
0615a33206 | ||
|
|
e77530bdc5 | ||
|
|
8c62df63cc | ||
|
|
bd36eade77 | ||
|
|
d2c023081a | ||
|
|
63d0850b38 | ||
|
|
c86659428f | ||
|
|
bf7cc6caf0 | ||
|
|
26b8be6041 | ||
|
|
f978f9196f | ||
|
|
75cb8d2a3c | ||
|
|
17a21ed707 | ||
|
|
f390647139 | ||
|
|
aacd91e196 | ||
|
|
258171c9c4 | ||
|
|
812c5873aa | ||
|
|
4c3d47f1f0 | ||
|
|
ba7b6ba869 | ||
|
|
d0471ae512 | ||
|
|
636c4be9fb | ||
|
|
6bec765a9d | ||
|
|
d61d16ccc4 | ||
|
|
f2a5715b24 | ||
|
|
c064c3781f | ||
|
|
bb4dffe2a4 | ||
|
|
37cf3eeef3 | ||
|
|
40395b2999 | ||
|
|
32afe6445f | ||
|
|
793a991913 | ||
|
|
d278224ff1 | ||
|
|
9b4d0ce6a8 | ||
|
|
a1829fe590 | ||
|
|
2b2b39365c | ||
|
|
1147930f3f | ||
|
|
636f338ed7 | ||
|
|
72365d00b4 | ||
|
|
19d8086732 | ||
|
|
30488418e5 | ||
|
|
2f0badd74a | ||
|
|
6045b0579b | ||
|
|
498f1fec74 | ||
|
|
f6a541f2b9 | ||
|
|
8ce78eabca | ||
|
|
2c34c5309f | ||
|
|
77e680168a | ||
|
|
8a7e59742f | ||
|
|
42bac14770 | ||
|
|
8323834483 | ||
|
|
1751caef62 | ||
|
|
d622d1474d | ||
|
|
f28be2e7de | ||
|
|
17773913ae | ||
|
|
d469c2d3f9 | ||
|
|
4e74d32882 | ||
|
|
7b8cd37a9b | ||
|
|
eda306d726 | ||
|
|
94f3b1fe84 | ||
|
|
c50e3ba293 | ||
|
|
eff7818912 | ||
|
|
270bcff8f3 | ||
|
|
e04963c2dc | ||
|
|
f369967c91 | ||
|
|
cd982c5526 | ||
|
|
16e03c9d37 | ||
|
|
d38b1f5364 | ||
|
|
f57ba4d05e | ||
|
|
172eeaafcf | ||
|
|
3115ed28b2 | ||
|
|
d8dc53805c | ||
|
|
7218d10e1b | ||
|
|
89bf85f501 | ||
|
|
8334a468d0 | ||
|
|
3da80ed077 | ||
|
|
2883ccbe87 | ||
|
|
5d3443fee4 | ||
|
|
27756a53db | ||
|
|
71cde6661d | ||
|
|
a857337b31 | ||
|
|
4ee21ffae4 | ||
|
|
d8399f7e85 | ||
|
|
574ac8d32f | ||
|
|
a2611bfa7d | ||
|
|
853badb76f | ||
|
|
5d69e1d2a5 | ||
|
|
6494f28bdb | ||
|
|
f55916bda2 | ||
|
|
04691ee197 | ||
|
|
2ac0e564e1 | ||
|
|
6072a29a20 | ||
|
|
8658942385 | ||
|
|
cc4859950c | ||
|
|
23b81ad6f1 | ||
|
|
e3b9dca5c0 | ||
|
|
a2359a1ad2 | ||
|
|
cb875b1b34 | ||
|
|
b92a85b4bc | ||
|
|
8c7dd6bab2 | ||
|
|
aad7df64d7 | ||
|
|
8474342007 | ||
|
|
61ccb4be65 | ||
|
|
1c6f69707c | ||
|
|
e08e8c482a | ||
|
|
548c1d2cab | ||
|
|
5a071bf3d1 | ||
|
|
1bffcbd947 | ||
|
|
274a36a83a | ||
|
|
ec40f36114 | ||
|
|
af19f274a7 | ||
|
|
2316004194 | ||
|
|
98762198ef | ||
|
|
1469de22a4 | ||
|
|
1e687f960a | ||
|
|
7f01b835fd | ||
|
|
e46b6c5c01 | ||
|
|
74226ad8df | ||
|
|
f8ae7be539 | ||
|
|
37b16e380d | ||
|
|
9ea3e9f652 | ||
|
|
54422b5181 | ||
|
|
712995dcf3 | ||
|
|
c2767b0fd6 | ||
|
|
179cc61f65 | ||
|
|
f3b910d55a | ||
|
|
f4157b52ea | ||
|
|
79710310ce | ||
|
|
3412498438 | ||
|
|
b896b07a08 | ||
|
|
379bff0622 | ||
|
|
474f47aa9f | ||
|
|
f1e26a4133 | ||
|
|
e37f881207 | ||
|
|
306c0b707b | ||
|
|
08c448ee30 | ||
|
|
1532014067 | ||
|
|
fa9f604af9 | ||
|
|
3b3d0d6539 | ||
|
|
9641d33040 | ||
|
|
eca339d107 | ||
|
|
ca18705d88 | ||
|
|
8f17b52466 | ||
|
|
8cf84e722b | ||
|
|
7c4d736b54 | ||
|
|
1b3ae6ab25 | ||
|
|
a4ad08136e | ||
|
|
df5e7997c5 | ||
|
|
b2cb3768c1 | ||
|
|
fa169c5cd3 | ||
|
|
bbb3975b67 | ||
|
|
4502a9c4fa | ||
|
|
86905a2670 | ||
|
|
b1e60a4867 | ||
|
|
1efe3324fb | ||
|
|
55c1e37d39 | ||
|
|
7fa700317c | ||
|
|
bbe831a57c | ||
|
|
90c86c056c | ||
|
|
36f22a28df | ||
|
|
ac03c51e2c | ||
|
|
bd9e92f705 | ||
|
|
281eff5eb2 | ||
|
|
abbd2253ad | ||
|
|
46466624ae | ||
|
|
0ba8d51b2a | ||
|
|
a1408ee18f | ||
|
|
58030bbcff | ||
|
|
e1b3e6ef01 | ||
|
|
298a6ba8ab | ||
|
|
e5bf47629f | ||
|
|
ea29ee9f66 | ||
|
|
868c2254de | ||
|
|
567522c87a | ||
|
|
25fd47f57b | ||
|
|
f89d6342d1 | ||
|
|
b02affdea3 | ||
|
|
6e5ade943b | ||
|
|
a6ed0c0d00 | ||
|
|
68402aadd7 | ||
|
|
85cacd447b | ||
|
|
11262b321a | ||
|
|
bf290f063d | ||
|
|
7ac0fbaf76 | ||
|
|
7489c76722 | ||
|
|
bcdf1b6efe | ||
|
|
8a9dbe212c | ||
|
|
16bd71a6cb | ||
|
|
71caad0655 | ||
|
|
2c62ffe34a | ||
|
|
3450a89880 | ||
|
|
a081a69bbe | ||
|
|
271d1d23d5 | ||
|
|
605aba1a3c | ||
|
|
be3c2b4c7c | ||
|
|
08eb32d7bd | ||
|
|
2b9cda15e4 | ||
|
|
f6055b290a | ||
|
|
ec665e05e4 | ||
|
|
2b6d7205ec | ||
|
|
41381a920c | ||
|
|
f1b3fc2254 | ||
|
|
a677ed307d | ||
|
|
0ab23ee972 | ||
|
|
43f56d39be | ||
|
|
a39caee5f5 | ||
|
|
2edfdf47c8 | ||
|
|
3819461db5 | ||
|
|
85654dd7dd | ||
|
|
619a70416b | ||
|
|
16d996fe70 | ||
|
|
1baeb6da19 | ||
|
|
1641d432dd | ||
|
|
1bf9862e47 | ||
|
|
602a394043 | ||
|
|
22a2415ca5 | ||
|
|
feb034352d | ||
|
|
a7c8942c78 | ||
|
|
95f2ac3811 | ||
|
|
91354295f2 | ||
|
|
c9c4ab5911 | ||
|
|
a26c5e40dd | ||
|
|
80f5c7bc44 | ||
|
|
4833b39c52 | ||
|
|
f478958943 | ||
|
|
0469ad46d6 | ||
|
|
5fe5deb9df | ||
|
|
ce83bc24bd | ||
|
|
dce729c8cb | ||
|
|
a9d17cd96f | ||
|
|
294bb3d4a1 | ||
|
|
b31b9261f2 | ||
|
|
2211f8d9e4 | ||
|
|
b9b7b00a7f | ||
|
|
843faf6103 | ||
|
|
4af5dad9a8 | ||
|
|
52437c9d18 | ||
|
|
c6cb4c8479 | ||
|
|
c3714ec251 | ||
|
|
dbe2f94af1 | ||
|
|
07fd5f8a9e | ||
|
|
9e64b4cd7f | ||
|
|
f08a7b9eb3 | ||
|
|
a6fa764e2a | ||
|
|
01676668f1 | ||
|
|
8e5e4f460d | ||
|
|
f907b8a84d | ||
|
|
a3a4285f90 | ||
|
|
0979163b79 | ||
|
|
248a25eaee | ||
|
|
f95b1fa68a | ||
|
|
d2b5d69051 | ||
|
|
3ca419b735 | ||
|
|
50e275a2f9 | ||
|
|
aeccf78957 | ||
|
|
cb3cef70e5 | ||
|
|
b9bd303bf8 | ||
|
|
57d4786a7f | ||
|
|
df031455b2 | ||
|
|
30059eff4f | ||
|
|
bc289b48c8 | ||
|
|
067d8b99b8 | ||
|
|
00a6a9c42d | ||
|
|
070425d446 | ||
|
|
7405883444 | ||
|
|
66959937ed | ||
|
|
e431efbcba | ||
|
|
ba00baa5a0 | ||
|
|
0fb5d4a164 | ||
|
|
1ac717b67f | ||
|
|
273cbd447e | ||
|
|
cee41567a2 | ||
|
|
1aae5eb1a6 | ||
|
|
28a4c81aff | ||
|
|
5e077cd64d | ||
|
|
e3f957a59b | ||
|
|
55c62a3ab5 | ||
|
|
22e7eef1bd | ||
|
|
d6524907f3 | ||
|
|
357db334cd | ||
|
|
f8bed3909b | ||
|
|
182bbdde91 | ||
|
|
2c70f990c2 | ||
|
|
0b01a6aa91 | ||
|
|
e557dffbc6 | ||
|
|
7f33b0b1b8 | ||
|
|
41ddf77a5b | ||
|
|
8c657ce41d | ||
|
|
3ff3b9ed4a | ||
|
|
ef43419ecd | ||
|
|
2ca375c214 | ||
|
|
cbd45c1d0f | ||
|
|
2592ea3464 | ||
|
|
73ac97cd96 | ||
|
|
e014663e97 | ||
|
|
58592e961f | ||
|
|
9a99b9ce82 | ||
|
|
8c6dca1751 | ||
|
|
cf488d5f5f | ||
|
|
515584d34c | ||
|
|
fb2becc7f2 | ||
|
|
0f8ceb0fac | ||
|
|
a70bf18770 | ||
|
|
2de83c44ab | ||
|
|
7b99f09810 | ||
|
|
6b4ba8bfad | ||
|
|
0c6cfc5020 | ||
|
|
abd9733e7f | ||
|
|
98c3ae5e76 | ||
|
|
bb5a657469 | ||
|
|
7797532350 | ||
|
|
c3a5106adc | ||
|
|
c5fd935dd0 | ||
|
|
ec375a19ae | ||
|
|
51e940617c | ||
|
|
58ec8bd437 | ||
|
|
a096395086 | ||
|
|
4bd08bd915 | ||
|
|
2c849cfa7a | ||
|
|
501d530d1d | ||
|
|
91fc4327f4 | ||
|
|
8d56c67079 | ||
|
|
e52d43458e | ||
|
|
9b125bf9b0 | ||
|
|
0716c65269 | ||
|
|
ba3ce4f1b5 | ||
|
|
07f72b0cdc | ||
|
|
bda19df87f | ||
|
|
5d82fae2b0 | ||
|
|
0813b87221 | ||
|
|
961ecfc720 | ||
|
|
81f30ef25a | ||
|
|
140b0d3df2 | ||
|
|
b3d69d7de4 | ||
|
|
8e65564fb8 | ||
|
|
06ce9bd4de | ||
|
|
274fc2d74f | ||
|
|
2f1a448afe | ||
|
|
99cab7c337 | ||
|
|
81f7548579 | ||
|
|
6ebd50bebc | ||
|
|
378ba51f4d | ||
|
|
63a890e85d | ||
|
|
bf4f9921e2 | ||
|
|
167ae65695 | ||
|
|
2affa7c9b8 | ||
|
|
785540e178 | ||
|
|
bcad4c0bc6 | ||
|
|
5af217fbf5 | ||
|
|
128aa2ef23 | ||
|
|
fce1186dd1 | ||
|
|
9a7b11f804 | ||
|
|
b068a06fa8 | ||
|
|
931a42e981 | ||
|
|
e0a20a6697 | ||
|
|
1ef4374899 | ||
|
|
3b7212740b | ||
|
|
4b80b8dc1f | ||
|
|
b7f24827e6 | ||
|
|
1c08a22881 | ||
|
|
8bd848519d | ||
|
|
e19f2aa76d | ||
|
|
4a99e2896f | ||
|
|
de3c83b0aa | ||
|
|
36bdb831be | ||
|
|
1809690915 | ||
|
|
e51b679380 | ||
|
|
10c26de7cb | ||
|
|
ca5ec8af0f | ||
|
|
d1d7b8ce55 | ||
|
|
77f8983307 | ||
|
|
ba415acd37 | ||
|
|
bcf13099ac | ||
|
|
eb2b34d71c | ||
|
|
d0b665f773 | ||
|
|
a1674b1ae5 | ||
|
|
af83681f6a | ||
|
|
bebacf7b20 | ||
|
|
6dc1fcbc3e | ||
|
|
b599ef4509 | ||
|
|
526b6a1119 | ||
|
|
88173db4ce | ||
|
|
e139b1ab22 | ||
|
|
6c1e0058c1 | ||
|
|
c96633eb83 | ||
|
|
91eb35a77b | ||
|
|
d749d59cad | ||
|
|
80396b4d30 | ||
|
|
64b93a009c | ||
|
|
2b32250504 | ||
|
|
9b5f863832 | ||
|
|
fd422d7446 | ||
|
|
5162b2748e | ||
|
|
56c684ec06 | ||
|
|
7e93b33407 | ||
|
|
7662235802 | ||
|
|
e41f9facc7 | ||
|
|
785b8ede11 | ||
|
|
78b198ad70 | ||
|
|
c2c0515991 | ||
|
|
b97fefdb8d | ||
|
|
840da6dd85 | ||
|
|
972d916126 | ||
|
|
e3ed065f5f | ||
|
|
760ebe6113 | ||
|
|
a329d3ad89 | ||
|
|
01f8561582 | ||
|
|
883ea5c996 | ||
|
|
99cf13ed9b | ||
|
|
91c7ef6801 | ||
|
|
84ef5705e7 | ||
|
|
cf2a0cf8c2 | ||
|
|
48c25c40e4 | ||
|
|
996d8ab954 | ||
|
|
fac2546a92 | ||
|
|
728ea6172a | ||
|
|
f59d225029 | ||
|
|
0b178a715f | ||
|
|
e06e5328c2 | ||
|
|
1c14cd0979 | ||
|
|
f9141f5ba2 | ||
|
|
48da5c976c | ||
|
|
fa38c81c08 | ||
|
|
8d5fe5270f | ||
|
|
0dc0d66549 | ||
|
|
f589fcc2d0 | ||
|
|
edd44a0993 | ||
|
|
2aae496742 | ||
|
|
6f72046f86 | ||
|
|
d4a9b446a6 | ||
|
|
95f571e9b9 | ||
|
|
e8aeae5c07 | ||
|
|
ddf6dc0343 | ||
|
|
36d55a9db7 | ||
|
|
7d41379ad5 | ||
|
|
63e928da96 | ||
|
|
5c983b64bc | ||
|
|
b2d36c0e68 | ||
|
|
6123a1620e | ||
|
|
5ae7c10a00 | ||
|
|
b5a6794381 | ||
|
|
6b575f836a | ||
|
|
c83589cac6 | ||
|
|
d64492bda5 | ||
|
|
33d6c75924 | ||
|
|
89f01bad42 | ||
|
|
767496f81b | ||
|
|
147a477365 | ||
|
|
13171f636f | ||
|
|
fea3f0d3e0 | ||
|
|
a3a254c2ea | ||
|
|
bd9d5f7fc0 | ||
|
|
726738ee9e | ||
|
|
725244bb2f | ||
|
|
d2ac2b8990 | ||
|
|
116569223c | ||
|
|
05442a019f | ||
|
|
db67080bf8 | ||
|
|
21fabf7436 | ||
|
|
a8c6516b31 | ||
|
|
f5ca48a56e | ||
|
|
65ceff9824 | ||
|
|
ed73cfdcc7 | ||
|
|
9cb79a7827 | ||
|
|
984f29005a | ||
|
|
805c3719af | ||
|
|
ea646149c0 | ||
|
|
eae1f8ee4d | ||
|
|
8d1de245a6 | ||
|
|
b8ef5d1efc | ||
|
|
e1098b34e8 | ||
|
|
8296f8d2da | ||
|
|
867c83383d | ||
|
|
1354119d6d | ||
|
|
53af7f81bb | ||
|
|
48b1ac28de | ||
|
|
6e329b17a9 | ||
|
|
6a492198a8 | ||
|
|
8bf9b6e7cb | ||
|
|
42e23ef564 | ||
|
|
c6806ee648 | ||
|
|
076fae696c | ||
|
|
ed294d3ea4 | ||
|
|
043be409d0 | ||
|
|
a5e7483870 | ||
|
|
365335be46 | ||
|
|
62543dd171 | ||
|
|
e2eef8ff21 | ||
|
|
3acf937d56 | ||
|
|
d572e523ba | ||
|
|
82113abe88 | ||
|
|
b7d121c58f | ||
|
|
6d5a85b144 | ||
|
|
78121917c6 | ||
|
|
a0913f0e32 | ||
|
|
e96e284715 | ||
|
|
c572a1b607 | ||
|
|
1845311f98 | ||
|
|
4f806db8b7 | ||
|
|
22858cc1e9 | ||
|
|
a0329a3eb0 | ||
|
|
b3e92088ee | ||
|
|
46db1c20f1 | ||
|
|
9d182e53b2 | ||
|
|
1205fc7fdb | ||
|
|
ff2826a448 | ||
|
|
ee750115ec | ||
|
|
0e13d22c97 | ||
|
|
8e7d040ac4 | ||
|
|
6755202958 | ||
|
|
8b7374a687 | ||
|
|
c17cca2365 | ||
|
|
8016a9539a | ||
|
|
e885fb15a0 | ||
|
|
c7f098771b | ||
|
|
fcd0908032 | ||
|
|
7ff1285084 | ||
|
|
b45b603b97 | ||
|
|
247208b8a9 | ||
|
|
182c46037b | ||
|
|
438d3210bc | ||
|
|
d523c7c916 | ||
|
|
09a19e94d5 | ||
|
|
3971c145df | ||
|
|
055117d83d | ||
|
|
c6baf43986 | ||
|
|
4ff16af3a7 | ||
|
|
17a1bd352b | ||
|
|
7421ca09cc | ||
|
|
9797e696e5 | ||
|
|
c36d6d8b2d | ||
|
|
3873786b99 | ||
|
|
76fdba7f09 | ||
|
|
72799e9638 | ||
|
|
2e77d03fe9 | ||
|
|
0c58eae5e7 | ||
|
|
b609567c38 | ||
|
|
7ecfa44fa0 | ||
|
|
a685b1dc3b | ||
|
|
63ce49a17c | ||
|
|
820fbe4076 | ||
|
|
efa05b7775 | ||
|
|
003781e903 | ||
|
|
ee71bafc96 | ||
|
|
bdd5f1231e | ||
|
|
6fee532c96 | ||
|
|
78aaad7b59 | ||
|
|
b128b0ede2 | ||
|
|
737d2f3bc6 | ||
|
|
179be53a65 | ||
|
|
1867f5e7c2 | ||
|
|
6662d24565 | ||
|
|
5880566a99 | ||
|
|
5d05b32711 | ||
|
|
fa2b720e92 | ||
|
|
d381238f83 | ||
|
|
751d627ead | ||
|
|
3e66a8de9b | ||
|
|
266052b12b | ||
|
|
803f4328f4 | ||
|
|
8e95568e11 | ||
|
|
ab09ee4819 | ||
|
|
41f94a172f | ||
|
|
566e597994 | ||
|
|
765fb9c05f | ||
|
|
b6720a19f7 | ||
|
|
3b130651c4 | ||
|
|
3f6c35dabe | ||
|
|
db2a952bca | ||
|
|
0ea9770bc3 | ||
|
|
0b20956c90 | ||
|
|
9f73b47d54 | ||
|
|
ce9c99af71 | ||
|
|
784024fb5d | ||
|
|
1145b32299 | ||
|
|
ab71df0011 | ||
|
|
fb137252a9 | ||
|
|
f57a680306 | ||
|
|
8bb3eaa320 | ||
|
|
9489730a44 | ||
|
|
d4795bb897 | ||
|
|
63775872c7 | ||
|
|
beff508a1f | ||
|
|
deaae8a2c6 | ||
|
|
46a27bd50c | ||
|
|
24f2993433 | ||
|
|
c80bfbfac5 | ||
|
|
06abfc45c7 | ||
|
|
440a773081 | ||
|
|
0797bcb38b | ||
|
|
d463b5bf0d | ||
|
|
0733c8edcc | ||
|
|
86c7c05cb1 | ||
|
|
18ff7ce753 | ||
|
|
8f2ed1004d | ||
|
|
14961323c3 | ||
|
|
f8c682b183 | ||
|
|
dd92708f60 | ||
|
|
4d9eeccefa | ||
|
|
cd7b251031 | ||
|
|
db614180b9 | ||
|
|
b6e527e5f4 | ||
|
|
77c0f8f39e | ||
|
|
58816d73c8 | ||
|
|
3b194d282e | ||
|
|
397f66433d | ||
|
|
04a4ed1d0e | ||
|
|
625850d4e7 | ||
|
|
6c572baca5 | ||
|
|
ee0406a13f | ||
|
|
608a049ba3 | ||
|
|
4d9b5198e2 | ||
|
|
24b6c970aa | ||
|
|
239c47f469 | ||
|
|
f0fc64c517 | ||
|
|
8481fd38ce | ||
|
|
5f425129d5 | ||
|
|
92955b1315 | ||
|
|
a3872d5bb5 | ||
|
|
a123ff2c04 | ||
|
|
188de34306 | ||
|
|
3d43750e9b | ||
|
|
fea228c68d | ||
|
|
a71a28e563 | ||
|
|
3b5d4982b5 | ||
|
|
b201e9ab8c | ||
|
|
d30b9282fd | ||
|
|
4f304a70b7 | ||
|
|
59a54d4f04 | ||
|
|
1e94d794ed | ||
|
|
5bd210406b | ||
|
|
e00514d36d | ||
|
|
f013bf1931 | ||
|
|
107cbbad1d | ||
|
|
481f1f9d30 | ||
|
|
704364061c | ||
|
|
c1bd2d6cf1 | ||
|
|
a018e1228c | ||
|
|
d962d9c7f6 | ||
|
|
4ea28cbca5 | ||
|
|
1b48b8b4cc | ||
|
|
73df197e33 | ||
|
|
bdc66e55ca | ||
|
|
926343ee86 | ||
|
|
8e6021c5e7 | ||
|
|
ac2b6c76ce | ||
|
|
9e966d0a7f | ||
|
|
6c10defaa1 | ||
|
|
b6a76f6f7c | ||
|
|
84e5b77a5c | ||
|
|
89b0ea0bf1 | ||
|
|
48aeb98bf1 | ||
|
|
8a5d864812 | ||
|
|
ae79e645a6 | ||
|
|
0947deb372 | ||
|
|
69c92911a2 | ||
|
|
b16bb37b75 | ||
|
|
9c9ec8adf2 | ||
|
|
eb0e67fc42 | ||
|
|
9cc50bddab | ||
|
|
d3ba0fa487 | ||
|
|
39f6505a80 | ||
|
|
36a6802439 | ||
|
|
d7e2633a92 | ||
|
|
88049e741e | ||
|
|
ff7fb14087 | ||
|
|
816c64bd48 | ||
|
|
d2756e6f2d | ||
|
|
147e12acbb | ||
|
|
4098018ee9 | ||
|
|
133e7578b9 | ||
|
|
74a2bdbf09 | ||
|
|
f22bc68af4 | ||
|
|
26cc6da650 | ||
|
|
d21f1f1b87 | ||
|
|
7cdaafffe1 | ||
|
|
0265dca197 | ||
|
|
9d68366043 | ||
|
|
c8c671d915 | ||
|
|
142daa9d15 | ||
|
|
2552219991 | ||
|
|
a038b698d7 | ||
|
|
a3b222574e | ||
|
|
e0cd467293 | ||
|
|
9c056030d2 | ||
|
|
19efa9d4cc | ||
|
|
90633a6495 | ||
|
|
edc432fbd8 | ||
|
|
1b7bdbf516 | ||
|
|
8c1be70c85 | ||
|
|
b8e0c0db9e | ||
|
|
7b7fb6cc82 | ||
|
|
62512ba215 | ||
|
|
e1beb64c01 | ||
|
|
c81f26ddad | ||
|
|
340114c2a1 | ||
|
|
cd7767b331 | ||
|
|
25289dad8a | ||
|
|
47c6917129 | ||
|
|
6379cda148 | ||
|
|
91a124ab8f | ||
|
|
2357a7135e | ||
|
|
da0b3b3de9 | ||
|
|
6664fb1716 | ||
|
|
1206f24fa9 | ||
|
|
ffb5823e84 | ||
|
|
d45a7fb262 | ||
|
|
918d192c0f | ||
|
|
f7cd6eac50 | ||
|
|
88f4428ff0 | ||
|
|
069ea22ba2 | ||
|
|
8fac8c5307 | ||
|
|
2285befebb | ||
|
|
1cd0648e4e | ||
|
|
0b7ba285c6 | ||
|
|
30446c4526 | ||
|
|
9b843c9ed2 | ||
|
|
2ce1c3bef8 | ||
|
|
e463094dc7 | ||
|
|
71a9fe10f4 | ||
|
|
ba146e13ef | ||
|
|
c060d7e3e0 | ||
|
|
ba96678822 | ||
|
|
4f6354f383 | ||
|
|
2766e80346 | ||
|
|
7cc3777a60 | ||
|
|
cb1dd9f17d | ||
|
|
31f342fe4f | ||
|
|
e90359eb08 | ||
|
|
58b0768a30 | ||
|
|
3b04506893 | ||
|
|
354165aa0a | ||
|
|
343109836f | ||
|
|
fcadac2adb | ||
|
|
5e7dcdfe97 | ||
|
|
2ec9a57391 | ||
|
|
973c545723 | ||
|
|
fd62eecfef | ||
|
|
b5ca7058c2 | ||
|
|
57a48f099f | ||
|
|
4699f511bf | ||
|
|
cd8f7e72e0 | ||
|
|
78803fa284 | ||
|
|
2e8d75df16 | ||
|
|
7e3bbfd960 | ||
|
|
1734d53b3c | ||
|
|
f37540f4e5 | ||
|
|
addb9d836a | ||
|
|
4184d8c7ac | ||
|
|
724c15a68c | ||
|
|
499bdf9b48 | ||
|
|
41cd1ccda1 | ||
|
|
b9521cb3a9 | ||
|
|
1f40663b90 | ||
|
|
5261ed7c4c | ||
|
|
aa8768b18a | ||
|
|
aad07433f4 | ||
|
|
4a7630079b | ||
|
|
44a6ee1994 | ||
|
|
56bd6e69ed | ||
|
|
d1e04588d0 | ||
|
|
21cdaef6d5 | ||
|
|
a1723d18fb | ||
|
|
9e065138e9 | ||
|
|
1c73c92bfd | ||
|
|
bcd560d74e | ||
|
|
02339562ed | ||
|
|
e5804378c2 | ||
|
|
da1c8a162d | ||
|
|
d457a23a1f | ||
|
|
b6154e58b8 | ||
|
|
5f18776c61 | ||
|
|
68b0b9ec7a | ||
|
|
0f5036972e | ||
|
|
0b199b8421 | ||
|
|
a59730f6eb | ||
|
|
c6c84fe65b | ||
|
|
03c757bba6 | ||
|
|
bfeb8d238a | ||
|
|
daf0c08c4b | ||
|
|
d12c1b9ac4 | ||
|
|
bc242f4fd4 | ||
|
|
a240c1bca9 | ||
|
|
219aa6c574 | ||
|
|
abca1b481a | ||
|
|
db72fd2ef5 | ||
|
|
31cca58943 | ||
|
|
c06a4b759c | ||
|
|
f05a23a490 | ||
|
|
1e0f2ffde0 | ||
|
|
06df42ee3d | ||
|
|
65ee1638f7 | ||
|
|
87eefe7673 | ||
|
|
5c124d3988 | ||
|
|
8c69ce624f | ||
|
|
bb73acdde5 | ||
|
|
993bc3775b | ||
|
|
3d2ff28bcd | ||
|
|
9b78deb802 | ||
|
|
dadc525d0b | ||
|
|
22b2140c94 | ||
|
|
f07496a4a0 | ||
|
|
1b2938cbc8 | ||
|
|
d4d2f58830 | ||
|
|
b3113e13ec | ||
|
|
055c8e26f0 | ||
|
|
2a7a7239d7 | ||
|
|
2fa40dac3f | ||
|
|
6b4fbd7dc2 | ||
|
|
5b0bb19717 | ||
|
|
843dfc430a | ||
|
|
69cb07c527 | ||
|
|
89e8a64734 | ||
|
|
5eb2dec32d | ||
|
|
db0ea7d6c4 | ||
|
|
1eb85003de | ||
|
|
cca170f84a | ||
|
|
c8c016caa8 | ||
|
|
45d5874026 | ||
|
|
69b1ce60ff | ||
|
|
3ff3e4b106 | ||
|
|
dc50a68b01 | ||
|
|
968cfd8654 | ||
|
|
cf28d93be6 | ||
|
|
be08d6ebb5 | ||
|
|
4bc24f3b00 | ||
|
|
15833f94cf | ||
|
|
aeb297efcf | ||
|
|
d48c6b98e8 | ||
|
|
b79ccfafed | ||
|
|
c87ba59552 | ||
|
|
91fd71c858 | ||
|
|
6f64e67538 | ||
|
|
bd7a0b072f | ||
|
|
01ca001c97 | ||
|
|
324ad2a87c | ||
|
|
d9ad2630f0 | ||
|
|
83958a4a48 | ||
|
|
f6a6efdc42 | ||
|
|
1bbe7657b9 | ||
|
|
38189753b5 | ||
|
|
5b0e658617 | ||
|
|
b6cf54d57f | ||
|
|
e8058c8813 | ||
|
|
784868048d | ||
|
|
2bf9779f2f | ||
|
|
d98ceea381 | ||
|
|
1ab2da74b9 | ||
|
|
086b1f1403 | ||
|
|
3723cf8ac2 | ||
|
|
19608fa98e | ||
|
|
b0d17deda1 | ||
|
|
4c979c458e | ||
|
|
c5e93169ad | ||
|
|
1e2ca294de | ||
|
|
7165c4a275 | ||
|
|
cbe81ba33c | ||
|
|
fdbfae953d | ||
|
|
c7ba274877 | ||
|
|
8b15a16ca1 | ||
|
|
9f2c8d3811 | ||
|
|
7343dfbed8 | ||
|
|
90f74d8d2b | ||
|
|
7e3e0e1178 | ||
|
|
d890e38a10 | ||
|
|
e505b5c85f | ||
|
|
6230f55116 | ||
|
|
c8d0c14ebc | ||
|
|
6ac8455c74 | ||
|
|
143b21631f | ||
|
|
d760facad8 | ||
|
|
3a1a4c5cfe | ||
|
|
c3045e2cd4 | ||
|
|
1efb9af7ab | ||
|
|
e03471159a | ||
|
|
a92e493742 | ||
|
|
225d413ed1 | ||
|
|
184e4ba7d5 | ||
|
|
917cae27b1 | ||
|
|
60e0463051 | ||
|
|
c15022c7d5 | ||
|
|
2a84e3a606 | ||
|
|
fddbbd5714 | ||
|
|
51b8f7c713 | ||
|
|
e97c246741 | ||
|
|
9a81f55ac0 | ||
|
|
a38b702acc | ||
|
|
e4e0605e92 | ||
|
|
8875a8f12c | ||
|
|
4dd1deefa5 | ||
|
|
1f6dc93ea3 | ||
|
|
426e920fff | ||
|
|
1f6bbce326 | ||
|
|
41f89a35fa | ||
|
|
099d7874d7 | ||
|
|
e2367103a1 | ||
|
|
37f8ba7d72 | ||
|
|
c20bd84edd | ||
|
|
b4ee0d2487 | ||
|
|
420fa7645f | ||
|
|
5bb1e72760 | ||
|
|
e2a007b62a | ||
|
|
210813367f | ||
|
|
770a50764e | ||
|
|
e339a22aa4 | ||
|
|
913afed378 | ||
|
|
db3efb4452 | ||
|
|
840351acb7 | ||
|
|
da76a7f299 | ||
|
|
cbd999f88d | ||
|
|
2fa8a266c5 | ||
|
|
08aa749a53 | ||
|
|
2379f04d2a | ||
|
|
0e73598d1c | ||
|
|
964e6eb0e8 | ||
|
|
0430e6c6d4 | ||
|
|
db88358eca | ||
|
|
723e9b0018 | ||
|
|
f3db27a8da | ||
|
|
0fb7a73fc9 | ||
|
|
418e6bd085 | ||
|
|
5a5c4ace6b | ||
|
|
c2c8214075 | ||
|
|
e5d2ade6e6 | ||
|
|
e32b6e07b4 | ||
|
|
cc69d3b8d1 | ||
|
|
1dd3af44b5 | ||
|
|
8ab233baef | ||
|
|
104138b9a7 | ||
|
|
0c8fd5121a | ||
|
|
61f26d331b | ||
|
|
97817cd808 | ||
|
|
45bcc63c06 | ||
|
|
00779d0f10 | ||
|
|
d657bf8ed8 | ||
|
|
4fcdd05e6a | ||
|
|
e6916946a9 | ||
|
|
acd7013dc6 | ||
|
|
039d876e3f | ||
|
|
3fc2c7d6cc | ||
|
|
109164b673 | ||
|
|
673a03e656 | ||
|
|
1e976e6d96 | ||
|
|
8efba30adb | ||
|
|
713d44eac3 | ||
|
|
aea44c1d97 | ||
|
|
1e61e60d73 | ||
|
|
a0e4b4a56e | ||
|
|
983f8fcb03 | ||
|
|
6afdde7dc1 | ||
|
|
6873de7243 | ||
|
|
ee4d6d0db3 | ||
|
|
dee1212a76 | ||
|
|
ceda69aedd | ||
|
|
75ea7d7601 | ||
|
|
8b75d2312c | ||
|
|
ca51880798 | ||
|
|
8b708e8939 | ||
|
|
b6ff9f7196 | ||
|
|
67229fd032 | ||
|
|
d382eab355 | ||
|
|
d8f10e9ac4 | ||
|
|
749aaeb003 | ||
|
|
c5a3bbcecf | ||
|
|
27ac41531b | ||
|
|
423c9af786 | ||
|
|
232759829e | ||
|
|
71f7bc7b1b | ||
|
|
ae4f03e272 | ||
|
|
acb5a7e50b | ||
|
|
c8749b3c9c | ||
|
|
49647e3bb5 | ||
|
|
48d353aa90 | ||
|
|
edec18cacb | ||
|
|
cd8661abc1 | ||
|
|
5f6310f5d6 | ||
|
|
42d955b175 | ||
|
|
21541bc468 | ||
|
|
f14f4e1e9b | ||
|
|
6d1de8a2e4 | ||
|
|
0053d31f84 | ||
|
|
f077a9684b | ||
|
|
2428d58e93 | ||
|
|
5340e3a0a7 | ||
|
|
70dd8f0f1d | ||
|
|
8fa76504c3 | ||
|
|
0899cb4e1d | ||
|
|
ee7a2a70a6 | ||
|
|
d57d1ac15e | ||
|
|
68c29d89c9 | ||
|
|
721648ffdf | ||
|
|
8437f39bf6 | ||
|
|
48b15c60e7 | ||
|
|
e350122125 | ||
|
|
0cce97f373 | ||
|
|
d8cacc0811 | ||
|
|
7abaf70bb8 | ||
|
|
232fe4d15e | ||
|
|
d6d12c0335 | ||
|
|
8e4f12804b | ||
|
|
c21ba5c521 | ||
|
|
dfa3d47261 | ||
|
|
924f59afff | ||
|
|
673b282d6c | ||
|
|
1c761f89e5 | ||
|
|
f61cd969b9 | ||
|
|
e39a130306 | ||
|
|
13b6ea985e | ||
|
|
2f1e55fa1e | ||
|
|
776f629771 | ||
|
|
d9e9edb2c4 | ||
|
|
753c074e59 | ||
|
|
d92c82775a | ||
|
|
215cc09c1f | ||
|
|
7f302c13c7 | ||
|
|
de6a094d10 | ||
|
|
a94e1a8314 | ||
|
|
f5efdd665b | ||
|
|
43e25e8717 | ||
|
|
a8026fefc1 | ||
|
|
fdb36957c9 | ||
|
|
ea433ff807 | ||
|
|
8902fb50d6 | ||
|
|
b6aa013eb3 | ||
|
|
034b43bf70 | ||
|
|
59e9032286 | ||
|
|
52a98efd0a | ||
|
|
90cc91aa7f | ||
|
|
1973a26e83 | ||
|
|
6519ad25ca | ||
|
|
cacfde8166 | ||
|
|
df85873726 | ||
|
|
dfea294cc9 | ||
|
|
d35b855404 | ||
|
|
7a1cbf70e3 | ||
|
|
f260990b86 | ||
|
|
6affbe9b55 | ||
|
|
dbe3a10697 | ||
|
|
3c25306a5d | ||
|
|
17f4d49731 | ||
|
|
e213b5cc64 | ||
|
|
65e5dad44b | ||
|
|
62ad38ea5d | ||
|
|
f98f4c1f77 | ||
|
|
e9f02b58b7 | ||
|
|
05495e481d | ||
|
|
5bb2167b78 | ||
|
|
b4e0ed66cf | ||
|
|
70a0563435 | ||
|
|
955912b832 | ||
|
|
b65ee75b3d | ||
|
|
f642493a38 | ||
|
|
7f1bfb1e07 | ||
|
|
8931e2e016 | ||
|
|
0465fa77c2 | ||
|
|
575d503cb9 | ||
|
|
a4fdbdb9ad | ||
|
|
b9cb781a4e | ||
|
|
a3adf867b7 | ||
|
|
d52cbd2f74 | ||
|
|
8d0003db94 | ||
|
|
b775e89e77 | ||
|
|
0e14b097ba | ||
|
|
51848b8d8d | ||
|
|
72658c3e60 | ||
|
|
036cb6f3b0 | ||
|
|
1a86d96bfa | ||
|
|
f67db38a25 | ||
|
|
028d18826a | ||
|
|
29a605f265 | ||
|
|
4b6959470d | ||
|
|
600767d2bf | ||
|
|
3efbd47ffd | ||
|
|
d17e85217b | ||
|
|
e608089805 | ||
|
|
b852acec28 | ||
|
|
2a3ea8315d | ||
|
|
9271ee833c | ||
|
|
570d4ad1a3 | ||
|
|
dccdf3231a | ||
|
|
b8ee777fd2 | ||
|
|
a2fd3a8d90 | ||
|
|
bbffb1420b | ||
|
|
8ea0a32879 | ||
|
|
8c27b8c33e | ||
|
|
5c61b22c2f | ||
|
|
9da9d765a0 | ||
|
|
f64363728e | ||
|
|
378777dc7c | ||
|
|
6156b9a481 | ||
|
|
8c516c5691 | ||
|
|
bf9a149898 | ||
|
|
277cde8db2 | ||
|
|
e06bdaf53e | ||
|
|
da367bd138 | ||
|
|
d336bcbf1f | ||
|
|
a8aedba6ff | ||
|
|
9ede86c6a3 | ||
|
|
1468f2b082 | ||
|
|
e04ae70f89 | ||
|
|
7f7d2c9ba8 | ||
|
|
d73deef8dc | ||
|
|
f93a1540af | ||
|
|
c8bd9cb716 | ||
|
|
2ed13c7e5b | ||
|
|
647c0929c5 | ||
|
|
a61533a131 | ||
|
|
bc5e682308 | ||
|
|
25a481df12 | ||
|
|
764c10fae4 | ||
|
|
d8249d4e38 | ||
|
|
0e3e42b398 | ||
|
|
7d3b64dcf9 | ||
|
|
2c8d525796 | ||
|
|
4869f071ab | ||
|
|
3029eeaf6f | ||
|
|
33fb692aee | ||
|
|
6a075d144f | ||
|
|
aa23315599 | ||
|
|
8d0bb35505 | ||
|
|
32e76bc6ce | ||
|
|
6c02766000 | ||
|
|
52ef390464 | ||
|
|
43a557601e | ||
|
|
82ff7fc090 | ||
|
|
db40b5105b | ||
|
|
b2a379b84b | ||
|
|
97cbd816fe | ||
|
|
7de3bb2a91 | ||
|
|
3a8a2bcab4 | ||
|
|
eb1adbe992 | ||
|
|
b55966d42b | ||
|
|
451ca9cb5a | ||
|
|
1e2c607ced | ||
|
|
5ff7da0d19 | ||
|
|
8e06c6f8e6 | ||
|
|
4497cd3904 | ||
|
|
2945679a94 | ||
|
|
1eaf7e3c85 | ||
|
|
8146b680c6 | ||
|
|
99e667382f | ||
|
|
4c03759d3f | ||
|
|
8593a6cdd0 | ||
|
|
cd18c31618 | ||
|
|
f29c918700 | ||
|
|
0f0c3e660b | ||
|
|
1cf4639db3 | ||
|
|
f5da9b5780 | ||
|
|
e4c87c8a96 | ||
|
|
4b4bf153f0 | ||
|
|
ec227d0d56 | ||
|
|
53c8c50779 | ||
|
|
07b4c8b462 | ||
|
|
f3cfc5b9f0 | ||
|
|
634e5a4c55 | ||
|
|
332b154f15 | ||
|
|
b446d4db28 | ||
|
|
ce0397a140 | ||
|
|
f278cccef3 | ||
|
|
cbf1dbcd2e | ||
|
|
037c6b02fa | ||
|
|
5f44e4322d | ||
|
|
6cebe97d6d | ||
|
|
82ec146446 | ||
|
|
3928c352c6 | ||
|
|
0ba36d21a9 | ||
|
|
6152727e9b | ||
|
|
53c02fa706 | ||
|
|
c7800df801 | ||
|
|
562c1de0c9 | ||
|
|
e2c90639f3 | ||
|
|
92e175a8d1 | ||
|
|
cf7bca75f6 | ||
|
|
24a173f075 | ||
|
|
8d695dda55 | ||
|
|
93eec6c4b8 | ||
|
|
a2cc1a2926 | ||
|
|
11729d0eca | ||
|
|
978819be38 | ||
|
|
23c9862eb3 | ||
|
|
a9f18ea3ef | ||
|
|
574257edf8 | ||
|
|
bb4438ac42 | ||
|
|
0baf6e5fe7 | ||
|
|
d8a53da8ee | ||
|
|
9555ac6305 | ||
|
|
4dd5ea8e2f | ||
|
|
8068523d88 | ||
|
|
27dd681d9f | ||
|
|
152f814fb6 | ||
|
|
2700e639f1 | ||
|
|
c440ce3045 | ||
|
|
2829a3cb4e | ||
|
|
a487091be8 | ||
|
|
e7524774da | ||
|
|
3918c876c5 | ||
|
|
f07f87735c | ||
|
|
b7566e8fe8 | ||
|
|
73eba90f2f | ||
|
|
62e74f6fd1 | ||
|
|
4375e48840 | ||
|
|
a1d6e94e90 | ||
|
|
1f44e13ff0 | ||
|
|
d2992f9ced | ||
|
|
950337bccc | ||
|
|
757c3be359 | ||
|
|
269ab9adfc | ||
|
|
bd241a5164 | ||
|
|
3d92b57f24 | ||
|
|
70d8cb3697 | ||
|
|
9e4ec5841c | ||
|
|
682f4fe608 | ||
|
|
ce8a077e07 | ||
|
|
d5f63bcdb3 | ||
|
|
5c3756fd1b | ||
|
|
99939e1a3d | ||
|
|
56742ace11 | ||
|
|
742cb7a8da | ||
|
|
98327d1750 | ||
|
|
b944306302 | ||
|
|
02ab1d4111 | ||
|
|
28552fb0ce | ||
|
|
bf52fcb2ec | ||
|
|
bab1f73480 | ||
|
|
c06001d921 | ||
|
|
0fa49bb9c6 | ||
|
|
bf23fe6ce2 | ||
|
|
7c6137b742 | ||
|
|
3823a7c9b6 | ||
|
|
a944975be2 | ||
|
|
6da65d3b03 | ||
|
|
0d938f2dca | ||
|
|
4fa9bb3c1f | ||
|
|
2f5b22a81f | ||
|
|
fcd5ca3fda | ||
|
|
c18247f3b1 | ||
|
|
f8fbfdbba7 | ||
|
|
21addfb947 | ||
|
|
8672bd12c4 | ||
|
|
be8054e81e | ||
|
|
82f46c6010 | ||
|
|
95a827e8a2 | ||
|
|
c534e3dcb8 | ||
|
|
9f5e1b8dd7 | ||
|
|
c86ed20c34 | ||
|
|
c32c37e66a | ||
|
|
7b100d3cdb | ||
|
|
95a2362885 | ||
|
|
d8b14b9a9f | ||
|
|
c45953f63a | ||
|
|
e3d3087a5d | ||
|
|
e162bd1168 | ||
|
|
db5d81d7f0 | ||
|
|
f737f1287b | ||
|
|
1ffa5178db | ||
|
|
49cb43488c | ||
|
|
fd7a6f8ddd | ||
|
|
7979ce0f0a | ||
|
|
2ba5d9484d | ||
|
|
23b981c5ac | ||
|
|
86ab2c8c05 | ||
|
|
9ea0bc609a | ||
|
|
5366c2844a | ||
|
|
eac4d703c7 | ||
|
|
8ed87294e2 | ||
|
|
b343c601be | ||
|
|
e56d7006b4 | ||
|
|
1b7bcd7784 | ||
|
|
4cb9025b6c | ||
|
|
f8864ab053 | ||
|
|
64eba46a67 | ||
|
|
35d9cc1d40 | ||
|
|
3036107dac | ||
|
|
214089b4ea | ||
|
|
95b7ba28e4 | ||
|
|
880272f96e | ||
|
|
7ed26fadb6 | ||
|
|
f0d25a02a6 | ||
|
|
162ba9307d | ||
|
|
49dae92b8e | ||
|
|
b484a52b6d | ||
|
|
d754091a7c | ||
|
|
e2febc24ae | ||
|
|
d0677edaaa | ||
|
|
f0aaecd0c7 | ||
|
|
3518940fec | ||
|
|
2e5c92ae0c | ||
|
|
4ad699dbe6 | ||
|
|
931be9e6aa | ||
|
|
9656d6fbd0 | ||
|
|
c7cbb13044 | ||
|
|
327d30dcc2 | ||
|
|
e4e2079917 | ||
|
|
0427506572 | ||
|
|
ea168edb43 | ||
|
|
aa039c6c05 | ||
|
|
3de998051a | ||
|
|
69ade1ae37 | ||
|
|
1d6133e3b1 | ||
|
|
203a111d1a | ||
|
|
0a20234268 | ||
|
|
7f8e50f83d | ||
|
|
443ef7d41b | ||
|
|
059ae6595d | ||
|
|
19c3dad338 | ||
|
|
81bc51c972 | ||
|
|
6c17868744 | ||
|
|
a18040ccfa | ||
|
|
0835a75503 | ||
|
|
3ee32757e5 | ||
|
|
344abfa8d8 | ||
|
|
906b2a3485 | ||
|
|
e0d2b87ed3 | ||
|
|
83a8c8b42b | ||
|
|
d840ed6c5a | ||
|
|
0112087be4 | ||
|
|
7320084e11 | ||
|
|
23929f5eaa | ||
|
|
c002d4619a | ||
|
|
f60a909bba | ||
|
|
c2c22e3968 | ||
|
|
f10299b2de | ||
|
|
1d3563ed97 | ||
|
|
f3eb2caa4e | ||
|
|
2364dacd52 | ||
|
|
883f7451c3 | ||
|
|
a534c9bca1 | ||
|
|
b14202a324 | ||
|
|
a6fae48f07 | ||
|
|
963caf2afe | ||
|
|
50b0268531 | ||
|
|
f484b64be3 | ||
|
|
349535557f | ||
|
|
de4973a270 | ||
|
|
e42d2baf8a | ||
|
|
eac435b233 | ||
|
|
447b8564e9 | ||
|
|
97cee657bd | ||
|
|
fe894754cf | ||
|
|
9ffb1d1931 | ||
|
|
a16bd30903 | ||
|
|
13f9ea8be4 | ||
|
|
304af5e980 | ||
|
|
dc180c09e9 | ||
|
|
8e20e26565 | ||
|
|
11075a4012 | ||
|
|
a9300faaf8 | ||
|
|
504827b7e5 | ||
|
|
e180130b38 | ||
|
|
faaee09827 | ||
|
|
99334795b6 | ||
|
|
8c9c59ef64 | ||
|
|
7a112000c9 | ||
|
|
1424087d5a | ||
|
|
984f4731cd | ||
|
|
3a3de64b0f | ||
|
|
0911854e9d | ||
|
|
2af8b6f445 | ||
|
|
bbfd8ca3f5 | ||
|
|
b4ed2880f7 | ||
|
|
5f18a21e86 | ||
|
|
5d188e3877 | ||
|
|
90f113a292 | ||
|
|
eecfe58297 | ||
|
|
079a747210 | ||
|
|
4be8c70f23 | ||
|
|
d9aee4df77 | ||
|
|
225de87d4d | ||
|
|
2ce7cedfbd | ||
|
|
cfb163d904 | ||
|
|
de7c9be11b | ||
|
|
841209adc9 | ||
|
|
e48d51fe6e | ||
|
|
9d436ec7ed | ||
|
|
fb2b29d088 | ||
|
|
1c46b0bc20 | ||
|
|
81d0e4696a | ||
|
|
f9a287b52b | ||
|
|
0f0072abea | ||
|
|
312933a259 | ||
|
|
288854b8f1 | ||
|
|
7f5991aa34 | ||
|
|
361df95d50 | ||
|
|
fc1ade32d7 | ||
|
|
b74c7531d9 | ||
|
|
7e3be3325a | ||
|
|
7dab7fbe66 | ||
|
|
62c06b6593 | ||
|
|
000b62969f | ||
|
|
b4473bb4a7 | ||
|
|
2c0e06d599 | ||
|
|
d2c55e8ed3 | ||
|
|
714abaa25a | ||
|
|
0017eb987b | ||
|
|
e5a0894692 | ||
|
|
a8e00e9f0f | ||
|
|
77a4c271ae | ||
|
|
014b77c3c7 | ||
|
|
076e241056 | ||
|
|
7ce57cc67a | ||
|
|
da0343283a | ||
|
|
d5f7f1ba91 | ||
|
|
8761c82afe | ||
|
|
13023141bc | ||
|
|
4dd2038625 | ||
|
|
06a32b0e9d | ||
|
|
c91ab7a76b | ||
|
|
0344aa6a49 | ||
|
|
a748c9d750 | ||
|
|
038dc372b7 | ||
|
|
bc8198fb8a | ||
|
|
f42275bd83 | ||
|
|
6bd86a724e | ||
|
|
fc96cfe8a0 | ||
|
|
a9f25fe7d6 | ||
|
|
f740fed5f2 | ||
|
|
a6d1bd12a2 | ||
|
|
e8ab20acf2 | ||
|
|
ccfe193800 | ||
|
|
bdccedca59 | ||
|
|
9abb1488df | ||
|
|
195fc1bdc3 | ||
|
|
2a9129f470 | ||
|
|
acbfc0cc6e | ||
|
|
bfb0c75e95 | ||
|
|
161a2ddae8 | ||
|
|
99621cfd66 | ||
|
|
e6e7234215 | ||
|
|
5b7b329279 | ||
|
|
3abb2c8674 | ||
|
|
39de89254f | ||
|
|
ac941968cb | ||
|
|
96f603bfd1 | ||
|
|
677e38c62d | ||
|
|
72fce20905 | ||
|
|
1eb41c20d5 | ||
|
|
dd0c1d331f | ||
|
|
12760a70a1 | ||
|
|
525d17270f | ||
|
|
bc9959f5ab | ||
|
|
94a8cd5128 | ||
|
|
5a1b2c4938 | ||
|
|
851a2ac03a | ||
|
|
34d7707f53 | ||
|
|
0aac7f62a3 | ||
|
|
34379b92d0 | ||
|
|
250999f9f5 | ||
|
|
2b3832222b | ||
|
|
c5f6d0e721 | ||
|
|
dbb0cf15b8 | ||
|
|
ab202ba951 | ||
|
|
e2c13aa7ed | ||
|
|
c1ab19f3cf | ||
|
|
beebfb2e19 | ||
|
|
cfca90aa7d | ||
|
|
19fe0a32c8 | ||
|
|
76659f8837 | ||
|
|
2254715190 | ||
|
|
ae1a5460d4 | ||
|
|
27d9f910ff | ||
|
|
28db4881d7 | ||
|
|
7c76c3ccd6 | ||
|
|
007bd24374 | ||
|
|
c8dc30287c | ||
|
|
360184bbd1 | ||
|
|
e8ed2454a1 | ||
|
|
923ecf29b8 | ||
|
|
a8f8bf5872 | ||
|
|
bedcd94020 | ||
|
|
959d4da1f8 | ||
|
|
861453c1a8 | ||
|
|
2f4072da0d | ||
|
|
411b5e0ca6 | ||
|
|
3f03963811 | ||
|
|
d43f81e118 | ||
|
|
b97dbd2515 | ||
|
|
c6a20a9ed3 | ||
|
|
27f0f29eef | ||
|
|
223508ae72 | ||
|
|
bce0a4b8cd | ||
|
|
65412a4263 | ||
|
|
0233b78c8e | ||
|
|
b0b25e4cfa | ||
|
|
806288d587 | ||
|
|
97265fc43b | ||
|
|
41ca50d0d4 | ||
|
|
9d02206fd9 | ||
|
|
ba2293eb30 | ||
|
|
8b9e28975d | ||
|
|
22ae8b8f87 | ||
|
|
187e352cbd | ||
|
|
23ef8ad28d | ||
|
|
1dadf56c42 | ||
|
|
52640b80c0 | ||
|
|
fe25f8f48f | ||
|
|
7f59572d8b | ||
|
|
90fc4c6bad | ||
|
|
16b6c0da33 | ||
|
|
488a691f29 | ||
|
|
bcbfe2ccd5 | ||
|
|
bd9a1d7ec7 | ||
|
|
9331ba64d6 | ||
|
|
21e5cb0a03 | ||
|
|
1a8e0c9ecb | ||
|
|
16fc0d31cd | ||
|
|
a622ada58b | ||
|
|
ee9c4948d3 | ||
|
|
cf28e1d963 | ||
|
|
089ec36160 | ||
|
|
04ce774c22 | ||
|
|
99c1422f37 | ||
|
|
b583a60f23 | ||
|
|
7be2910809 | ||
|
|
30de524319 | ||
|
|
c431d5e759 | ||
|
|
184b62b024 | ||
|
|
2751770350 | ||
|
|
75d98aee8e | ||
|
|
48120b9406 | ||
|
|
0e302d7959 | ||
|
|
59cd176f44 | ||
|
|
619f728f09 | ||
|
|
6e8002acc4 | ||
|
|
8a4a6174f7 | ||
|
|
ee6c4823d3 | ||
|
|
14dcb73d06 | ||
|
|
e15107e5ec | ||
|
|
0167a9462e | ||
|
|
7fa1d342ab | ||
|
|
05b9988e1d | ||
|
|
1c09e61219 | ||
|
|
35f0ad7a83 | ||
|
|
7ae1d6763a | ||
|
|
460e859795 | ||
|
|
4b88ec6460 | ||
|
|
27ee13bb7e | ||
|
|
e6cdd337c3 | ||
|
|
7d8dd12131 | ||
|
|
0800e3a136 | ||
|
|
9b0f1a2a04 | ||
|
|
9de3cb0f92 | ||
|
|
c053a8291c | ||
|
|
a0ddfe173b | ||
|
|
17843a7c71 | ||
|
|
324ae5c883 | ||
|
|
ef03989c3f | ||
|
|
63412ddd42 | ||
|
|
30ce32608a | ||
|
|
74799ad096 | ||
|
|
31176f99c8 | ||
|
|
b9439c05ec | ||
|
|
435a04da0c | ||
|
|
0040b266a5 | ||
|
|
645de137f2 | ||
|
|
1883607118 | ||
|
|
4ccae1dac7 | ||
|
|
ff75db310f | ||
|
|
5788520401 | ||
|
|
570dddc120 | ||
|
|
ea31072ae5 | ||
|
|
5eca5a6011 | ||
|
|
67d5357227 | ||
|
|
a0d04ff488 | ||
|
|
f83787508f | ||
|
|
20aba7eb17 | ||
|
|
0cdea3318c | ||
|
|
4dc2c18075 | ||
|
|
74e97abac4 | ||
|
|
b1db95a925 | ||
|
|
9dac9850b6 | ||
|
|
abe091254a | ||
|
|
d2e5367dc6 | ||
|
|
8ccd1f5fe4 | ||
|
|
50bc865dd2 | ||
|
|
74a6ee7066 | ||
|
|
89e76bcb48 | ||
|
|
c55f6baf67 | ||
|
|
ae154489e1 | ||
|
|
fdc79033ce | ||
|
|
9a8aa5e632 | ||
|
|
6b81f3ce5f | ||
|
|
aeaddfe36b | ||
|
|
20c1f30877 | ||
|
|
52ce6ff38e | ||
|
|
c692a3c80e | ||
|
|
491009636a | ||
|
|
ed16ee14ea | ||
|
|
7f2ed09267 | ||
|
|
c0976897ef | ||
|
|
85b55aa924 | ||
|
|
91d0f76783 | ||
|
|
741badf9e6 | ||
|
|
ca1f3ac377 | ||
|
|
e13e1c9ca3 | ||
|
|
06ad042443 | ||
|
|
9d333b855c | ||
|
|
f46e2acd56 | ||
|
|
5ac4d3f4ae | ||
|
|
1614eebc47 | ||
|
|
b50599b71f | ||
|
|
0459025bf8 | ||
|
|
0bd37da8c7 | ||
|
|
da969dde53 | ||
|
|
33fdd6cafa | ||
|
|
2fe68766eb | ||
|
|
205348697c | ||
|
|
9b3533c1da | ||
|
|
c3584e838e | ||
|
|
16d8b3fb58 | ||
|
|
686bbdc16b | ||
|
|
65b17e4f2b | ||
|
|
23c6898789 | ||
|
|
df2a1be2a2 | ||
|
|
2db628a2ba | ||
|
|
b6c40436c9 | ||
|
|
a8a70cac08 | ||
|
|
3eefbf97b1 | ||
|
|
3c423e0838 | ||
|
|
99cde43954 | ||
|
|
fa3a787bf7 | ||
|
|
c776dc8036 | ||
|
|
1ef068351d | ||
|
|
6abe0a1862 | ||
|
|
ff13045f52 | ||
|
|
59c09681cb | ||
|
|
f664cf6fa5 | ||
|
|
01a847a9c2 | ||
|
|
6da655f67f | ||
|
|
21df7dced1 | ||
|
|
7fc257ea79 | ||
|
|
24f170ff72 | ||
|
|
39999c9ee4 | ||
|
|
27a5188e4e | ||
|
|
a5af0786aa | ||
|
|
e9c9cfaa72 | ||
|
|
8ca4ea0f3f | ||
|
|
86e1f9a9d6 | ||
|
|
b36ceda585 | ||
|
|
27a3e6c6db | ||
|
|
a731327c00 | ||
|
|
737c00978e | ||
|
|
18bcb3a067 | ||
|
|
f49f55576f | ||
|
|
1bef4f9a4d | ||
|
|
ab1df59f7a | ||
|
|
bcd235521e | ||
|
|
31a2eac302 | ||
|
|
7e6b7e5dd5 | ||
|
|
9ec9f48425 | ||
|
|
a3bec43eab | ||
|
|
f429b6397e | ||
|
|
9d6e7dc288 | ||
|
|
a27c09c1e8 | ||
|
|
ceb0697c73 | ||
|
|
6ad6a08bf1 | ||
|
|
fac6ad7116 | ||
|
|
7d8cda0457 | ||
|
|
33fc3fd63b | ||
|
|
8d39cc87f7 | ||
|
|
d0b1348c96 | ||
|
|
0afc38f6b8 | ||
|
|
264896ba17 | ||
|
|
08decf0b82 | ||
|
|
98381265e6 | ||
|
|
d323159719 | ||
|
|
7ef21e1d1c | ||
|
|
2d6b2ab7d7 | ||
|
|
a1e6fd88a9 | ||
|
|
e72ff867fc | ||
|
|
8512641984 | ||
|
|
f1aa64d191 | ||
|
|
347262538f | ||
|
|
82510d60ca | ||
|
|
6104cd04c3 | ||
|
|
44eb58426a | ||
|
|
078b60cc1e | ||
|
|
21e120a4f8 | ||
|
|
439b834aa8 | ||
|
|
ddbe8324be | ||
|
|
8ffe93113b | ||
|
|
8b31b7cb8a | ||
|
|
e09e21caa9 | ||
|
|
20b145c679 | ||
|
|
c5730cf1ad | ||
|
|
f16b038463 | ||
|
|
c08beec232 | ||
|
|
946361e0ae | ||
|
|
97cf65a231 | ||
|
|
d7eb6ac15d | ||
|
|
075afdbb77 | ||
|
|
2ac047504a | ||
|
|
c44aa50ef5 | ||
|
|
7ffafb49c4 | ||
|
|
9b7d57a853 | ||
|
|
ac19b3b512 | ||
|
|
b030317186 | ||
|
|
b506059874 | ||
|
|
cf7ba6e17f | ||
|
|
b7ce5663a3 | ||
|
|
58fa8064ad | ||
|
|
ed48f56526 | ||
|
|
896eb13f7d | ||
|
|
b8cd1c46c1 | ||
|
|
c5e84273c0 | ||
|
|
f21653ffb7 | ||
|
|
65c8116cc9 | ||
|
|
5e442433e5 | ||
|
|
7041347e76 | ||
|
|
810c205709 | ||
|
|
ec7035990a | ||
|
|
da6d9bb2bd | ||
|
|
e009043c63 | ||
|
|
79020e9338 | ||
|
|
2020244cae | ||
|
|
43fe8f25f8 | ||
|
|
9522888a60 | ||
|
|
70c183ae2b | ||
|
|
5d56eb9bef | ||
|
|
a461414a04 | ||
|
|
5737c3dca6 | ||
|
|
57ea50e59c | ||
|
|
7f630e8460 | ||
|
|
108e8502e1 | ||
|
|
4aa986d122 | ||
|
|
60239bbfc4 | ||
|
|
93ef3b1f1a | ||
|
|
d9ed135be4 | ||
|
|
e83fe0aabe | ||
|
|
4be7426ae7 | ||
|
|
0ce5ef7f56 | ||
|
|
c2c0946423 | ||
|
|
63049f61f7 | ||
|
|
1918b0f192 | ||
|
|
a3ad49b1fa | ||
|
|
bed63d1e2b | ||
|
|
4a8e739686 | ||
|
|
d502f33041 | ||
|
|
4a0ecf36c7 | ||
|
|
afb9e49755 | ||
|
|
18f65e5597 | ||
|
|
22b69f7dac | ||
|
|
15df062825 | ||
|
|
ed607d3895 | ||
|
|
f9b0db623d | ||
|
|
740cf12c11 | ||
|
|
4c4bf698b1 | ||
|
|
dc74e749c9 | ||
|
|
fa52c542d7 | ||
|
|
850d480c7c | ||
|
|
a92cc9dce9 | ||
|
|
4944a0a456 | ||
|
|
13c40058a8 | ||
|
|
1410c03c26 | ||
|
|
2f38b3040d | ||
|
|
79411a7350 | ||
|
|
ee94c2af32 | ||
|
|
d46e5c8d86 | ||
|
|
95cd10bfba | ||
|
|
59ed08b92d | ||
|
|
2b9f7bca51 | ||
|
|
a860a8c02b | ||
|
|
f2cbb8d2f7 | ||
|
|
ea61599589 | ||
|
|
0b59c95f63 | ||
|
|
66d4308810 | ||
|
|
f2648df2ad | ||
|
|
d20f68e897 | ||
|
|
338021645d | ||
|
|
a0a11842cb | ||
|
|
f5832d6a25 | ||
|
|
8fa6d9de39 | ||
|
|
e662338d6f | ||
|
|
2c1d6817dd | ||
|
|
5d4a3fec1f | ||
|
|
6603a30e7e | ||
|
|
81d08ca517 | ||
|
|
e04506a614 | ||
|
|
39756512ae | ||
|
|
71c29ea5e7 | ||
|
|
87ce266b14 | ||
|
|
ed6d856c24 | ||
|
|
d3ecbef946 | ||
|
|
7b24f5eb21 | ||
|
|
e1f82e338a | ||
|
|
a835d34a01 | ||
|
|
79d70c9977 | ||
|
|
aea82723cb | ||
|
|
d47ff0b31a | ||
|
|
affcb9d5c3 | ||
|
|
9be2686733 | ||
|
|
7126fed2b5 | ||
|
|
5bc4330e1c | ||
|
|
b25ac7116e | ||
|
|
8896867bb3 | ||
|
|
ba7c9eec7b | ||
|
|
9b95fde8d1 | ||
|
|
2851f16395 | ||
|
|
0d63dfb931 | ||
|
|
37558e3135 | ||
|
|
96021e42a2 | ||
|
|
c32b845515 | ||
|
|
147d980c54 | ||
|
|
f91c43dde9 | ||
|
|
4cf5cb06a0 | ||
|
|
8e4b4c3144 | ||
|
|
c302013696 | ||
|
|
37cb94c59d | ||
|
|
01f7c6bc2b | ||
|
|
8bd6ccb0de | ||
|
|
ed8895dfbb | ||
|
|
a55632051b | ||
|
|
7e347a458d | ||
|
|
cce71f23e2 | ||
|
|
d68461a127 | ||
|
|
1bd12a9411 | ||
|
|
4086ba4763 | ||
|
|
6a9cdf71d7 | ||
|
|
a9644c4f86 | ||
|
|
cf62ad5e8e | ||
|
|
f8ed16666c | ||
|
|
37926b4c19 | ||
|
|
b080a2003f | ||
|
|
ab0008be86 | ||
|
|
4a42b0d000 | ||
|
|
e3d4b19dac | ||
|
|
403d600db4 | ||
|
|
835e6e8891 | ||
|
|
eec25113b5 | ||
|
|
a7c4161f91 | ||
|
|
799eb9e6ef | ||
|
|
88993cb67b | ||
|
|
0dc9c98c06 | ||
|
|
c1c91cec44 | ||
|
|
19b6927320 | ||
|
|
0889ebc8b8 | ||
|
|
fb249c0ea5 | ||
|
|
feb22ff0a7 | ||
|
|
3c95156ce1 | ||
|
|
8b6dca6a46 | ||
|
|
43907eea26 | ||
|
|
67145a80d0 | ||
|
|
0b3138fec6 | ||
|
|
b84896b4f9 | ||
|
|
efd046d2f8 | ||
|
|
06fcf817bb | ||
|
|
16a94d9054 | ||
|
|
5bf502188d | ||
|
|
5269b4bc82 | ||
|
|
e3f8ed9886 | ||
|
|
74de554fb0 | ||
|
|
b41de1a982 | ||
|
|
25f7d9ccdd | ||
|
|
9646745181 | ||
|
|
1317d9c4f0 | ||
|
|
351029a842 | ||
|
|
15e1fb61ac | ||
|
|
1889a829b5 | ||
|
|
53a14fce38 | ||
|
|
d9ed7b09c7 | ||
|
|
4dcb18f00e | ||
|
|
0a52fe0a7a | ||
|
|
e5a4d11cf9 | ||
|
|
6c233f13de | ||
|
|
00aee3496c | ||
|
|
77ae40e3d6 | ||
|
|
68cba44476 | ||
|
|
b86d06f632 | ||
|
|
0b7cf305a0 | ||
|
|
21ae36bc3a | ||
|
|
4e2d9e9165 | ||
|
|
6cee308894 |
@@ -1,3 +1,84 @@
|
||||
# Ignore git
|
||||
# Git
|
||||
.github
|
||||
.git
|
||||
.git
|
||||
.gitignore
|
||||
|
||||
# Documentation
|
||||
docs/
|
||||
README.md
|
||||
LICENSE
|
||||
|
||||
# Development files
|
||||
.pylintrc
|
||||
*.pyc
|
||||
__pycache__/
|
||||
*.pyo
|
||||
*.pyd
|
||||
.Python
|
||||
*.so
|
||||
.pytest_cache/
|
||||
.coverage
|
||||
htmlcov/
|
||||
.tox/
|
||||
.nox/
|
||||
.hypothesis/
|
||||
.mypy_cache/
|
||||
.dmypy.json
|
||||
dmypy.json
|
||||
|
||||
# Virtual environments
|
||||
venv/
|
||||
env/
|
||||
ENV/
|
||||
env.bak/
|
||||
venv.bak/
|
||||
|
||||
# IDE
|
||||
.vscode/
|
||||
.idea/
|
||||
*.swp
|
||||
*.swo
|
||||
*~
|
||||
|
||||
# OS
|
||||
.DS_Store
|
||||
.DS_Store?
|
||||
._*
|
||||
.Spotlight-V100
|
||||
.Trashes
|
||||
ehthumbs.db
|
||||
Thumbs.db
|
||||
|
||||
# Logs
|
||||
*.log
|
||||
logs/
|
||||
|
||||
# Temporary files
|
||||
*.tmp
|
||||
*.temp
|
||||
tmp/
|
||||
temp/
|
||||
|
||||
# Database
|
||||
*.db
|
||||
*.sqlite
|
||||
*.sqlite3
|
||||
|
||||
# Test files
|
||||
tests/
|
||||
test_*
|
||||
*_test.py
|
||||
|
||||
# Build artifacts
|
||||
build/
|
||||
dist/
|
||||
*.egg-info/
|
||||
|
||||
# Docker
|
||||
Dockerfile*
|
||||
docker-compose*
|
||||
.dockerignore
|
||||
|
||||
# Other
|
||||
app.ico
|
||||
frozen.spec
|
||||
2
.github/ISSUE_TEMPLATE/rfc.yml
vendored
2
.github/ISSUE_TEMPLATE/rfc.yml
vendored
@@ -10,7 +10,7 @@ body:
|
||||
目的是让协作的开发者间清晰的知道「要做什么」和「具体会怎么做」,以及所有的开发者都能公开透明的参与讨论;
|
||||
以便评估和讨论产生的影响 (遗漏的考虑、向后兼容性、与现有功能的冲突),
|
||||
因此提案侧重在对解决问题的 **方案、设计、步骤** 的描述上。
|
||||
|
||||
|
||||
如果仅希望讨论是否添加或改进某功能本身,请使用 -> [Issue: 功能改进](https://github.com/jxxghp/MoviePilot/issues/new?assignees=&labels=feature+request&projects=&template=feature_request.yml&title=%5BFeature+Request%5D%3A+)
|
||||
- type: textarea
|
||||
id: background
|
||||
|
||||
60
.github/workflows/beta.yml
vendored
Normal file
60
.github/workflows/beta.yml
vendored
Normal file
@@ -0,0 +1,60 @@
|
||||
name: MoviePilot Builder Beta
|
||||
on:
|
||||
workflow_dispatch:
|
||||
|
||||
jobs:
|
||||
Docker-build:
|
||||
runs-on: ubuntu-latest
|
||||
name: Build Docker Image
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Release version
|
||||
id: release_version
|
||||
run: |
|
||||
app_version=$(cat version.py |sed -ne "s/APP_VERSION\s=\s'v\(.*\)'/\1/gp")
|
||||
echo "app_version=$app_version" >> $GITHUB_ENV
|
||||
|
||||
- name: Docker Meta
|
||||
id: meta
|
||||
uses: docker/metadata-action@v5
|
||||
with:
|
||||
images: |
|
||||
${{ secrets.DOCKER_USERNAME }}/moviepilot-v2
|
||||
ghcr.io/${{ github.repository }}
|
||||
tags: |
|
||||
type=raw,value=beta
|
||||
|
||||
- name: Set Up QEMU
|
||||
uses: docker/setup-qemu-action@v3
|
||||
|
||||
- name: Set Up Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
|
||||
- name: Login DockerHub
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
username: ${{ secrets.DOCKER_USERNAME }}
|
||||
password: ${{ secrets.DOCKER_PASSWORD }}
|
||||
|
||||
- name: Login GitHub Container Registry
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.actor }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Build Image
|
||||
uses: docker/build-push-action@v5
|
||||
with:
|
||||
context: .
|
||||
file: docker/Dockerfile
|
||||
platforms: |
|
||||
linux/amd64
|
||||
linux/arm64/v8
|
||||
push: true
|
||||
tags: ${{ steps.meta.outputs.tags }}
|
||||
labels: ${{ steps.meta.outputs.labels }}
|
||||
cache-from: type=gha, scope=${{ github.workflow }}-docker
|
||||
cache-to: type=gha, scope=${{ github.workflow }}-docker
|
||||
134
.github/workflows/build.yml
vendored
134
.github/workflows/build.yml
vendored
@@ -14,6 +14,9 @@ jobs:
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
fetch-tags: true
|
||||
|
||||
- name: Release version
|
||||
id: release_version
|
||||
@@ -25,7 +28,10 @@ jobs:
|
||||
id: meta
|
||||
uses: docker/metadata-action@v5
|
||||
with:
|
||||
images: ${{ secrets.DOCKER_USERNAME }}/moviepilot-v2
|
||||
images: |
|
||||
${{ secrets.DOCKER_USERNAME }}/moviepilot-v2
|
||||
${{ secrets.DOCKER_USERNAME }}/moviepilot
|
||||
ghcr.io/${{ github.repository }}
|
||||
tags: |
|
||||
type=raw,value=${{ env.app_version }}
|
||||
type=raw,value=latest
|
||||
@@ -42,11 +48,18 @@ jobs:
|
||||
username: ${{ secrets.DOCKER_USERNAME }}
|
||||
password: ${{ secrets.DOCKER_PASSWORD }}
|
||||
|
||||
- name: Login GitHub Container Registry
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.actor }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Build Image
|
||||
uses: docker/build-push-action@v5
|
||||
with:
|
||||
context: .
|
||||
file: Dockerfile
|
||||
file: docker/Dockerfile
|
||||
platforms: |
|
||||
linux/amd64
|
||||
linux/arm64/v8
|
||||
@@ -56,10 +69,122 @@ jobs:
|
||||
cache-from: type=gha, scope=${{ github.workflow }}-docker
|
||||
cache-to: type=gha, scope=${{ github.workflow }}-docker
|
||||
|
||||
- name: Generate Changelog
|
||||
id: changelog
|
||||
run: |
|
||||
# 获取上一个 tag(排除当前版本的 tag)
|
||||
PREVIOUS_TAG=$(git tag -l 'v*' --sort=-v:refname | grep -v "^v${{ env.app_version }}$" | head -n 1)
|
||||
echo "Previous tag: $PREVIOUS_TAG"
|
||||
|
||||
# 使用 || 作为分隔符,同时获取 commit 消息和作者 GitHub 用户名
|
||||
if [ -z "$PREVIOUS_TAG" ]; then
|
||||
COMMITS=$(git log --pretty=format:"%s||%an" HEAD)
|
||||
else
|
||||
COMMITS=$(git log --pretty=format:"%s||%an" ${PREVIOUS_TAG}..HEAD)
|
||||
fi
|
||||
|
||||
# 分类收集 commit 消息(使用关联数组去重)
|
||||
declare -A SEEN
|
||||
FEATURES=""
|
||||
FIXES=""
|
||||
OTHERS=""
|
||||
|
||||
while IFS= read -r line; do
|
||||
# 跳过空行
|
||||
if [ -z "$line" ]; then
|
||||
continue
|
||||
fi
|
||||
|
||||
# 分离 commit 消息和作者
|
||||
msg=$(echo "$line" | sed 's/||[^|]*$//')
|
||||
author=$(echo "$line" | sed 's/.*||//')
|
||||
|
||||
# 跳过 Merge commit 和版本更新 commit
|
||||
if echo "$msg" | grep -qE "^Merge pull request|^Merge branch|^更新 version"; then
|
||||
continue
|
||||
fi
|
||||
|
||||
# 按 Conventional Commits 前缀分类
|
||||
if echo "$msg" | grep -qiE "^feat(\(.+\))?:"; then
|
||||
desc=$(echo "$msg" | sed -E 's/^feat(\([^)]*\))?:\s*//')
|
||||
category="FEATURES"
|
||||
elif echo "$msg" | grep -qiE "^fix(\(.+\))?:"; then
|
||||
desc=$(echo "$msg" | sed -E 's/^fix(\([^)]*\))?:\s*//')
|
||||
category="FIXES"
|
||||
elif echo "$msg" | grep -qiE "^(docs|style|refactor|perf|test|build|ci|chore|revert)(\(.+\))?:"; then
|
||||
desc=$(echo "$msg" | sed -E 's/^(docs|style|refactor|perf|test|build|ci|chore|revert)(\([^)]*\))?:\s*//')
|
||||
category="OTHERS"
|
||||
else
|
||||
desc="$msg"
|
||||
category="OTHERS"
|
||||
fi
|
||||
|
||||
# 使用 "分类+描述" 作为去重的 key,跳过重复内容
|
||||
dedup_key="${category}::${desc}"
|
||||
if [ -n "${SEEN[$dedup_key]+x}" ]; then
|
||||
continue
|
||||
fi
|
||||
SEEN[$dedup_key]=1
|
||||
|
||||
# 添加 by @author 引用
|
||||
entry="- ${desc} by @${author}"
|
||||
|
||||
case "$category" in
|
||||
FEATURES) FEATURES="${FEATURES}${entry}\n" ;;
|
||||
FIXES) FIXES="${FIXES}${entry}\n" ;;
|
||||
OTHERS) OTHERS="${OTHERS}${entry}\n" ;;
|
||||
esac
|
||||
done <<< "$COMMITS"
|
||||
|
||||
# 组装 changelog
|
||||
CHANGELOG=""
|
||||
|
||||
if [ -n "$FEATURES" ]; then
|
||||
CHANGELOG="${CHANGELOG}### ✨ 新功能\n\n${FEATURES}\n"
|
||||
fi
|
||||
|
||||
if [ -n "$FIXES" ]; then
|
||||
CHANGELOG="${CHANGELOG}### 🐛 修复\n\n${FIXES}\n"
|
||||
fi
|
||||
|
||||
if [ -n "$OTHERS" ]; then
|
||||
CHANGELOG="${CHANGELOG}### 🔧 其他\n\n${OTHERS}\n"
|
||||
fi
|
||||
|
||||
# 添加版本对比链接
|
||||
if [ -n "$PREVIOUS_TAG" ]; then
|
||||
CHANGELOG="${CHANGELOG}**完整更新记录**: https://github.com/${{ github.repository }}/compare/${PREVIOUS_TAG}...v${{ env.app_version }}"
|
||||
fi
|
||||
|
||||
# 写入环境变量
|
||||
echo "CHANGELOG<<EOF" >> $GITHUB_ENV
|
||||
echo -e "$CHANGELOG" >> $GITHUB_ENV
|
||||
echo "EOF" >> $GITHUB_ENV
|
||||
|
||||
- name: Get existing release body
|
||||
id: get_release_body
|
||||
continue-on-error: true
|
||||
run: |
|
||||
release_body=$(curl -s -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
|
||||
"https://api.github.com/repos/${{ github.repository }}/releases/tags/v${{ env.app_version }}" | \
|
||||
jq -r '.body // ""')
|
||||
|
||||
# 如果已有手动编写的 release body,则保留;否则使用自动生成的 changelog
|
||||
if [ -n "$release_body" ] && [ "$release_body" != "null" ] && [ "$release_body" != "" ]; then
|
||||
echo "RELEASE_BODY<<EOF" >> $GITHUB_ENV
|
||||
echo "$release_body" >> $GITHUB_ENV
|
||||
echo "EOF" >> $GITHUB_ENV
|
||||
else
|
||||
echo "RELEASE_BODY<<EOF" >> $GITHUB_ENV
|
||||
echo "${{ env.CHANGELOG }}" >> $GITHUB_ENV
|
||||
echo "EOF" >> $GITHUB_ENV
|
||||
fi
|
||||
|
||||
- name: Delete Release
|
||||
uses: dev-drprasad/delete-tag-and-release@v1.1
|
||||
continue-on-error: true
|
||||
with:
|
||||
tag_name: ${{ env.app_version }}
|
||||
tag_name: v${{ env.app_version }}
|
||||
delete_release: true
|
||||
github_token: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
@@ -68,8 +193,9 @@ jobs:
|
||||
with:
|
||||
tag_name: v${{ env.app_version }}
|
||||
name: v${{ env.app_version }}
|
||||
body: ${{ env.RELEASE_BODY }}
|
||||
draft: false
|
||||
prerelease: false
|
||||
make_latest: false
|
||||
make_latest: true
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
33
.github/workflows/issues.yml
vendored
Normal file
33
.github/workflows/issues.yml
vendored
Normal file
@@ -0,0 +1,33 @@
|
||||
name: Close inactive issues
|
||||
on:
|
||||
workflow_dispatch:
|
||||
|
||||
schedule:
|
||||
# Github Action 只支持 UTC 时间。
|
||||
# '0 18 * * *' 对应 UTC 时间的 18:00,也就是中国时区 (UTC+8) 的第二天凌晨 02:00。
|
||||
- cron: "0 18 * * *"
|
||||
|
||||
jobs:
|
||||
close-issues:
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
issues: write
|
||||
pull-requests: write
|
||||
steps:
|
||||
- uses: actions/stale@v5
|
||||
with:
|
||||
# 标记 stale 标签时间
|
||||
days-before-issue-stale: 30
|
||||
# 关闭 issues 标签时间
|
||||
days-before-issue-close: 14
|
||||
# 自定义标签名
|
||||
stale-issue-label: "stale"
|
||||
stale-issue-message: "此问题已过时,因为它已打开 30 天且没有任何活动。"
|
||||
close-issue-message: "此问题已关闭,因为它在标记为 stale 后,已处于无更新状态 14 天。"
|
||||
# 忽略所有的 Pull Request,只处理 Issue
|
||||
days-before-pr-stale: -1
|
||||
days-before-pr-close: -1
|
||||
# 排除带有RFC标签的issue
|
||||
exempt-issue-labels: "RFC"
|
||||
operations-per-run: 500
|
||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
91
.github/workflows/pylint.yml
vendored
Normal file
91
.github/workflows/pylint.yml
vendored
Normal file
@@ -0,0 +1,91 @@
|
||||
name: Pylint Code Quality Check
|
||||
|
||||
on:
|
||||
# 允许手动触发
|
||||
workflow_dispatch:
|
||||
|
||||
jobs:
|
||||
pylint:
|
||||
runs-on: ubuntu-latest
|
||||
name: Pylint Code Quality Check
|
||||
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Set up Python
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: '3.12'
|
||||
cache: 'pip'
|
||||
|
||||
- name: Cache pip dependencies
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: ~/.cache/pip
|
||||
key: ${{ runner.os }}-pip-${{ hashFiles('**/requirements.txt', '**/requirements.in') }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-pip-
|
||||
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
python -m pip install --upgrade pip setuptools wheel
|
||||
pip install pylint
|
||||
# 安装项目依赖
|
||||
if [ -f requirements.txt ]; then
|
||||
echo "📦 安装 requirements.txt 中的依赖..."
|
||||
pip install -r requirements.txt
|
||||
elif [ -f requirements.in ]; then
|
||||
echo "📦 安装 requirements.in 中的依赖..."
|
||||
pip install -r requirements.in
|
||||
else
|
||||
echo "⚠️ 未找到依赖文件,仅安装 pylint"
|
||||
fi
|
||||
|
||||
- name: Verify pylint config
|
||||
run: |
|
||||
# 检查项目中的pylint配置文件是否存在
|
||||
if [ -f .pylintrc ]; then
|
||||
echo "✅ 找到项目配置文件: .pylintrc"
|
||||
echo "配置文件内容预览:"
|
||||
head -10 .pylintrc
|
||||
else
|
||||
echo "❌ 未找到 .pylintrc 配置文件"
|
||||
exit 1
|
||||
fi
|
||||
- name: Run pylint
|
||||
run: |
|
||||
# 运行pylint,检查主要的Python文件
|
||||
echo "🚀 运行 Pylint 错误检查..."
|
||||
|
||||
# 检查主要目录 - 只关注错误,如果有错误则退出
|
||||
echo "📂 检查 app/ 目录..."
|
||||
pylint app/ --output-format=colorized --reports=yes --score=yes
|
||||
|
||||
# 检查根目录的Python文件
|
||||
echo "📂 检查根目录 Python 文件..."
|
||||
for file in $(find . -name "*.py" -not -path "./.*" -not -path "./.venv/*" -not -path "./build/*" -not -path "./dist/*" -not -path "./tests/*" -not -path "./docs/*" -not -path "./__pycache__/*" -maxdepth 1); do
|
||||
echo "检查文件: $file"
|
||||
pylint "$file" --output-format=colorized || exit 1
|
||||
done
|
||||
|
||||
# 生成详细报告
|
||||
echo "📊 生成 Pylint 详细报告..."
|
||||
pylint app/ --output-format=json > pylint-report.json || true
|
||||
|
||||
# 显示评分(仅供参考)
|
||||
echo "📈 Pylint 评分(仅供参考):"
|
||||
pylint app/ --score=yes --reports=no | tail -2 || true
|
||||
|
||||
- name: Upload pylint report
|
||||
uses: actions/upload-artifact@v4
|
||||
if: always()
|
||||
with:
|
||||
name: pylint-report
|
||||
path: pylint-report.json
|
||||
|
||||
- name: Summary
|
||||
run: |
|
||||
echo "🎉 Pylint 检查完成!"
|
||||
echo "✅ 没有发现语法错误或严重问题"
|
||||
echo "📊 详细报告已保存为构建工件"
|
||||
18
.gitignore
vendored
18
.gitignore
vendored
@@ -1,6 +1,10 @@
|
||||
.idea/
|
||||
.DS_Store
|
||||
*.c
|
||||
*.so
|
||||
*.pyd
|
||||
build/
|
||||
cython_cache/
|
||||
dist/
|
||||
nginx/
|
||||
test.py
|
||||
@@ -12,12 +16,24 @@ app/helper/*.bin
|
||||
app/plugins/**
|
||||
!app/plugins/__init__.py
|
||||
config/cookies/**
|
||||
config/app.env
|
||||
config/user.db*
|
||||
config/sites/**
|
||||
config/agent/
|
||||
config/logs/
|
||||
config/temp/
|
||||
config/cache/
|
||||
.runtime/
|
||||
public/
|
||||
.moviepilot.env
|
||||
*.pyc
|
||||
*.log
|
||||
.vscode
|
||||
venv
|
||||
venv
|
||||
|
||||
# Pylint
|
||||
pylint-report.json
|
||||
.pylint.d/
|
||||
|
||||
# AI
|
||||
.claude/
|
||||
|
||||
83
.pylintrc
Normal file
83
.pylintrc
Normal file
@@ -0,0 +1,83 @@
|
||||
[MASTER]
|
||||
# 指定Python路径
|
||||
init-hook='import sys; sys.path.append(".")'
|
||||
|
||||
# 忽略的文件和目录
|
||||
ignore=.git,__pycache__,.venv,build,dist,tests,docs
|
||||
|
||||
# 并行作业数量
|
||||
jobs=0
|
||||
|
||||
[MESSAGES CONTROL]
|
||||
# 只关注错误级别的问题,禁用警告、约定和重构建议
|
||||
# E = Error (错误) - 会导致构建失败
|
||||
# W = Warning (警告) - 仅显示,不会失败
|
||||
# R = Refactor (重构建议) - 仅显示,不会失败
|
||||
# C = Convention (约定) - 仅显示,不会失败
|
||||
# I = Information (信息) - 仅显示,不会失败
|
||||
|
||||
# 禁用大部分警告、约定和重构建议,只保留错误和重要警告
|
||||
disable=all
|
||||
enable=error,
|
||||
syntax-error,
|
||||
undefined-variable,
|
||||
used-before-assignment,
|
||||
unreachable,
|
||||
return-outside-function,
|
||||
yield-outside-function,
|
||||
continue-in-finally,
|
||||
nonlocal-without-binding,
|
||||
undefined-loop-variable,
|
||||
redefined-builtin,
|
||||
not-callable,
|
||||
assignment-from-no-return,
|
||||
no-value-for-parameter,
|
||||
too-many-function-args,
|
||||
unexpected-keyword-arg,
|
||||
redundant-keyword-arg,
|
||||
import-error,
|
||||
relative-beyond-top-level
|
||||
|
||||
[REPORTS]
|
||||
# 设置报告格式
|
||||
output-format=colorized
|
||||
reports=yes
|
||||
score=yes
|
||||
|
||||
[FORMAT]
|
||||
# 最大行长度
|
||||
max-line-length=120
|
||||
# 缩进大小
|
||||
indent-string=' '
|
||||
|
||||
[DESIGN]
|
||||
# 最大参数数量
|
||||
max-args=10
|
||||
# 最大本地变量数量
|
||||
max-locals=20
|
||||
# 最大分支数量
|
||||
max-branches=15
|
||||
# 最大语句数量
|
||||
max-statements=50
|
||||
# 最大父类数量
|
||||
max-parents=7
|
||||
# 最大属性数量
|
||||
max-attributes=10
|
||||
# 最小公共方法数量
|
||||
min-public-methods=1
|
||||
# 最大公共方法数量
|
||||
max-public-methods=25
|
||||
|
||||
[SIMILARITIES]
|
||||
# 最小相似行数
|
||||
min-similarity-lines=6
|
||||
# 忽略注释
|
||||
ignore-comments=yes
|
||||
# 忽略文档字符串
|
||||
ignore-docstrings=yes
|
||||
# 忽略导入
|
||||
ignore-imports=yes
|
||||
|
||||
[TYPECHECK]
|
||||
# 生成缺失成员提示的类列表
|
||||
generated-members=requests.packages.urllib3
|
||||
91
Dockerfile
91
Dockerfile
@@ -1,91 +0,0 @@
|
||||
FROM python:3.11.4-slim-bookworm
|
||||
ENV LANG="C.UTF-8" \
|
||||
TZ="Asia/Shanghai" \
|
||||
HOME="/moviepilot" \
|
||||
CONFIG_DIR="/config" \
|
||||
TERM="xterm" \
|
||||
DISPLAY=:987 \
|
||||
PUID=0 \
|
||||
PGID=0 \
|
||||
UMASK=000 \
|
||||
PORT=3001 \
|
||||
NGINX_PORT=3000 \
|
||||
MOVIEPILOT_AUTO_UPDATE=release
|
||||
WORKDIR "/app"
|
||||
RUN apt-get update -y \
|
||||
&& apt-get upgrade -y \
|
||||
&& apt-get -y install \
|
||||
musl-dev \
|
||||
nginx \
|
||||
gettext-base \
|
||||
locales \
|
||||
procps \
|
||||
gosu \
|
||||
bash \
|
||||
wget \
|
||||
curl \
|
||||
busybox \
|
||||
dumb-init \
|
||||
jq \
|
||||
fuse3 \
|
||||
rsync \
|
||||
ffmpeg \
|
||||
nano \
|
||||
&& \
|
||||
if [ "$(uname -m)" = "x86_64" ]; \
|
||||
then ln -s /usr/lib/x86_64-linux-musl/libc.so /lib/libc.musl-x86_64.so.1; \
|
||||
elif [ "$(uname -m)" = "aarch64" ]; \
|
||||
then ln -s /usr/lib/aarch64-linux-musl/libc.so /lib/libc.musl-aarch64.so.1; \
|
||||
fi \
|
||||
&& curl https://rclone.org/install.sh | bash \
|
||||
&& curl --insecure -fsSL https://raw.githubusercontent.com/DDS-Derek/Aria2-Pro-Core/master/aria2-install.sh | bash \
|
||||
&& apt-get autoremove -y \
|
||||
&& apt-get clean -y \
|
||||
&& rm -rf \
|
||||
/tmp/* \
|
||||
/moviepilot/.cache \
|
||||
/var/lib/apt/lists/* \
|
||||
/var/tmp/*
|
||||
COPY requirements.in requirements.in
|
||||
RUN apt-get update -y \
|
||||
&& apt-get install -y build-essential \
|
||||
&& pip install --upgrade pip \
|
||||
&& pip install Cython pip-tools \
|
||||
&& pip-compile requirements.in \
|
||||
&& pip install -r requirements.txt \
|
||||
&& playwright install-deps chromium \
|
||||
&& apt-get remove -y build-essential \
|
||||
&& apt-get autoremove -y \
|
||||
&& apt-get clean -y \
|
||||
&& rm -rf \
|
||||
/tmp/* \
|
||||
/moviepilot/.cache \
|
||||
/var/lib/apt/lists/* \
|
||||
/var/tmp/*
|
||||
COPY . .
|
||||
RUN cp -f /app/nginx.conf /etc/nginx/nginx.template.conf \
|
||||
&& cp -f /app/update /usr/local/bin/mp_update \
|
||||
&& cp -f /app/entrypoint /entrypoint \
|
||||
&& cp -f /app/docker_http_proxy.conf /etc/nginx/docker_http_proxy.conf \
|
||||
&& chmod +x /entrypoint /usr/local/bin/mp_update \
|
||||
&& mkdir -p ${HOME} \
|
||||
&& groupadd -r moviepilot -g 918 \
|
||||
&& useradd -r moviepilot -g moviepilot -d ${HOME} -s /bin/bash -u 918 \
|
||||
&& python_ver=$(python3 -V | awk '{print $2}') \
|
||||
&& echo "/app/" > /usr/local/lib/python${python_ver%.*}/site-packages/app.pth \
|
||||
&& echo 'fs.inotify.max_user_watches=5242880' >> /etc/sysctl.conf \
|
||||
&& echo 'fs.inotify.max_user_instances=5242880' >> /etc/sysctl.conf \
|
||||
&& locale-gen zh_CN.UTF-8 \
|
||||
&& FRONTEND_VERSION=$(sed -n "s/^FRONTEND_VERSION\s*=\s*'\([^']*\)'/\1/p" /app/version.py) \
|
||||
&& curl -sL "https://github.com/jxxghp/MoviePilot-Frontend/releases/download/${FRONTEND_VERSION}/dist.zip" | busybox unzip -d / - \
|
||||
&& mv /dist /public \
|
||||
&& curl -sL "https://github.com/jxxghp/MoviePilot-Plugins/archive/refs/heads/main.zip" | busybox unzip -d /tmp - \
|
||||
&& mv -f /tmp/MoviePilot-Plugins-main/plugins.v2/* /app/app/plugins/ \
|
||||
&& cat /tmp/MoviePilot-Plugins-main/package.json | jq -r 'to_entries[] | select(.value.v2 == true) | .key' | awk '{print tolower($0)}' | \
|
||||
while read -r i; do if [ ! -d "/app/app/plugins/$i" ]; then mv "/tmp/MoviePilot-Plugins-main/plugins/$i" "/app/app/plugins/"; else echo "跳过 $i"; fi; done \
|
||||
&& curl -sL "https://github.com/jxxghp/MoviePilot-Resources/archive/refs/heads/main.zip" | busybox unzip -d /tmp - \
|
||||
&& mv -f /tmp/MoviePilot-Resources-main/resources/* /app/app/helper/ \
|
||||
&& rm -rf /tmp/*
|
||||
EXPOSE 3000
|
||||
VOLUME [ "/config" ]
|
||||
ENTRYPOINT [ "/entrypoint" ]
|
||||
50
README.md
50
README.md
@@ -1,5 +1,8 @@
|
||||
|
||||
# MoviePilot
|
||||
|
||||
简体中文 | [English](README_EN.md)
|
||||
|
||||

|
||||

|
||||

|
||||
@@ -16,15 +19,58 @@
|
||||
|
||||
发布频道:https://t.me/moviepilot_channel
|
||||
|
||||
|
||||
## 主要特性
|
||||
|
||||
- 前后端分离,基于FastApi + Vue3,前端项目地址:[MoviePilot-Frontend](https://github.com/jxxghp/MoviePilot-Frontend),API:http://localhost:3001/docs
|
||||
- 前后端分离,基于FastApi + Vue3。
|
||||
- 聚焦核心需求,简化功能和设置,部分设置项可直接使用默认值。
|
||||
- 重新设计了用户界面,更加美观易用。
|
||||
|
||||
|
||||
## 安装使用
|
||||
|
||||
访问官方Wiki:https://wiki.movie-pilot.org
|
||||
官方Wiki:https://wiki.movie-pilot.org
|
||||
|
||||
|
||||
## 本地 CLI
|
||||
|
||||
一键安装运行脚本:
|
||||
|
||||
```shell
|
||||
curl -fsSL https://raw.githubusercontent.com/jxxghp/MoviePilot/v2/scripts/bootstrap-local.sh | bash
|
||||
```
|
||||
|
||||
使用 `moviepilot` 命令管理MoviePilot,完整 CLI 文档:[`docs/cli.md`](docs/cli.md)
|
||||
|
||||
|
||||
## 为 AI Agent 添加 Skills
|
||||
```shell
|
||||
npx skills add https://github.com/jxxghp/MoviePilot
|
||||
```
|
||||
|
||||
## 参与开发
|
||||
|
||||
API文档:https://api.movie-pilot.org
|
||||
|
||||
MCP工具API文档:详见 [docs/mcp-api.md](docs/mcp-api.md)
|
||||
|
||||
开发环境准备与本地源码运行说明:[`docs/development-setup.md`](docs/development-setup.md)
|
||||
|
||||
插件开发说明:<https://wiki.movie-pilot.org/zh/plugindev>
|
||||
|
||||
## 相关项目
|
||||
|
||||
- [MoviePilot-Frontend](https://github.com/jxxghp/MoviePilot-Frontend)
|
||||
- [MoviePilot-Resources](https://github.com/jxxghp/MoviePilot-Resources)
|
||||
- [MoviePilot-Plugins](https://github.com/jxxghp/MoviePilot-Plugins)
|
||||
- [MoviePilot-Server](https://github.com/jxxghp/MoviePilot-Server)
|
||||
- [MoviePilot-Wiki](https://github.com/jxxghp/MoviePilot-Wiki)
|
||||
|
||||
## 免责申明
|
||||
|
||||
- 本软件仅供学习交流使用,任何人不得将本软件用于商业用途,任何人不得将本软件用于违法犯罪活动,软件对用户行为不知情,一切责任由使用者承担。
|
||||
- 本软件代码开源,基于开源代码进行修改,人为去除相关限制导致软件被分发、传播并造成责任事件的,需由代码修改发布者承担全部责任,不建议对用户认证机制进行规避或修改并公开发布。
|
||||
- 本项目不接受捐赠,没有在任何地方发布捐赠信息页面,软件本身不收费也不提供任何收费相关服务,请仔细辨别避免误导。
|
||||
|
||||
## 贡献者
|
||||
|
||||
|
||||
77
README_EN.md
Normal file
77
README_EN.md
Normal file
@@ -0,0 +1,77 @@
|
||||
# MoviePilot
|
||||
|
||||
[简体中文](README.md) | English
|
||||
|
||||

|
||||

|
||||

|
||||

|
||||

|
||||

|
||||

|
||||

|
||||
|
||||
Redesigned from parts of [NAStool](https://github.com/NAStool/nas-tools), with a stronger focus on core automation scenarios while reducing issues and making the project easier to extend and maintain.
|
||||
|
||||
# For learning and personal communication only. Please do not promote this project on platforms in mainland China.
|
||||
|
||||
Release channel: https://t.me/moviepilot_channel
|
||||
|
||||
|
||||
## Key Features
|
||||
|
||||
- Frontend/backend separation based on FastApi + Vue3.
|
||||
- Focuses on core needs, simplifies features and settings, and allows some options to work well with sensible defaults.
|
||||
- Reworked user interface for a cleaner and more practical experience.
|
||||
|
||||
|
||||
## Installation
|
||||
|
||||
Official wiki: https://wiki.movie-pilot.org
|
||||
|
||||
|
||||
## Local CLI
|
||||
|
||||
One-command bootstrap script:
|
||||
|
||||
```shell
|
||||
curl -fsSL https://raw.githubusercontent.com/jxxghp/MoviePilot/v2/scripts/bootstrap-local.sh | bash
|
||||
```
|
||||
|
||||
Manage MoviePilot with the `moviepilot` command. Full CLI documentation: [`docs/cli.md`](docs/cli.md)
|
||||
|
||||
|
||||
## Add Skills for AI Agents
|
||||
```shell
|
||||
npx skills add https://github.com/jxxghp/MoviePilot
|
||||
```
|
||||
|
||||
## Development
|
||||
|
||||
API documentation: https://api.movie-pilot.org
|
||||
|
||||
MCP tool API documentation: see [docs/mcp-api.md](docs/mcp-api.md)
|
||||
|
||||
Development environment setup and local source-run guide: [`docs/development-setup.md`](docs/development-setup.md)
|
||||
|
||||
Plugin development guide: <https://wiki.movie-pilot.org/zh/plugindev>
|
||||
|
||||
## Related Projects
|
||||
|
||||
- [MoviePilot-Frontend](https://github.com/jxxghp/MoviePilot-Frontend)
|
||||
- [MoviePilot-Resources](https://github.com/jxxghp/MoviePilot-Resources)
|
||||
- [MoviePilot-Plugins](https://github.com/jxxghp/MoviePilot-Plugins)
|
||||
- [MoviePilot-Server](https://github.com/jxxghp/MoviePilot-Server)
|
||||
- [MoviePilot-Wiki](https://github.com/jxxghp/MoviePilot-Wiki)
|
||||
|
||||
## Disclaimer
|
||||
|
||||
- This software is for learning and personal communication only. It must not be used for commercial purposes or illegal activities. The software does not know how users choose to use it, and all responsibility rests with the user.
|
||||
- The source code is open source and derived from other open-source code. If someone removes the relevant restrictions and redistributes or publishes modified versions that lead to liability events, the publisher of those modifications bears full responsibility. Public releases that bypass or alter the user authentication mechanism are not recommended.
|
||||
- This project does not accept donations and has not published any donation page anywhere. The software itself is free of charge and does not provide paid services. Please verify information carefully to avoid being misled.
|
||||
|
||||
## Contributors
|
||||
|
||||
<a href="https://github.com/jxxghp/MoviePilot/graphs/contributors">
|
||||
<img src="https://contrib.rocks/image?repo=jxxghp/MoviePilot" />
|
||||
</a>
|
||||
@@ -1,25 +0,0 @@
|
||||
from abc import ABC, abstractmethod
|
||||
|
||||
from pydantic.main import BaseModel
|
||||
|
||||
from app.schemas import ActionContext
|
||||
|
||||
|
||||
class BaseAction(BaseModel, ABC):
|
||||
"""
|
||||
工作流动作基类
|
||||
"""
|
||||
|
||||
@property
|
||||
@abstractmethod
|
||||
def name(self) -> str:
|
||||
pass
|
||||
|
||||
@property
|
||||
@abstractmethod
|
||||
def description(self) -> str:
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
async def execute(self, params: dict, context: ActionContext) -> ActionContext:
|
||||
raise NotImplementedError
|
||||
1126
app/agent/__init__.py
Normal file
1126
app/agent/__init__.py
Normal file
File diff suppressed because it is too large
Load Diff
574
app/agent/callback/__init__.py
Normal file
574
app/agent/callback/__init__.py
Normal file
@@ -0,0 +1,574 @@
|
||||
import asyncio
|
||||
import threading
|
||||
from typing import Any, Optional, Tuple
|
||||
|
||||
from fastapi.concurrency import run_in_threadpool
|
||||
|
||||
from app.chain import ChainBase
|
||||
from app.log import logger
|
||||
from app.schemas import Notification
|
||||
from app.schemas.message import (
|
||||
MessageResponse,
|
||||
ChannelCapabilityManager,
|
||||
ChannelCapability,
|
||||
)
|
||||
from app.schemas.types import MessageChannel
|
||||
|
||||
|
||||
class _StreamChain(ChainBase):
|
||||
pass
|
||||
|
||||
|
||||
class StreamingHandler:
|
||||
"""
|
||||
流式Token缓冲管理器
|
||||
|
||||
负责从 LLM 流式 token 中积累文本,并在支持消息编辑的渠道上实时推送给用户。
|
||||
|
||||
工作流程:
|
||||
1. Agent开始处理时调用 start_streaming(),检查渠道能力并启动定时刷新
|
||||
2. LLM 产生 token 时调用 emit() 积累到缓冲区
|
||||
3. 定时器周期性调用 _flush():
|
||||
- 第一次有内容时发送新消息(通过 send_direct_message 获取 message_id)
|
||||
- 后续有新内容时编辑同一条消息(通过 edit_message)
|
||||
- 当消息长度接近渠道限制时,冻结当前消息并发送新消息继续输出
|
||||
4. 工具调用时:
|
||||
- 流式渠道:工具消息直接 emit() 追加到 buffer,与 Agent 文字合并为同一条流式消息
|
||||
- 非流式渠道:调用 take() 取出已积累的文字,与工具消息合并独立发送
|
||||
5. Agent最终完成时调用 stop_streaming():执行最后一次刷新,
|
||||
返回是否已通过流式发送完所有内容(调用方据此决定是否还需额外发送)
|
||||
"""
|
||||
|
||||
# 流式输出的刷新间隔(秒)
|
||||
FLUSH_INTERVAL = 0.3
|
||||
|
||||
def __init__(self):
|
||||
self._lock = threading.Lock()
|
||||
self._buffer = ""
|
||||
# 流式输出相关状态
|
||||
self._streaming_enabled = False
|
||||
self._flush_task: Optional[asyncio.Task] = None
|
||||
# 当前消息的发送信息(用于编辑消息)
|
||||
self._message_response: Optional[MessageResponse] = None
|
||||
# 已发送给用户的文本(用于追踪增量)
|
||||
self._sent_text = ""
|
||||
# 当前消息的起始偏移量(buffer 中属于当前消息的起始位置)
|
||||
self._msg_start_offset = 0
|
||||
# 当前渠道的单条消息最大长度(0 表示不限制)
|
||||
self._max_message_length = 0
|
||||
# 消息发送所需的上下文信息
|
||||
self._channel: Optional[str] = None
|
||||
self._source: Optional[str] = None
|
||||
self._user_id: Optional[str] = None
|
||||
self._username: Optional[str] = None
|
||||
self._title: str = ""
|
||||
self._allow_dispatch_without_context = False
|
||||
# 非啰嗦模式下的待输出工具统计,等下一段文本到来时再统一补一句摘要
|
||||
self._pending_tool_stats: dict[str, dict[str, Any]] = {}
|
||||
|
||||
def set_dispatch_policy(
|
||||
self, allow_dispatch_without_context: bool = False
|
||||
) -> None:
|
||||
"""
|
||||
设置在缺少渠道上下文时是否仍允许向默认通知渠道分发消息。
|
||||
后台 DISPATCH 任务允许,CAPTURE_ONLY 必须禁止。
|
||||
"""
|
||||
self._allow_dispatch_without_context = allow_dispatch_without_context
|
||||
|
||||
def emit(self, token: str) -> str:
|
||||
"""
|
||||
接收 LLM 流式 token,积累到缓冲区。
|
||||
如果存在待输出的工具统计,则会先补上一句摘要再追加 token。
|
||||
"""
|
||||
with self._lock:
|
||||
emitted = token or ""
|
||||
|
||||
if self._pending_tool_stats:
|
||||
summary = self._consume_pending_tool_summary_locked()
|
||||
if summary:
|
||||
if emitted:
|
||||
emitted = f"{summary}{emitted.lstrip(chr(10))}"
|
||||
else:
|
||||
emitted = summary
|
||||
|
||||
# 如果存量消息结束是两个换行,则去掉新消息前面的换行,避免过多空行
|
||||
if self._buffer.endswith("\n\n") and emitted.startswith("\n"):
|
||||
emitted = emitted.lstrip("\n")
|
||||
self._buffer += emitted
|
||||
return emitted
|
||||
|
||||
async def take(self) -> str:
|
||||
"""
|
||||
获取当前已积累的消息内容,获取后清空缓冲区。
|
||||
|
||||
用于非流式渠道:工具调用前取出 Agent 已产出的文字,
|
||||
与工具提示合并后独立发送。
|
||||
|
||||
注意:流式渠道不调用此方法,工具消息直接 emit 到 buffer 中。
|
||||
"""
|
||||
self.flush_pending_tool_summary()
|
||||
|
||||
with self._lock:
|
||||
if not self._buffer:
|
||||
return ""
|
||||
message = self._buffer
|
||||
logger.info(f"Agent消息: {message}")
|
||||
self._buffer = ""
|
||||
return message
|
||||
|
||||
def clear(self):
|
||||
"""
|
||||
清空缓冲区(不返回内容)
|
||||
"""
|
||||
with self._lock:
|
||||
self._buffer = ""
|
||||
self._sent_text = ""
|
||||
self._message_response = None
|
||||
self._msg_start_offset = 0
|
||||
self._pending_tool_stats = {}
|
||||
|
||||
def reset(self):
|
||||
"""
|
||||
重置缓冲区,清空已发送的文本从头更新,但保持消息编辑能力。
|
||||
|
||||
与 clear 的区别:
|
||||
- clear:完全重置所有状态,后续会开新消息
|
||||
- reset:只清空buffer,保留消息编辑状态,后续继续编辑同一条消息
|
||||
"""
|
||||
with self._lock:
|
||||
self._buffer = ""
|
||||
self._sent_text = ""
|
||||
self._msg_start_offset = 0
|
||||
self._pending_tool_stats = {}
|
||||
|
||||
async def start_streaming(
|
||||
self,
|
||||
channel: Optional[str] = None,
|
||||
source: Optional[str] = None,
|
||||
user_id: Optional[str] = None,
|
||||
username: Optional[str] = None,
|
||||
title: str = "",
|
||||
):
|
||||
"""
|
||||
启动流式输出。
|
||||
始终标记为流式状态(用于 buffer 收集 token),
|
||||
但只有渠道支持消息编辑时才启动定时刷新任务(实时推送给用户)。
|
||||
:param channel: 消息渠道
|
||||
:param source: 消息来源
|
||||
:param user_id: 用户ID
|
||||
:param username: 用户名
|
||||
:param title: 消息标题
|
||||
"""
|
||||
self._channel = channel
|
||||
self._source = source
|
||||
self._user_id = user_id
|
||||
self._username = username
|
||||
self._title = title
|
||||
|
||||
self._streaming_enabled = True
|
||||
self._sent_text = ""
|
||||
self._message_response = None
|
||||
self._msg_start_offset = 0
|
||||
self._pending_tool_stats = {}
|
||||
|
||||
# 检查渠道是否支持消息编辑,不支持则仅收集 token 到 buffer,不实时推送
|
||||
if not self._can_stream():
|
||||
logger.debug(f"渠道 {channel} 不支持消息编辑,仅启用 buffer 收集模式")
|
||||
return
|
||||
|
||||
# 从渠道能力中获取单条消息最大长度
|
||||
try:
|
||||
channel_enum = MessageChannel(self._channel)
|
||||
self._max_message_length = ChannelCapabilityManager.get_max_message_length(
|
||||
channel_enum
|
||||
)
|
||||
except (ValueError, KeyError):
|
||||
self._max_message_length = 0
|
||||
|
||||
# 启动异步定时刷新任务
|
||||
self._flush_task = asyncio.create_task(self._flush_loop())
|
||||
logger.debug("流式输出已启动")
|
||||
|
||||
async def stop_streaming(self) -> Tuple[bool, str]:
|
||||
"""
|
||||
停止流式输出。执行最后一次刷新确保所有内容都已发送。
|
||||
:return: (all_sent, final_text)
|
||||
all_sent: 是否已经通过流式编辑将最终完整内容发送给了用户
|
||||
(True 表示调用方无需再额外发送消息)
|
||||
final_text: 流式发送的完整文本内容(用于调用方保存消息记录)
|
||||
"""
|
||||
if not self._streaming_enabled:
|
||||
return False, ""
|
||||
|
||||
self._streaming_enabled = False
|
||||
|
||||
# 取消定时任务
|
||||
await self._cancel_flush_task()
|
||||
|
||||
# 将未落地的工具统计补入缓冲区,避免流式结束时丢失这段执行信息
|
||||
self.flush_pending_tool_summary()
|
||||
|
||||
# 执行最后一次刷新
|
||||
await self._flush()
|
||||
|
||||
# 检查是否所有缓冲内容都已发送
|
||||
with self._lock:
|
||||
# 当前消息的文本 = buffer 中从 _msg_start_offset 开始的部分
|
||||
current_msg_text = self._buffer[self._msg_start_offset:]
|
||||
all_sent = (
|
||||
self._message_response is not None
|
||||
and self._sent_text
|
||||
and current_msg_text == self._sent_text
|
||||
)
|
||||
# 保留最终文本用于返回(返回完整 buffer 内容,包含所有分段消息)
|
||||
final_text = self._buffer if all_sent else ""
|
||||
# 重置状态
|
||||
self._sent_text = ""
|
||||
self._message_response = None
|
||||
self._msg_start_offset = 0
|
||||
self._pending_tool_stats = {}
|
||||
if all_sent:
|
||||
# 所有内容已通过流式发送,清空缓冲区
|
||||
self._buffer = ""
|
||||
return all_sent, final_text
|
||||
|
||||
def record_tool_call(
|
||||
self,
|
||||
tool_name: str,
|
||||
tool_message: Optional[str] = None,
|
||||
tool_kwargs: Optional[dict[str, Any]] = None,
|
||||
):
|
||||
"""
|
||||
记录一次工具调用,供非啰嗦模式下延迟汇总输出。
|
||||
"""
|
||||
category, target = self._classify_tool_call(
|
||||
tool_name=tool_name,
|
||||
tool_message=tool_message,
|
||||
tool_kwargs=tool_kwargs or {},
|
||||
)
|
||||
with self._lock:
|
||||
bucket = self._pending_tool_stats.setdefault(
|
||||
category,
|
||||
{
|
||||
"count": 0,
|
||||
"targets": set(),
|
||||
},
|
||||
)
|
||||
bucket["count"] += 1
|
||||
if target:
|
||||
bucket["targets"].add(str(target))
|
||||
|
||||
def flush_pending_tool_summary(self) -> str:
|
||||
"""
|
||||
将待输出的工具统计摘要补入缓冲区,并返回本次新增的摘要文本。
|
||||
"""
|
||||
with self._lock:
|
||||
summary = self._consume_pending_tool_summary_locked()
|
||||
if summary:
|
||||
self._buffer += summary
|
||||
return summary
|
||||
|
||||
@staticmethod
|
||||
def _classify_tool_call(
|
||||
tool_name: str,
|
||||
tool_message: Optional[str],
|
||||
tool_kwargs: dict[str, Any],
|
||||
) -> tuple[str, Optional[str]]:
|
||||
tool_name = (tool_name or "").strip().lower()
|
||||
tool_message = (tool_message or "").strip()
|
||||
tool_message_lower = tool_message.lower()
|
||||
|
||||
if tool_name == "read_file":
|
||||
return "file_read", tool_kwargs.get("file_path")
|
||||
if tool_name in {"write_file", "edit_file"}:
|
||||
return "file_write", tool_kwargs.get("file_path")
|
||||
if tool_name in {"list_directory", "query_directory_settings"}:
|
||||
return "directory", tool_kwargs.get("path")
|
||||
if tool_name == "browse_webpage":
|
||||
return (
|
||||
"web_browse",
|
||||
tool_kwargs.get("url")
|
||||
or tool_kwargs.get("target_url")
|
||||
or tool_kwargs.get("path"),
|
||||
)
|
||||
if tool_name == "execute_command":
|
||||
return "command", tool_kwargs.get("command")
|
||||
if tool_name == "ask_user_choice":
|
||||
return "interaction", tool_kwargs.get("message")
|
||||
if tool_name.startswith("search_") or tool_name in {"get_search_results"}:
|
||||
return (
|
||||
"search",
|
||||
tool_kwargs.get("query")
|
||||
or tool_kwargs.get("title")
|
||||
or tool_kwargs.get("keyword"),
|
||||
)
|
||||
if tool_name.startswith("query_") or tool_name.startswith("list_") or tool_name.startswith("get_"):
|
||||
return "data_query", None
|
||||
if tool_name.startswith(("add_", "update_", "delete_", "modify_", "run_")):
|
||||
return "action", None
|
||||
if tool_name in {
|
||||
"recognize_media",
|
||||
"scrape_metadata",
|
||||
"transfer_file",
|
||||
"test_site",
|
||||
"send_message",
|
||||
"send_local_file",
|
||||
"send_voice_message",
|
||||
}:
|
||||
return "action", None
|
||||
|
||||
if "读取文件" in tool_message or "read file" in tool_message_lower:
|
||||
return "file_read", tool_kwargs.get("file_path")
|
||||
if (
|
||||
"写入文件" in tool_message
|
||||
or "编辑文件" in tool_message
|
||||
or "write file" in tool_message_lower
|
||||
or "edit file" in tool_message_lower
|
||||
):
|
||||
return "file_write", tool_kwargs.get("file_path")
|
||||
if "目录" in tool_message or "directory" in tool_message_lower:
|
||||
return "directory", tool_kwargs.get("path")
|
||||
if "搜索" in tool_message or "search" in tool_message_lower:
|
||||
return (
|
||||
"search",
|
||||
tool_kwargs.get("query")
|
||||
or tool_kwargs.get("title")
|
||||
or tool_kwargs.get("keyword"),
|
||||
)
|
||||
if "网页" in tool_message or "browser" in tool_message_lower or "webpage" in tool_message_lower:
|
||||
return "web_browse", tool_kwargs.get("url")
|
||||
if "命令" in tool_message or "command" in tool_message_lower:
|
||||
return "command", tool_kwargs.get("command")
|
||||
|
||||
return "tool", None
|
||||
|
||||
def _consume_pending_tool_summary_locked(self) -> str:
|
||||
if not self._pending_tool_stats:
|
||||
return ""
|
||||
|
||||
parts = []
|
||||
for category, bucket in self._pending_tool_stats.items():
|
||||
value = bucket["count"]
|
||||
if category in {"file_read", "file_write", "directory", "web_browse"} and bucket["targets"]:
|
||||
value = len(bucket["targets"])
|
||||
part = self._format_tool_stat(category, value)
|
||||
if part:
|
||||
parts.append(part)
|
||||
|
||||
self._pending_tool_stats = {}
|
||||
if not parts:
|
||||
return ""
|
||||
|
||||
summary = f"({','.join(parts)})"
|
||||
visible_buffer = self._buffer.rstrip(" \t")
|
||||
last_char = visible_buffer[-1:] if visible_buffer.strip() else ""
|
||||
prefix = ""
|
||||
if self._buffer and last_char != "\n":
|
||||
prefix = "\n\n"
|
||||
return f"{prefix}{summary}\n\n"
|
||||
|
||||
@staticmethod
|
||||
def _format_tool_stat(category: str, count: int) -> str:
|
||||
if count <= 0:
|
||||
return ""
|
||||
|
||||
if category == "search":
|
||||
return f"执行了 {count} 次搜索"
|
||||
if category == "file_read":
|
||||
return f"读取了 {count} 个文件"
|
||||
if category == "file_write":
|
||||
return f"修改了 {count} 个文件"
|
||||
if category == "directory":
|
||||
return f"查看了 {count} 个目录"
|
||||
if category == "web_browse":
|
||||
return f"浏览了 {count} 个网页"
|
||||
if category == "command":
|
||||
return f"执行了 {count} 条命令"
|
||||
if category == "data_query":
|
||||
return f"查询了 {count} 次数据"
|
||||
if category == "action":
|
||||
return f"执行了 {count} 次操作"
|
||||
if category == "interaction":
|
||||
return f"发起了 {count} 次交互"
|
||||
return f"调用了 {count} 次工具"
|
||||
|
||||
def _can_stream(self) -> bool:
|
||||
"""
|
||||
检查当前渠道是否支持流式输出(消息编辑)
|
||||
"""
|
||||
if not self._channel:
|
||||
return False
|
||||
try:
|
||||
channel_enum = MessageChannel(self._channel)
|
||||
return ChannelCapabilityManager.supports_capability(
|
||||
channel_enum, ChannelCapability.MESSAGE_EDITING
|
||||
)
|
||||
except (ValueError, KeyError):
|
||||
return False
|
||||
|
||||
async def _flush_loop(self):
|
||||
"""
|
||||
定时刷新循环,定期将缓冲区内容发送/编辑到用户
|
||||
"""
|
||||
try:
|
||||
while self._streaming_enabled:
|
||||
await asyncio.sleep(self.FLUSH_INTERVAL)
|
||||
if self._streaming_enabled:
|
||||
await self._flush()
|
||||
except asyncio.CancelledError:
|
||||
pass
|
||||
except Exception as e:
|
||||
logger.error(f"流式刷新异常: {e}")
|
||||
|
||||
async def _cancel_flush_task(self):
|
||||
"""
|
||||
取消当前的定时刷新任务
|
||||
"""
|
||||
if self._flush_task and not self._flush_task.done():
|
||||
self._flush_task.cancel()
|
||||
try:
|
||||
await self._flush_task
|
||||
except asyncio.CancelledError:
|
||||
pass
|
||||
self._flush_task = None
|
||||
|
||||
async def _flush(self):
|
||||
"""
|
||||
将当前缓冲区内容刷新到用户消息
|
||||
- 如果还没有发送过消息,先发送一条新消息并记录message_id
|
||||
- 如果已经发送过消息,编辑该消息为最新的完整内容
|
||||
- 如果当前消息内容超过长度限制,冻结当前消息并发送新消息继续输出
|
||||
"""
|
||||
with self._lock:
|
||||
# 当前消息的文本 = buffer 中从 _msg_start_offset 开始的部分
|
||||
current_text = self._buffer[self._msg_start_offset:]
|
||||
if not current_text or current_text == self._sent_text:
|
||||
# 没有新内容需要刷新
|
||||
return
|
||||
if (
|
||||
(not self._channel or not self._source)
|
||||
and not self._allow_dispatch_without_context
|
||||
):
|
||||
logger.debug("流式输出缺少渠道上下文,当前模式禁止外发消息")
|
||||
return
|
||||
|
||||
chain = _StreamChain()
|
||||
|
||||
try:
|
||||
if self._message_response is None:
|
||||
# 第一次发送:发送新消息并获取 message_id
|
||||
response = await run_in_threadpool(
|
||||
chain.send_direct_message,
|
||||
Notification(
|
||||
channel=self._channel,
|
||||
source=self._source,
|
||||
userid=self._user_id,
|
||||
username=self._username,
|
||||
title=self._title,
|
||||
text=current_text,
|
||||
),
|
||||
)
|
||||
if response and response.success and response.message_id:
|
||||
self._message_response = response
|
||||
with self._lock:
|
||||
self._sent_text = current_text
|
||||
logger.debug(
|
||||
f"流式输出初始消息已发送: message_id={response.message_id}"
|
||||
)
|
||||
else:
|
||||
logger.debug(
|
||||
"流式输出初始消息发送失败或未返回message_id,降级为非流式输出"
|
||||
)
|
||||
self._streaming_enabled = False
|
||||
else:
|
||||
# 检查当前消息内容是否超过长度限制
|
||||
if (
|
||||
self._max_message_length
|
||||
and len(current_text) > self._max_message_length
|
||||
):
|
||||
# 消息过长,冻结当前消息(保持最后一次成功编辑的内容)
|
||||
# 将 offset 移动到已发送文本之后,开启新消息
|
||||
logger.debug(
|
||||
f"流式消息长度 {len(current_text)} 超过限制 {self._max_message_length},启用新消息"
|
||||
)
|
||||
with self._lock:
|
||||
self._msg_start_offset += len(self._sent_text)
|
||||
current_text = self._buffer[self._msg_start_offset:]
|
||||
self._message_response = None
|
||||
self._sent_text = ""
|
||||
|
||||
# 如果偏移后还有新内容,立即发送为新消息
|
||||
if current_text:
|
||||
response = await run_in_threadpool(
|
||||
chain.send_direct_message,
|
||||
Notification(
|
||||
channel=self._channel,
|
||||
source=self._source,
|
||||
userid=self._user_id,
|
||||
username=self._username,
|
||||
title=self._title,
|
||||
text=current_text,
|
||||
),
|
||||
)
|
||||
if response and response.success and response.message_id:
|
||||
self._message_response = response
|
||||
with self._lock:
|
||||
self._sent_text = current_text
|
||||
logger.debug(
|
||||
f"流式输出新消息已发送: message_id={response.message_id}"
|
||||
)
|
||||
else:
|
||||
logger.debug("流式输出新消息发送失败,降级为非流式输出")
|
||||
self._streaming_enabled = False
|
||||
else:
|
||||
# 后续更新:编辑已有消息
|
||||
try:
|
||||
channel_enum = MessageChannel(self._channel)
|
||||
except (ValueError, KeyError):
|
||||
return
|
||||
|
||||
success = await run_in_threadpool(
|
||||
chain.edit_message,
|
||||
channel=channel_enum,
|
||||
source=self._message_response.source,
|
||||
message_id=self._message_response.message_id,
|
||||
chat_id=self._message_response.chat_id,
|
||||
text=current_text,
|
||||
title=self._title,
|
||||
)
|
||||
if success:
|
||||
with self._lock:
|
||||
self._sent_text = current_text
|
||||
else:
|
||||
logger.debug("流式输出消息编辑失败")
|
||||
except Exception as e:
|
||||
logger.error(f"流式输出刷新失败: {e}")
|
||||
|
||||
@property
|
||||
def is_streaming(self) -> bool:
|
||||
"""
|
||||
是否正在流式输出
|
||||
"""
|
||||
return self._streaming_enabled
|
||||
|
||||
@property
|
||||
def is_auto_flushing(self) -> bool:
|
||||
"""
|
||||
是否正在定时刷新(渠道支持消息编辑时自动推送 buffer 内容)
|
||||
"""
|
||||
return self._flush_task is not None
|
||||
|
||||
@property
|
||||
def has_sent_message(self) -> bool:
|
||||
"""
|
||||
是否已经通过流式输出发送过消息(当前轮次)
|
||||
"""
|
||||
return self._message_response is not None
|
||||
|
||||
@property
|
||||
def last_buffer_char(self) -> str:
|
||||
"""
|
||||
返回当前缓冲区最后一个字符;缓冲区为空时返回空字符串。
|
||||
"""
|
||||
with self._lock:
|
||||
return self._buffer[-1:] if self._buffer else ""
|
||||
19
app/agent/defaults/CURRENT_PERSONA.md
Normal file
19
app/agent/defaults/CURRENT_PERSONA.md
Normal file
@@ -0,0 +1,19 @@
|
||||
---
|
||||
version: 3
|
||||
active_persona: default
|
||||
extra_context_files: []
|
||||
deprecated_phrases: []
|
||||
---
|
||||
# CURRENT_PERSONA
|
||||
|
||||
当前激活人格:`default`
|
||||
|
||||
运行时加载顺序固定如下:
|
||||
|
||||
1. 核心系统提示词(程序内置,不可运行时覆盖)
|
||||
2. `personas/<active_persona>/PERSONA.md`
|
||||
3. `extra_context_files`
|
||||
4. `memory/*.md`
|
||||
5. `activity/*.md`
|
||||
|
||||
`memory` 中的长期偏好可以细化回复方式,但不应覆盖系统核心身份、目标和安全边界。
|
||||
22
app/agent/defaults/personas/aloof/PERSONA.md
Normal file
22
app/agent/defaults/personas/aloof/PERSONA.md
Normal file
@@ -0,0 +1,22 @@
|
||||
---
|
||||
version: 1
|
||||
persona_id: aloof
|
||||
label: 高冷
|
||||
description: 冷静、克制、低温度,话少但不失礼。
|
||||
aliases:
|
||||
- 冷淡
|
||||
- 冷感
|
||||
- 冷艳
|
||||
---
|
||||
# PERSONA
|
||||
|
||||
- Tone: cool, distant, and composed.
|
||||
- Keep emotional temperature low and transitions short.
|
||||
- Be brief and efficient, but do not become rude or contemptuous.
|
||||
- Prefer understatement over enthusiasm.
|
||||
|
||||
## RESPONSE_FORMAT
|
||||
|
||||
- Lead with the answer or the action result.
|
||||
- Keep explanations minimal unless the user explicitly asks for detail.
|
||||
- Avoid extra reassurance, hype, or emotional softening.
|
||||
22
app/agent/defaults/personas/anime/PERSONA.md
Normal file
22
app/agent/defaults/personas/anime/PERSONA.md
Normal file
@@ -0,0 +1,22 @@
|
||||
---
|
||||
version: 1
|
||||
persona_id: anime
|
||||
label: 二次元
|
||||
description: 带一点 ACG 语感和戏剧化表达,但仍然以任务完成和清晰沟通为主。
|
||||
aliases:
|
||||
- 动漫风
|
||||
- ACG
|
||||
- 宅系
|
||||
---
|
||||
# PERSONA
|
||||
|
||||
- Tone: lively, stylized, and lightly dramatic, with a small amount of anime-flavored wording.
|
||||
- Keep the actual task handling grounded and practical; the style should stay mostly in phrasing.
|
||||
- You may occasionally use short ACG-like interjections, but do not flood the reply with memes, kaomoji, or niche jargon.
|
||||
- Stay readable first. If the task is serious, reduce the stylistic flavor automatically.
|
||||
|
||||
## RESPONSE_FORMAT
|
||||
|
||||
- Prefer short paragraphs or compact lists.
|
||||
- A light playful closing line is acceptable after the real result is already clear.
|
||||
- Do not let the style make operational instructions vague.
|
||||
22
app/agent/defaults/personas/catgirl/PERSONA.md
Normal file
22
app/agent/defaults/personas/catgirl/PERSONA.md
Normal file
@@ -0,0 +1,22 @@
|
||||
---
|
||||
version: 1
|
||||
persona_id: catgirl
|
||||
label: 猫娘
|
||||
description: 带一点猫系拟人风格,轻松可爱,但不过度角色扮演。
|
||||
aliases:
|
||||
- 猫猫
|
||||
- 喵系
|
||||
- 猫耳
|
||||
---
|
||||
# PERSONA
|
||||
|
||||
- Tone: playful, cat-like, and cute, with occasional feline wording.
|
||||
- You may occasionally use a light "喵" style suffix or cat metaphor, but only sparingly.
|
||||
- Do not turn the reply into full roleplay; task clarity remains the primary goal.
|
||||
- If the content is operational, keep the answer direct first and add only a thin layer of style.
|
||||
|
||||
## RESPONSE_FORMAT
|
||||
|
||||
- Keep answers compact and readable.
|
||||
- Use only a very small amount of repeated verbal tic.
|
||||
- The result or action status should always appear before any playful flourish.
|
||||
23
app/agent/defaults/personas/concise/PERSONA.md
Normal file
23
app/agent/defaults/personas/concise/PERSONA.md
Normal file
@@ -0,0 +1,23 @@
|
||||
---
|
||||
version: 1
|
||||
persona_id: concise
|
||||
label: 极简
|
||||
description: 更短、更硬朗,优先结论和动作,不主动展开背景解释。
|
||||
aliases:
|
||||
- 简洁
|
||||
- 干脆
|
||||
- 极简人格
|
||||
---
|
||||
# PERSONA
|
||||
|
||||
- Tone: terse, decisive, and highly compressed.
|
||||
- Prefer the shortest complete answer that still moves the task forward.
|
||||
- Default to one sentence when possible. Only use lists when they materially improve readability.
|
||||
- Avoid extra context, caveats, or teaching unless the user explicitly asks for explanation.
|
||||
- Keep transitions minimal and skip conversational softening.
|
||||
|
||||
## RESPONSE_FORMAT
|
||||
|
||||
- Lead with the conclusion or result.
|
||||
- For option lists, keep each item very short.
|
||||
- Do not repeat already-known context back to the user unless it is needed to disambiguate the action.
|
||||
22
app/agent/defaults/personas/cute/PERSONA.md
Normal file
22
app/agent/defaults/personas/cute/PERSONA.md
Normal file
@@ -0,0 +1,22 @@
|
||||
---
|
||||
version: 1
|
||||
persona_id: cute
|
||||
label: 可爱
|
||||
description: 语气更亲和、更柔软、更讨喜,但不做重度角色扮演。
|
||||
aliases:
|
||||
- 软萌
|
||||
- 甜系
|
||||
- 亲和
|
||||
---
|
||||
# PERSONA
|
||||
|
||||
- Tone: warm, cheerful, and gently cute.
|
||||
- Sound approachable and pleasant, but keep the answer concise and useful.
|
||||
- Avoid baby talk, excessive repetition, or exaggerated emotive punctuation.
|
||||
- If the user asks for directness, keep the cute flavor minimal.
|
||||
|
||||
## RESPONSE_FORMAT
|
||||
|
||||
- Prefer friendly short paragraphs.
|
||||
- For lists, keep each item short and easy to read.
|
||||
- When something fails, explain it gently but clearly.
|
||||
24
app/agent/defaults/personas/default/PERSONA.md
Normal file
24
app/agent/defaults/personas/default/PERSONA.md
Normal file
@@ -0,0 +1,24 @@
|
||||
---
|
||||
version: 1
|
||||
persona_id: default
|
||||
label: 默认
|
||||
description: 专业、克制、简洁,适合大多数日常媒体管理场景。
|
||||
aliases:
|
||||
- 专业
|
||||
- 默认人格
|
||||
---
|
||||
# PERSONA
|
||||
|
||||
- Tone: professional, concise, restrained.
|
||||
- Be direct. No unnecessary preamble, no repeating the user's words, no narrating internal reasoning.
|
||||
- Do not flatter the user, praise the question, or add emotional cushioning.
|
||||
- Do not use emojis, exclamation marks, cute language, or excessive apology.
|
||||
- Prefer short declarative sentences. Default to one or two short paragraphs; use lists only when they improve scanability.
|
||||
- Use Markdown for structured data. Use `inline code` for media titles and paths.
|
||||
|
||||
## RESPONSE_FORMAT
|
||||
|
||||
- Keep confirmations short.
|
||||
- For search or comparison results, prefer a brief list over a long paragraph.
|
||||
- Skip filler phrases like "Let me help you", "Here are the results", or "I found...".
|
||||
- When an error occurs, briefly state the blocker and the next best action.
|
||||
22
app/agent/defaults/personas/disdain/PERSONA.md
Normal file
22
app/agent/defaults/personas/disdain/PERSONA.md
Normal file
@@ -0,0 +1,22 @@
|
||||
---
|
||||
version: 1
|
||||
persona_id: disdain
|
||||
label: 不屑
|
||||
description: 带一点嫌弃感和轻微毒舌,但必须保持可控和不越界。
|
||||
aliases:
|
||||
- 嫌弃
|
||||
- 毒舌
|
||||
- 鄙视链
|
||||
---
|
||||
# PERSONA
|
||||
|
||||
- Tone: dry, skeptical, and faintly dismissive.
|
||||
- Mild sarcasm is acceptable, but it must stay controlled and should never turn into direct insult or humiliation.
|
||||
- Prioritize sharp phrasing and low patience, while still giving the user the actual answer.
|
||||
- If the task is sensitive or the user is clearly frustrated, reduce the bite automatically.
|
||||
|
||||
## RESPONSE_FORMAT
|
||||
|
||||
- Keep answers crisp and pointed.
|
||||
- Use short, cutting observations only when they improve the style without harming clarity.
|
||||
- Always include the concrete result, instruction, or blocker.
|
||||
22
app/agent/defaults/personas/guide/PERSONA.md
Normal file
22
app/agent/defaults/personas/guide/PERSONA.md
Normal file
@@ -0,0 +1,22 @@
|
||||
---
|
||||
version: 1
|
||||
persona_id: guide
|
||||
label: 说明型
|
||||
description: 在复杂问题上更愿意解释原因和步骤,但仍保持克制,不会无节制展开。
|
||||
aliases:
|
||||
- 讲解
|
||||
- 解释型
|
||||
- 教学
|
||||
---
|
||||
# PERSONA
|
||||
|
||||
- Tone: clear, structured, and mildly explanatory.
|
||||
- When the task is simple, stay concise. When the task is complex or the user asks why/how, provide a short explanation with visible structure.
|
||||
- Keep explanations practical and tied to the current decision, not generic theory.
|
||||
- Remain restrained: do not become chatty, cute, or overly warm.
|
||||
|
||||
## RESPONSE_FORMAT
|
||||
|
||||
- For non-trivial tasks, prefer short sections or a compact numbered list.
|
||||
- When describing tradeoffs, keep them concrete and action-oriented.
|
||||
- End with the actual outcome or next step, not a generic summary.
|
||||
23
app/agent/defaults/personas/moe/PERSONA.md
Normal file
23
app/agent/defaults/personas/moe/PERSONA.md
Normal file
@@ -0,0 +1,23 @@
|
||||
---
|
||||
version: 1
|
||||
persona_id: moe
|
||||
label: 萌系
|
||||
description: 更轻小说感、更元气、更可爱,但仍然保持边界和专业度。
|
||||
aliases:
|
||||
- 萝莉风
|
||||
- 轻小说风
|
||||
- 元气少女
|
||||
- 萌萌
|
||||
---
|
||||
# PERSONA
|
||||
|
||||
- Tone: soft, upbeat, cute, and lightly playful.
|
||||
- Keep the personality in wording only; do not imitate a child, emphasize age, or use any sexualized framing.
|
||||
- Use cute particles or soft wording sparingly so the answer still feels useful instead of noisy.
|
||||
- When the task is urgent or technical, reduce the fluff and keep the result clear.
|
||||
|
||||
## RESPONSE_FORMAT
|
||||
|
||||
- Prefer short, bright sentences.
|
||||
- A small amount of cute phrasing is acceptable, but the final answer must still be easy to scan.
|
||||
- Do not bury the actual conclusion under roleplay language.
|
||||
19
app/agent/llm/__init__.py
Normal file
19
app/agent/llm/__init__.py
Normal file
@@ -0,0 +1,19 @@
|
||||
"""Agent 内部使用的 LLM 适配层。"""
|
||||
|
||||
from app.agent.llm.helper import LLMHelper, LLMTestError, LLMTestTimeout
|
||||
from app.agent.llm.provider import (
|
||||
LLMProviderAuthError,
|
||||
LLMProviderError,
|
||||
LLMProviderManager,
|
||||
render_auth_result_html,
|
||||
)
|
||||
|
||||
__all__ = [
|
||||
"LLMHelper",
|
||||
"LLMProviderAuthError",
|
||||
"LLMProviderError",
|
||||
"LLMProviderManager",
|
||||
"LLMTestError",
|
||||
"LLMTestTimeout",
|
||||
"render_auth_result_html",
|
||||
]
|
||||
839
app/agent/llm/helper.py
Normal file
839
app/agent/llm/helper.py
Normal file
@@ -0,0 +1,839 @@
|
||||
"""LLM模型相关辅助功能"""
|
||||
|
||||
import asyncio
|
||||
import inspect
|
||||
import json
|
||||
import time
|
||||
from functools import wraps
|
||||
from typing import Any, List
|
||||
|
||||
from langchain_core.messages import AIMessage
|
||||
|
||||
from app.core.config import settings
|
||||
from app.log import logger
|
||||
|
||||
|
||||
class LLMTestError(RuntimeError):
|
||||
"""LLM 测试调用异常,附带请求耗时。"""
|
||||
|
||||
def __init__(self, message: str, duration_ms: int | None = None):
|
||||
super().__init__(message)
|
||||
self.duration_ms = duration_ms
|
||||
|
||||
|
||||
class LLMTestTimeout(TimeoutError):
|
||||
"""LLM 测试调用超时,附带请求耗时。"""
|
||||
|
||||
def __init__(self, message: str, duration_ms: int | None = None):
|
||||
super().__init__(message)
|
||||
self.duration_ms = duration_ms
|
||||
|
||||
|
||||
def _patch_gemini_thought_signature():
|
||||
"""
|
||||
修复 langchain-google-genai 中 Gemini 2.5 思考模型的 thought_signature 兼容问题。
|
||||
langchain-google-genai 的 _is_gemini_3_or_later() 仅检查 "gemini-3",
|
||||
导致 Gemini 2.5 思考模型(如 gemini-2.5-flash、gemini-2.5-pro)在工具调用时
|
||||
缺少 thought_signature 而报错 400。
|
||||
此补丁将检查范围扩展到 Gemini 2.5 模型。
|
||||
"""
|
||||
try:
|
||||
import langchain_google_genai.chat_models as _cm
|
||||
|
||||
# 仅在未修补时执行
|
||||
if getattr(_cm, "_thought_signature_patched", False):
|
||||
return
|
||||
|
||||
def _patched_is_gemini_3_or_later(model_name: str) -> bool:
|
||||
if not model_name:
|
||||
return False
|
||||
name = model_name.lower().replace("models/", "")
|
||||
# Gemini 2.5 思考模型也需要 thought_signature 支持
|
||||
return "gemini-3" in name or "gemini-2.5" in name
|
||||
|
||||
_cm._is_gemini_3_or_later = _patched_is_gemini_3_or_later
|
||||
_cm._thought_signature_patched = True
|
||||
logger.debug(
|
||||
"已修补 langchain-google-genai thought_signature 兼容性(覆盖 Gemini 2.5 模型)"
|
||||
)
|
||||
except Exception as e:
|
||||
logger.warning(f"修补 langchain-google-genai thought_signature 失败: {e}")
|
||||
|
||||
|
||||
def _get_httpx_proxy_key() -> str:
|
||||
"""
|
||||
获取当前 httpx 版本支持的代理参数名。
|
||||
httpx < 0.28 使用 "proxies"(复数),>= 0.28 使用 "proxy"(单数)。
|
||||
google-genai SDK 会静默过滤掉不在 httpx.Client.__init__ 签名中的参数,
|
||||
因此必须使用与当前 httpx 版本匹配的参数名。
|
||||
"""
|
||||
try:
|
||||
import httpx
|
||||
|
||||
params = inspect.signature(httpx.Client.__init__).parameters
|
||||
if "proxy" in params:
|
||||
return "proxy"
|
||||
return "proxies"
|
||||
except Exception as e:
|
||||
logger.warning(f"检测 httpx 代理参数失败,默认使用 'proxies':{e}")
|
||||
return "proxies"
|
||||
|
||||
|
||||
def _deepseek_thinking_toggle(extra_body: Any) -> bool | None:
|
||||
"""
|
||||
解析 DeepSeek extra_body 中显式传入的 thinking 开关。
|
||||
"""
|
||||
if not isinstance(extra_body, dict):
|
||||
return None
|
||||
|
||||
thinking = extra_body.get("thinking")
|
||||
if not isinstance(thinking, dict):
|
||||
return None
|
||||
|
||||
thinking_type = str(thinking.get("type") or "").strip().lower()
|
||||
if thinking_type == "enabled":
|
||||
return True
|
||||
if thinking_type == "disabled":
|
||||
return False
|
||||
return None
|
||||
|
||||
|
||||
def _is_deepseek_thinking_enabled(model_name: str | None, extra_body: Any) -> bool:
|
||||
"""
|
||||
判断本次 DeepSeek 调用是否处于 thinking mode。
|
||||
"""
|
||||
explicit_toggle = _deepseek_thinking_toggle(extra_body)
|
||||
if explicit_toggle is not None:
|
||||
return explicit_toggle
|
||||
|
||||
normalized_model_name = str(model_name or "").strip().lower()
|
||||
if normalized_model_name == "deepseek-reasoner":
|
||||
return True
|
||||
if normalized_model_name.startswith("deepseek-v4-"):
|
||||
# DeepSeek V4 默认启用 thinking mode,除非显式关闭。
|
||||
return True
|
||||
return False
|
||||
|
||||
|
||||
def _patch_deepseek_reasoning_content_support():
|
||||
"""
|
||||
修补 langchain-deepseek 在 tool-call 场景下遗漏 reasoning_content 回传的问题。
|
||||
|
||||
DeepSeek thinking mode 要求:若 assistant 历史消息包含 tool_calls,
|
||||
后续请求中必须带回该条消息的顶层 reasoning_content。
|
||||
某些 langchain-deepseek 版本虽然能从响应中拿到 reasoning_content,
|
||||
但不会在重放消息历史时写回请求载荷,导致 400。
|
||||
"""
|
||||
try:
|
||||
from langchain_deepseek import ChatDeepSeek
|
||||
except Exception as err:
|
||||
logger.debug(f"跳过 langchain-deepseek reasoning_content 修补:{err}")
|
||||
return
|
||||
|
||||
if getattr(ChatDeepSeek, "_moviepilot_reasoning_content_patched", False):
|
||||
return
|
||||
|
||||
original_get_request_payload = getattr(ChatDeepSeek, "_get_request_payload", None)
|
||||
if not callable(original_get_request_payload):
|
||||
logger.warning("langchain-deepseek 缺少 _get_request_payload,无法修补 reasoning_content")
|
||||
return
|
||||
|
||||
@wraps(original_get_request_payload)
|
||||
def _patched_get_request_payload(self, input_, *, stop=None, **kwargs):
|
||||
payload = original_get_request_payload(self, input_, stop=stop, **kwargs)
|
||||
|
||||
# Resolve original messages so we can extract reasoning_content from
|
||||
# additional_kwargs. The parent's payload builder does not propagate
|
||||
# this DeepSeek-specific field.
|
||||
messages = self._convert_input(input_).to_messages()
|
||||
|
||||
for i, message in enumerate(payload["messages"]):
|
||||
if message["role"] == "tool" and isinstance(message["content"], list):
|
||||
message["content"] = json.dumps(message["content"])
|
||||
elif message["role"] == "assistant":
|
||||
if isinstance(message["content"], list):
|
||||
# DeepSeek API expects assistant content to be a string,
|
||||
# not a list. Extract text blocks and join them, or use
|
||||
# empty string if none exist.
|
||||
text_parts = [
|
||||
block.get("text", "")
|
||||
for block in message["content"]
|
||||
if isinstance(block, dict) and block.get("type") == "text"
|
||||
]
|
||||
message["content"] = "".join(text_parts) if text_parts else ""
|
||||
|
||||
# DeepSeek reasoning models require every assistant message to
|
||||
# carry a reasoning_content field (even when empty). The value
|
||||
# is stored in AIMessage.additional_kwargs by
|
||||
# _create_chat_result(); re-inject it into the API payload.
|
||||
if (
|
||||
"reasoning_content" not in message
|
||||
and i < len(messages)
|
||||
and isinstance(messages[i], AIMessage)
|
||||
):
|
||||
message["reasoning_content"] = messages[i].additional_kwargs.get(
|
||||
"reasoning_content", ""
|
||||
)
|
||||
|
||||
return payload
|
||||
|
||||
ChatDeepSeek._get_request_payload = _patched_get_request_payload
|
||||
ChatDeepSeek._moviepilot_reasoning_content_patched = True
|
||||
logger.debug("已修补 langchain-deepseek thinking tool-call 的 reasoning_content 回传兼容性")
|
||||
|
||||
|
||||
def _patch_openai_responses_instructions_support():
|
||||
"""
|
||||
修补 langchain-openai 在使用 use_responses_api=True 时,
|
||||
提取 system 消息为顶层 instructions 字段。
|
||||
由于 Codex 等模型 (Responses API) 强依赖 instructions 字段,
|
||||
如果没有该字段会报 400 "Instructions are required"。
|
||||
"""
|
||||
try:
|
||||
from langchain_openai import ChatOpenAI
|
||||
except Exception as err:
|
||||
logger.debug(f"跳过 langchain-openai instructions 修补:{err}")
|
||||
return
|
||||
|
||||
if getattr(ChatOpenAI, "_moviepilot_responses_instructions_patched", False):
|
||||
return
|
||||
|
||||
original_get_request_payload = getattr(ChatOpenAI, "_get_request_payload", None)
|
||||
if not callable(original_get_request_payload):
|
||||
logger.warning("langchain-openai 缺少 _get_request_payload,无法修补 instructions")
|
||||
return
|
||||
|
||||
@wraps(original_get_request_payload)
|
||||
def _patched_get_request_payload(self, input_, *, stop=None, **kwargs):
|
||||
payload = original_get_request_payload(self, input_, stop=stop, **kwargs)
|
||||
|
||||
base_url = str(getattr(self, "openai_api_base", "") or "").lower()
|
||||
|
||||
# 处理 GitHub Copilot 端点兼容性
|
||||
if "githubcopilot.com" in base_url:
|
||||
payload.pop("stream_options", None)
|
||||
payload.pop("metadata", None)
|
||||
|
||||
# 处理 ChatGPT 官方 Responses API (Codex) 端点兼容性
|
||||
is_codex = "chatgpt.com/backend-api/codex" in base_url
|
||||
|
||||
if is_codex and (getattr(self, "use_responses_api", False) or "input" in payload):
|
||||
instructions = payload.get("instructions", "")
|
||||
inputs = payload.get("input", [])
|
||||
new_inputs = []
|
||||
|
||||
for msg in inputs:
|
||||
if isinstance(msg, dict) and msg.get("role") == "system":
|
||||
content = msg.get("content")
|
||||
if isinstance(content, str) and content.strip():
|
||||
if instructions:
|
||||
instructions += "\n\n" + content
|
||||
else:
|
||||
instructions = content
|
||||
else:
|
||||
new_inputs.append(msg)
|
||||
|
||||
payload["input"] = new_inputs
|
||||
payload["instructions"] = instructions or "You are a helpful assistant."
|
||||
payload["store"] = False
|
||||
|
||||
# Codex 端点不支持的部分常见补全参数,统一清理避免 400 报错
|
||||
unsupported_keys = [
|
||||
"presence_penalty", "frequency_penalty", "top_p", "n", "user",
|
||||
"stop", "metadata", "logit_bias", "logprobs", "top_logprobs",
|
||||
"stream_options", "temperature"
|
||||
]
|
||||
for key in unsupported_keys:
|
||||
payload.pop(key, None)
|
||||
|
||||
return payload
|
||||
|
||||
ChatOpenAI._get_request_payload = _patched_get_request_payload
|
||||
ChatOpenAI._moviepilot_responses_instructions_patched = True
|
||||
logger.debug("已修补 langchain-openai responses API 的 instructions 兼容性")
|
||||
|
||||
|
||||
class LLMHelper:
|
||||
"""LLM模型相关辅助功能"""
|
||||
|
||||
_SUPPORTED_THINKING_LEVELS = frozenset(
|
||||
{"off", "auto", "minimal", "low", "medium", "high", "max", "xhigh"}
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def _normalize_model_name(model_name: str | None) -> str:
|
||||
"""
|
||||
统一清理模型名称,便于按模型族做能力映射。
|
||||
"""
|
||||
return (model_name or "").strip().lower()
|
||||
|
||||
@classmethod
|
||||
def _normalize_deepseek_reasoning_effort(
|
||||
cls, thinking_level: str | None = None
|
||||
) -> str | None:
|
||||
"""
|
||||
DeepSeek 文档当前建议使用 high/max;兼容常见 effort 别名。
|
||||
"""
|
||||
if not thinking_level or thinking_level in {"off", "auto"}:
|
||||
return None
|
||||
|
||||
if thinking_level in {"minimal", "low", "medium", "high"}:
|
||||
return "high"
|
||||
if thinking_level in {"max", "xhigh"}:
|
||||
return "max"
|
||||
|
||||
logger.warning(f"忽略不支持的 DeepSeek reasoning_effort 配置: {thinking_level}")
|
||||
return None
|
||||
|
||||
@classmethod
|
||||
def _normalize_openai_reasoning_effort(
|
||||
cls, thinking_level: str | None = None
|
||||
) -> str | None:
|
||||
"""
|
||||
OpenAI reasoning_effort 支持更细粒度的 effort,统一做最近似映射。
|
||||
"""
|
||||
if not thinking_level or thinking_level == "auto":
|
||||
return None
|
||||
if thinking_level == "off":
|
||||
return "none"
|
||||
if thinking_level == "max":
|
||||
return "xhigh"
|
||||
return thinking_level
|
||||
|
||||
@classmethod
|
||||
def _build_google_thinking_kwargs(
|
||||
cls, model_name: str, thinking_level: str
|
||||
) -> dict[str, Any]:
|
||||
"""
|
||||
Gemini 3 使用 thinking_level;Gemini 2.5 使用 thinking_budget。
|
||||
"""
|
||||
if not model_name or thinking_level == "auto":
|
||||
return {}
|
||||
|
||||
if "gemini-2.5" in model_name:
|
||||
if thinking_level == "off":
|
||||
if "pro" in model_name:
|
||||
# Gemini 2.5 Pro 官方不支持完全关闭思考,回退到最小预算。
|
||||
return {
|
||||
"thinking_budget": 128,
|
||||
"include_thoughts": False,
|
||||
}
|
||||
return {
|
||||
"thinking_budget": 0,
|
||||
"include_thoughts": False,
|
||||
}
|
||||
|
||||
budget_map = {
|
||||
"minimal": 512,
|
||||
"low": 1024,
|
||||
"medium": 4096,
|
||||
"high": 8192,
|
||||
"max": 24576,
|
||||
"xhigh": 24576,
|
||||
}
|
||||
budget = budget_map.get(thinking_level)
|
||||
return (
|
||||
{
|
||||
"thinking_budget": budget,
|
||||
"include_thoughts": False,
|
||||
}
|
||||
if budget is not None
|
||||
else {}
|
||||
)
|
||||
|
||||
if "gemini-3" in model_name:
|
||||
level_map = {
|
||||
"off": "minimal",
|
||||
"minimal": "minimal",
|
||||
"low": "low",
|
||||
"medium": "medium",
|
||||
"high": "high",
|
||||
"max": "high",
|
||||
"xhigh": "high",
|
||||
}
|
||||
google_level = level_map.get(thinking_level)
|
||||
return (
|
||||
{
|
||||
"thinking_level": google_level,
|
||||
"include_thoughts": False,
|
||||
}
|
||||
if google_level
|
||||
else {}
|
||||
)
|
||||
|
||||
return {}
|
||||
|
||||
@classmethod
|
||||
def _build_kimi_thinking_kwargs(
|
||||
cls, model_name: str, thinking_level: str
|
||||
) -> dict[str, Any]:
|
||||
"""
|
||||
Kimi 当前公开文档仅支持思考开关,不支持显式深度调节。
|
||||
"""
|
||||
if model_name.startswith("kimi-k2-thinking"):
|
||||
return {}
|
||||
if thinking_level == "off":
|
||||
return {"extra_body": {"thinking": {"type": "disabled"}}}
|
||||
return {}
|
||||
|
||||
@classmethod
|
||||
def _build_thinking_kwargs(
|
||||
cls,
|
||||
provider: str,
|
||||
model: str | None,
|
||||
thinking_level: str | None = None
|
||||
) -> dict[str, Any]:
|
||||
"""
|
||||
按 provider/model 生成思考模式相关参数。
|
||||
|
||||
优先使用 LangChain/OpenAI SDK 已支持的原生字段;仅在 provider
|
||||
明确要求自定义请求体时,才回退到 extra_body。
|
||||
"""
|
||||
provider_name = (provider or "").strip().lower()
|
||||
model_name = cls._normalize_model_name(model)
|
||||
|
||||
if provider_name == "deepseek":
|
||||
if thinking_level == "off":
|
||||
return {"extra_body": {"thinking": {"type": "disabled"}}}
|
||||
if thinking_level == "auto":
|
||||
return {}
|
||||
|
||||
kwargs: dict[str, Any] = {"extra_body": {"thinking": {"type": "enabled"}}}
|
||||
deepseek_effort = cls._normalize_deepseek_reasoning_effort(
|
||||
thinking_level
|
||||
)
|
||||
if deepseek_effort:
|
||||
kwargs["reasoning_effort"] = deepseek_effort
|
||||
return kwargs
|
||||
|
||||
if model_name.startswith(("kimi-k2.5", "kimi-k2.6", "kimi-k2-thinking")):
|
||||
return cls._build_kimi_thinking_kwargs(model_name, thinking_level)
|
||||
|
||||
if not model_name:
|
||||
return {}
|
||||
|
||||
# OpenAI 原生推理模型优先走 LangChain 内置 reasoning_effort。
|
||||
if provider_name in {"openai", "chatgpt"} and model_name.startswith(
|
||||
("gpt-5", "o1", "o3", "o4")
|
||||
):
|
||||
openai_effort = cls._normalize_openai_reasoning_effort(
|
||||
thinking_level
|
||||
)
|
||||
return {"reasoning_effort": openai_effort} if openai_effort else {}
|
||||
|
||||
# Gemini 使用 google-genai / langchain-google-genai 内置思考控制参数。
|
||||
if provider_name == "google":
|
||||
return cls._build_google_thinking_kwargs(
|
||||
model_name, thinking_level
|
||||
)
|
||||
|
||||
return {}
|
||||
|
||||
@staticmethod
|
||||
def supports_image_input() -> bool:
|
||||
"""
|
||||
判断当前模型是否启用了图片输入能力。
|
||||
"""
|
||||
return bool(settings.LLM_SUPPORT_IMAGE_INPUT)
|
||||
|
||||
@staticmethod
|
||||
def _build_legacy_runtime(
|
||||
provider_name: str,
|
||||
model_name: str | None,
|
||||
api_key: str | None = None,
|
||||
base_url: str | None = None,
|
||||
) -> dict[str, Any]:
|
||||
"""
|
||||
在 provider 目录不可用时回退到旧的直接构造逻辑。
|
||||
|
||||
这主要用于单测 stub 环境以及极端的最小运行环境,正常生产路径仍优先
|
||||
走 `LLMProviderManager.resolve_runtime()`。
|
||||
"""
|
||||
api_key_value = api_key if api_key is not None else settings.LLM_API_KEY
|
||||
base_url_value = base_url if base_url is not None else settings.LLM_BASE_URL
|
||||
if not api_key_value:
|
||||
raise ValueError("未配置LLM API Key")
|
||||
|
||||
runtime_name = provider_name if provider_name in {"google", "deepseek"} else "openai_compatible"
|
||||
return {
|
||||
"provider_id": provider_name,
|
||||
"runtime": runtime_name,
|
||||
"model_id": model_name,
|
||||
"api_key": api_key_value,
|
||||
"base_url": base_url_value,
|
||||
"default_headers": None,
|
||||
"use_responses_api": None,
|
||||
"model_record": None,
|
||||
"model_metadata": None,
|
||||
}
|
||||
|
||||
@classmethod
|
||||
def _resolve_thinking_level(
|
||||
cls,
|
||||
thinking_level: str | None = None,
|
||||
) -> str | None:
|
||||
"""
|
||||
统一兼容新旧 thinking 参数。
|
||||
"""
|
||||
|
||||
def _normalize(value: str | None) -> str | None:
|
||||
normalized = str(value or "").strip().lower()
|
||||
if not normalized:
|
||||
return None
|
||||
alias_map = {
|
||||
"none": "off",
|
||||
"disabled": "off",
|
||||
"disable": "off",
|
||||
"enabled": "auto",
|
||||
"enable": "auto",
|
||||
"default": "auto",
|
||||
"dynamic": "auto",
|
||||
}
|
||||
normalized = alias_map.get(normalized, normalized)
|
||||
if normalized in cls._SUPPORTED_THINKING_LEVELS:
|
||||
return normalized
|
||||
logger.warning(f"忽略不支持的思考级别: {value}")
|
||||
return None
|
||||
|
||||
normalized_thinking_level = _normalize(thinking_level)
|
||||
if normalized_thinking_level:
|
||||
return normalized_thinking_level
|
||||
|
||||
return "off"
|
||||
|
||||
@classmethod
|
||||
async def get_llm(
|
||||
cls,
|
||||
streaming: bool = False,
|
||||
provider: str | None = None,
|
||||
model: str | None = None,
|
||||
thinking_level: str | None = None,
|
||||
api_key: str | None = settings.LLM_API_KEY,
|
||||
base_url: str | None = settings.LLM_BASE_URL,
|
||||
):
|
||||
"""
|
||||
获取LLM实例
|
||||
:param streaming: 是否启用流式输出
|
||||
:param provider: LLM提供商,默认为配置项LLM_PROVIDER
|
||||
:param model: 模型名称,默认为配置项LLM_MODEL
|
||||
:param thinking_level: 思考模式级别,默认为 None(即自动判断
|
||||
是否启用思考模式)。支持的级别包括 "off"(关闭)、"auto"(自动)、"minimal"、"low"、"medium"、"high"、"max"/"xhigh"(最大)。
|
||||
不同模型对思考模式的支持和表现不同,具体映射关系请
|
||||
参考代码实现。对于不支持思考模式的模型,该参数将被忽略。
|
||||
:param api_key: API Key,默认为配置项LLM_API_KEY。对于某些提供商(如 DeepSeek),可能需要同时提供 base_url。
|
||||
:param base_url: API Base URL,默认为配置项LLM_BASE_URL。
|
||||
:return: LLM实例
|
||||
"""
|
||||
provider_name = str(provider if provider is not None else settings.LLM_PROVIDER).lower()
|
||||
model_name = model if model is not None else settings.LLM_MODEL
|
||||
normalized_thinking_level = cls._resolve_thinking_level(
|
||||
thinking_level=thinking_level,
|
||||
)
|
||||
try:
|
||||
# 延迟导入,避免单测在最小 stub 环境下 import `llm.py` 时被 provider
|
||||
# 目录依赖链拖住。
|
||||
from app.agent.llm.provider import LLMProviderManager
|
||||
|
||||
runtime = await LLMProviderManager().resolve_runtime(
|
||||
provider_id=provider_name,
|
||||
model=model_name,
|
||||
api_key=api_key,
|
||||
base_url=base_url,
|
||||
)
|
||||
except Exception as err:
|
||||
logger.debug(f"LLM provider 目录不可用,回退到旧运行时逻辑: {err}")
|
||||
runtime = cls._build_legacy_runtime(
|
||||
provider_name=provider_name,
|
||||
model_name=model_name,
|
||||
api_key=api_key,
|
||||
base_url=base_url,
|
||||
)
|
||||
model_name = runtime.get("model_id") or model_name
|
||||
thinking_kwargs = cls._build_thinking_kwargs(
|
||||
provider=provider_name,
|
||||
model=model_name,
|
||||
thinking_level=normalized_thinking_level,
|
||||
)
|
||||
|
||||
if runtime["runtime"] == "google":
|
||||
# 修补 Gemini 2.5 思考模型的 thought_signature 兼容性
|
||||
_patch_gemini_thought_signature()
|
||||
|
||||
# 统一使用 langchain-google-genai 原生接口
|
||||
# 不使用 OpenAI 兼容端点,因其不支持 Gemini 思考模型的 thought_signature,
|
||||
# 会导致工具调用时报错 400
|
||||
from langchain_google_genai import ChatGoogleGenerativeAI
|
||||
|
||||
client_args = None
|
||||
if settings.PROXY_HOST:
|
||||
proxy_key = _get_httpx_proxy_key()
|
||||
client_args = {proxy_key: settings.PROXY_HOST}
|
||||
|
||||
model = ChatGoogleGenerativeAI(
|
||||
model=model_name,
|
||||
api_key=runtime["api_key"],
|
||||
retries=3,
|
||||
temperature=settings.LLM_TEMPERATURE,
|
||||
streaming=streaming,
|
||||
client_args=client_args,
|
||||
**thinking_kwargs,
|
||||
)
|
||||
elif runtime["runtime"] == "deepseek":
|
||||
from langchain_deepseek import ChatDeepSeek
|
||||
|
||||
_patch_deepseek_reasoning_content_support()
|
||||
model = ChatDeepSeek(
|
||||
model=model_name,
|
||||
api_key=runtime["api_key"],
|
||||
api_base=runtime["base_url"],
|
||||
max_retries=3,
|
||||
temperature=settings.LLM_TEMPERATURE,
|
||||
streaming=streaming,
|
||||
stream_usage=True,
|
||||
**thinking_kwargs,
|
||||
)
|
||||
elif runtime["runtime"] in {"anthropic_compatible", "copilot_anthropic"}:
|
||||
from langchain_anthropic import ChatAnthropic
|
||||
|
||||
model = ChatAnthropic(
|
||||
model=model_name,
|
||||
api_key=runtime["api_key"],
|
||||
base_url=runtime["base_url"],
|
||||
max_retries=3,
|
||||
temperature=settings.LLM_TEMPERATURE,
|
||||
streaming=streaming,
|
||||
stream_usage=True,
|
||||
anthropic_proxy=settings.PROXY_HOST,
|
||||
default_headers=runtime.get("default_headers"),
|
||||
**thinking_kwargs,
|
||||
)
|
||||
else:
|
||||
from langchain_openai import ChatOpenAI
|
||||
|
||||
_patch_openai_responses_instructions_support()
|
||||
|
||||
# ChatGPT Codex 端点强制要求 stream: True
|
||||
if runtime.get("use_responses_api") and "chatgpt.com/backend-api/codex" in str(runtime.get("base_url") or ""):
|
||||
streaming = True
|
||||
|
||||
model = ChatOpenAI(
|
||||
model=model_name,
|
||||
api_key=runtime["api_key"],
|
||||
max_retries=3,
|
||||
base_url=runtime.get("base_url"),
|
||||
temperature=settings.LLM_TEMPERATURE,
|
||||
streaming=streaming,
|
||||
stream_usage=True,
|
||||
openai_proxy=settings.PROXY_HOST,
|
||||
default_headers=runtime.get("default_headers"),
|
||||
use_responses_api=runtime.get("use_responses_api"),
|
||||
**thinking_kwargs,
|
||||
)
|
||||
|
||||
# 优先使用 provider / models.dev 目录中的上下文上限,减少用户手填成本。
|
||||
model_profile = getattr(model, "profile", None)
|
||||
if model_profile:
|
||||
logger.debug(f"使用LLM模型: {model.model},Profile: {model.profile}")
|
||||
else:
|
||||
model_record = runtime.get("model_record") or {}
|
||||
model_metadata = runtime.get("model_metadata") or {}
|
||||
metadata_limit = model_metadata.get("limit") or {}
|
||||
max_input_tokens = (
|
||||
model_record.get("input_tokens")
|
||||
or model_record.get("context_tokens")
|
||||
or metadata_limit.get("input")
|
||||
or metadata_limit.get("context")
|
||||
or settings.LLM_MAX_CONTEXT_TOKENS * 1000
|
||||
)
|
||||
model.profile = {
|
||||
"max_input_tokens": int(max_input_tokens),
|
||||
}
|
||||
|
||||
return model
|
||||
|
||||
@staticmethod
|
||||
def _extract_text_content(content) -> str:
|
||||
"""
|
||||
从响应内容中提取纯文本,仅保留真实文本块。
|
||||
"""
|
||||
if content is None:
|
||||
return ""
|
||||
if isinstance(content, str):
|
||||
return content
|
||||
if isinstance(content, list):
|
||||
text_parts = []
|
||||
for block in content:
|
||||
if isinstance(block, str):
|
||||
text_parts.append(block)
|
||||
continue
|
||||
|
||||
if isinstance(block, dict) or hasattr(block, "get"):
|
||||
block_type = block.get("type")
|
||||
if block.get("thought") or block_type in (
|
||||
"thinking",
|
||||
"reasoning_content",
|
||||
"reasoning",
|
||||
"thought",
|
||||
):
|
||||
continue
|
||||
if block_type == "text":
|
||||
text_parts.append(block.get("text", ""))
|
||||
continue
|
||||
if not block_type and isinstance(block.get("text"), str):
|
||||
text_parts.append(block.get("text", ""))
|
||||
return "".join(text_parts)
|
||||
if isinstance(content, dict) or hasattr(content, "get"):
|
||||
if content.get("thought"):
|
||||
return ""
|
||||
if content.get("type") == "text":
|
||||
return content.get("text", "")
|
||||
if not content.get("type") and isinstance(content.get("text"), str):
|
||||
return content.get("text", "")
|
||||
return ""
|
||||
|
||||
@staticmethod
|
||||
async def test_current_settings(
|
||||
prompt: str = "请只回复 OK",
|
||||
timeout: int = 20,
|
||||
provider: str | None = None,
|
||||
model: str | None = None,
|
||||
thinking_level: str | None = None,
|
||||
api_key: str | None = None,
|
||||
base_url: str | None = None,
|
||||
) -> dict:
|
||||
"""
|
||||
使用当前已保存配置执行一次最小 LLM 调用。
|
||||
"""
|
||||
provider_name = provider if provider is not None else settings.LLM_PROVIDER
|
||||
model_name = model if model is not None else settings.LLM_MODEL
|
||||
start = time.perf_counter()
|
||||
llm = await LLMHelper.get_llm(
|
||||
streaming=False,
|
||||
provider=provider_name,
|
||||
model=model_name,
|
||||
thinking_level=thinking_level,
|
||||
api_key=api_key,
|
||||
base_url=base_url,
|
||||
)
|
||||
try:
|
||||
response = await asyncio.wait_for(llm.ainvoke(prompt), timeout=timeout)
|
||||
except TimeoutError as err:
|
||||
duration_ms = round((time.perf_counter() - start) * 1000)
|
||||
raise LLMTestTimeout("LLM 调用超时", duration_ms=duration_ms) from err
|
||||
except Exception as err:
|
||||
duration_ms = round((time.perf_counter() - start) * 1000)
|
||||
raise LLMTestError(str(err), duration_ms=duration_ms) from err
|
||||
|
||||
reply_text = LLMHelper._extract_text_content(
|
||||
getattr(response, "content", response)
|
||||
).strip()
|
||||
duration_ms = round((time.perf_counter() - start) * 1000)
|
||||
|
||||
data = {
|
||||
"provider": provider_name,
|
||||
"model": model_name,
|
||||
"duration_ms": duration_ms,
|
||||
}
|
||||
if reply_text:
|
||||
data["reply_preview"] = reply_text[:120]
|
||||
return data
|
||||
|
||||
async def get_models(
|
||||
self,
|
||||
provider: str,
|
||||
api_key: str | None = None,
|
||||
base_url: str | None = None,
|
||||
force_refresh: bool = False,
|
||||
) -> List[dict[str, Any]]:
|
||||
"""
|
||||
获取模型列表。
|
||||
|
||||
返回值会带上 context/supports_reasoning 等元数据,供前端直接渲染并自动
|
||||
回填上下文大小。
|
||||
"""
|
||||
logger.info(f"获取 {provider} 模型列表...")
|
||||
try:
|
||||
from app.agent.llm.provider import LLMProviderManager
|
||||
|
||||
return await LLMProviderManager().list_models(
|
||||
provider_id=provider,
|
||||
api_key=api_key,
|
||||
base_url=base_url,
|
||||
force_refresh=force_refresh,
|
||||
)
|
||||
except Exception as err:
|
||||
logger.debug(f"LLM provider 目录不可用,回退旧模型列表逻辑: {err}")
|
||||
if provider == "google":
|
||||
return [
|
||||
{"id": model_id, "name": model_id}
|
||||
for model_id in await self._get_google_models(api_key or "")
|
||||
]
|
||||
model_list_base_url = base_url
|
||||
try:
|
||||
from app.agent.llm.provider import LLMProviderManager
|
||||
|
||||
model_list_base_url = (
|
||||
LLMProviderManager().resolve_model_list_base_url(
|
||||
provider_id=provider,
|
||||
base_url=base_url,
|
||||
)
|
||||
or base_url
|
||||
)
|
||||
except Exception:
|
||||
model_list_base_url = base_url
|
||||
return [
|
||||
{"id": model_id, "name": model_id}
|
||||
for model_id in await self._get_openai_compatible_models(
|
||||
provider,
|
||||
api_key or "",
|
||||
model_list_base_url,
|
||||
)
|
||||
]
|
||||
|
||||
@staticmethod
|
||||
async def _get_google_models(api_key: str) -> List[str]:
|
||||
"""获取Google模型列表(使用 google-genai SDK v1)"""
|
||||
try:
|
||||
from google import genai
|
||||
from google.genai.types import HttpOptions
|
||||
|
||||
http_options = None
|
||||
if settings.PROXY_HOST:
|
||||
proxy_key = _get_httpx_proxy_key()
|
||||
proxy_args = {proxy_key: settings.PROXY_HOST}
|
||||
http_options = HttpOptions(
|
||||
client_args=proxy_args,
|
||||
async_client_args=proxy_args,
|
||||
)
|
||||
|
||||
client = genai.Client(api_key=api_key, http_options=http_options)
|
||||
models = await client.aio.models.list()
|
||||
result = [
|
||||
m.name
|
||||
for m in models.page
|
||||
if m.supported_actions and "generateContent" in m.supported_actions
|
||||
]
|
||||
await client.aio.aclose()
|
||||
return result
|
||||
except Exception as e:
|
||||
logger.error(f"获取Google模型列表失败:{e}")
|
||||
raise e
|
||||
|
||||
@staticmethod
|
||||
async def _get_openai_compatible_models(
|
||||
provider: str, api_key: str, base_url: str = None
|
||||
) -> List[str]:
|
||||
"""获取OpenAI兼容模型列表"""
|
||||
try:
|
||||
from openai import AsyncOpenAI
|
||||
|
||||
if provider == "deepseek":
|
||||
base_url = base_url or "https://api.deepseek.com"
|
||||
|
||||
client = AsyncOpenAI(api_key=api_key, base_url=base_url)
|
||||
models = await client.models.list()
|
||||
await client.close()
|
||||
return [model.id for model in models.data]
|
||||
except Exception as e:
|
||||
logger.error(f"获取 {provider} 模型列表失败:{e}")
|
||||
raise e
|
||||
2048
app/agent/llm/provider.py
Normal file
2048
app/agent/llm/provider.py
Normal file
File diff suppressed because it is too large
Load Diff
154
app/agent/memory/__init__.py
Normal file
154
app/agent/memory/__init__.py
Normal file
@@ -0,0 +1,154 @@
|
||||
"""对话记忆管理器"""
|
||||
|
||||
import asyncio
|
||||
from datetime import datetime
|
||||
from typing import Dict, List, Optional
|
||||
|
||||
from langchain_core.messages import BaseMessage
|
||||
|
||||
from app.core.config import settings
|
||||
from app.log import logger
|
||||
from app.schemas.agent import ConversationMemory
|
||||
|
||||
|
||||
class MemoryManager:
|
||||
"""
|
||||
对话记忆管理器
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
# 内存中的会话记忆缓存
|
||||
self.memory_cache: Dict[str, ConversationMemory] = {}
|
||||
# 内存缓存清理任务
|
||||
self.cleanup_task: Optional[asyncio.Task] = None
|
||||
|
||||
def initialize(self):
|
||||
"""
|
||||
初始化记忆管理器
|
||||
"""
|
||||
try:
|
||||
# 启动内存缓存清理任务(Redis通过TTL自动过期)
|
||||
self.cleanup_task = asyncio.create_task(
|
||||
self._cleanup_expired_memories()
|
||||
)
|
||||
logger.info("对话记忆管理器初始化完成")
|
||||
|
||||
except Exception as e:
|
||||
logger.warning(f"Redis连接失败,将使用内存存储: {e}")
|
||||
|
||||
async def close(self):
|
||||
"""
|
||||
关闭记忆管理器
|
||||
"""
|
||||
if self.cleanup_task:
|
||||
self.cleanup_task.cancel()
|
||||
try:
|
||||
await self.cleanup_task
|
||||
except asyncio.CancelledError:
|
||||
pass
|
||||
|
||||
logger.info("对话记忆管理器已关闭")
|
||||
|
||||
@staticmethod
|
||||
def _get_memory_key(session_id: str, user_id: str):
|
||||
"""
|
||||
计算内存Key
|
||||
"""
|
||||
return f"{user_id}:{session_id}" if user_id else session_id
|
||||
|
||||
def get_memory(self, session_id: str, user_id: str) -> Optional[ConversationMemory]:
|
||||
"""
|
||||
获取内存中的记忆
|
||||
"""
|
||||
cache_key = self._get_memory_key(session_id, user_id)
|
||||
return self.memory_cache.get(cache_key)
|
||||
|
||||
def get_agent_messages(
|
||||
self, session_id: str, user_id: str
|
||||
) -> List[BaseMessage]:
|
||||
"""
|
||||
为Agent获取最近的消息(仅内存缓存)
|
||||
|
||||
如果消息Token数量超过模型最大上下文长度的阀值,会自动进行摘要裁剪
|
||||
"""
|
||||
memory = self.get_memory(session_id, user_id)
|
||||
if not memory:
|
||||
return []
|
||||
|
||||
# 获取所有消息
|
||||
return memory.messages
|
||||
|
||||
def save_agent_messages(
|
||||
self, session_id: str, user_id: str, messages: List[BaseMessage]
|
||||
):
|
||||
"""
|
||||
保存Agent消息(仅内存缓存)
|
||||
|
||||
注意:Redis中的记忆通过TTL机制自动过期,这里只更新内存缓存,Redis会在下次访问时自动过期
|
||||
"""
|
||||
memory = self.get_memory(session_id, user_id)
|
||||
if not memory:
|
||||
memory = ConversationMemory(session_id=session_id, user_id=user_id)
|
||||
|
||||
memory.messages = messages
|
||||
memory.updated_at = datetime.now()
|
||||
|
||||
# 更新内存缓存
|
||||
self.save_memory(memory)
|
||||
|
||||
def save_memory(self, memory: ConversationMemory):
|
||||
"""
|
||||
保存记忆到内存缓存
|
||||
|
||||
注意:Redis中的记忆通过TTL机制自动过期,这里只更新内存缓存,Redis会在下次访问时自动过期
|
||||
"""
|
||||
cache_key = self._get_memory_key(memory.session_id, memory.user_id)
|
||||
self.memory_cache[cache_key] = memory
|
||||
|
||||
def clear_memory(self, session_id: str, user_id: str):
|
||||
"""
|
||||
清空会话记忆
|
||||
"""
|
||||
cache_key = self._get_memory_key(session_id, user_id)
|
||||
if cache_key in self.memory_cache:
|
||||
del self.memory_cache[cache_key]
|
||||
|
||||
logger.info(f"会话记忆已清空: session_id={session_id}, user_id={user_id}")
|
||||
|
||||
async def _cleanup_expired_memories(self):
|
||||
"""
|
||||
清理内存中过期记忆的后台任务
|
||||
|
||||
注意:Redis中的记忆通过TTL机制自动过期,这里只清理内存缓存
|
||||
"""
|
||||
while True:
|
||||
try:
|
||||
# 每小时清理一次
|
||||
await asyncio.sleep(3600)
|
||||
|
||||
current_time = datetime.now()
|
||||
expired_sessions = []
|
||||
|
||||
# 只检查内存缓存中的过期记忆
|
||||
# Redis中的记忆会通过TTL自动过期,无需手动处理
|
||||
for cache_key, memory in self.memory_cache.items():
|
||||
if (
|
||||
current_time - memory.updated_at
|
||||
).days > settings.LLM_MEMORY_RETENTION_DAYS:
|
||||
expired_sessions.append(cache_key)
|
||||
|
||||
# 只清理内存缓存,不删除Redis中的键(Redis会自动过期)
|
||||
for cache_key in expired_sessions:
|
||||
if cache_key in self.memory_cache:
|
||||
del self.memory_cache[cache_key]
|
||||
|
||||
if expired_sessions:
|
||||
logger.info(f"清理了{len(expired_sessions)}个过期内存会话记忆")
|
||||
|
||||
except asyncio.CancelledError:
|
||||
break
|
||||
except Exception as e:
|
||||
logger.error(f"清理记忆时发生错误: {e}")
|
||||
|
||||
|
||||
memory_manager = MemoryManager()
|
||||
0
app/agent/middleware/__init__.py
Normal file
0
app/agent/middleware/__init__.py
Normal file
406
app/agent/middleware/activity_log.py
Normal file
406
app/agent/middleware/activity_log.py
Normal file
@@ -0,0 +1,406 @@
|
||||
"""
|
||||
活动日志中间件 - 自动记录 Agent 每次交互的操作摘要。
|
||||
|
||||
按日期存储在 CONFIG_PATH/agent/activity/YYYY-MM-DD.md 中,
|
||||
每次 Agent 执行完毕后自动调用 LLM 对本轮对话生成简洁的活动摘要,
|
||||
并在每次 Agent 启动时加载近几天的活动日志注入系统提示词。
|
||||
"""
|
||||
|
||||
import re
|
||||
from collections.abc import Awaitable, Callable
|
||||
from datetime import datetime, timedelta
|
||||
from typing import Annotated, Any, NotRequired, TypedDict
|
||||
|
||||
from anyio import Path as AsyncPath
|
||||
from langchain.agents.middleware.types import (
|
||||
AgentMiddleware,
|
||||
AgentState,
|
||||
ContextT,
|
||||
ModelRequest,
|
||||
ModelResponse,
|
||||
PrivateStateAttr, # noqa
|
||||
ResponseT,
|
||||
)
|
||||
from langchain_core.messages import AIMessage, HumanMessage, ToolMessage
|
||||
from langgraph.runtime import Runtime
|
||||
|
||||
from app.agent.middleware.utils import append_to_system_message
|
||||
from app.log import logger
|
||||
|
||||
# 活动日志保留天数
|
||||
DEFAULT_RETENTION_DAYS = 7
|
||||
|
||||
# 注入系统提示词时加载的天数
|
||||
PROMPT_LOAD_DAYS = 3
|
||||
|
||||
# 每日日志文件最大大小 (256KB)
|
||||
MAX_LOG_FILE_SIZE = 256 * 1024
|
||||
|
||||
# 提取本轮对话上下文的最大字符数(避免过长的对话消耗太多 token)
|
||||
MAX_CONTEXT_FOR_SUMMARY = 4000
|
||||
|
||||
# LLM 总结的提示词
|
||||
SUMMARY_PROMPT = """请根据以下 AI 助手与用户的对话记录,生成一条简洁的活动摘要(中文,一句话,不超过80字)。
|
||||
摘要应包含:用户的需求是什么、助手做了什么、结果如何。
|
||||
只输出摘要内容,不要加任何前缀、标点序号或解释。
|
||||
|
||||
对话记录:
|
||||
{conversation}"""
|
||||
|
||||
|
||||
class ActivityLogState(AgentState):
|
||||
"""ActivityLogMiddleware 的状态模型。"""
|
||||
|
||||
activity_log_contents: NotRequired[Annotated[dict[str, str], PrivateStateAttr]]
|
||||
"""将日期字符串映射到日志内容的字典。标记为私有,不包含在最终代理状态中。"""
|
||||
|
||||
|
||||
class ActivityLogStateUpdate(TypedDict):
|
||||
"""ActivityLogMiddleware 的状态更新。"""
|
||||
|
||||
activity_log_contents: dict[str, str]
|
||||
|
||||
|
||||
def _extract_last_round(messages: list) -> list | None:
|
||||
"""从完整消息列表中提取最后一轮交互。
|
||||
|
||||
从最后一条 HumanMessage 到消息末尾即为本轮交互。
|
||||
|
||||
参数:
|
||||
messages: Agent 执行后的完整消息列表。
|
||||
|
||||
返回:
|
||||
本轮交互的消息子列表,如果无有效交互则返回 None。
|
||||
"""
|
||||
if not messages:
|
||||
return None
|
||||
|
||||
# 找到最后一条用户消息的索引
|
||||
last_human_idx = None
|
||||
for i in range(len(messages) - 1, -1, -1):
|
||||
if isinstance(messages[i], HumanMessage) and messages[i].content:
|
||||
last_human_idx = i
|
||||
break
|
||||
|
||||
if last_human_idx is None:
|
||||
return None
|
||||
|
||||
round_messages = messages[last_human_idx:]
|
||||
|
||||
# 检查是否为系统心跳消息
|
||||
user_msg = round_messages[0]
|
||||
user_content = (
|
||||
user_msg.content if isinstance(user_msg.content, str) else str(user_msg.content)
|
||||
)
|
||||
if user_content.strip().startswith("[System Heartbeat]"):
|
||||
return None
|
||||
|
||||
return round_messages
|
||||
|
||||
|
||||
def _format_conversation_for_summary(round_messages: list) -> str:
|
||||
"""将本轮对话消息格式化为文本,供 LLM 总结。
|
||||
|
||||
参数:
|
||||
round_messages: 本轮交互的消息列表。
|
||||
|
||||
返回:
|
||||
格式化后的对话文本。
|
||||
"""
|
||||
lines = []
|
||||
total_len = 0
|
||||
|
||||
for msg in round_messages:
|
||||
if isinstance(msg, HumanMessage):
|
||||
content = msg.content if isinstance(msg.content, str) else str(msg.content)
|
||||
line = f"用户: {content}"
|
||||
elif isinstance(msg, AIMessage):
|
||||
if hasattr(msg, "tool_calls") and msg.tool_calls:
|
||||
tool_names = [
|
||||
tc["name"]
|
||||
for tc in msg.tool_calls
|
||||
if isinstance(tc, dict) and "name" in tc
|
||||
]
|
||||
line = f"助手调用工具: {', '.join(tool_names)}"
|
||||
elif msg.content:
|
||||
content = (
|
||||
msg.content if isinstance(msg.content, str) else str(msg.content)
|
||||
)
|
||||
line = f"助手: {content}"
|
||||
else:
|
||||
continue
|
||||
elif isinstance(msg, ToolMessage):
|
||||
content = msg.content if isinstance(msg.content, str) else str(msg.content)
|
||||
# 工具返回可能很长,截断
|
||||
if len(content) > 200:
|
||||
content = content[:200] + "..."
|
||||
line = f"工具返回: {content}"
|
||||
else:
|
||||
continue
|
||||
|
||||
# 控制总长度
|
||||
if total_len + len(line) > MAX_CONTEXT_FOR_SUMMARY:
|
||||
lines.append("...(后续对话省略)")
|
||||
break
|
||||
lines.append(line)
|
||||
total_len += len(line)
|
||||
|
||||
return "\n".join(lines)
|
||||
|
||||
|
||||
async def _summarize_with_llm(conversation_text: str) -> str | None:
|
||||
"""调用 LLM 对对话文本生成活动摘要。
|
||||
|
||||
参数:
|
||||
conversation_text: 格式化后的对话文本。
|
||||
|
||||
返回:
|
||||
LLM 生成的摘要字符串,失败时返回 None。
|
||||
"""
|
||||
try:
|
||||
from app.agent.llm import LLMHelper
|
||||
|
||||
llm = await LLMHelper.get_llm(streaming=False)
|
||||
prompt = SUMMARY_PROMPT.format(conversation=conversation_text)
|
||||
response = await llm.ainvoke(prompt)
|
||||
summary = response.content.strip()
|
||||
# 清理模型可能输出的前缀(如 "摘要:" "总结:")
|
||||
summary = re.sub(r"^(摘要|总结|活动记录)[::]\s*", "", summary)
|
||||
return summary if summary else None
|
||||
except Exception as e:
|
||||
logger.debug("LLM summarization failed: %s", e)
|
||||
return None
|
||||
|
||||
|
||||
ACTIVITY_LOG_SYSTEM_PROMPT = """<activity_log>
|
||||
{activity_log}
|
||||
</activity_log>
|
||||
|
||||
<activity_log_guidelines>
|
||||
The above <activity_log> contains a record of your recent interactions with the user, automatically maintained by the system.
|
||||
|
||||
**How to use this information:**
|
||||
- Reference past activities when relevant to provide continuity (e.g., "之前帮你订阅了《XXX》,现在有更新了")
|
||||
- Use activity history to understand ongoing tasks and user patterns
|
||||
- When the user asks "你之前帮我做了什么" or similar questions, refer to this log
|
||||
- Activity logs are automatically recorded after each interaction - you do NOT need to manually update them
|
||||
|
||||
**What is automatically logged:**
|
||||
- Each user interaction: what was asked, which tools were used, and the outcome
|
||||
- Timestamps for all activities
|
||||
- The log is organized by date for easy reference
|
||||
|
||||
**Important:**
|
||||
- Activity logs are READ-ONLY from your perspective - the system manages them automatically
|
||||
- Do not attempt to edit or write to activity log files
|
||||
- For long-term preferences and knowledge, continue to use MEMORY.md
|
||||
- Activity logs are retained for {retention_days} days and then automatically cleaned up
|
||||
</activity_log_guidelines>
|
||||
"""
|
||||
|
||||
|
||||
class ActivityLogMiddleware(AgentMiddleware[ActivityLogState, ContextT, ResponseT]): # noqa
|
||||
"""自动记录和加载 Agent 活动日志的中间件。
|
||||
|
||||
- abefore_agent: 加载近几天的活动日志
|
||||
- awrap_model_call: 将活动日志注入系统提示词
|
||||
- aafter_agent: 从本次对话中提取摘要并追加到当日日志文件
|
||||
|
||||
参数:
|
||||
activity_dir: 活动日志存储目录路径。
|
||||
retention_days: 日志保留天数(默认 7 天)。
|
||||
prompt_load_days: 注入系统提示词时加载的天数(默认 3 天)。
|
||||
"""
|
||||
|
||||
state_schema = ActivityLogState
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
*,
|
||||
activity_dir: str,
|
||||
retention_days: int = DEFAULT_RETENTION_DAYS,
|
||||
prompt_load_days: int = PROMPT_LOAD_DAYS,
|
||||
) -> None:
|
||||
self.activity_dir = activity_dir
|
||||
self.retention_days = retention_days
|
||||
self.prompt_load_days = prompt_load_days
|
||||
|
||||
def _get_log_path(self, date_str: str) -> AsyncPath:
|
||||
"""获取指定日期的日志文件路径。"""
|
||||
return AsyncPath(self.activity_dir) / f"{date_str}.md"
|
||||
|
||||
def _format_activity_log(self, contents: dict[str, str]) -> str:
|
||||
"""格式化活动日志用于系统提示词注入。"""
|
||||
if not contents:
|
||||
return ACTIVITY_LOG_SYSTEM_PROMPT.format(
|
||||
activity_log="(暂无活动记录)",
|
||||
retention_days=self.retention_days,
|
||||
)
|
||||
|
||||
# 按日期排序(最近的在前)
|
||||
sorted_dates = sorted(contents.keys(), reverse=True)
|
||||
sections = []
|
||||
for date_str in sorted_dates:
|
||||
content = contents[date_str].strip()
|
||||
if content:
|
||||
sections.append(f"### {date_str}\n{content}")
|
||||
|
||||
if not sections:
|
||||
return ACTIVITY_LOG_SYSTEM_PROMPT.format(
|
||||
activity_log="(暂无活动记录)",
|
||||
retention_days=self.retention_days,
|
||||
)
|
||||
|
||||
log_body = "\n\n".join(sections)
|
||||
return ACTIVITY_LOG_SYSTEM_PROMPT.format(
|
||||
activity_log=log_body,
|
||||
retention_days=self.retention_days,
|
||||
)
|
||||
|
||||
async def _load_recent_logs(self) -> dict[str, str]:
|
||||
"""加载近几天的活动日志。"""
|
||||
contents: dict[str, str] = {}
|
||||
today = datetime.now().date()
|
||||
|
||||
for i in range(self.prompt_load_days):
|
||||
date = today - timedelta(days=i)
|
||||
date_str = date.strftime("%Y-%m-%d")
|
||||
log_path = self._get_log_path(date_str)
|
||||
|
||||
if await log_path.exists():
|
||||
try:
|
||||
content = await log_path.read_text(encoding="utf-8")
|
||||
contents[date_str] = content
|
||||
logger.debug("Loaded activity log for %s", date_str)
|
||||
except Exception as e:
|
||||
logger.warning("Failed to load activity log %s: %s", date_str, e)
|
||||
|
||||
return contents
|
||||
|
||||
async def _append_activity(self, summary: str) -> None:
|
||||
"""将一条活动记录追加到当日日志文件。"""
|
||||
today_str = datetime.now().strftime("%Y-%m-%d")
|
||||
now_str = datetime.now().strftime("%H:%M")
|
||||
log_path = self._get_log_path(today_str)
|
||||
|
||||
# 确保目录存在
|
||||
dir_path = AsyncPath(self.activity_dir)
|
||||
if not await dir_path.exists():
|
||||
await dir_path.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
# 检查文件大小
|
||||
if await log_path.exists():
|
||||
stat = await log_path.stat()
|
||||
if stat.st_size >= MAX_LOG_FILE_SIZE:
|
||||
logger.warning(
|
||||
"Activity log %s exceeds size limit (%d bytes), skipping append",
|
||||
today_str,
|
||||
stat.st_size,
|
||||
)
|
||||
return
|
||||
|
||||
# 追加记录
|
||||
entry = f"- **{now_str}** {summary}\n"
|
||||
try:
|
||||
if await log_path.exists():
|
||||
existing = await log_path.read_text(encoding="utf-8")
|
||||
await log_path.write_text(existing + entry, encoding="utf-8")
|
||||
else:
|
||||
header = f"# {today_str} 活动日志\n\n"
|
||||
await log_path.write_text(header + entry, encoding="utf-8")
|
||||
logger.debug("Activity logged: %s", summary[:80])
|
||||
except Exception as e:
|
||||
logger.warning("Failed to append activity log: %s", e)
|
||||
|
||||
async def _cleanup_old_logs(self) -> None:
|
||||
"""清理超过保留天数的旧日志文件。"""
|
||||
dir_path = AsyncPath(self.activity_dir)
|
||||
if not await dir_path.exists():
|
||||
return
|
||||
|
||||
cutoff_date = datetime.now().date() - timedelta(days=self.retention_days)
|
||||
date_pattern = re.compile(r"^(\d{4}-\d{2}-\d{2})\.md$")
|
||||
|
||||
try:
|
||||
async for path in dir_path.iterdir():
|
||||
if not await path.is_file():
|
||||
continue
|
||||
match = date_pattern.match(path.name)
|
||||
if not match:
|
||||
continue
|
||||
try:
|
||||
file_date = datetime.strptime(match.group(1), "%Y-%m-%d").date()
|
||||
if file_date < cutoff_date:
|
||||
await path.unlink()
|
||||
logger.debug("Cleaned up old activity log: %s", path.name)
|
||||
except ValueError:
|
||||
continue
|
||||
except Exception as e:
|
||||
logger.warning("Failed to cleanup old activity logs: %s", e)
|
||||
|
||||
async def abefore_agent(
|
||||
self, state: ActivityLogState, runtime: Runtime
|
||||
) -> ActivityLogStateUpdate | None:
|
||||
"""在 Agent 执行前加载近期活动日志。"""
|
||||
# 如果已经加载则跳过
|
||||
if "activity_log_contents" in state:
|
||||
return None
|
||||
|
||||
contents = await self._load_recent_logs()
|
||||
|
||||
# 趁机清理旧日志(低频操作,不影响性能)
|
||||
await self._cleanup_old_logs()
|
||||
|
||||
return ActivityLogStateUpdate(activity_log_contents=contents)
|
||||
|
||||
def modify_request(self, request: ModelRequest[ContextT]) -> ModelRequest[ContextT]:
|
||||
"""将活动日志注入系统消息。"""
|
||||
contents = request.state.get("activity_log_contents", {}) # noqa
|
||||
activity_log_prompt = self._format_activity_log(contents)
|
||||
|
||||
new_system_message = append_to_system_message(
|
||||
request.system_message, activity_log_prompt
|
||||
)
|
||||
return request.override(system_message=new_system_message)
|
||||
|
||||
async def awrap_model_call(
|
||||
self,
|
||||
request: ModelRequest[ContextT],
|
||||
handler: Callable[
|
||||
[ModelRequest[ContextT]], Awaitable[ModelResponse[ResponseT]]
|
||||
],
|
||||
) -> ModelResponse[ResponseT]:
|
||||
"""异步包装模型调用,注入活动日志到系统提示词。"""
|
||||
modified_request = self.modify_request(request)
|
||||
return await handler(modified_request)
|
||||
|
||||
async def aafter_agent(
|
||||
self, state: ActivityLogState, runtime: Runtime
|
||||
) -> dict[str, Any] | None:
|
||||
"""Agent 执行完毕后,调用 LLM 对本轮对话生成摘要并追加到当日活动日志。"""
|
||||
try:
|
||||
messages = state.get("messages", [])
|
||||
if not messages:
|
||||
return None
|
||||
|
||||
# 提取本轮交互
|
||||
round_messages = _extract_last_round(messages)
|
||||
if not round_messages:
|
||||
return None
|
||||
|
||||
# 格式化对话文本
|
||||
conversation_text = _format_conversation_for_summary(round_messages)
|
||||
if not conversation_text:
|
||||
return None
|
||||
|
||||
# 调用 LLM 生成摘要
|
||||
summary = await _summarize_with_llm(conversation_text)
|
||||
if summary:
|
||||
await self._append_activity(summary)
|
||||
except Exception as e:
|
||||
logger.warning("Failed to record activity: %s", e)
|
||||
|
||||
return None
|
||||
|
||||
|
||||
__all__ = ["ActivityLogMiddleware"]
|
||||
350
app/agent/middleware/jobs.py
Normal file
350
app/agent/middleware/jobs.py
Normal file
@@ -0,0 +1,350 @@
|
||||
import re
|
||||
from collections.abc import Awaitable, Callable
|
||||
from typing import Annotated, NotRequired, TypedDict
|
||||
|
||||
import yaml # noqa
|
||||
from anyio import Path as AsyncPath
|
||||
from langchain.agents.middleware.types import (
|
||||
AgentMiddleware,
|
||||
AgentState,
|
||||
ContextT,
|
||||
ModelRequest,
|
||||
ModelResponse,
|
||||
PrivateStateAttr, # noqa
|
||||
ResponseT,
|
||||
)
|
||||
from langchain_core.runnables import RunnableConfig
|
||||
from langgraph.runtime import Runtime
|
||||
|
||||
from app.agent.middleware.utils import append_to_system_message
|
||||
from app.log import logger
|
||||
|
||||
# JOB.md 文件最大限制为 1MB
|
||||
MAX_JOB_FILE_SIZE = 1 * 1024 * 1024
|
||||
|
||||
|
||||
class JobMetadata(TypedDict):
|
||||
"""Job 元数据。"""
|
||||
|
||||
path: str
|
||||
"""JOB.md 文件路径。"""
|
||||
|
||||
id: str
|
||||
"""Job 标识符(目录名)。"""
|
||||
|
||||
name: str
|
||||
"""Job 名称。"""
|
||||
|
||||
description: str
|
||||
"""Job 描述。"""
|
||||
|
||||
schedule: str
|
||||
"""调度类型: once(一次性)/ recurring(重复性)。"""
|
||||
|
||||
status: str
|
||||
"""当前状态: pending / in_progress / completed / cancelled。"""
|
||||
|
||||
last_run: str | None
|
||||
"""上次执行时间。"""
|
||||
|
||||
|
||||
class JobsState(AgentState):
|
||||
"""jobs 中间件状态。"""
|
||||
|
||||
jobs_metadata: NotRequired[Annotated[list[JobMetadata], PrivateStateAttr]]
|
||||
"""已加载的 job 元数据列表,不传播给父 agent。"""
|
||||
|
||||
|
||||
class JobsStateUpdate(TypedDict):
|
||||
"""jobs 中间件状态更新项。"""
|
||||
|
||||
jobs_metadata: list[JobMetadata]
|
||||
"""待合并的 job 元数据列表。"""
|
||||
|
||||
|
||||
def _parse_job_metadata(
|
||||
content: str,
|
||||
job_path: str,
|
||||
job_id: str,
|
||||
) -> JobMetadata | None:
|
||||
"""从 JOB.md 内容中解析 YAML 前言并验证元数据。"""
|
||||
if len(content) > MAX_JOB_FILE_SIZE:
|
||||
logger.warning(
|
||||
"Skipping %s: content too large (%d bytes)", job_path, len(content)
|
||||
)
|
||||
return None
|
||||
|
||||
# 匹配 --- 分隔的 YAML 前言
|
||||
frontmatter_pattern = r"^---\s*\n(.*?)\n---\s*\n"
|
||||
match = re.match(frontmatter_pattern, content, re.DOTALL)
|
||||
if not match:
|
||||
logger.warning("Skipping %s: no valid YAML frontmatter found", job_path)
|
||||
return None
|
||||
frontmatter_str = match.group(1)
|
||||
|
||||
# 解析 YAML
|
||||
try:
|
||||
frontmatter_data = yaml.safe_load(frontmatter_str)
|
||||
except yaml.YAMLError as e:
|
||||
logger.warning("Invalid YAML in %s: %s", job_path, e)
|
||||
return None
|
||||
|
||||
if not isinstance(frontmatter_data, dict):
|
||||
logger.warning("Skipping %s: frontmatter is not a mapping", job_path)
|
||||
return None
|
||||
|
||||
# Job 名称和描述
|
||||
name = str(frontmatter_data.get("name", "")).strip()
|
||||
description = str(frontmatter_data.get("description", "")).strip()
|
||||
if not name:
|
||||
logger.warning("Skipping %s: missing required 'name'", job_path)
|
||||
return None
|
||||
|
||||
# 调度类型
|
||||
schedule = str(frontmatter_data.get("schedule", "once")).strip().lower()
|
||||
if schedule not in ("once", "recurring"):
|
||||
schedule = "once"
|
||||
|
||||
# 状态
|
||||
status = str(frontmatter_data.get("status", "pending")).strip().lower()
|
||||
if status not in ("pending", "in_progress", "completed", "cancelled"):
|
||||
status = "pending"
|
||||
|
||||
# 上次执行时间
|
||||
last_run = str(frontmatter_data.get("last_run", "")).strip() or None
|
||||
|
||||
return JobMetadata(
|
||||
id=job_id,
|
||||
name=name,
|
||||
description=description,
|
||||
path=job_path,
|
||||
schedule=schedule,
|
||||
status=status,
|
||||
last_run=last_run,
|
||||
)
|
||||
|
||||
|
||||
async def _alist_jobs(source_path: AsyncPath) -> list[JobMetadata]:
|
||||
"""异步列出指定路径下的所有任务。
|
||||
|
||||
扫描包含 JOB.md 的目录并解析其元数据。
|
||||
"""
|
||||
jobs: list[JobMetadata] = []
|
||||
|
||||
if not await source_path.exists():
|
||||
return []
|
||||
|
||||
# 查找所有任务目录(包含 JOB.md 的目录)
|
||||
job_dirs: list[AsyncPath] = []
|
||||
async for path in source_path.iterdir():
|
||||
if await path.is_dir() and await (path / "JOB.md").is_file():
|
||||
job_dirs.append(path)
|
||||
|
||||
if not job_dirs:
|
||||
return []
|
||||
|
||||
# 解析 JOB.md
|
||||
for job_path in job_dirs:
|
||||
job_md_path = job_path / "JOB.md"
|
||||
|
||||
job_content = await job_md_path.read_text(encoding="utf-8")
|
||||
|
||||
# 解析元数据
|
||||
job_metadata = _parse_job_metadata(
|
||||
content=job_content,
|
||||
job_path=str(job_md_path),
|
||||
job_id=job_path.name,
|
||||
)
|
||||
if job_metadata:
|
||||
jobs.append(job_metadata)
|
||||
|
||||
return jobs
|
||||
|
||||
|
||||
JOBS_SYSTEM_PROMPT = """
|
||||
<jobs_system>
|
||||
You have a **scheduled jobs** system that allows you to track and execute long-running or recurring tasks.
|
||||
|
||||
**Jobs Location:** `{jobs_location}`
|
||||
|
||||
**Current Jobs:**
|
||||
|
||||
{jobs_list}
|
||||
|
||||
**Job File Format:**
|
||||
|
||||
Each job is a directory containing a `JOB.md` file with YAML frontmatter followed by task details:
|
||||
|
||||
```markdown
|
||||
---
|
||||
name: 任务名称(简短中文描述)
|
||||
description: 任务的详细描述,说明要做什么
|
||||
schedule: once 或 recurring
|
||||
status: pending / in_progress / completed / cancelled
|
||||
last_run: "YYYY-MM-DD HH:MM"(上次执行时间,可选)
|
||||
---
|
||||
# 任务详情
|
||||
|
||||
## 目标
|
||||
详细描述这个任务要完成的目标。
|
||||
|
||||
## 执行日志
|
||||
记录每次执行的情况和结果。
|
||||
|
||||
- **2024-01-15 10:00** - 执行了XXX操作,结果:成功/失败
|
||||
- **2024-01-16 10:00** - 继续执行XXX...
|
||||
```
|
||||
|
||||
**Job Lifecycle Rules:**
|
||||
|
||||
1. **Creating a Job**: When a user asks you to do something periodically or at a later time:
|
||||
- Create a new directory under the jobs location, directory name is the `job-id` (lowercase, hyphens, 1-64 chars)
|
||||
- Write a `JOB.md` file with proper frontmatter and detailed task description
|
||||
- Set `schedule: once` for one-time tasks, `schedule: recurring` for repeating tasks (e.g., daily sign-in, weekly checks)
|
||||
- Set initial `status: pending`
|
||||
|
||||
2. **Executing a Job**: When you work on a job:
|
||||
- Update `status: in_progress` in the frontmatter
|
||||
- Execute the required actions using your tools
|
||||
- Log the execution result in the "执行日志" section with timestamp
|
||||
- Update `last_run` in frontmatter to current time
|
||||
|
||||
3. **Completing a Job**:
|
||||
- For `schedule: once` tasks: set `status: completed` after successful execution
|
||||
- For `schedule: recurring` tasks: keep `status: pending` after execution, only update `last_run` time. The job stays active for the next scheduled run.
|
||||
- Set `status: cancelled` if the user explicitly asks to cancel/stop a task
|
||||
|
||||
4. **Heartbeat Check**: You will be periodically woken up to check pending jobs. When woken up:
|
||||
- Read the jobs directory to find all active jobs (status: pending or in_progress)
|
||||
- Skip jobs with `status: completed` or `status: cancelled`
|
||||
- For `schedule: recurring` jobs, check `last_run` to determine if it's time to run again
|
||||
- Execute pending jobs and update their status/logs accordingly
|
||||
|
||||
**Important Notes:**
|
||||
- Each job MUST have its own separate directory and JOB.md file to avoid conflicts
|
||||
- Always update the frontmatter fields (status, last_run) when executing a job
|
||||
- Keep execution logs concise but informative
|
||||
- For recurring jobs, maintain a rolling log (keep recent entries, you can summarize/remove old entries to keep the file manageable)
|
||||
- When creating jobs, make the description detailed enough that you can understand and execute the task in future sessions without additional context
|
||||
|
||||
**When to Create Jobs:**
|
||||
- User says "每天帮我..." / "定期..." / "定时..." / "提醒我..." / "以后每次..."
|
||||
- User requests a task that should be done repeatedly
|
||||
- User asks for monitoring or periodic checking of something
|
||||
|
||||
**When NOT to Create Jobs:**
|
||||
- User asks for an immediate one-time action (just do it now)
|
||||
- Simple questions or conversations
|
||||
- Tasks that are already handled by MoviePilot's built-in scheduler services
|
||||
</jobs_system>
|
||||
"""
|
||||
|
||||
|
||||
class JobsMiddleware(AgentMiddleware[JobsState, ContextT, ResponseT]): # noqa
|
||||
"""加载并向系统提示词注入 Agent Jobs 的中间件。
|
||||
|
||||
扫描 jobs 目录下的 JOB.md 文件,解析元数据并注入到系统提示词中,
|
||||
使智能体了解当前的长期任务及其状态。
|
||||
"""
|
||||
|
||||
state_schema = JobsState
|
||||
|
||||
def __init__(self, *, sources: list[str]) -> None:
|
||||
"""初始化 Jobs 中间件。"""
|
||||
self.sources = sources
|
||||
self.system_prompt_template = JOBS_SYSTEM_PROMPT
|
||||
|
||||
@staticmethod
|
||||
def _format_jobs_list(jobs: list[JobMetadata]) -> str:
|
||||
"""格式化任务元数据列表用于系统提示词。"""
|
||||
if not jobs:
|
||||
return "(No active jobs. You can create jobs when users request periodic or scheduled tasks.)"
|
||||
|
||||
lines = []
|
||||
for job in jobs:
|
||||
status_emoji = {
|
||||
"pending": "⏳",
|
||||
"in_progress": "🔄",
|
||||
"completed": "✅",
|
||||
"cancelled": "❌",
|
||||
}.get(job["status"], "❓")
|
||||
|
||||
schedule_label = (
|
||||
"recurring (重复)"
|
||||
if job["schedule"] == "recurring"
|
||||
else "once (一次性)"
|
||||
)
|
||||
desc_line = (
|
||||
f"- {status_emoji} **{job['id']}**: {job['name']}"
|
||||
f" [{schedule_label}] - {job['description']}"
|
||||
)
|
||||
if job.get("last_run"):
|
||||
desc_line += f" (上次执行: {job['last_run']})"
|
||||
lines.append(desc_line)
|
||||
lines.append(f" -> Read `{job['path']}` for full details")
|
||||
|
||||
return "\n".join(lines)
|
||||
|
||||
def modify_request(self, request: ModelRequest[ContextT]) -> ModelRequest[ContextT]:
|
||||
"""将任务文档注入模型请求的系统消息中。"""
|
||||
jobs_metadata = request.state.get("jobs_metadata", []) # noqa
|
||||
|
||||
# 过滤:只展示活跃任务(pending / in_progress / recurring)
|
||||
active_jobs = [
|
||||
j
|
||||
for j in jobs_metadata
|
||||
if j["status"] in ("pending", "in_progress")
|
||||
or (j["schedule"] == "recurring" and j["status"] not in ("cancelled",))
|
||||
]
|
||||
|
||||
jobs_list = self._format_jobs_list(active_jobs)
|
||||
jobs_location = self.sources[0] if self.sources else ""
|
||||
|
||||
jobs_section = self.system_prompt_template.format(
|
||||
jobs_location=jobs_location,
|
||||
jobs_list=jobs_list,
|
||||
)
|
||||
|
||||
new_system_message = append_to_system_message(
|
||||
request.system_message, jobs_section
|
||||
)
|
||||
|
||||
return request.override(system_message=new_system_message)
|
||||
|
||||
async def abefore_agent( # noqa
|
||||
self, state: JobsState, runtime: Runtime, config: RunnableConfig
|
||||
) -> JobsStateUpdate | None:
|
||||
"""在 Agent 执行前异步加载任务元数据。
|
||||
|
||||
每个会话仅加载一次。若 state 中已有则跳过。
|
||||
"""
|
||||
# 如果 state 中已存在元数据则跳过
|
||||
if "jobs_metadata" in state:
|
||||
return None
|
||||
|
||||
all_jobs: list[JobMetadata] = []
|
||||
|
||||
# 遍历源加载任务
|
||||
for source_path_str in self.sources:
|
||||
source_path = AsyncPath(source_path_str)
|
||||
if not await source_path.exists():
|
||||
await source_path.mkdir(parents=True, exist_ok=True)
|
||||
continue
|
||||
source_jobs = await _alist_jobs(source_path)
|
||||
all_jobs.extend(source_jobs)
|
||||
|
||||
return JobsStateUpdate(jobs_metadata=all_jobs)
|
||||
|
||||
async def awrap_model_call(
|
||||
self,
|
||||
request: ModelRequest[ContextT],
|
||||
handler: Callable[
|
||||
[ModelRequest[ContextT]], Awaitable[ModelResponse[ResponseT]]
|
||||
],
|
||||
) -> ModelResponse[ResponseT]:
|
||||
"""在模型调用时注入任务文档。"""
|
||||
modified_request = self.modify_request(request)
|
||||
return await handler(modified_request)
|
||||
|
||||
|
||||
__all__ = ["JobMetadata", "JobsMiddleware"]
|
||||
396
app/agent/middleware/memory.py
Normal file
396
app/agent/middleware/memory.py
Normal file
@@ -0,0 +1,396 @@
|
||||
from collections.abc import Awaitable, Callable
|
||||
from typing import Annotated, NotRequired, TypedDict, Dict
|
||||
|
||||
from anyio import Path as AsyncPath
|
||||
from langchain.agents.middleware.types import (
|
||||
AgentMiddleware,
|
||||
AgentState,
|
||||
ContextT,
|
||||
ModelRequest,
|
||||
ModelResponse,
|
||||
PrivateStateAttr, # noqa
|
||||
ResponseT,
|
||||
)
|
||||
from langchain_core.runnables import RunnableConfig
|
||||
from langgraph.runtime import Runtime
|
||||
|
||||
from app.agent.middleware.utils import append_to_system_message
|
||||
from app.log import logger
|
||||
|
||||
# 记忆文件最大限制为 100KB,防止单文件过大导致上下文溢出
|
||||
MAX_MEMORY_FILE_SIZE = 100 * 1024
|
||||
|
||||
# 默认记忆文件名(用户主记忆)
|
||||
DEFAULT_MEMORY_FILE = "MEMORY.md"
|
||||
|
||||
|
||||
class MemoryState(AgentState):
|
||||
"""`MemoryMiddleware` 的状态模型。
|
||||
|
||||
属性:
|
||||
memory_contents: 将源路径映射到其加载内容的字典。
|
||||
标记为私有,因此不包含在最终的代理状态中。
|
||||
memory_empty: 记忆文件是否为空或不存在。
|
||||
标记为私有,用于判断是否需要触发初始化引导流程。
|
||||
"""
|
||||
|
||||
memory_contents: NotRequired[Annotated[dict[str, str], PrivateStateAttr]]
|
||||
memory_empty: NotRequired[Annotated[bool, PrivateStateAttr]]
|
||||
|
||||
|
||||
class MemoryStateUpdate(TypedDict):
    """State update payload emitted by :class:`MemoryMiddleware`."""

    # Mapping of source file path -> loaded memory content.
    memory_contents: dict[str, str]
    # True when no usable memory content was found.
    memory_empty: bool
|
||||
|
||||
|
||||
MEMORY_SYSTEM_PROMPT = """<agent_memory>
|
||||
The following memory files were loaded from your memory directory: `{memory_dir}`
|
||||
You can create, edit, or organize any `.md` files in this directory to manage your knowledge.
|
||||
|
||||
{agent_memory}
|
||||
</agent_memory>
|
||||
|
||||
<memory_guidelines>
|
||||
The above <agent_memory> was loaded from `.md` files in your memory directory (`{memory_dir}`). As you learn from your interactions with the user, you can save new knowledge by calling the `edit_file` or `write_file` tool on files in this directory.
|
||||
|
||||
**Memory file organization:**
|
||||
- All `.md` files in `{memory_dir}` are automatically loaded as memory.
|
||||
- `MEMORY.md` is the default/primary memory file for general user preferences, communication style, and durable working rules.
|
||||
- You may create additional `.md` files to organize knowledge by topic (e.g., `MEDIA_RULES.md`, `COMMUNICATION_PREFERENCES.md`, `DOWNLOAD_PREFERENCES.md`, `SITE_CONFIGS.md`, etc.).
|
||||
- Keep each file focused on a specific domain or topic for better organization.
|
||||
- Subdirectories are NOT scanned — only `.md` files directly in `{memory_dir}`.
|
||||
|
||||
**Learning from feedback:**
|
||||
- One of your MAIN PRIORITIES is to learn from your interactions with the user. These learnings can be implicit or explicit. This means that in the future, you will remember this important information.
|
||||
- When you need to remember something, updating memory must be your FIRST, IMMEDIATE action - before responding to the user, before calling other tools, before doing anything else. Just update memory immediately.
|
||||
- When user says something is better/worse, capture WHY and encode it as a pattern.
|
||||
- Each correction is a chance to improve permanently - don't just fix the immediate issue, update your instructions.
|
||||
- A great opportunity to update your memories is when the user interrupts a tool call and provides feedback. You should update your memories immediately before revising the tool call.
|
||||
- Look for the underlying principle behind corrections, not just the specific mistake.
|
||||
- The user might not explicitly ask you to remember something, but if they provide information that is useful for future use, you should update your memories immediately.
|
||||
|
||||
**Asking for information:**
|
||||
- If you lack context to perform an action (e.g. send a Slack DM, requires a user ID/email) you should explicitly ask the user for this information.
|
||||
- It is preferred for you to ask for information, don't assume anything that you do not know!
|
||||
- When the user provides information that is useful for future use, you should update your memories immediately.
|
||||
|
||||
**When to update memories:**
|
||||
- When the user explicitly asks you to remember something (e.g., "remember my email", "save this preference")
|
||||
- When the user gives durable communication or reply-format preferences (e.g., "be more concise", "prefer tables", "use JSON when summarizing")
|
||||
- When the user gives feedback on your work - capture what was wrong and how to improve
|
||||
- When the user provides information required for tool use (e.g., slack channel ID, email addresses)
|
||||
- When the user provides context useful for future tasks, such as how to use tools, or which actions to take in a particular situation
|
||||
- When you discover new user-specific patterns or preferences (communication style, formatting, workflows)
|
||||
|
||||
**When to NOT update memories:**
|
||||
- When the information is temporary or transient (e.g., "I'm running late", "I'm on my phone right now")
|
||||
- When the information is a one-time task request (e.g., "Find me a recipe", "What's 25 * 4?")
|
||||
- When the information is a simple question that doesn't reveal lasting preferences (e.g., "What day is it?", "Can you explain X?")
|
||||
- When the information is an acknowledgment or small talk (e.g., "Sounds good!", "Hello", "Thanks for that")
|
||||
- When the information is stale or irrelevant in future conversations
|
||||
- Memory may refine user-facing style, but it must NOT redefine the agent's core identity, safety boundaries, or global system-task rules.
|
||||
- If the user wants a built-in speaking style/persona, prefer the dedicated persona-switching tools instead of rewriting memory as a substitute.
|
||||
- Never store API keys, access tokens, passwords, or any other credentials in any file, memory, or system prompt.
|
||||
- If the user asks where to put API keys or provides an API key, do NOT echo or save it.
|
||||
- Do NOT record daily activities or task execution history in memory files - these are automatically tracked in the activity log system (see <activity_log>). Memory files are only for long-term knowledge, preferences, and patterns.
|
||||
|
||||
**Examples:**
|
||||
Example 1 (remembering user information):
|
||||
User: Can you connect to my google account?
|
||||
Agent: Sure, I'll connect to your google account, what's your google account email?
|
||||
User: john@example.com
|
||||
Agent: Let me save this to my memory.
|
||||
Tool Call: edit_file(...) -> remembers that the user's google account email is john@example.com
|
||||
|
||||
Example 2 (remembering implicit user preferences):
|
||||
User: Can you write me an example for creating a deep agent in LangChain?
|
||||
Agent: Sure, I'll write you an example for creating a deep agent in LangChain <example code in Python>
|
||||
User: Can you do this in JavaScript
|
||||
Agent: Let me save this to my memory.
|
||||
Tool Call: edit_file(...) -> remembers that the user prefers to get LangChain code examples in JavaScript
|
||||
Agent: Sure, here is the JavaScript example<example code in JavaScript>
|
||||
|
||||
Example 3 (do not remember transient information):
|
||||
User: I'm going to play basketball tonight so I will be offline for a few hours.
|
||||
Agent: Okay I'll add a block to your calendar.
|
||||
Tool Call: create_calendar_event(...) -> just calls a tool, does not commit anything to memory, as it is transient information
|
||||
</memory_guidelines>
|
||||
"""
|
||||
|
||||
MEMORY_ONBOARDING_PROMPT = """<agent_memory>
|
||||
(No memory loaded — this is a brand new user with no saved preferences.)
|
||||
Memory directory: {memory_dir}
|
||||
Default memory file: {memory_file}
|
||||
</agent_memory>
|
||||
|
||||
<memory_onboarding>
|
||||
First-time user detected.
|
||||
|
||||
The memory directory is currently empty. This likely means the user has no saved long-term preferences yet.
|
||||
|
||||
**Behavior requirements:**
|
||||
- Do NOT interrupt the current task just to collect preferences.
|
||||
- Do NOT proactively greet warmly, build rapport, or ask a long onboarding questionnaire.
|
||||
- Default to a concise, professional style until the user states a preference.
|
||||
- Only ask for preferences when they are directly useful for the current task, or when a short follow-up question at the end would clearly help future interactions.
|
||||
|
||||
**What to collect when useful:**
|
||||
- Preferred communication style or persona preference
|
||||
- Media interests
|
||||
- Quality / codec / subtitle preferences
|
||||
- Any standing rules the user wants you to follow
|
||||
|
||||
**When the user provides lasting preferences**, you MUST promptly save them to `{memory_file}` using `write_file` or `edit_file`.
|
||||
|
||||
**Memory format requirements:**
|
||||
- Use clean Markdown with short sections.
|
||||
- Record only durable preferences and working rules.
|
||||
- Do NOT invent personal details or preferred names.
|
||||
- Do NOT force use of a nickname or personalized greeting.
|
||||
</memory_onboarding>
|
||||
|
||||
<memory_guidelines>
|
||||
Your memory directory is at: {memory_dir}. You can save new knowledge by calling the `edit_file` or `write_file` tool on any `.md` file in this directory.
|
||||
|
||||
**Memory file organization:**
|
||||
- `MEMORY.md` is the default/primary memory file for user preferences, persona preferences, and durable working rules.
|
||||
- You may create additional `.md` files to organize knowledge by topic.
|
||||
- All `.md` files directly in the memory directory are automatically loaded on each conversation.
|
||||
|
||||
**Learning from feedback:**
|
||||
- One of your MAIN PRIORITIES is to learn from your interactions with the user. These learnings can be implicit or explicit. This means that in the future, you will remember this important information.
|
||||
- When you need to remember something, updating memory must be your FIRST, IMMEDIATE action - before responding to the user, before calling other tools, before doing anything else. Just update memory immediately.
|
||||
- When user says something is better/worse, capture WHY and encode it as a pattern.
|
||||
- Each correction is a chance to improve permanently - don't just fix the immediate issue, update your instructions.
|
||||
- The user might not explicitly ask you to remember something, but if they provide information that is useful for future use, you should update your memories immediately.
|
||||
|
||||
**When to update memories:**
|
||||
- When the user explicitly asks you to remember something
|
||||
- When the user gives durable communication or reply-format preferences
|
||||
- When the user gives feedback on your work
|
||||
- When the user provides information required for tool use
|
||||
- When you discover new user-specific patterns or preferences
|
||||
|
||||
**When to NOT update memories:**
|
||||
- Temporary/transient information
|
||||
- One-time task requests
|
||||
- Simple questions, acknowledgments, or small talk
|
||||
- Memory may refine user-facing style, but it must NOT redefine the agent's core identity, safety boundaries, or global system-task rules
|
||||
- If the user wants a built-in speaking style/persona, prefer the dedicated persona-switching tools instead of rewriting memory as a substitute
|
||||
- Never store API keys, access tokens, passwords, or credentials
|
||||
- Do NOT record daily activities in memory files — those go to the activity log
|
||||
</memory_guidelines>
|
||||
"""
|
||||
|
||||
|
||||
class MemoryMiddleware(AgentMiddleware[MemoryState, ContextT, ResponseT]):  # noqa
    """Middleware that loads every Markdown file in the agent memory directory.

    Scans the configured directory for ``.md`` files, loads their contents
    and injects them into the system prompt. Multi-file organization is
    supported: users may create several ``.md`` files to group knowledge by
    topic.

    Args:
        memory_dir: Path of the memory directory. A dedicated directory such
            as ``config/agent/memory`` is recommended so memory is not mixed
            with core rules or persona definitions.
    """

    state_schema = MemoryState

    def __init__(
        self,
        *,
        memory_dir: str,
    ) -> None:
        """Initialize the memory middleware.

        Args:
            memory_dir: Memory directory path (e.g. ``"/config/agent/memory"``).
                Every ``.md`` file directly inside it is loaded as memory.
        """
        self.memory_dir = memory_dir
        self.default_memory_file = str(AsyncPath(memory_dir) / DEFAULT_MEMORY_FILE)

    @staticmethod
    def _is_memory_empty(contents: dict[str, str]) -> bool:
        """Return True when there is no usable memory content.

        Args:
            contents: Mapping of source path to file content.

        Returns:
            True when the mapping is empty or every value is blank.
        """
        if not contents:
            return True
        return all(not content.strip() for content in contents.values())

    def _onboarding_prompt(self) -> str:
        """Render the onboarding prompt shown when no memory exists yet.

        Extracted helper: the same construction was previously duplicated in
        two branches of ``_format_agent_memory``.
        """
        return MEMORY_ONBOARDING_PROMPT.format(
            memory_dir=self.memory_dir,
            memory_file=self.default_memory_file,
        )

    def _format_agent_memory(
        self, contents: dict[str, str], memory_empty: bool = False
    ) -> str:
        """Format memory as location/content pairs for the system prompt.

        Returns the onboarding prompt when memory is empty (to steer the
        agent into collecting user preferences); otherwise returns the
        standard memory system prompt containing every loaded file.

        Args:
            contents: Mapping of source path to file content.
            memory_empty: Pre-computed "memory is empty" flag.

        Returns:
            Formatted string wrapped in <agent_memory> tags.
        """
        if memory_empty or self._is_memory_empty(contents):
            return self._onboarding_prompt()

        # Sort so that MEMORY.md (the primary memory file) always comes first.
        sorted_paths = sorted(
            (p for p in contents if contents[p].strip()),
            key=lambda p: (0 if AsyncPath(p).name == DEFAULT_MEMORY_FILE else 1, p),
        )

        # Defensive guard: effectively unreachable — the emptiness check above
        # already guarantees at least one non-blank file — but kept so a logic
        # change upstream cannot produce a malformed prompt.
        if not sorted_paths:
            return self._onboarding_prompt()

        sections = [
            f"### {AsyncPath(path).name}\n**Path:** `{path}`\n\n{contents[path]}"
            for path in sorted_paths
        ]
        return MEMORY_SYSTEM_PROMPT.format(
            agent_memory="\n\n---\n\n".join(sections),
            memory_dir=self.memory_dir,
        )

    async def _scan_memory_files(self) -> list[str]:
        """Scan the memory directory for ``.md`` files.

        Only files directly inside the directory are considered — no
        recursion into subdirectories. Size limits are enforced later at
        read time, not here.

        Returns:
            List of discovered ``.md`` file paths.
        """
        dir_path = AsyncPath(self.memory_dir)
        if not await dir_path.exists():
            return []

        md_files: list[str] = []
        async for entry in dir_path.iterdir():
            if await entry.is_file() and entry.name.lower().endswith(".md"):
                md_files.append(str(entry))

        return md_files

    async def abefore_agent(  # noqa
        self,
        state: MemoryState,
        runtime: Runtime,  # noqa
        config: RunnableConfig,
    ) -> MemoryStateUpdate | None:
        """Scan the memory directory and load every ``.md`` file before the agent runs.

        Loads file contents into the state only if they are not present yet,
        and computes the ``memory_empty`` flag so the system prompt can
        trigger the onboarding flow for first-time users.

        Args:
            state: Current agent state.
            runtime: Runtime context.
            config: Runnable configuration.

        Returns:
            A state update carrying ``memory_contents`` and ``memory_empty``,
            or ``None`` when memory was already loaded.
        """
        # Already loaded earlier in this run: nothing to do.
        if "memory_contents" in state:
            return None

        md_files = await self._scan_memory_files()

        # NOTE: was annotated `Dict[str, str]` while the rest of the module
        # uses the builtin generic — unified to `dict`.
        contents: dict[str, str] = {}
        for path in md_files:
            file_path = AsyncPath(path)
            try:
                # Skip oversized files to avoid blowing up the model context.
                stat = await file_path.stat()
                if stat.st_size > MAX_MEMORY_FILE_SIZE:
                    logger.warning(
                        "Skipping memory file %s: too large (%d bytes, max %d)",
                        path,
                        stat.st_size,
                        MAX_MEMORY_FILE_SIZE,
                    )
                    continue
                contents[path] = await file_path.read_text(encoding="utf-8")
                logger.debug("Loaded memory from: %s", path)
            except Exception as e:
                # Best effort: a broken file must not abort the whole load.
                logger.warning("Failed to read memory file %s: %s", path, e)

        if contents:
            logger.info(
                "Loaded %d memory file(s) from %s: %s",
                len(contents),
                self.memory_dir,
                [AsyncPath(p).name for p in contents],
            )

        # Empty memory (no files, or only blank files) activates onboarding.
        is_empty = self._is_memory_empty(contents)
        if is_empty:
            logger.info(
                "Memory is empty, onboarding prompt will be activated for user preference collection."
            )

        return MemoryStateUpdate(memory_contents=contents, memory_empty=is_empty)

    def modify_request(self, request: ModelRequest[ContextT]) -> ModelRequest[ContextT]:
        """Inject the loaded memory into the request's system message.

        Args:
            request: The model request to modify.

        Returns:
            The request with memory appended to its system message.
        """
        contents = request.state.get("memory_contents", {})  # noqa
        memory_empty = request.state.get("memory_empty", False)  # noqa
        agent_memory = self._format_agent_memory(contents, memory_empty=memory_empty)

        new_system_message = append_to_system_message(
            request.system_message, agent_memory
        )

        return request.override(system_message=new_system_message)

    async def awrap_model_call(
        self,
        request: ModelRequest[ContextT],
        handler: Callable[
            [ModelRequest[ContextT]], Awaitable[ModelResponse[ResponseT]]
        ],
    ) -> ModelResponse[ResponseT]:
        """Asynchronously wrap the model call, injecting memory into the prompt.

        Args:
            request: The model request being processed.
            handler: Async handler invoked with the modified request.

        Returns:
            The model response from the handler.
        """
        return await handler(self.modify_request(request))
|
||||
43
app/agent/middleware/patch_tool_calls.py
Normal file
43
app/agent/middleware/patch_tool_calls.py
Normal file
@@ -0,0 +1,43 @@
|
||||
from typing import Any
|
||||
|
||||
from langchain.agents.middleware import AgentMiddleware, AgentState
|
||||
from langchain_core.messages import AIMessage, ToolMessage
|
||||
from langgraph.runtime import Runtime
|
||||
from langgraph.types import Overwrite
|
||||
|
||||
|
||||
class PatchToolCallsMiddleware(AgentMiddleware):
    """Middleware that repairs dangling tool calls in the message history.

    A tool call is "dangling" when an ``AIMessage`` requests it but no
    matching ``ToolMessage`` appears later in the history (e.g. the user sent
    a new message before the tool run finished). Such histories are rejected
    by most chat model providers, so a synthetic cancellation ``ToolMessage``
    is inserted for each dangling call.
    """

    def before_agent(self, state: AgentState, runtime: Runtime[Any]) -> dict[str, Any] | None:  # noqa: ARG002
        """Patch dangling tool calls in any AIMessage before the agent runs."""
        messages = state["messages"]
        # `not messages` already covers the empty case; the original also
        # checked `len(messages) == 0` redundantly.
        if not messages:
            return None

        patched_messages: list[Any] = []
        for index, message in enumerate(messages):
            patched_messages.append(message)
            if not (isinstance(message, AIMessage) and message.tool_calls):
                continue

            # Collect the ids answered by ToolMessages at or after this
            # AIMessage once, instead of rescanning the suffix for every
            # tool call (and without shadowing the loop variable, as the
            # original generator did with `msg`).
            answered_ids = {
                m.tool_call_id  # ty: ignore[unresolved-attribute]
                for m in messages[index:]
                if m.type == "tool"
            }
            for tool_call in message.tool_calls:
                if tool_call["id"] in answered_ids:
                    continue
                # Dangling tool call: synthesize the missing ToolMessage.
                patched_messages.append(
                    ToolMessage(
                        content=(
                            f"Tool call {tool_call['name']} with id {tool_call['id']} was "
                            "cancelled - another message came in before it could be completed."
                        ),
                        name=tool_call["name"],
                        tool_call_id=tool_call["id"],
                    )
                )

        return {"messages": Overwrite(patched_messages)}
|
||||
42
app/agent/middleware/runtime_config.py
Normal file
42
app/agent/middleware/runtime_config.py
Normal file
@@ -0,0 +1,42 @@
|
||||
"""动态注入 Agent 根层运行时配置的中间件。"""
|
||||
|
||||
from collections.abc import Awaitable, Callable
|
||||
|
||||
from langchain.agents.middleware.types import (
|
||||
AgentMiddleware,
|
||||
ContextT,
|
||||
ModelRequest,
|
||||
ModelResponse,
|
||||
ResponseT,
|
||||
)
|
||||
|
||||
from app.agent.middleware.utils import append_to_system_message
|
||||
from app.agent.runtime import agent_runtime_manager
|
||||
|
||||
|
||||
class RuntimeConfigMiddleware(AgentMiddleware[dict, ContextT, ResponseT]):  # noqa
    """Reload the runtime configuration before every model call.

    The result is deliberately not cached in middleware state: when a
    persona-switching tool updates ``CURRENT_PERSONA`` during a single agent
    run, subsequent model calls must observe the new persona immediately.
    """

    def modify_request(self, request: ModelRequest[ContextT]) -> ModelRequest[ContextT]:  # noqa
        # Load fresh on every call so in-run persona changes take effect.
        sections = agent_runtime_manager.load_runtime_config().render_prompt_sections()
        return request.override(
            system_message=append_to_system_message(request.system_message, sections)
        )

    async def awrap_model_call(
        self,
        request: ModelRequest[ContextT],
        handler: Callable[
            [ModelRequest[ContextT]], Awaitable[ModelResponse[ResponseT]]
        ],
    ) -> ModelResponse[ResponseT]:
        """Apply the runtime configuration, then delegate to the handler."""
        return await handler(self.modify_request(request))
|
||||
|
||||
|
||||
__all__ = ["RuntimeConfigMiddleware"]
|
||||
525
app/agent/middleware/skills.py
Normal file
525
app/agent/middleware/skills.py
Normal file
@@ -0,0 +1,525 @@
|
||||
import re
|
||||
import shutil
|
||||
from collections.abc import Awaitable, Callable
|
||||
from pathlib import Path
|
||||
from typing import Annotated, List
|
||||
from typing import NotRequired, TypedDict
|
||||
|
||||
import yaml # noqa
|
||||
from anyio import Path as AsyncPath
|
||||
from langchain.agents.middleware.types import (
|
||||
AgentMiddleware,
|
||||
AgentState,
|
||||
ContextT,
|
||||
ModelRequest,
|
||||
ModelResponse,
|
||||
ResponseT,
|
||||
)
|
||||
from langchain.agents.middleware.types import PrivateStateAttr # noqa
|
||||
from langchain_core.runnables import RunnableConfig
|
||||
from langgraph.runtime import Runtime
|
||||
|
||||
from app.agent.middleware.utils import append_to_system_message
|
||||
from app.log import logger
|
||||
|
||||
# 安全提示: SKILL.md 文件最大限制为 10MB,防止 DoS 攻击
|
||||
MAX_SKILL_FILE_SIZE = 10 * 1024 * 1024
|
||||
|
||||
# Agent Skills 规范约束 (https://agentskills.io/specification)
|
||||
MAX_SKILL_NAME_LENGTH = 64
|
||||
MAX_SKILL_DESCRIPTION_LENGTH = 1024
|
||||
MAX_SKILL_COMPATIBILITY_LENGTH = 500
|
||||
|
||||
|
||||
class SkillMetadata(TypedDict):
|
||||
"""Skill 元数据,符合 Agent Skills 规范。"""
|
||||
|
||||
path: str
|
||||
"""SKILL.md 文件路径。"""
|
||||
|
||||
id: str
|
||||
"""Skill 标识符。
|
||||
约束: 1-64 字符,仅限小写字母/数字/连字符,不能以连字符开头或结尾,无连续连字符,需与父目录名一致。
|
||||
"""
|
||||
|
||||
name: str
|
||||
"""Skill 名称。
|
||||
约束: Skill中文描述。
|
||||
"""
|
||||
|
||||
version: int
|
||||
"""Skill 版本号。
|
||||
用于内置技能的版本管理,同步时比较版本号决定是否覆盖用户目录中的旧版本。
|
||||
"""
|
||||
|
||||
description: str
|
||||
"""Skill 功能描述。
|
||||
约束: 1-1024 字符,应说明功能及适用场景。
|
||||
"""
|
||||
|
||||
license: str | None
|
||||
"""许可证信息。"""
|
||||
|
||||
compatibility: str | None
|
||||
"""环境依赖或兼容性要求 (最多 500 字符)。"""
|
||||
|
||||
metadata: dict[str, str]
|
||||
"""附加元数据。"""
|
||||
|
||||
allowed_tools: list[str]
|
||||
"""(实验性) Skill 建议使用的工具列表。"""
|
||||
|
||||
|
||||
class SkillsState(AgentState):
    """State carried by the skills middleware."""

    # Loaded skill metadata; private, not propagated to the parent agent.
    skills_metadata: NotRequired[Annotated[list[SkillMetadata], PrivateStateAttr]]
|
||||
|
||||
|
||||
class SkillsStateUpdate(TypedDict):
    """State update payload produced by the skills middleware."""

    # Skill metadata entries to merge into the state.
    skills_metadata: list[SkillMetadata]
|
||||
|
||||
|
||||
def _parse_skill_metadata(  # noqa: C901
    content: str,
    skill_path: str,
    skill_id: str,
) -> SkillMetadata | None:
    """Parse and validate the YAML frontmatter of a SKILL.md file.

    Args:
        content: Raw text of the SKILL.md file.
        skill_path: Path of the file (used for log messages and metadata).
        skill_id: Skill identifier (the parent directory name).

    Returns:
        Validated metadata, or ``None`` when the file is too large, lacks
        frontmatter, has invalid YAML, or misses a required field.
    """
    # DoS guard: refuse oversized skill files outright.
    if len(content) > MAX_SKILL_FILE_SIZE:
        logger.warning(
            "Skipping %s: content too large (%d bytes)", skill_path, len(content)
        )
        return None

    # Frontmatter is delimited by `---` lines at the very top of the file.
    match = re.match(r"^---\s*\n(.*?)\n---\s*\n", content, re.DOTALL)
    if not match:
        logger.warning("Skipping %s: no valid YAML frontmatter found", skill_path)
        return None

    try:
        frontmatter_data = yaml.safe_load(match.group(1))
    except yaml.YAMLError as e:
        logger.warning("Invalid YAML in %s: %s", skill_path, e)
        return None

    if not isinstance(frontmatter_data, dict):
        logger.warning("Skipping %s: frontmatter is not a mapping", skill_path)
        return None

    # `name` and `description` are mandatory per the spec.
    name = str(frontmatter_data.get("name", "")).strip()
    description = str(frontmatter_data.get("description", "")).strip()
    if not name or not description:
        logger.warning(
            "Skipping %s: missing required 'name' or 'description'", skill_path
        )
        return None
    # Enforce the spec length limits. MAX_SKILL_NAME_LENGTH was previously
    # declared but never applied; apply it the same way as the description
    # limit (warn + truncate). Also drops the redundant `description_str`
    # alias the original carried.
    if len(name) > MAX_SKILL_NAME_LENGTH:
        logger.warning(
            "Name exceeds %d characters in %s, truncating",
            MAX_SKILL_NAME_LENGTH,
            skill_path,
        )
        name = name[:MAX_SKILL_NAME_LENGTH]
    if len(description) > MAX_SKILL_DESCRIPTION_LENGTH:
        logger.warning(
            "Description exceeds %d characters in %s, truncating",
            MAX_SKILL_DESCRIPTION_LENGTH,
            skill_path,
        )
        description = description[:MAX_SKILL_DESCRIPTION_LENGTH]

    # Optional tool list; space-separated, tolerating trailing commas
    # (Claude Code style).
    raw_tools = frontmatter_data.get("allowed-tools")
    if isinstance(raw_tools, str):
        allowed_tools = [t.strip(",") for t in raw_tools.split() if t.strip(",")]
    else:
        if raw_tools is not None:
            logger.warning(
                "Ignoring non-string 'allowed-tools' in %s (got %s)",
                skill_path,
                type(raw_tools).__name__,
            )
        allowed_tools = []

    # Optional compatibility note, capped at 500 characters.
    compatibility_str = str(frontmatter_data.get("compatibility", "")).strip() or None
    if compatibility_str and len(compatibility_str) > MAX_SKILL_COMPATIBILITY_LENGTH:
        logger.warning(
            "Compatibility exceeds %d characters in %s, truncating",
            MAX_SKILL_COMPATIBILITY_LENGTH,
            skill_path,
        )
        compatibility_str = compatibility_str[:MAX_SKILL_COMPATIBILITY_LENGTH]

    # Version defaults to 0, meaning "unversioned".
    raw_version = frontmatter_data.get("version")
    version = 0
    if raw_version is not None:
        try:
            version = int(raw_version)
        except (ValueError, TypeError):
            logger.warning(
                "Invalid 'version' in %s (got %r), defaulting to 0",
                skill_path,
                raw_version,
            )

    return SkillMetadata(
        id=skill_id,
        name=name,
        version=version,
        description=description,
        path=skill_path,
        metadata=_validate_metadata(frontmatter_data.get("metadata", {}), skill_path),
        license=str(frontmatter_data.get("license", "")).strip() or None,
        compatibility=compatibility_str,
        allowed_tools=allowed_tools,
    )
|
||||
|
||||
|
||||
def _validate_metadata(
|
||||
raw: object,
|
||||
skill_path: str,
|
||||
) -> dict[str, str]:
|
||||
"""验证并规范化 YAML 前言中的元数据字段,确保为 dict[str, str] 类型。"""
|
||||
if not isinstance(raw, dict):
|
||||
if raw:
|
||||
logger.warning(
|
||||
"Ignoring non-dict metadata in %s (got %s)",
|
||||
skill_path,
|
||||
type(raw).__name__,
|
||||
)
|
||||
return {}
|
||||
return {str(k): str(v) for k, v in raw.items()}
|
||||
|
||||
|
||||
def _format_skill_annotations(skill: SkillMetadata) -> str:
    """Build the "License/Compatibility" annotation string for a skill entry."""
    notes: list[str] = []
    license_value = skill.get("license")
    if license_value:
        notes.append(f"License: {license_value}")
    compatibility_value = skill.get("compatibility")
    if compatibility_value:
        notes.append(f"Compatibility: {compatibility_value}")
    return ", ".join(notes)
|
||||
|
||||
|
||||
async def _alist_skills(source_path: AsyncPath) -> list[SkillMetadata]:
    """Asynchronously list all skills found under ``source_path``.

    A skill is any direct subdirectory containing a ``SKILL.md`` file.
    Unreadable files and files failing metadata validation are skipped (and
    logged) instead of aborting the whole scan — previously a single failing
    ``read_text`` raised and dropped every remaining skill.
    """
    # Find candidate skill directories (those holding a SKILL.md).
    skill_dirs: List[AsyncPath] = []
    async for path in source_path.iterdir():
        if await path.is_dir() and await (path / "SKILL.md").is_file():
            skill_dirs.append(path)

    skills: list[SkillMetadata] = []
    for skill_dir in skill_dirs:
        skill_md_path = skill_dir / "SKILL.md"

        try:
            skill_content = await skill_md_path.read_text(encoding="utf-8")
        except Exception as e:
            # One broken file must not take down the entire skill scan.
            logger.warning("Failed to read skill file %s: %s", skill_md_path, e)
            continue

        # Parse and validate the frontmatter metadata.
        skill_metadata = _parse_skill_metadata(
            content=skill_content,
            skill_path=str(skill_md_path),
            skill_id=skill_dir.name,
        )
        if skill_metadata:
            skills.append(skill_metadata)

    return skills
|
||||
|
||||
|
||||
SKILLS_SYSTEM_PROMPT = """
|
||||
<skills_system>
|
||||
You have access to a skills library that provides specialized capabilities and domain knowledge.
|
||||
|
||||
{skills_locations}
|
||||
|
||||
**Available Skills:**
|
||||
|
||||
{skills_list}
|
||||
|
||||
**How to Use Skills (Progressive Disclosure):**
|
||||
|
||||
Skills follow a **progressive disclosure** pattern - you see their name and description above, but only read full instructions when needed:
|
||||
|
||||
1. **Recognize when a skill applies**: Check if the user's task matches a skill's description
|
||||
2. **Read the skill's full instructions**: Use the path shown in the skill list above
|
||||
3. **Follow the skill's instructions**: SKILL.md contains step-by-step workflows, best practices, and examples
|
||||
4. **Access supporting files**: Skills may include helper scripts, configs, or reference docs - use absolute paths
|
||||
|
||||
**Creating New Skills:**
|
||||
|
||||
When you identify a repetitive complex workflow or specialized task that would benefit from being a skill, you can create one:
|
||||
|
||||
1. **Directory Structure**: Create a new directory in one of the skills locations. The directory name is the `skill-id`.
|
||||
- Path format: `<skills_location>/<skill-id>/SKILL.md`
|
||||
- `skill-id` constraints: 1-64 characters, lowercase letters, numbers, and hyphens only.
|
||||
2. **SKILL.md Format**: Must start with a YAML frontmatter followed by markdown instructions.
|
||||
```markdown
|
||||
---
|
||||
name: Brief tool name (Chinese)
|
||||
description: Detailed functional description and use cases (1-1024 chars)
|
||||
allowed-tools: "tool1 tool2" (optional, space-separated list of recommended tools)
|
||||
compatibility: "Environment requirements" (optional, max 500 chars)
|
||||
---
|
||||
# Skill Instructions
|
||||
Step-by-step workflows, best practices, and examples go here.
|
||||
```
|
||||
3. **Supporting Files**: You can add `.py` scripts, `.yaml` configs, or other files within the same skill directory. Reference them using absolute paths in `SKILL.md`.
|
||||
|
||||
**When to Use Skills:**
|
||||
- User's request matches a skill's domain (e.g., "research X" -> web-research skill)
|
||||
- You need specialized knowledge or structured workflows
|
||||
- A skill provides proven patterns for complex tasks
|
||||
|
||||
**Executing Skill Scripts:**
|
||||
Skills may contain Python scripts or other executable files. Always use absolute paths from the skill list.
|
||||
|
||||
**Example Workflow:**
|
||||
|
||||
User: "Can you research the latest developments in quantum computing?"
|
||||
|
||||
1. Check available skills -> See "web-research" skill with its path
|
||||
2. Read the skill using the path shown
|
||||
3. Follow the skill's research workflow (search -> organize -> synthesize)
|
||||
4. Use any helper scripts with absolute paths
|
||||
|
||||
Remember: Skills make you more capable and consistent. When in doubt, check if a skill exists for the task!
|
||||
</skills_system>
|
||||
"""
|
||||
|
||||
|
||||
def _extract_version(skill_md: Path) -> int:
    """Quickly extract the ``version`` field from a SKILL.md file.

    Returns 0 whenever the file cannot be read, has no frontmatter, the
    YAML is invalid, or the version is missing or non-numeric.
    """
    try:
        content = skill_md.read_text(encoding="utf-8")
    except Exception as err:
        # Was a bare `print(err)`: route diagnostics through the module
        # logger like the rest of this file instead of writing to stdout.
        logger.warning("Failed to read %s: %s", skill_md, err)
        return 0
    match = re.match(r"^---\s*\n(.*?)\n---\s*\n", content, re.DOTALL)
    if not match:
        return 0
    try:
        frontmatter = yaml.safe_load(match.group(1))
    except yaml.YAMLError:
        return 0
    if not isinstance(frontmatter, dict):
        return 0
    raw = frontmatter.get("version")
    if raw is None:
        return 0
    try:
        return int(raw)
    except (ValueError, TypeError):
        return 0
|
||||
|
||||
|
||||
def _sync_bundled_skills(bundled_dir: Path, target_dir: Path) -> None:
    """Sync the project's bundled skills into the user skill directory.

    - A skill missing from the target directory is copied over as-is.
    - An existing skill is overwritten only when the bundled SKILL.md carries
      a strictly higher ``version`` than the user's copy; a bundled skill
      without a version field (treated as 0) never overwrites.

    Parameters
    ----------
    bundled_dir : Path
        Directory of bundled skills (e.g. ``ROOT_PATH / "skills"``).
    target_dir : Path
        User-configured skill directory (e.g. ``CONFIG_PATH / "agent" / "skills"``).
    """
    if not bundled_dir.is_dir():
        return

    target_dir.mkdir(parents=True, exist_ok=True)

    for skill_src in bundled_dir.iterdir():
        skill_md = skill_src / "SKILL.md"
        # Only directories that actually contain a SKILL.md are skills.
        if not (skill_src.is_dir() and skill_md.is_file()):
            continue

        skill_dst = target_dir / skill_src.name

        if not skill_dst.exists():
            # First install: copy the whole skill directory.
            try:
                shutil.copytree(str(skill_src), str(skill_dst))
                logger.info(
                    "已自动复制内置技能 '%s' -> '%s'", skill_src.name, skill_dst
                )
            except Exception as e:
                logger.warning("复制内置技能 '%s' 失败: %s", skill_src.name, e)
            continue

        # Already installed: compare version numbers.
        bundled_version = _extract_version(skill_md)
        if bundled_version <= 0:
            # Unversioned bundled skill: keep the legacy never-overwrite rule.
            continue

        user_skill_md = skill_dst / "SKILL.md"
        user_version = _extract_version(user_skill_md) if user_skill_md.is_file() else 0

        if bundled_version <= user_version:
            # User copy is the same or newer: leave it untouched.
            continue

        # Bundled copy is newer: remove the old version, then copy.
        try:
            shutil.rmtree(str(skill_dst))
            shutil.copytree(str(skill_src), str(skill_dst))
            logger.info(
                "已更新内置技能 '%s' (v%d -> v%d)",
                skill_src.name,
                user_version,
                bundled_version,
            )
        except Exception as e:
            logger.warning("更新内置技能 '%s' 失败: %s", skill_src.name, e)
|
||||
|
||||
|
||||
class SkillsMiddleware(AgentMiddleware[SkillsState, ContextT, ResponseT]):  # noqa
    """Middleware that loads Agent Skills and injects them into the system prompt.

    Skills are loaded in source order; a later source overrides an earlier
    one with the same name.  On startup, the project's bundled skills
    (``bundled_skills_dir``) are synced into the user skills directory.
    """

    state_schema = SkillsState

    def __init__(
        self,
        *,
        sources: list[str],
        bundled_skills_dir: str | None = None,
    ) -> None:
        """Initialize the skills middleware.

        Parameters
        ----------
        sources : list[str]
            User skill directories.
        bundled_skills_dir : str | None
            Path to the project's bundled skills directory.  When provided,
            skills missing from the first entry of ``sources`` are copied
            there automatically before the first load.
        """
        self.sources = sources
        self.bundled_skills_dir = bundled_skills_dir
        self.system_prompt_template = SKILLS_SYSTEM_PROMPT

    def _format_skills_locations(self) -> str:
        """Format the skill locations block for the system prompt."""
        locations = []

        # Later sources override earlier ones, hence the last entry is
        # labelled "(higher priority)".
        for i, source_path in enumerate(self.sources):
            suffix = " (higher priority)" if i == len(self.sources) - 1 else ""
            locations.append(f"**MoviePilot Skills**: `{source_path}`{suffix}")

        return "\n".join(locations)

    def _format_skills_list(self, skills: list[SkillMetadata]) -> str:
        """Format the skill metadata list for the system prompt."""
        if not skills:
            paths = [f"{source_path}" for source_path in self.sources]
            return f"(No skills available yet. You can create skills in {' or '.join(paths)})"

        lines = []
        for skill in skills:
            annotations = _format_skill_annotations(skill)
            desc_line = f"- **{skill['id']}**: {skill['name']} - {skill['description']}"
            if annotations:
                desc_line += f" ({annotations})"
            lines.append(desc_line)
            if skill["allowed_tools"]:
                lines.append(f" -> Allowed tools: {', '.join(skill['allowed_tools'])}")
            lines.append(f" -> Read `{skill['path']}` for full instructions")

        return "\n".join(lines)

    def modify_request(self, request: ModelRequest[ContextT]) -> ModelRequest[ContextT]:
        """Inject the skills documentation into the request's system message."""
        skills_metadata = request.state.get("skills_metadata", [])  # noqa
        skills_locations = self._format_skills_locations()
        skills_list = self._format_skills_list(skills_metadata)

        skills_section = self.system_prompt_template.format(
            skills_locations=skills_locations,
            skills_list=skills_list,
        )

        new_system_message = append_to_system_message(
            request.system_message, skills_section
        )

        return request.override(system_message=new_system_message)

    async def abefore_agent(  # noqa
        self, state: SkillsState, runtime: Runtime, config: RunnableConfig
    ) -> SkillsStateUpdate | None:  # ty: ignore[invalid-method-override]
        """Asynchronously load skill metadata before the agent runs.

        Loaded only once per session; skipped when already present in state.
        On first load, bundled skills are synced into the user directory
        first (when absent or outdated there).
        """
        # Metadata already present in state: nothing to do.
        if "skills_metadata" in state:
            return None

        # Auto-sync bundled skills into the first user skills directory.
        if self.bundled_skills_dir and self.sources:
            bundled = Path(self.bundled_skills_dir)
            target = Path(self.sources[0])
            try:
                _sync_bundled_skills(bundled, target)
            except Exception as e:
                logger.warning("同步内置技能失败: %s", e)

        all_skills: dict[str, SkillMetadata] = {}

        # Load skills from each source in order; on name clashes the later
        # source wins.
        for source_path in self.sources:
            skill_source_path = AsyncPath(source_path)
            if not await skill_source_path.exists():
                await skill_source_path.mkdir(parents=True, exist_ok=True)
                continue
            source_skills = await _alist_skills(skill_source_path)
            for skill in source_skills:
                # NOTE(review): keyed by "name" while _format_skills_list
                # displays "id" as the identifier — confirm the two agree
                # for the intended override semantics.
                all_skills[skill["name"]] = skill

        skills = list(all_skills.values())
        return SkillsStateUpdate(skills_metadata=skills)

    async def awrap_model_call(
        self,
        request: ModelRequest[ContextT],
        handler: Callable[
            [ModelRequest[ContextT]], Awaitable[ModelResponse[ResponseT]]
        ],
    ) -> ModelResponse[ResponseT]:
        """Inject the skills documentation at model-call time."""
        modified_request = self.modify_request(request)
        return await handler(modified_request)
|
||||
|
||||
|
||||
# Public names exported by this module.
__all__ = ["SkillMetadata", "SkillsMiddleware"]
|
||||
549
app/agent/middleware/tool_selection.py
Normal file
549
app/agent/middleware/tool_selection.py
Normal file
@@ -0,0 +1,549 @@
|
||||
"""MoviePilot 自定义工具筛选中间件。"""
|
||||
|
||||
import json
|
||||
from collections.abc import Awaitable, Callable
|
||||
from dataclasses import dataclass
|
||||
from typing import Annotated, Any, Literal, Union, NotRequired
|
||||
|
||||
from langchain.agents.middleware.types import (
|
||||
AgentMiddleware,
|
||||
AgentState,
|
||||
ContextT,
|
||||
ModelRequest,
|
||||
ModelResponse,
|
||||
ResponseT,
|
||||
)
|
||||
from langchain.agents.middleware.types import (
|
||||
PrivateStateAttr, # noqa
|
||||
)
|
||||
from langchain_core.language_models.chat_models import BaseChatModel
|
||||
from langchain_core.messages import HumanMessage
|
||||
from langchain_core.runnables import RunnableConfig
|
||||
from langchain_core.tools import BaseTool
|
||||
from langgraph.runtime import Runtime
|
||||
from pydantic import Field, TypeAdapter
|
||||
from typing_extensions import TypedDict # noqa
|
||||
|
||||
from app.log import logger
|
||||
|
||||
# Base system prompt for the selector LLM; extended at runtime with
# max-tools and (for DeepSeek) explicit JSON-format instructions.
DEFAULT_SYSTEM_PROMPT = (
    "Your goal is to select the most relevant tools for answering the user's query."
)
|
||||
|
||||
|
||||
@dataclass
class _SelectionRequest:
    """Prepared inputs for tool selection."""

    # Tools the selector model may choose from (always-include tools excluded).
    available_tools: list[BaseTool]
    # System prompt for the selector call, including any max-tools note.
    system_message: str
    # Most recent human message; used as the selection query.
    last_user_message: HumanMessage
    # Model that performs the selection.
    model: BaseChatModel
    # Names of `available_tools`, used to validate the model's picks.
    valid_tool_names: list[str]
|
||||
|
||||
|
||||
def _create_tool_selection_response(tools: list[BaseTool]) -> TypeAdapter[Any]:
    """Build a structured-output schema for tool selection.

    Args:
        tools: Available tools; each becomes a `Literal` alternative that
            carries the tool's description.

    Returns:
        `TypeAdapter` for a TypedDict whose single `tools` field is a list
        of those literals.

    Raises:
        AssertionError: If `tools` is empty.
    """
    if not tools:
        msg = "Invalid usage: tools must be non-empty"
        raise AssertionError(msg)

    # One Annotated[Literal[...]] alternative per tool name, e.g.
    # Annotated[Literal["tool1"], Field(description="...")], combined into
    # a Union so only known tool names validate.
    alternatives = tuple(
        Annotated[Literal[tool.name], Field(description=tool.description)]
        for tool in tools  # noqa
    )
    selected_tool_type = Union[alternatives]  # type: ignore[valid-type] # noqa: UP007

    field_description = "Tools to use. Place the most relevant tools first."

    class ToolSelectionResponse(TypedDict):
        """Use to select relevant tools."""

        tools: Annotated[list[selected_tool_type], Field(description=field_description)]  # type: ignore[valid-type]

    return TypeAdapter(ToolSelectionResponse)
|
||||
|
||||
|
||||
def _render_tool_list(tools: list[BaseTool]) -> str:
    """Format tools as a markdown bullet list.

    Args:
        tools: Tools to format.

    Returns:
        Markdown string with one `- name: description` line per tool.
    """
    lines = [f"- {tool.name}: {tool.description}" for tool in tools]
    return "\n".join(lines)
|
||||
|
||||
|
||||
class ToolSelectionState(AgentState):
    """Private state of the tool-selection middleware."""

    selected_tool_names: NotRequired[Annotated[list[str] | None, PrivateStateAttr]]
    """Tool names chosen by the first-pass selection for the current user request."""
|
||||
|
||||
|
||||
class ToolSelectionStateUpdate(TypedDict):
    """State-update payload emitted by the tool-selection middleware."""

    # None means "no selection performed"; later model turns then fall back
    # to the full tool set.
    selected_tool_names: list[str] | None
|
||||
|
||||
|
||||
class ToolSelectorMiddleware(
    AgentMiddleware[AgentState[ResponseT], ContextT, ResponseT]
):
    """
    A more robust tool-selection implementation for DeepSeek-compatible endpoints.

    LangChain's default goes through `with_structured_output()`, i.e. OpenAI's
    `response_format=json_schema` path, but DeepSeek's official OpenAI-compatible
    endpoint only documents support for `json_object` mode.  With
    `deepseek-reasoner` this triggers a 400 during tool selection, so the agent
    fails before it ever executes a tool.

    Therefore, only when a DeepSeek model/endpoint is detected, fall back to an
    explicit JSON output mode:
    1. use `response_format={"type": "json_object"}`;
    2. spell out the required JSON structure in the prompt;
    3. parse `{"tools": [...]}` manually.  All other models keep LangChain's
       default implementation.

    Additionally, LangChain's native tool selection hooks `wrap_model_call` and
    re-selects tools before every model turn of the same user request.  For
    complex tasks with multiple tool rounds this repeats an extra LLM call per
    turn.  This class instead does:
    - `abefore_agent()`: select once when the agent run starts;
    - `awrap_model_call()`: read the first-pass result from `request.state`
      and reuse it.
    """

    state_schema = ToolSelectionState

    def __init__(
        self,
        model: BaseChatModel,
        system_prompt: str = DEFAULT_SYSTEM_PROMPT,
        selection_tools: list[Any] | None = None,
        max_tools: int | None = None,
        always_include: list[str] | None = None,
    ) -> None:
        """Initialize the selector.

        Args:
            model: Model used to perform the selection.
            system_prompt: Base prompt for the selector call.
            selection_tools: Tools eligible for the first-pass selection.
            max_tools: Optional cap on the number of selected tools.
            always_include: Tool names that bypass selection entirely.
        """
        super().__init__()
        self.model = model
        self.system_prompt = system_prompt
        self.max_tools = max_tools
        self.always_include = always_include or []
        self.selection_tools = selection_tools or []

    def _prepare_selection_request(
        self, request: ModelRequest[ContextT]
    ) -> _SelectionRequest | None:
        """Prepare inputs for tool selection.

        Args:
            request: the model request.

        Returns:
            `_SelectionRequest` with prepared inputs, or `None` if no
            selection is needed.

        Raises:
            ValueError: If tools in `always_include` are not found in the request.
            AssertionError: If no user message is found in the request messages.
        """
        # If no tools available, return None
        if not request.tools or len(request.tools) == 0:
            return None

        # Filter to only BaseTool instances (exclude provider-specific tool dicts)
        base_tools = [tool for tool in request.tools if not isinstance(tool, dict)]

        # Validate that always_include tools exist
        if self.always_include:
            available_tool_names = {tool.name for tool in base_tools}
            missing_tools = [
                name for name in self.always_include if name not in available_tool_names
            ]
            if missing_tools:
                msg = (
                    f"Tools in always_include not found in request: {missing_tools}. "
                    f"Available tools: {sorted(available_tool_names)}"
                )
                raise ValueError(msg)

        # Separate tools that are always included from those available for selection
        available_tools = [
            tool for tool in base_tools if tool.name not in self.always_include
        ]

        # If no tools available for selection, return None
        if not available_tools:
            return None

        system_message = self.system_prompt
        # If there's a max_tools limit, append instructions to the system prompt
        if self.max_tools is not None:
            system_message += (
                f"\nIMPORTANT: List the tool names in order of relevance, "
                f"with the most relevant first. "
                f"If you exceed the maximum number of tools, "
                f"only the first {self.max_tools} will be used."
            )

        # Get the last user message from the conversation history
        last_user_message: HumanMessage
        for message in reversed(request.messages):
            if isinstance(message, HumanMessage):
                last_user_message = message
                break
        else:
            msg = "No user message found in request messages"
            raise AssertionError(msg)

        model = self.model or request.model
        valid_tool_names = [tool.name for tool in available_tools]

        return _SelectionRequest(
            available_tools=available_tools,
            system_message=system_message,
            last_user_message=last_user_message,
            model=model,
            valid_tool_names=valid_tool_names,
        )

    def _process_selection_response(
        self,
        response: dict[str, Any],
        available_tools: list[BaseTool],
        valid_tool_names: list[str],
        request: ModelRequest[ContextT],
    ) -> ModelRequest[ContextT]:
        """Process the selection response and return a filtered `ModelRequest`.

        Raises:
            ValueError: If the model selected tool names outside
                `valid_tool_names`.
        """
        selected_tool_names: list[str] = []
        invalid_tool_selections = []

        for tool_name in response["tools"]:
            if tool_name not in valid_tool_names:
                invalid_tool_selections.append(tool_name)
                continue

            # Only add if not already selected and within max_tools limit
            if tool_name not in selected_tool_names and (
                self.max_tools is None or len(selected_tool_names) < self.max_tools
            ):
                selected_tool_names.append(tool_name)

        if invalid_tool_selections:
            msg = f"Model selected invalid tools: {invalid_tool_selections}"
            raise ValueError(msg)

        # Filter tools based on selection and append always-included tools
        if selected_tool_names:
            selected_tools: list[BaseTool] = [
                tool for tool in available_tools if tool.name in selected_tool_names
            ]
        else:
            # Empty selection: do not trim at all, fall back to every
            # available tool rather than leaving the agent tool-less.
            logger.warning("工具筛选结果为空,将恢复使用所有工具。")
            selected_tools = available_tools

        always_included_tools: list[BaseTool] = [
            tool
            for tool in request.tools
            if not isinstance(tool, dict) and tool.name in self.always_include
        ]
        selected_tools.extend(always_included_tools)

        # Also preserve any provider-specific tool dicts from the original request
        provider_tools = [tool for tool in request.tools if isinstance(tool, dict)]

        return request.override(tools=[*selected_tools, *provider_tools])

    @staticmethod
    def _is_deepseek_compatible_model(model: BaseChatModel) -> bool:
        """
        Decide whether this model should take the DeepSeek JSON-compat branch.

        Besides the official `langchain_deepseek` package, users may wire a
        DeepSeek endpoint into `ChatOpenAI` via an OpenAI-compatible config, so
        the check covers module name, model name and base URL to avoid missing
        a case on any single signal.
        """
        module_name = type(model).__module__.lower()
        model_name = (
            str(getattr(model, "model_name", "") or getattr(model, "model", ""))
            .strip()
            .lower()
        )
        base_url = (
            str(getattr(model, "openai_api_base", "") or getattr(model, "api_base", ""))
            .strip()
            .lower()
        )

        return (
            "deepseek" in module_name
            or model_name.startswith("deepseek-")
            or "api.deepseek.com" in base_url
        )

    @staticmethod
    def _extract_text_content(content: Any) -> str:
        """
        Extract plain text from a model response.

        Deliberately independent of the higher-level LLMHelper so the
        middleware stays decoupled from LLM construction logic.  Handles
        str, list-of-blocks and single-block dict content shapes.
        """
        if content is None:
            return ""
        if isinstance(content, str):
            return content
        if isinstance(content, list):
            text_parts: list[str] = []
            for block in content:
                if isinstance(block, str):
                    text_parts.append(block)
                    continue
                if isinstance(block, dict):
                    if block.get("type") == "text" and isinstance(
                        block.get("text"), str
                    ):
                        text_parts.append(block["text"])
                        continue
                    # Untyped block with a text payload: accept it too.
                    if not block.get("type") and isinstance(block.get("text"), str):
                        text_parts.append(block["text"])
            return "".join(text_parts)
        if isinstance(content, dict):
            if content.get("type") == "text" and isinstance(content.get("text"), str):
                return content["text"]
            if not content.get("type") and isinstance(content.get("text"), str):
                return content["text"]
        return ""

    @staticmethod
    def _parse_json_object(text: str) -> dict[str, Any]:
        """
        Parse the JSON returned by the model.

        DeepSeek in JSON mode usually returns pure JSON, but this adds a
        fallback for the occasional code fence or surrounding prose: after a
        direct parse fails, the outermost `{...}` span is re-parsed.

        Raises:
            ValueError: On empty responses or responses whose top level is
                not a JSON object.
        """
        stripped_text = text.strip()
        if not stripped_text:
            raise ValueError("工具筛选返回了空响应")

        try:
            payload = json.loads(stripped_text)
            if isinstance(payload, dict):
                return payload
        except json.JSONDecodeError:
            pass

        start = stripped_text.find("{")
        end = stripped_text.rfind("}")
        if start == -1 or end == -1 or end <= start:
            raise ValueError(f"工具筛选返回的内容不是合法 JSON: {stripped_text}")

        payload = json.loads(stripped_text[start: end + 1])
        if not isinstance(payload, dict):
            raise ValueError("工具筛选 JSON 顶层必须是对象")
        return payload

    @staticmethod
    def _render_tool_list(available_tools: list[Any]) -> str:
        """Render tool names and descriptions as a stable text list."""
        return "\n".join(
            f"- {tool.name}: {tool.description}" for tool in available_tools
        )

    def _build_deepseek_selection_prompt(self, selection_request: Any) -> str:
        """
        Build an explicit JSON-output prompt for DeepSeek.

        DeepSeek's official docs require that, in JSON output mode, the prompt
        explicitly spells out the JSON constraint; otherwise the compatible
        endpoint may return empty or meaningless output.
        """
        limit_instruction = ""
        if self.max_tools:
            limit_instruction = f"- Select up to {self.max_tools} tools. IF NO TOOLS ARE RELEVANT, DO NOT RETURN AN EMPTY ARRAY. SELECT THE MOST APPLICABLE ONES TO ENSURE THE REQUEST IS HANDLED."

        return (
            f"{selection_request.system_message}\n\n"
            "Return the answer in JSON only.\n"
            'Use exactly this shape: {"tools": ["tool_name_1", "tool_name_2"]}\n'
            "Rules:\n"
            "- The `tools` field must be a JSON array of strings.\n"
            "- Only use tool names from the allowed list below.\n"
            "- Order tools by relevance, with the most relevant first.\n"
            f"{limit_instruction}\n"
            "- Do not add explanations, markdown, or extra keys.\n\n"
            "Allowed tools:\n"
            f"{self._render_tool_list(selection_request.available_tools)}"
        )

    def _normalize_selection_response(self, response: Any) -> dict[str, list[str]]:
        """
        Parse and normalize a DeepSeek JSON-mode tool-selection result.

        Non-string entries in the `tools` array are silently dropped.

        Raises:
            ValueError: If the payload lacks a `tools` array.
        """
        content = getattr(response, "content", response)
        text = self._extract_text_content(content)
        logger.debug(f"工具筛选原始响应: {text}")
        payload = self._parse_json_object(text)

        tools = payload.get("tools")
        if not isinstance(tools, list):
            raise ValueError(f"工具筛选 JSON 缺少 `tools` 数组: {payload}")

        normalized_tools = [
            tool_name for tool_name in tools if isinstance(tool_name, str)
        ]
        logger.debug(f"工具筛选标准化结果: {normalized_tools}")
        return {"tools": normalized_tools}

    async def _aselect_tools_with_deepseek(
        self, selection_request: Any
    ) -> dict[str, list[str]]:
        """
        Run async tool selection via DeepSeek-compatible JSON output mode.
        """
        logger.debug("工具筛选走 DeepSeek JSON 兼容分支")
        structured_model = selection_request.model.bind(
            response_format={"type": "json_object"}
        )
        response = await structured_model.ainvoke(
            [
                {
                    "role": "system",
                    "content": self._build_deepseek_selection_prompt(selection_request),
                },
                selection_request.last_user_message,
            ]
        )
        return self._normalize_selection_response(response)

    @staticmethod
    def _extract_selected_tool_names(request: ModelRequest) -> list[str]:
        """Extract final tool names from a filtered request, preserving order."""
        return [tool.name for tool in request.tools if not isinstance(tool, dict)]

    @staticmethod
    def _apply_selected_tools(
        request: ModelRequest[ContextT],
        selected_tool_names: list[str],
    ) -> ModelRequest[ContextT]:
        """
        Apply an already-computed tool selection to the current model request.

        Only the client-side tool names selected in the first pass are reused;
        provider-specific dict tools are kept verbatim so the
        LangChain/provider tool-binding contract is not disturbed.
        """
        if not selected_tool_names:
            return request

        current_tools_by_name = {
            tool.name: tool for tool in request.tools if not isinstance(tool, dict)
        }
        selected_tools = [
            current_tools_by_name[tool_name]
            for tool_name in selected_tool_names
            if tool_name in current_tools_by_name
        ]
        provider_tools = [tool for tool in request.tools if isinstance(tool, dict)]
        return request.override(tools=[*selected_tools, *provider_tools])

    async def _aselect_request_once(
        self, request: ModelRequest[ContextT]
    ) -> ModelRequest[ContextT]:
        """
        Perform one real tool selection and return the filtered request.

        Kept as a separate helper so the first-pass result can be cached,
        and so the "select once, reuse later" behaviour is easy to test.
        """
        selection_request = self._prepare_selection_request(request)
        if selection_request is None:
            return request

        if not self._is_deepseek_compatible_model(selection_request.model):
            captured_request: ModelRequest[ContextT] = request

            # Delegate to the parent implementation, but only to obtain the
            # filtered request: the capture handler records the request the
            # parent would have sent and returns it in place of a
            # ModelResponse; the parent call's return value is discarded.
            async def _capture_handler(
                updated_request: ModelRequest[ContextT],
            ) -> ModelRequest[ContextT]:
                nonlocal captured_request
                captured_request = updated_request
                return updated_request

            await super().awrap_model_call(request, _capture_handler)
            return captured_request

        response = await self._aselect_tools_with_deepseek(selection_request)
        return self._process_selection_response(
            response,
            selection_request.available_tools,
            selection_request.valid_tool_names,
            request,
        )

    async def abefore_agent(  # noqa
        self,
        state: ToolSelectionState,
        runtime: Runtime,  # noqa
        config: RunnableConfig,
    ) -> ToolSelectionStateUpdate | None:  # ty: ignore[invalid-method-override]
        """
        Perform one real tool selection before this agent run starts.

        All subsequent `model -> tools -> model` rounds reuse this single
        result, avoiding an extra selector-LLM call per model turn.
        """
        if "selected_tool_names" in state:
            return None

        if not self.selection_tools or self.model is None:
            return ToolSelectionStateUpdate(selected_tool_names=None)

        selection_request = ModelRequest(
            model=self.model,
            tools=list(self.selection_tools),
            messages=state["messages"],
            state=state,
            runtime=runtime,
        )
        modified_request = await self._aselect_request_once(selection_request)
        selected_tool_names = self._extract_selected_tool_names(modified_request)
        return ToolSelectionStateUpdate(selected_tool_names=selected_tool_names or None)

    async def awrap_model_call(
        self,
        request: ModelRequest[ContextT],
        handler: Callable[
            [ModelRequest[ContextT]], Awaitable[ModelResponse[ResponseT]]
        ],
    ) -> ModelResponse[ResponseT]:
        """
        Read the first-pass selection from state and apply it to each model turn.
        """
        selected_tool_names = request.state.get("selected_tool_names")  # noqa

        # On the normal path `abefore_agent()` has already written the state;
        # this fallback only covers direct unit tests or future call paths
        # that bypass before_agent.
        if (
            selected_tool_names is None
            and self.selection_tools
            and self.model is not None
        ):
            request = await self._aselect_request_once(request)
            selected_tool_names = self._extract_selected_tool_names(request) or None
            request.state["selected_tool_names"] = selected_tool_names  # noqa

        if selected_tool_names:
            request = self._apply_selected_tools(request, selected_tool_names)

        return await handler(request)
|
||||
184
app/agent/middleware/usage.py
Normal file
184
app/agent/middleware/usage.py
Normal file
@@ -0,0 +1,184 @@
|
||||
from collections.abc import Awaitable, Callable
|
||||
from typing import Any
|
||||
|
||||
from langchain.agents.middleware.types import (
|
||||
AgentMiddleware,
|
||||
ContextT,
|
||||
ModelRequest,
|
||||
ModelResponse,
|
||||
ResponseT,
|
||||
)
|
||||
from langchain_core.messages import AIMessage
|
||||
|
||||
from app.log import logger
|
||||
|
||||
|
||||
class UsageMiddleware(AgentMiddleware):
    """Record model-call usage information and report it to the outer session."""

    def __init__(
        self,
        *,
        on_usage: Callable[[dict[str, Any]], None] | None = None,
    ) -> None:
        # Callback invoked with a usage dict after each model call; when
        # None, the middleware is a pure passthrough.
        # NOTE(review): super().__init__() is not called here (unlike
        # ToolSelectorMiddleware) — confirm AgentMiddleware needs none.
        self.on_usage = on_usage

    @staticmethod
    def _coerce_int(value: Any) -> int | None:
        """Best-effort conversion to ``int``; ``None`` on failure."""
        if value is None:
            return None
        try:
            return int(value)
        except (TypeError, ValueError):
            return None

    @classmethod
    def _lookup_int(cls, container: Any, *keys: str) -> int | None:
        """Return the first non-None value among ``keys`` in ``container``.

        Tries mapping-style ``get()`` first, then attribute access; the found
        value is coerced to ``int`` (``None`` when coercion fails).
        """
        if not container:
            return None

        getter = getattr(container, "get", None)
        if callable(getter):
            for key in keys:
                value = getter(key)
                if value is not None:
                    return cls._coerce_int(value)

        for key in keys:
            value = getattr(container, key, None)
            if value is not None:
                return cls._coerce_int(value)

        return None

    @classmethod
    def _extract_model_name(cls, model: Any) -> str | None:
        """Return the model's name from whichever attribute is present."""
        return (
            getattr(model, "model", None)
            or getattr(model, "model_name", None)
            or getattr(model, "model_id", None)
        )

    @classmethod
    def _extract_context_window_tokens(cls, model: Any) -> int | None:
        """Return the model's input-token limit from its profile, if available."""
        profile = getattr(model, "profile", None)
        if not profile:
            return None
        return cls._lookup_int(profile, "max_input_tokens", "input_token_limit")

    @classmethod
    def _extract_usage(cls, ai_message: AIMessage) -> dict[str, Any]:
        """Extract token usage from an ``AIMessage``.

        Prefers ``usage_metadata``; falls back to provider-specific keys in
        ``response_metadata`` (OpenAI-style ``token_usage``/``usage`` dicts
        and ``*_token_count`` fields).  Returns a dict with ``has_usage``,
        ``input_tokens``, ``output_tokens`` and ``total_tokens``.
        """
        usage_metadata = getattr(ai_message, "usage_metadata", None)

        input_tokens = cls._lookup_int(usage_metadata, "input_tokens")
        output_tokens = cls._lookup_int(usage_metadata, "output_tokens")
        total_tokens = cls._lookup_int(usage_metadata, "total_tokens")

        response_metadata = getattr(ai_message, "response_metadata", None) or {}
        token_usage = (
            response_metadata.get("token_usage")
            or response_metadata.get("usage")
            or response_metadata.get("usage_metadata")
            or {}
        )

        if input_tokens is None:
            input_tokens = cls._lookup_int(
                token_usage,
                "prompt_tokens",
                "input_tokens",
            )
        if input_tokens is None:
            input_tokens = cls._lookup_int(
                response_metadata,
                "prompt_token_count",
                "input_tokens",
            )

        if output_tokens is None:
            output_tokens = cls._lookup_int(
                token_usage,
                "completion_tokens",
                "output_tokens",
            )
        if output_tokens is None:
            output_tokens = cls._lookup_int(
                response_metadata,
                "candidates_token_count",
                "output_tokens",
            )

        if total_tokens is None:
            total_tokens = cls._lookup_int(token_usage, "total_tokens")
        if total_tokens is None:
            total_tokens = cls._lookup_int(response_metadata, "total_token_count")

        # "has_usage" is True as soon as any counter was actually reported.
        has_usage = any(
            value is not None for value in (input_tokens, output_tokens, total_tokens)
        )
        resolved_input = input_tokens or 0
        resolved_output = output_tokens or 0
        # Derive the total when the provider did not report one.
        resolved_total = (
            total_tokens
            if total_tokens is not None
            else resolved_input + resolved_output
        )

        return {
            "has_usage": has_usage,
            "input_tokens": resolved_input,
            "output_tokens": resolved_output,
            "total_tokens": resolved_total,
        }

    async def awrap_model_call(
        self,
        request: ModelRequest[ContextT],
        handler: Callable[
            [ModelRequest[ContextT]], Awaitable[ModelResponse[ResponseT]]
        ],
    ) -> ModelResponse[ResponseT]:
        """Run the model call, then report usage via ``on_usage`` (best effort)."""
        response = await handler(request)

        if not callable(self.on_usage):
            return response

        try:
            # The last AIMessage in the result carries the usage metadata.
            ai_message = next(
                (
                    message
                    for message in reversed(response.result)
                    if isinstance(message, AIMessage)
                ),
                None,
            )
            usage = (
                self._extract_usage(ai_message)
                if ai_message
                else {
                    "has_usage": False,
                    "input_tokens": 0,
                    "output_tokens": 0,
                    "total_tokens": 0,
                }
            )
            context_window_tokens = self._extract_context_window_tokens(request.model)
            context_usage_ratio = None
            if context_window_tokens and usage["has_usage"]:
                context_usage_ratio = usage["input_tokens"] / context_window_tokens

            self.on_usage(
                {
                    "model": self._extract_model_name(request.model),
                    "context_window_tokens": context_window_tokens,
                    "context_usage_ratio": context_usage_ratio,
                    **usage,
                }
            )
        except Exception as e:
            # Usage reporting must never break the agent run.
            logger.debug("记录模型 usage 失败: %s", e)

        return response
|
||||
|
||||
|
||||
# Public names exported by this module.
__all__ = ["UsageMiddleware"]
|
||||
21
app/agent/middleware/utils.py
Normal file
21
app/agent/middleware/utils.py
Normal file
@@ -0,0 +1,21 @@
|
||||
from langchain_core.messages import SystemMessage, ContentBlock
|
||||
|
||||
|
||||
def append_to_system_message(
    system_message: SystemMessage | None,
    text: str,
) -> SystemMessage:
    """Append text to a system message, returning a new ``SystemMessage``.

    Args:
        system_message: Existing system message, or None to start fresh.
        text: Text to append as a new text content block.

    Returns:
        A new SystemMessage whose content blocks are the originals (if any)
        followed by the text; a blank line separates it from prior blocks.
    """
    blocks: list[ContentBlock] = list(system_message.content_blocks) if system_message else []  # noqa
    separator = "\n\n" if blocks else ""
    blocks.append({"type": "text", "text": f"{separator}{text}"})
    return SystemMessage(content_blocks=blocks)
|
||||
72
app/agent/prompt/System Core Prompt.txt
Normal file
72
app/agent/prompt/System Core Prompt.txt
Normal file
@@ -0,0 +1,72 @@
|
||||
You are the MoviePilot agent runtime. Follow the injected runtime configuration to determine the active persona and any extra user-specific context.
|
||||
|
||||
All your responses must be in **Chinese (中文)**.
|
||||
|
||||
You act as a proactive agent. Your goal is to fully resolve the user's media-related requests autonomously. Do not end your turn until the task is complete or you are blocked and require user feedback.
|
||||
|
||||
<agent_core>
|
||||
Identity and Goal:
|
||||
- You are an AI media assistant powered by MoviePilot.
|
||||
- Your primary goal is to fully resolve the user's MoviePilot-related media tasks with the available tools whenever the request is actionable.
|
||||
- Focus on MoviePilot's home media domain: search, recognition, subscriptions, downloads, library organization, file transfer, and system status.
|
||||
- Stay within the MoviePilot product domain unless the user explicitly asks for adjacent help that can be handled with your existing tools.
|
||||
|
||||
Behavior Model:
|
||||
- Prioritize task progress over conversation.
|
||||
- Check current state before making changes, then do the smallest correct action.
|
||||
- Do not stop for approval on read-only operations. Only confirm before destructive or high-impact actions such as starting downloads, deleting subscriptions, or removing history.
|
||||
- When a request can be completed by tools, prefer doing the work over explaining what you might do.
|
||||
- After an action, perform the minimum validation needed to confirm the result actually landed.
|
||||
- If the user explicitly asks to change the speaking style or persona, use the dedicated persona tools instead of editing runtime files manually.
|
||||
- If the user explicitly asks to rewrite or create a persona definition, prefer `update_persona_definition` rather than generic file-editing tools.
|
||||
- Do not let user memory or persona style override this core identity, safety boundaries, or built-in background task rules.
|
||||
- You are not a general-purpose coding assistant in normal media conversations. Only cross into implementation details when the user explicitly asks about MoviePilot internals or debugging.
|
||||
|
||||
Core Capabilities:
|
||||
1. Media Search and Recognition - Identify movies, TV shows, and anime; recognize media from fuzzy filenames or incomplete titles.
|
||||
2. Subscription Management - Create rules for automated downloading and monitor trending content.
|
||||
3. Download Control - Search torrents across trackers and filter by quality, codec, and release group.
|
||||
4. System Status and Organization - Monitor downloads, server health, file transfers, renaming, and library cleanup.
|
||||
5. Visual Input Handling - Users may attach images from supported channels; analyze them together with the text when relevant.
|
||||
6. File Context Handling - User messages may arrive as structured JSON. Treat the `message` field as the user's text. Attachments appear in `files`; when `local_path` is present, use local file tools to inspect the uploaded file directly. When image input is disabled for the current model, user images may also be delivered through `files`.
|
||||
7. Persona Management - If the user explicitly asks to change the speaking style or persona, prefer `query_personas` and `switch_persona`; if the user asks to rewrite or create a persona definition, prefer `update_persona_definition` instead of editing runtime files manually.
|
||||
|
||||
Core Workflow:
|
||||
1. Media Discovery: Identify exact media metadata such as TMDB ID and Season or Episode using search tools when needed.
|
||||
2. Context Checking: Verify whether the media already exists in the library, has already been subscribed, or has relevant history that affects the next step.
|
||||
3. Action Execution: Perform the requested task with concise user-facing output unless the operation is destructive or blocked.
|
||||
4. Final Confirmation: State the outcome briefly, including the key media facts or blocker.
|
||||
|
||||
Tool Calling Strategy:
|
||||
- Call independent tools in parallel whenever possible.
|
||||
- If search results are ambiguous, use `query_media_detail` or `recognize_media` to clarify before proceeding.
|
||||
- If `search_media` fails, fall back to `search_web` or `recognize_media`. Only ask the user when automated paths are exhausted.
|
||||
- Reuse known media identity, prior tool results, and current system context instead of repeating expensive recognition or search calls.
|
||||
- When a tool fails, try one narrower fallback path before escalating to the user.
|
||||
|
||||
Media Management Rules:
|
||||
1. Download Safety: Present found torrents with size, seeds, and quality, then get explicit consent before downloading.
|
||||
2. Subscription Logic: Check for the best matching quality profile based on user history or defaults.
|
||||
3. Library Awareness: Check if content already exists in the library to avoid duplicates.
|
||||
4. Error Handling: If a tool or site fails, briefly explain what went wrong and suggest an alternative.
|
||||
5. TV Subscription Rule: When calling `add_subscribe` for a TV show, omitting `season` means subscribe to season 1 only. To subscribe multiple seasons or the full series, call `add_subscribe` separately for each season.
|
||||
</agent_core>
|
||||
|
||||
<communication_runtime>
|
||||
{verbose_spec}
|
||||
|
||||
- Channel-aware formatting: Follow the capability rules below for Markdown, plain text, buttons, and voice replies.
|
||||
{button_choice_spec}
|
||||
- Voice replies: {voice_reply_spec}
|
||||
- If the current channel supports image sending and an image would materially help, you may use the `send_message` tool with `image_url` to send it.
|
||||
- If the current channel supports file sending and you need to return a local image or file for the user to download, use `send_local_file`.
|
||||
</communication_runtime>
|
||||
|
||||
<markdown_spec>
|
||||
Specific markdown rules:
|
||||
{markdown_spec}
|
||||
</markdown_spec>
|
||||
|
||||
<system_info>
|
||||
{moviepilot_info}
|
||||
</system_info>
|
||||
139
app/agent/prompt/System Tasks.yaml
Normal file
139
app/agent/prompt/System Tasks.yaml
Normal file
@@ -0,0 +1,139 @@
|
||||
version: 2
|
||||
shared_rules:
|
||||
- This is a background system task, NOT a user conversation.
|
||||
- Your final response will be consumed by the system. Keep it concise and task-focused.
|
||||
- Do NOT include greetings, explanations, or conversational text.
|
||||
- Respond in Chinese (中文).
|
||||
task_types:
|
||||
heartbeat:
|
||||
header: "[System Heartbeat]"
|
||||
objective: "Check all jobs in your jobs directory and process pending tasks."
|
||||
steps_title: "Follow these steps"
|
||||
steps:
|
||||
- "List all jobs with status 'pending' or 'in_progress'."
|
||||
- "For 'recurring' jobs, check 'last_run' to determine if it's time to run again."
|
||||
- "For 'once' jobs with status 'pending', execute them now."
|
||||
- "After executing each job, update its status, 'last_run' time, and execution log in the JOB.md file."
|
||||
empty_result: "If no jobs were executed, output nothing."
|
||||
health_check:
|
||||
header: "[System Health Check]"
|
||||
objective: "Verify that the agent execution pipeline is alive."
|
||||
steps_title: "Follow these steps"
|
||||
steps:
|
||||
- "Verify that runtime config, tools, and jobs can all be accessed normally."
|
||||
- "If a real issue is detected, report the failing subsystem and the immediate blocking reason."
|
||||
empty_result: "If there is nothing meaningful to report, output OK only."
|
||||
transfer_failed_retry:
|
||||
header: "[System Task - Transfer Failed Retry]"
|
||||
objective: "A file transfer or organization has failed. Please use the `transfer-failed-retry` skill to retry the failed transfer."
|
||||
context_title: "Task context"
|
||||
context_lines:
|
||||
- "Failed transfer history record IDs: {history_ids_csv}"
|
||||
- "Total failed records: {history_count}"
|
||||
steps_title: "Follow these steps"
|
||||
steps:
|
||||
- "Use `query_transfer_history` with status='failed' to find the record with id={history_id} and understand the failure details such as source path, error message, and media info."
|
||||
- "Analyze the error message to determine the best retry strategy."
|
||||
- "If the source file no longer exists, skip this retry and report that the file is missing."
|
||||
- "Delete the failed history record using `delete_transfer_history` with history_id={history_id}."
|
||||
- "Re-identify the media using `recognize_media` with the source file path."
|
||||
- "If recognition fails, try `search_media` with keywords from the filename."
|
||||
- "Re-transfer using `transfer_file` with the source path and any identified media info such as tmdbid and media_type."
|
||||
- "Report the final result."
|
||||
batch_transfer_failed_retry:
|
||||
header: "[System Task - Batch Transfer Failed Retry]"
|
||||
objective: "Multiple file transfers from the same source have failed. These files likely belong to the same media. Please use the `transfer-failed-retry` skill to retry them efficiently."
|
||||
context_title: "Task context"
|
||||
context_lines:
|
||||
- "Failed transfer history record IDs: {history_ids_csv}"
|
||||
- "Total failed records: {history_count}"
|
||||
steps_title: "Follow these steps"
|
||||
steps:
|
||||
- "Use `query_transfer_history` with status='failed' to find all records with these IDs and understand the failure details."
|
||||
- "Analyze the first record to determine the shared media identity and the best retry strategy because the root cause is usually the same for all files."
|
||||
- "If the error is about media recognition, identify the media once using `recognize_media` or `search_media`, then reuse that result for all files."
|
||||
- "For each failed record, delete the old history entry with `delete_transfer_history` and re-transfer using `transfer_file`."
|
||||
- "Report how many retries succeeded and how many still failed."
|
||||
task_rules:
|
||||
- "These files share the same media identity. Do NOT call `recognize_media` or `search_media` repeatedly for each file."
|
||||
manual_transfer_redo:
|
||||
header: "[System Task - Manual Transfer Re-Organize]"
|
||||
objective: "A user manually triggered an AI re-organize task from the transfer history page."
|
||||
context_title: "Transfer history record"
|
||||
context_lines:
|
||||
- "- History ID: {history_id}"
|
||||
- "- Current status: {current_status}"
|
||||
- "- Current recognized title: {recognized_title}"
|
||||
- "- Media type: {media_type}"
|
||||
- "- Category: {category}"
|
||||
- "- Year: {year}"
|
||||
- "- Season/Episode: {season_episode}"
|
||||
- "- Source path: {source_path}"
|
||||
- "- Source storage: {source_storage}"
|
||||
- "- Destination path: {destination_path}"
|
||||
- "- Destination storage: {destination_storage}"
|
||||
- "- Transfer mode: {transfer_mode}"
|
||||
- "- Current TMDB ID: {tmdbid}"
|
||||
- "- Current Douban ID: {doubanid}"
|
||||
- "- Error message: {error_message}"
|
||||
steps_title: "Required workflow"
|
||||
steps:
|
||||
- "Use `query_transfer_history` to locate and inspect the record with id={history_id}, and verify the source path, status, media info, and failure context."
|
||||
- "Decide whether the current recognition is trustworthy."
|
||||
- "If the source file no longer exists or cannot be safely processed, stop and report the reason."
|
||||
- "If the current recognition is wrong or the record should be reorganized, determine the correct media identity first."
|
||||
- "Prefer `recognize_media` with the source path. If recognition is not reliable, use `search_media` with keywords from filename, title, or year."
|
||||
- "Only continue when you have high confidence in the target media."
|
||||
- "Before re-organizing, delete the old transfer history record with `delete_transfer_history` so the system will not skip the source file."
|
||||
- "Then use `transfer_file` to organize the source path directly."
|
||||
- "When calling `transfer_file`, reuse known context when appropriate: source storage, target path, target storage, transfer mode, season, tmdbid or doubanid, and media_type."
|
||||
- "If this record is already correct and no re-organize is needed, do not perform destructive actions; simply report that no change is necessary."
|
||||
task_rules:
|
||||
- "Do NOT rely on previous chat context. Work only from the record above."
|
||||
- "Your goal is to directly fix one transfer history record by using MoviePilot tools to analyze, clean up the old history entry if necessary, and organize the source file again."
|
||||
- "You should complete the re-organize by directly using tools such as `query_transfer_history`, `recognize_media`, `search_media`, `delete_transfer_history`, and `transfer_file`."
|
||||
- "Do NOT reorganize blindly when media identity is uncertain."
|
||||
- "If the previous record was successful but obviously identified as the wrong media, still use the tool-based flow above instead of `/redo`."
|
||||
- "Keep the final response short and focused on outcome."
|
||||
batch_manual_transfer_redo:
|
||||
header: "[System Task - Batch Manual Transfer Re-Organize]"
|
||||
objective: "A user manually triggered a batch AI re-organize task from the transfer history page."
|
||||
context_title: "Selected transfer history records"
|
||||
context_lines:
|
||||
- "- History IDs: {history_ids_csv}"
|
||||
- "- Total records: {history_count}"
|
||||
- "{records_context}"
|
||||
steps_title: "Required workflow"
|
||||
steps:
|
||||
- "Review the selected records below first and group them by likely shared media identity, source directory, or retry strategy when possible."
|
||||
- "Use the provided record context as the primary source of truth. Call `query_transfer_history` only when you need extra confirmation."
|
||||
- "For each group, decide whether the current recognition is trustworthy."
|
||||
- "If multiple records clearly belong to the same movie or series, identify the media once with `recognize_media` or `search_media`, then reuse that result for the related records."
|
||||
- "If a source file no longer exists or cannot be safely processed, skip that record and note the reason."
|
||||
- "Before re-organizing a record, delete the old transfer history record with `delete_transfer_history` so the system will not skip the source file."
|
||||
- "Then use `transfer_file` to organize the source path directly."
|
||||
- "When calling `transfer_file`, reuse known context when appropriate: source storage, target path, target storage, transfer mode, season, tmdbid or doubanid, and media_type."
|
||||
- "If a record is already correct and no re-organize is needed, do not perform destructive actions; simply mark it as skipped."
|
||||
- "Report only the aggregate outcome, including how many records succeeded, skipped, and failed."
|
||||
task_rules:
|
||||
- "Do NOT assume every selected record belongs to the same media."
|
||||
- "When several records obviously share the same media identity, avoid repeated `recognize_media` or `search_media` calls."
|
||||
- "Process every selected record exactly once."
|
||||
- "Keep the final response short and focused on the aggregate outcome."
|
||||
search_recommend:
|
||||
header: "[System Task - Search Results Recommendation]"
|
||||
objective: "Analyze the provided search results and select the best matching items based on user preferences."
|
||||
context_title: "Task context"
|
||||
context_lines:
|
||||
- "{search_results}"
|
||||
steps_title: "Follow these steps"
|
||||
steps:
|
||||
- "Review all search result items carefully."
|
||||
- "Evaluate each item based on the user preference criteria."
|
||||
- "Select the top items that best match the preferences."
|
||||
- "Return ONLY a JSON array of item indices."
|
||||
task_rules:
|
||||
- "Return ONLY a JSON array of index numbers, e.g., [0, 3, 1]."
|
||||
- "Do NOT include any explanations, markdown formatting, conversational text, or other content."
|
||||
- "Do NOT call any tools. Simply analyze and return the JSON result directly."
|
||||
- "Respond in JSON format only."
|
||||
517
app/agent/prompt/__init__.py
Normal file
517
app/agent/prompt/__init__.py
Normal file
@@ -0,0 +1,517 @@
|
||||
"""提示词管理器"""
|
||||
|
||||
import socket
|
||||
from dataclasses import dataclass, field
|
||||
from pathlib import Path
|
||||
from string import Formatter
|
||||
from time import strftime
|
||||
from typing import Any, Dict, Optional
|
||||
|
||||
import yaml
|
||||
|
||||
from app.core.config import settings
|
||||
from app.log import logger
|
||||
from app.schemas import (
|
||||
ChannelCapability,
|
||||
ChannelCapabilities,
|
||||
MessageChannel,
|
||||
ChannelCapabilityManager,
|
||||
)
|
||||
from app.utils.system import SystemUtils
|
||||
|
||||
SYSTEM_TASKS_FILE = "System Tasks.yaml"
|
||||
# Minimum schema version the tasks YAML must declare; older files are rejected by the loader.
SYSTEM_TASKS_SCHEMA_VERSION = 2
|
||||
|
||||
|
||||
class PromptConfigError(ValueError):
    """Raised when the built-in prompt or system-task definitions fail to load or validate."""
|
||||
|
||||
|
||||
@dataclass
class SystemTaskTypeDefinition:
    """Definition of a single built-in background system task."""

    # Bracketed header shown at the top of the rendered task message.
    header: str
    # One-sentence description of what the task must accomplish.
    objective: str
    # Optional title for the context section ("Task context" is used at render time when None).
    context_title: Optional[str] = None
    # Template lines describing the task context; may contain {placeholders}.
    context_lines: list[str] = field(default_factory=list)
    # Optional title for the steps section ("Follow these steps" is used at render time when None).
    steps_title: Optional[str] = None
    # Ordered step templates the agent should follow.
    steps: list[str] = field(default_factory=list)
    # Extra per-task rules appended after the shared rules.
    task_rules: list[str] = field(default_factory=list)
    # Optional rule describing what to output when the task produced nothing.
    empty_result: Optional[str] = None
|
||||
|
||||
|
||||
@dataclass
class SystemTasksDefinition:
    """Parsed contents of the built-in background system tasks YAML file."""

    # Path of the YAML file the definitions were loaded from.
    path: Path
    # Schema version declared in the file.
    version: int
    # Rules shared by every task type.
    shared_rules: list[str]
    # Task definitions keyed by task-type name.
    task_types: dict[str, "SystemTaskTypeDefinition"]
|
||||
|
||||
|
||||
class PromptManager:
    """Load, cache, and render the application's built-in prompt assets.

    Responsibilities:
    - Load and cache plain-text prompt files from the prompts directory.
    - Build the agent system prompt with channel-specific formatting rules.
    - Load the background system-task definitions from YAML and render them
      into complete task messages.
    """

    def __init__(self, prompts_dir: str = None):
        """
        :param prompts_dir: Directory holding the prompt files; defaults to
            the directory of this package.
        """
        if prompts_dir is None:
            self.prompts_dir = Path(__file__).parent
        else:
            self.prompts_dir = Path(prompts_dir)
        # Raw prompt file contents keyed by file name.
        self.prompts_cache: Dict[str, str] = {}
        # Parsed system-task definitions plus the (mtime_ns, size) signature
        # of the file they were read from.
        self._system_tasks_cache: Optional["SystemTasksDefinition"] = None
        self._system_tasks_signature: Optional[tuple[int, int]] = None

    def load_prompt(self, prompt_name: str) -> str:
        """Load a prompt file by name, caching the stripped content.

        :param prompt_name: File name relative to the prompts directory.
        :return: The prompt text with surrounding whitespace stripped.
        :raises FileNotFoundError: If the prompt file does not exist.
        """
        # Single cache lookup instead of a membership test plus a second read.
        cached = self.prompts_cache.get(prompt_name)
        if cached is not None:
            return cached

        prompt_file = self.prompts_dir / prompt_name
        try:
            with open(prompt_file, "r", encoding="utf-8") as f:
                content = f.read().strip()
            # Cache so subsequent calls avoid disk I/O.
            self.prompts_cache[prompt_name] = content
            logger.info(f"提示词加载成功: {prompt_name},长度:{len(content)} 字符")
            return content
        except FileNotFoundError:
            logger.error(f"提示词文件不存在: {prompt_file}")
            raise
        except Exception as e:
            logger.error(f"加载提示词失败: {prompt_name}, 错误: {e}")
            raise

    def get_agent_prompt(self, channel: str = None) -> str:
        """Build the agent system prompt.

        :param channel: Message channel name (Telegram, WeChat, Slack, ...).
        :return: The fully formatted prompt text.
        """
        # The base prompt only carries MoviePilot runtime and channel
        # capability constraints. Per-turn runtime configuration is injected
        # dynamically by RuntimeConfigMiddleware before each model call so a
        # persona switch takes effect within the same agent run.
        base_prompt = self.load_prompt("System Core Prompt.txt")

        # Resolve the message channel enum from its name (case-insensitive).
        markdown_spec = ""
        msg_channel = (
            next(
                (c for c in MessageChannel if c.value.lower() == channel.lower()), None
            )
            if channel
            else None
        )
        # Channel formatting rules only when the channel's capabilities are known.
        if msg_channel:
            caps = ChannelCapabilityManager.get_capabilities(msg_channel)
            if caps:
                markdown_spec = self._generate_formatting_instructions(caps)
        # Computed unconditionally (the helper tolerates None) so the final
        # .format() call below can never reference an unassigned name when no
        # channel is supplied.
        button_choice_spec = self._generate_button_choice_instructions(msg_channel)

        # Silent mode: suppress chatter between tool calls unless verbose.
        verbose_spec = ""
        if not settings.AI_AGENT_VERBOSE:
            verbose_spec = (
                "\n\n[Important Instruction] STRICTLY ENFORCED: "
                "If tools are needed, DO NOT output any conversational text, explanations, progress updates, "
                "or acknowledgements before the first tool call or between tool calls. "
                "Call tools directly without any transitional phrases. "
                "You MUST remain completely silent until all required tools have finished and you have the final result. "
                "Only then may you send one final user-facing reply. "
                "DO NOT output any intermediate content whatsoever."
            )

        # MoviePilot system information.
        moviepilot_info = self._get_moviepilot_info()
        voice_reply_spec = self._generate_voice_reply_instructions()

        # Always substitute every placeholder so later .format() calls cannot
        # hit a KeyError from a leftover brace.
        return base_prompt.format(
            markdown_spec=markdown_spec,
            verbose_spec=verbose_spec,
            moviepilot_info=moviepilot_info,
            voice_reply_spec=voice_reply_spec,
            button_choice_spec=button_choice_spec,
        )

    def load_system_tasks_definition(self) -> "SystemTasksDefinition":
        """Load the built-in background system task definitions.

        The parsed result is cached and only re-read when the file's
        (mtime_ns, size) signature changes on disk.

        :raises PromptConfigError: If the file is missing, unreadable, or invalid.
        """
        system_tasks_path = self.prompts_dir / SYSTEM_TASKS_FILE
        try:
            stat = system_tasks_path.stat()
        except FileNotFoundError as err:
            logger.error(f"系统任务定义文件不存在: {system_tasks_path}")
            raise PromptConfigError(f"系统任务定义文件不存在: {system_tasks_path}") from err

        signature = (stat.st_mtime_ns, stat.st_size)
        if (
            self._system_tasks_signature == signature
            and self._system_tasks_cache is not None
        ):
            return self._system_tasks_cache

        try:
            content = system_tasks_path.read_text(encoding="utf-8")
        except Exception as err:  # noqa: BLE001
            logger.error(f"读取系统任务定义失败: {system_tasks_path}, 错误: {err}")
            raise PromptConfigError(
                f"读取系统任务定义失败 {system_tasks_path}: {err}"
            ) from err

        try:
            data = yaml.safe_load(content) or {}
        except yaml.YAMLError as err:
            raise PromptConfigError(f"YAML 解析失败 {system_tasks_path}: {err}") from err
        if not isinstance(data, dict):
            raise PromptConfigError(
                f"YAML 根节点必须是映射类型: {system_tasks_path}"
            )

        definition = self._parse_system_tasks_definition(system_tasks_path, data)
        # Only commit the cache once parsing succeeded.
        self._system_tasks_signature = signature
        self._system_tasks_cache = definition
        return definition

    def render_system_task_message(
        self,
        task_type: str,
        *,
        template_context: Optional[dict[str, Any]] = None,
        extra_rules: Optional[list[str]] = None,
    ) -> str:
        """Render a background system task prompt from the built-in YAML.

        :param task_type: Key under ``task_types`` in the YAML file.
        :param template_context: Values substituted into ``{placeholders}``.
        :param extra_rules: Additional rules appended after the YAML rules.
        :raises PromptConfigError: If the task type is unknown or a template
            placeholder has no value in ``template_context``.
        """
        system_tasks = self.load_system_tasks_definition()
        task_definition = system_tasks.task_types.get(task_type)
        if not task_definition:
            raise PromptConfigError(f"未定义的后台系统任务类型: {task_type}")

        rendered_context = self._render_template_lines(
            task_definition.context_lines,
            template_context,
            task_type,
            "context_lines",
        )
        rendered_steps = self._render_template_lines(
            task_definition.steps,
            template_context,
            task_type,
            "steps",
        )
        rendered_task_rules = self._render_template_lines(
            task_definition.task_rules,
            template_context,
            task_type,
            "task_rules",
        )

        # Header and objective always lead the message.
        sections = [
            self._render_template_text(
                task_definition.header,
                template_context,
                task_type,
                "header",
            ).strip(),
            self._render_template_text(
                task_definition.objective,
                template_context,
                task_type,
                "objective",
            ).strip(),
        ]
        if rendered_context:
            sections.append(
                self._format_titled_lines(
                    task_definition.context_title or "Task context",
                    rendered_context,
                )
            )
        if rendered_steps:
            sections.append(
                self._format_titled_lines(
                    task_definition.steps_title or "Follow these steps",
                    rendered_steps,
                )
            )

        # Rule order: shared rules, empty-result rule, per-task rules, caller extras.
        rules = list(system_tasks.shared_rules)
        if task_definition.empty_result:
            rules.append(task_definition.empty_result)
        rules.extend(rendered_task_rules)
        if extra_rules:
            rules.extend(rule.strip() for rule in extra_rules if rule and rule.strip())
        if rules:
            sections.append(self._format_numbered_rules("IMPORTANT", rules))
        return "\n\n".join(section for section in sections if section).strip()

    @staticmethod
    def _get_moviepilot_info() -> str:
        """Collect MoviePilot runtime facts for injection into the system prompt."""
        # Host name and IP address (best effort, falls back to localhost).
        try:
            hostname = socket.gethostname()
            ip_address = socket.gethostbyname(hostname)
        except Exception:  # noqa
            hostname = "localhost"
            ip_address = "127.0.0.1"

        # Config and log directories.
        config_path = str(settings.CONFIG_PATH)
        log_path = str(settings.LOG_PATH)

        # API endpoint details.
        api_port = settings.PORT
        api_path = settings.API_V1_STR

        # NOTE(review): the API token and database password below are embedded
        # verbatim into the model prompt so the agent can call local APIs —
        # confirm this exposure is acceptable for every configured provider.
        api_token = settings.API_TOKEN or "未设置"

        # Database information.
        db_type = settings.DB_TYPE
        if db_type == "sqlite":
            db_info = f"SQLite ({settings.CONFIG_PATH / 'db' / 'moviepilot.db'})"
        else:
            db_password = settings.DB_POSTGRESQL_PASSWORD or ""
            db_info = f"PostgreSQL ({settings.DB_POSTGRESQL_USERNAME}:{db_password}@{settings.DB_POSTGRESQL_HOST}:{settings.DB_POSTGRESQL_PORT}/{settings.DB_POSTGRESQL_DATABASE})"

        info_lines = [
            f"- 当前时间: {strftime('%Y-%m-%d %H:%M:%S')}",
            f"- 运行环境: {SystemUtils.platform} {'docker' if SystemUtils.is_docker() else ''}",
            f"- 主机名: {hostname}",
            f"- IP地址: {ip_address}",
            f"- API端口: {api_port}",
            f"- API路径: {api_path}",
            f"- API令牌: {api_token}",
            f"- 外网域名: {settings.APP_DOMAIN or '未设置'}",
            f"- 数据库类型: {db_type}",
            f"- 数据库: {db_info}",
            f"- 配置文件目录: {config_path}",
            f"- 日志文件目录: {log_path}",
            f"- 系统安装目录: {settings.ROOT_PATH}",
        ]

        return "\n".join(info_lines)

    @staticmethod
    def _generate_formatting_instructions(caps: "ChannelCapabilities") -> str:
        """Generate formatting instructions based on the channel's capabilities.

        Returns an empty string for rich-text channels; otherwise a set of
        plain-text-only formatting rules.
        """
        instructions = []
        # All plain-text rules belong to the no-rich-text branch together.
        if ChannelCapability.RICH_TEXT not in caps.capabilities:
            instructions.append(
                "- Formatting: Use **Plain Text ONLY**. The channel does NOT support Markdown."
            )
            instructions.append(
                "- No Markdown Symbols: NEVER use `**`, `*`, `__`, or `[` blocks. Use natural text to emphasize (e.g., using ALL CAPS or separators)."
            )
            instructions.append(
                "- Lists: Use plain text symbols like `>` or `*` at the start of lines, followed by manual line breaks."
            )
            instructions.append("- Links: Paste URLs directly as text.")
        return "\n".join(instructions)

    @staticmethod
    def _generate_voice_reply_instructions() -> str:
        """Instruction telling the agent when voice replies are appropriate."""
        return (
            "- Voice replies: Use normal text replies by default. "
            "Only call `send_voice_message` when the user explicitly asks for a voice reply "
            "or spoken playback is clearly better than plain text."
        )

    @staticmethod
    def _generate_button_choice_instructions(
        channel: "MessageChannel" = None,
    ) -> str:
        """Instruction for asking the user questions, depending on button support."""
        if (
            channel
            and ChannelCapabilityManager.supports_buttons(channel)
            and ChannelCapabilityManager.supports_callbacks(channel)
        ):
            return (
                "- User questions: If you need the user to choose from a few clear options, "
                "call `ask_user_choice` to send button options. After the user clicks a button, "
                "the selected value will come back as the user's next message. After calling this tool, "
                "wait for the user's selection instead of repeating the question in plain text."
            )
        return "- User questions: When you truly need user input, ask briefly in plain text."

    def _parse_system_tasks_definition(
        self,
        path: Path,
        data: dict[str, Any],
    ) -> "SystemTasksDefinition":
        """Convert the raw YAML mapping into a SystemTasksDefinition.

        :raises PromptConfigError: On schema-version, structure, or field errors.
        """
        version = self._normalize_positive_int(data.get("version"), "version", default=1)
        if version < SYSTEM_TASKS_SCHEMA_VERSION:
            raise PromptConfigError(
                f"{path} 的 version={version} 过旧,"
                f"当前要求 System Tasks schema v{SYSTEM_TASKS_SCHEMA_VERSION} 或更高版本"
            )

        shared_rules = self._normalize_string_list(data.get("shared_rules"), "shared_rules")
        if not shared_rules:
            raise PromptConfigError(f"{path} 缺少 shared_rules")

        raw_task_types = data.get("task_types")
        if not isinstance(raw_task_types, dict) or not raw_task_types:
            raise PromptConfigError(f"{path} 缺少 task_types 映射")

        task_types: dict[str, "SystemTaskTypeDefinition"] = {}
        for key, raw in raw_task_types.items():
            if not isinstance(raw, dict):
                raise PromptConfigError(f"task_types.{key} 必须是映射")

            header = str(raw.get("header") or "").strip()
            objective = str(raw.get("objective") or "").strip()
            if not header or not objective:
                raise PromptConfigError(f"task_types.{key} 缺少 header 或 objective")

            task_types[str(key)] = SystemTaskTypeDefinition(
                header=header,
                objective=objective,
                context_title=str(raw.get("context_title") or "").strip() or None,
                context_lines=self._normalize_string_list(
                    raw.get("context_lines"),
                    f"task_types.{key}.context_lines",
                ),
                steps_title=str(raw.get("steps_title") or "").strip() or None,
                steps=self._normalize_string_list(
                    raw.get("steps"),
                    f"task_types.{key}.steps",
                ),
                task_rules=self._normalize_string_list(
                    raw.get("task_rules"),
                    f"task_types.{key}.task_rules",
                ),
                empty_result=str(raw.get("empty_result") or "").strip() or None,
            )
        return SystemTasksDefinition(
            path=path,
            version=version,
            shared_rules=shared_rules,
            task_types=task_types,
        )

    @classmethod
    def _render_template_text(
        cls,
        text: str,
        template_context: Optional[dict[str, Any]],
        task_type: str,
        field_name: str,
    ) -> str:
        """Substitute {placeholders} in a single template string.

        :raises PromptConfigError: If a placeholder has no value in the context.
        """
        if not text:
            return ""

        # Collect the placeholder names the template actually uses.
        formatter = Formatter()
        required_fields = {
            placeholder_name
            for _, placeholder_name, _, _ in formatter.parse(text)
            if placeholder_name
        }
        if not required_fields:
            return text

        context = cls._normalize_template_context(template_context)
        # `name` (not `field`) to avoid shadowing the dataclasses.field import.
        missing_fields = sorted(name for name in required_fields if name not in context)
        if missing_fields:
            raise PromptConfigError(
                f"系统任务定义 `{task_type}` 的 `{field_name}` 缺少变量: "
                + ", ".join(f"`{name}`" for name in missing_fields)
            )

        # Plain string substitution keeps the YAML file the single source of
        # truth for background-task wording.
        return text.format_map(context)

    @classmethod
    def _render_template_lines(
        cls,
        items: list[str],
        template_context: Optional[dict[str, Any]],
        task_type: str,
        field_name: str,
    ) -> list[str]:
        """Render a list of template lines, dropping blank entries.

        The 1-based index in the reported field name refers to the position
        in the original list (blank entries included).
        """
        return [
            cls._render_template_text(
                item,
                template_context,
                task_type,
                f"{field_name}[{index}]",
            ).rstrip()
            for index, item in enumerate(items, start=1)
            if item and item.rstrip()
        ]

    @staticmethod
    def _normalize_template_context(
        template_context: Optional[dict[str, Any]],
    ) -> dict[str, str]:
        """Coerce all context keys and values to strings (None becomes '')."""
        if not template_context:
            return {}
        return {
            str(key): "" if value is None else str(value)
            for key, value in template_context.items()
        }

    @staticmethod
    def _format_numbered_rules(title: str, items: list[str]) -> str:
        """Format a titled, 1-based numbered rule list."""
        return "\n".join(
            [f"{title}:"] + [f"{index}. {item}" for index, item in enumerate(items, start=1)]
        )

    @staticmethod
    def _format_titled_lines(title: str, items: list[str]) -> str:
        """Format a titled section, dropping blank lines and trailing whitespace."""
        cleaned = [item.rstrip() for item in items if item and item.rstrip()]
        return "\n".join([f"{title}:"] + cleaned)

    @staticmethod
    def _normalize_positive_int(
        value: Any,
        field_name: str,
        *,
        default: int,
    ) -> int:
        """Coerce a YAML value to a positive int, falling back to *default*.

        :raises PromptConfigError: If the value is present but not a positive int.
        """
        if value in (None, ""):
            return default
        try:
            normalized = int(value)
        except (TypeError, ValueError) as err:
            raise PromptConfigError(f"{field_name} 必须是正整数") from err
        if normalized <= 0:
            raise PromptConfigError(f"{field_name} 必须是正整数")
        return normalized

    @staticmethod
    def _normalize_string_list(values: Any, field_name: str) -> list[str]:
        """Coerce a YAML value to a list of non-empty, stripped strings.

        :raises PromptConfigError: If the value is present but not a list.
        """
        if values is None:
            return []
        if not isinstance(values, list):
            raise PromptConfigError(f"{field_name} 必须是字符串数组")
        normalized: list[str] = []
        for value in values:
            text = str(value).strip()
            if text:
                normalized.append(text)
        return normalized

    def clear_cache(self):
        """Drop all cached prompts and system-task definitions."""
        self.prompts_cache.clear()
        self._system_tasks_cache = None
        self._system_tasks_signature = None
        logger.info("提示词缓存已清空")
|
||||
|
||||
|
||||
prompt_manager = PromptManager()
|
||||
755
app/agent/runtime.py
Normal file
755
app/agent/runtime.py
Normal file
@@ -0,0 +1,755 @@
|
||||
"""Agent 根层运行时配置管理。"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import re
|
||||
import shutil
|
||||
import threading
|
||||
from dataclasses import dataclass, field
|
||||
from pathlib import Path
|
||||
from typing import Any, Iterable, Optional
|
||||
|
||||
import yaml
|
||||
|
||||
from app.core.config import settings
|
||||
from app.log import logger
|
||||
|
||||
CURRENT_PERSONA_FILE = "CURRENT_PERSONA.md"
|
||||
SYSTEM_RUNTIME_DIR = "runtime"
|
||||
MEMORY_DIR = "memory"
|
||||
SKILLS_DIR = "skills"
|
||||
JOBS_DIR = "jobs"
|
||||
ACTIVITY_DIR = "activity"
|
||||
PERSONAS_DIR = "personas"
|
||||
PERSONA_FILE = "PERSONA.md"
|
||||
CURRENT_PERSONA_SCHEMA_VERSION = 3
|
||||
PERSONA_SCHEMA_VERSION = 1
|
||||
DEFAULT_PERSONA_ID = "default"
|
||||
PERSONA_ID_PATTERN = re.compile(r"^[a-z0-9][a-z0-9_-]{0,63}$")
|
||||
|
||||
ROOT_LEVEL_RUNTIME_FILES = {
|
||||
CURRENT_PERSONA_FILE,
|
||||
}
|
||||
|
||||
OBSOLETE_AGENT_ROOT_FILES = {
|
||||
"AGENT_CORE.md",
|
||||
"AGENT_PROFILE.md",
|
||||
"AGENT_WORKFLOW.md",
|
||||
"AGENT_HOOKS.md",
|
||||
"USER_PREFERENCES.md",
|
||||
"SYSTEM_TASKS.md",
|
||||
"WAKE_FORMAT.md",
|
||||
}
|
||||
|
||||
OBSOLETE_RUNTIME_FILES = {
|
||||
Path("AGENT_CORE.md"),
|
||||
Path("AGENT_PROFILE.md"),
|
||||
Path("AGENT_WORKFLOW.md"),
|
||||
Path("AGENT_HOOKS.md"),
|
||||
Path("USER_PREFERENCES.md"),
|
||||
Path("SYSTEM_TASKS.md"),
|
||||
Path("WAKE_FORMAT.md"),
|
||||
Path("personas") / DEFAULT_PERSONA_ID / "AGENT_PROFILE.md",
|
||||
Path("personas") / DEFAULT_PERSONA_ID / "AGENT_WORKFLOW.md",
|
||||
Path("personas") / DEFAULT_PERSONA_ID / "AGENT_HOOKS.md",
|
||||
Path("system_tasks") / "SYSTEM_TASKS.md",
|
||||
Path("templates") / "WAKE_FORMAT.md",
|
||||
}
|
||||
|
||||
FRONTMATTER_PATTERN = re.compile(r"^---\s*\n(.*?)\n---\s*\n?", re.DOTALL)
|
||||
|
||||
|
||||
class AgentRuntimeConfigError(ValueError):
    """Raised when the root-level agent runtime configuration cannot be loaded."""
|
||||
|
||||
|
||||
@dataclass
class ParsedMarkdownDocument:
    """A Markdown file split into frontmatter metadata and body text."""

    # Parsed YAML frontmatter; empty dict when the file has none.
    metadata: dict[str, Any]
    # Document body with the frontmatter block stripped off.
    body: str
|
||||
|
||||
|
||||
@dataclass
class PersonaDefinition:
    """A single persona definition loaded from a PERSONA.md file."""

    persona_id: str
    path: Path
    label: str
    description: str
    text: str
    aliases: list[str] = field(default_factory=list)

    def matches(self, query: str) -> bool:
        """Return True when *query* equals the id, label or any alias
        (whitespace-insensitive, case-insensitive)."""
        needle = query.strip().casefold()
        if not needle:
            return False
        for candidate in (self.persona_id, self.label, *self.aliases):
            if candidate.strip().casefold() == needle:
                return True
        return False

    def summary_line(self) -> str:
        """Build a readable one-line summary of this persona."""
        parts = [f"`{self.persona_id}`"]
        if self.label and self.label != self.persona_id:
            parts.append(self.label)
        if self.description:
            parts.append(self.description)
        return " - ".join(parts)

    def to_dict(self, *, is_active: bool) -> dict[str, Any]:
        """Serialize the persona for query tools."""
        return {
            "persona_id": self.persona_id,
            "label": self.label,
            "description": self.description,
            "aliases": self.aliases,
            "is_active": is_active,
            "path": str(self.path),
        }
|
||||
|
||||
|
||||
@dataclass
class AgentRuntimeConfig:
    """Snapshot of the root-level runtime configuration after one load."""

    source_root: Path
    active_persona: str
    current_persona_path: Path
    persona: PersonaDefinition
    available_personas: list[PersonaDefinition]
    extra_context_paths: list[Path]
    extra_contexts: list[tuple[Path, str]]
    warnings: list[str] = field(default_factory=list)
    used_fallback: bool = False

    def render_prompt_sections(self) -> str:
        """Render the runtime fragments that go into the system prompt."""
        lines: list[str] = [
            "<agent_runtime_config>",
            f"- Active persona: `{self.active_persona}`",
            f"- Active persona source: `{self.persona.path}`",
        ]
        if self.available_personas:
            lines.append("- Available personas:")
            for persona in self.available_personas:
                lines.append(f"  - {persona.summary_line()}")
        lines.append("</agent_runtime_config>")

        if self.warnings:
            lines.append("")
            lines.append("<agent_runtime_warnings>")
            for warning in self.warnings:
                lines.append(f"- {warning}")
            lines.append("</agent_runtime_warnings>")

        lines.append("")
        lines.append("<agent_persona>")
        lines.append(f"- Persona ID: `{self.persona.persona_id}`")
        if self.persona.label and self.persona.label != self.persona.persona_id:
            lines.append(f"- Persona Label: {self.persona.label}")
        if self.persona.description:
            lines.append(f"- Persona Description: {self.persona.description}")
        lines.append("")
        lines.append(self.persona.text.strip() or "(No persona instructions configured.)")
        lines.append("</agent_persona>")

        for path, text in self.extra_contexts:
            stripped = text.strip()
            if not stripped:
                continue
            lines.append("")
            lines.append(f'<agent_extra_context source="{path.name}">')
            lines.append(stripped)
            lines.append("</agent_extra_context>")
        return "\n".join(lines).strip()

    def list_personas(self) -> list[dict[str, Any]]:
        """Return a summary dict for every available persona."""
        summaries: list[dict[str, Any]] = []
        for persona in self.available_personas:
            summaries.append(
                persona.to_dict(is_active=persona.persona_id == self.active_persona)
            )
        return summaries
|
||||
|
||||
|
||||
class AgentRuntimeManager:
|
||||
"""统一管理 agent 根层运行时配置目录、校验与人格切换。"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
*,
|
||||
agent_root_dir: Optional[Path] = None,
|
||||
bundled_defaults_dir: Optional[Path] = None,
|
||||
) -> None:
|
||||
self.agent_root_dir = agent_root_dir or (settings.CONFIG_PATH / "agent")
|
||||
self.runtime_dir = self.agent_root_dir / SYSTEM_RUNTIME_DIR
|
||||
self.memory_dir = self.agent_root_dir / MEMORY_DIR
|
||||
self.skills_dir = self.agent_root_dir / SKILLS_DIR
|
||||
self.jobs_dir = self.agent_root_dir / JOBS_DIR
|
||||
self.activity_dir = self.agent_root_dir / ACTIVITY_DIR
|
||||
self.bundled_defaults_dir = bundled_defaults_dir or (
|
||||
Path(__file__).parent / "defaults"
|
||||
)
|
||||
self._cache_lock = threading.Lock()
|
||||
self._cached_signature: Optional[tuple[tuple[str, int, int], ...]] = None
|
||||
self._cached_config: Optional[AgentRuntimeConfig] = None
|
||||
|
||||
def ensure_layout(self) -> None:
|
||||
"""创建目录、同步默认文件,并清理废弃的旧版 runtime 文件。"""
|
||||
self.agent_root_dir.mkdir(parents=True, exist_ok=True)
|
||||
self.runtime_dir.mkdir(parents=True, exist_ok=True)
|
||||
self.memory_dir.mkdir(parents=True, exist_ok=True)
|
||||
self.skills_dir.mkdir(parents=True, exist_ok=True)
|
||||
self.jobs_dir.mkdir(parents=True, exist_ok=True)
|
||||
self.activity_dir.mkdir(parents=True, exist_ok=True)
|
||||
self._migrate_root_runtime_files()
|
||||
self._remove_obsolete_runtime_files()
|
||||
self._sync_bundled_defaults()
|
||||
self._migrate_root_memory_files()
|
||||
|
||||
def load_runtime_config(self) -> AgentRuntimeConfig:
|
||||
"""加载配置。用户目录损坏时自动回退到内置默认配置。"""
|
||||
self.ensure_layout()
|
||||
signature = self._build_signature()
|
||||
with self._cache_lock:
|
||||
if self._cached_signature == signature and self._cached_config:
|
||||
return self._cached_config
|
||||
|
||||
try:
|
||||
config = self._load_from_root(self.runtime_dir)
|
||||
except AgentRuntimeConfigError as err:
|
||||
logger.warning("Agent 根层配置无效,回退到内置默认配置: %s", err)
|
||||
config = self._load_from_root(self.bundled_defaults_dir)
|
||||
config.used_fallback = True
|
||||
config.warnings.insert(
|
||||
0, f"用户运行时配置加载失败,已回退到内置默认配置: {err}"
|
||||
)
|
||||
|
||||
self._cached_signature = signature
|
||||
self._cached_config = config
|
||||
return config
|
||||
|
||||
def invalidate_cache(self) -> None:
|
||||
"""供测试或手动刷新时清理缓存。"""
|
||||
with self._cache_lock:
|
||||
self._cached_signature = None
|
||||
self._cached_config = None
|
||||
|
||||
def set_active_persona(self, persona_query: str) -> AgentRuntimeConfig:
|
||||
"""切换当前激活人格,并立即刷新缓存。"""
|
||||
self.ensure_layout()
|
||||
runtime_root = self.runtime_dir
|
||||
current_path = runtime_root / CURRENT_PERSONA_FILE
|
||||
current_doc = self._read_markdown(current_path)
|
||||
current_meta = current_doc.metadata
|
||||
|
||||
available_personas = self._load_personas(runtime_root)
|
||||
persona = self._resolve_persona_definition(persona_query, available_personas)
|
||||
|
||||
document = self._render_current_persona_document(
|
||||
active_persona=persona.persona_id,
|
||||
extra_context_files=self._coerce_string_list(
|
||||
current_meta.get("extra_context_files")
|
||||
),
|
||||
deprecated_phrases=self._coerce_string_list(
|
||||
current_meta.get("deprecated_phrases")
|
||||
),
|
||||
)
|
||||
current_path.write_text(document, encoding="utf-8")
|
||||
self.invalidate_cache()
|
||||
logger.info("已切换 Agent 人格: %s", persona.persona_id)
|
||||
return self.load_runtime_config()
|
||||
|
||||
def list_personas(self) -> list[PersonaDefinition]:
|
||||
"""列出当前可用人格。"""
|
||||
return self.load_runtime_config().available_personas
|
||||
|
||||
def update_persona_definition(
|
||||
self,
|
||||
persona_query: str,
|
||||
*,
|
||||
label: Optional[str] = None,
|
||||
description: Optional[str] = None,
|
||||
aliases: Optional[list[str]] = None,
|
||||
instructions: Optional[str] = None,
|
||||
append_instructions: Optional[list[str]] = None,
|
||||
create_if_missing: bool = False,
|
||||
) -> tuple[PersonaDefinition, bool]:
|
||||
"""更新或创建运行时人格定义。"""
|
||||
self.ensure_layout()
|
||||
runtime_root = self.runtime_dir
|
||||
available_personas = self._load_personas(runtime_root)
|
||||
|
||||
created = False
|
||||
try:
|
||||
persona = self._resolve_persona_definition(persona_query, available_personas)
|
||||
target_persona_id = persona.persona_id
|
||||
target_path = persona.path
|
||||
existing_body = persona.text
|
||||
existing_label = persona.label
|
||||
existing_description = persona.description
|
||||
existing_aliases = list(persona.aliases)
|
||||
except AgentRuntimeConfigError:
|
||||
if not create_if_missing:
|
||||
raise
|
||||
target_persona_id = self._validate_new_persona_id(persona_query)
|
||||
target_path = runtime_root / PERSONAS_DIR / target_persona_id / PERSONA_FILE
|
||||
existing_body = ""
|
||||
existing_label = target_persona_id
|
||||
existing_description = ""
|
||||
existing_aliases = []
|
||||
created = True
|
||||
|
||||
final_label = (
|
||||
label.strip()
|
||||
if isinstance(label, str) and label.strip()
|
||||
else existing_label or target_persona_id
|
||||
)
|
||||
final_description = (
|
||||
description.strip()
|
||||
if isinstance(description, str) and description.strip()
|
||||
else existing_description
|
||||
)
|
||||
final_aliases = (
|
||||
self._normalize_persona_aliases(aliases, "aliases")
|
||||
if aliases is not None
|
||||
else existing_aliases
|
||||
)
|
||||
final_body = (
|
||||
self._normalize_persona_body(instructions)
|
||||
if isinstance(instructions, str) and instructions.strip()
|
||||
else self._normalize_persona_body(existing_body)
|
||||
)
|
||||
final_body = self._merge_persona_instructions(
|
||||
final_body,
|
||||
append_instructions,
|
||||
)
|
||||
if not final_body.strip():
|
||||
raise AgentRuntimeConfigError("人格定义正文不能为空")
|
||||
|
||||
document = self._render_persona_document(
|
||||
persona_id=target_persona_id,
|
||||
label=final_label,
|
||||
description=final_description,
|
||||
aliases=final_aliases,
|
||||
body=final_body,
|
||||
)
|
||||
target_path.parent.mkdir(parents=True, exist_ok=True)
|
||||
target_path.write_text(document, encoding="utf-8")
|
||||
self.invalidate_cache()
|
||||
|
||||
runtime_config = self.load_runtime_config()
|
||||
updated_persona = self._resolve_persona_definition(
|
||||
target_persona_id,
|
||||
runtime_config.available_personas,
|
||||
)
|
||||
logger.info(
|
||||
"已%s Agent 人格定义: %s",
|
||||
"创建" if created else "更新",
|
||||
updated_persona.persona_id,
|
||||
)
|
||||
return updated_persona, created
|
||||
|
||||
def _build_signature(self) -> tuple[tuple[str, int, int], ...]:
|
||||
"""基于运行时配置和内置人格生成文件签名。"""
|
||||
entries: list[tuple[str, int, int]] = []
|
||||
for prefix, root in (
|
||||
("runtime", self.runtime_dir),
|
||||
("bundled", self.bundled_defaults_dir),
|
||||
):
|
||||
if not root.exists():
|
||||
continue
|
||||
for path in sorted(root.rglob("*")):
|
||||
if not path.is_file():
|
||||
continue
|
||||
stat = path.stat()
|
||||
relative = path.relative_to(root).as_posix()
|
||||
entries.append((f"{prefix}:{relative}", stat.st_mtime_ns, stat.st_size))
|
||||
return tuple(entries)
|
||||
|
||||
def _sync_bundled_defaults(self) -> None:
|
||||
"""仅复制缺失的默认运行时文件,避免覆盖用户自定义。"""
|
||||
if not self.bundled_defaults_dir.exists():
|
||||
return
|
||||
for path in sorted(self.bundled_defaults_dir.rglob("*")):
|
||||
relative = path.relative_to(self.bundled_defaults_dir)
|
||||
target = self.runtime_dir / relative
|
||||
if path.is_dir():
|
||||
target.mkdir(parents=True, exist_ok=True)
|
||||
continue
|
||||
if target.exists():
|
||||
continue
|
||||
target.parent.mkdir(parents=True, exist_ok=True)
|
||||
shutil.copy2(path, target)
|
||||
logger.info("已同步默认 Agent 运行时文件: %s", target)
|
||||
|
||||
def _migrate_root_runtime_files(self) -> None:
|
||||
"""兼容早期直接放在 `config/agent` 根目录的 CURRENT_PERSONA。"""
|
||||
source = self.agent_root_dir / CURRENT_PERSONA_FILE
|
||||
target = self.runtime_dir / CURRENT_PERSONA_FILE
|
||||
if not source.exists() or target.exists():
|
||||
return
|
||||
target.parent.mkdir(parents=True, exist_ok=True)
|
||||
source.rename(target)
|
||||
logger.info("已迁移旧版 Agent 根配置文件: %s -> %s", source, target)
|
||||
|
||||
def _remove_obsolete_runtime_files(self) -> None:
|
||||
"""删除不再支持的旧版 Agent 配置文件,避免被误迁移到 memory。"""
|
||||
for filename in sorted(OBSOLETE_AGENT_ROOT_FILES):
|
||||
path = self.agent_root_dir / filename
|
||||
if not path.exists() or not path.is_file():
|
||||
continue
|
||||
path.unlink()
|
||||
logger.info("已删除废弃的 Agent 根配置文件: %s", path)
|
||||
|
||||
for relative_path in sorted(OBSOLETE_RUNTIME_FILES):
|
||||
path = self.runtime_dir / relative_path
|
||||
if not path.exists() or not path.is_file():
|
||||
continue
|
||||
path.unlink()
|
||||
logger.info("已删除废弃的 Agent 运行时文件: %s", path)
|
||||
|
||||
def _migrate_root_memory_files(self) -> None:
|
||||
"""将旧版根目录 memory 文件移入 `config/agent/memory`。"""
|
||||
for path in sorted(self.agent_root_dir.glob("*.md")):
|
||||
if path.name in ROOT_LEVEL_RUNTIME_FILES:
|
||||
continue
|
||||
target = self.memory_dir / path.name
|
||||
if target.exists():
|
||||
continue
|
||||
path.rename(target)
|
||||
logger.info("已迁移旧版 Agent memory 文件: %s -> %s", path, target)
|
||||
|
||||
def _load_from_root(self, root: Path) -> AgentRuntimeConfig:
|
||||
current_persona_path = root / CURRENT_PERSONA_FILE
|
||||
current_doc = self._read_markdown(current_persona_path)
|
||||
current_meta = current_doc.metadata
|
||||
|
||||
active_persona = str(
|
||||
current_meta.get("active_persona") or DEFAULT_PERSONA_ID
|
||||
).strip()
|
||||
if not active_persona:
|
||||
raise AgentRuntimeConfigError("CURRENT_PERSONA.md 缺少 active_persona")
|
||||
|
||||
extra_context_paths = self._resolve_optional_paths(
|
||||
root, current_meta.get("extra_context_files", [])
|
||||
)
|
||||
|
||||
available_personas = self._load_personas(root)
|
||||
persona = self._resolve_persona_definition(active_persona, available_personas)
|
||||
extra_contexts = [
|
||||
(path, self._read_markdown(path).body)
|
||||
for path in extra_context_paths
|
||||
]
|
||||
|
||||
warnings = self._validate_runtime_config(
|
||||
current_meta=current_meta,
|
||||
persona_path=persona.path,
|
||||
extra_context_paths=extra_context_paths,
|
||||
persona_text=persona.text,
|
||||
)
|
||||
return AgentRuntimeConfig(
|
||||
source_root=root,
|
||||
active_persona=active_persona,
|
||||
current_persona_path=current_persona_path,
|
||||
persona=persona,
|
||||
available_personas=available_personas,
|
||||
extra_context_paths=extra_context_paths,
|
||||
extra_contexts=extra_contexts,
|
||||
warnings=warnings,
|
||||
)
|
||||
|
||||
def _load_personas(self, root: Path) -> list[PersonaDefinition]:
|
||||
"""扫描并解析所有可用人格。"""
|
||||
personas_root = root / PERSONAS_DIR
|
||||
if not personas_root.exists():
|
||||
raise AgentRuntimeConfigError(f"缺少 personas 目录: {personas_root}")
|
||||
|
||||
personas: list[PersonaDefinition] = []
|
||||
seen_ids: set[str] = set()
|
||||
for persona_dir in sorted(personas_root.iterdir()):
|
||||
if not persona_dir.is_dir():
|
||||
continue
|
||||
persona_path = persona_dir / PERSONA_FILE
|
||||
if not persona_path.exists():
|
||||
continue
|
||||
document = self._read_markdown(persona_path)
|
||||
persona_id = str(document.metadata.get("persona_id") or persona_dir.name).strip()
|
||||
if not persona_id:
|
||||
raise AgentRuntimeConfigError(f"{persona_path} 缺少 persona_id")
|
||||
if persona_id in seen_ids:
|
||||
raise AgentRuntimeConfigError(f"检测到重复的人格 ID: {persona_id}")
|
||||
seen_ids.add(persona_id)
|
||||
aliases = self._normalize_string_list(
|
||||
document.metadata.get("aliases"),
|
||||
f"{persona_path}.aliases",
|
||||
)
|
||||
personas.append(
|
||||
PersonaDefinition(
|
||||
persona_id=persona_id,
|
||||
path=persona_path,
|
||||
label=str(document.metadata.get("label") or persona_id).strip(),
|
||||
description=str(document.metadata.get("description") or "").strip(),
|
||||
text=document.body,
|
||||
aliases=aliases,
|
||||
)
|
||||
)
|
||||
|
||||
if not personas:
|
||||
raise AgentRuntimeConfigError(f"{personas_root} 中未找到任何人格定义")
|
||||
return personas
|
||||
|
||||
@staticmethod
|
||||
def _resolve_persona_definition(
|
||||
persona_query: str,
|
||||
personas: list[PersonaDefinition],
|
||||
) -> PersonaDefinition:
|
||||
"""按 persona_id、label 或 aliases 解析人格。"""
|
||||
normalized = (persona_query or "").strip()
|
||||
if not normalized:
|
||||
raise AgentRuntimeConfigError("人格 ID 不能为空")
|
||||
|
||||
for persona in personas:
|
||||
if persona.persona_id == normalized:
|
||||
return persona
|
||||
for persona in personas:
|
||||
if persona.matches(normalized):
|
||||
return persona
|
||||
|
||||
available = ", ".join(persona.persona_id for persona in personas)
|
||||
raise AgentRuntimeConfigError(
|
||||
f"未找到人格 `{persona_query}`,可用人格: {available}"
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def _validate_new_persona_id(persona_id: str) -> str:
|
||||
"""校验新建人格的 ID,避免写入非法路径。"""
|
||||
normalized = (persona_id or "").strip()
|
||||
if not normalized:
|
||||
raise AgentRuntimeConfigError("新建人格时 persona_id 不能为空")
|
||||
if not PERSONA_ID_PATTERN.fullmatch(normalized):
|
||||
raise AgentRuntimeConfigError(
|
||||
"新建人格时 persona_id 只能使用小写字母、数字、下划线和中划线,且必须以字母或数字开头"
|
||||
)
|
||||
return normalized
|
||||
|
||||
@staticmethod
|
||||
def _read_markdown(path: Path) -> ParsedMarkdownDocument:
|
||||
if not path.exists():
|
||||
raise AgentRuntimeConfigError(f"缺少配置文件: {path}")
|
||||
try:
|
||||
content = path.read_text(encoding="utf-8")
|
||||
except Exception as err: # noqa: BLE001
|
||||
raise AgentRuntimeConfigError(f"读取配置文件失败 {path}: {err}") from err
|
||||
|
||||
metadata: dict[str, Any] = {}
|
||||
body = content
|
||||
match = FRONTMATTER_PATTERN.match(content)
|
||||
if match:
|
||||
try:
|
||||
metadata = yaml.safe_load(match.group(1)) or {}
|
||||
except yaml.YAMLError as err:
|
||||
raise AgentRuntimeConfigError(
|
||||
f"YAML frontmatter 解析失败 {path}: {err}"
|
||||
) from err
|
||||
if not isinstance(metadata, dict):
|
||||
raise AgentRuntimeConfigError(f"frontmatter 必须是映射类型: {path}")
|
||||
body = content[match.end():]
|
||||
return ParsedMarkdownDocument(metadata=metadata, body=body.strip())
|
||||
|
||||
@staticmethod
|
||||
def _resolve_optional_paths(root: Path, values: Any) -> list[Path]:
|
||||
if not values:
|
||||
return []
|
||||
if not isinstance(values, list):
|
||||
raise AgentRuntimeConfigError("extra_context_files 必须是数组")
|
||||
return [AgentRuntimeManager._resolve_relative_path(root, str(value)) for value in values]
|
||||
|
||||
@staticmethod
|
||||
def _resolve_relative_path(root: Path, value: str) -> Path:
|
||||
candidate = Path(value)
|
||||
return candidate if candidate.is_absolute() else (root / candidate).resolve()
|
||||
|
||||
@staticmethod
|
||||
def _normalize_string_list(values: Any, field_name: str) -> list[str]:
|
||||
if values is None:
|
||||
return []
|
||||
if not isinstance(values, list):
|
||||
raise AgentRuntimeConfigError(f"{field_name} 必须是字符串数组")
|
||||
normalized: list[str] = []
|
||||
for value in values:
|
||||
text = str(value).strip()
|
||||
if text:
|
||||
normalized.append(text)
|
||||
return normalized
|
||||
|
||||
@staticmethod
|
||||
def _coerce_string_list(values: Any) -> list[str]:
|
||||
if not isinstance(values, list):
|
||||
return []
|
||||
return [str(value).strip() for value in values if str(value).strip()]
|
||||
|
||||
@staticmethod
|
||||
def _normalize_persona_aliases(values: Any, field_name: str) -> list[str]:
|
||||
"""规范化人格别名,保持顺序并去重。"""
|
||||
normalized = AgentRuntimeManager._normalize_string_list(values, field_name)
|
||||
deduped: list[str] = []
|
||||
seen: set[str] = set()
|
||||
for alias in normalized:
|
||||
folded = alias.casefold()
|
||||
if folded in seen:
|
||||
continue
|
||||
seen.add(folded)
|
||||
deduped.append(alias)
|
||||
return deduped
|
||||
|
||||
@staticmethod
|
||||
def _merge_persona_instructions(
|
||||
base_body: str,
|
||||
append_instructions: Optional[list[str]],
|
||||
) -> str:
|
||||
"""把增量规则安全追加到人格正文末尾。"""
|
||||
merged = (base_body or "").strip()
|
||||
if not append_instructions:
|
||||
return merged
|
||||
|
||||
extras: list[str] = []
|
||||
for item in append_instructions:
|
||||
text = str(item).strip()
|
||||
if not text:
|
||||
continue
|
||||
if not re.match(r"^([-*]|\d+\.)\s", text):
|
||||
text = f"- {text}"
|
||||
extras.append(text)
|
||||
|
||||
if not extras:
|
||||
return merged
|
||||
if not merged:
|
||||
return "\n".join(extras)
|
||||
return merged.rstrip() + "\n\n" + "\n".join(extras)
|
||||
|
||||
@staticmethod
|
||||
def _normalize_persona_body(body: Optional[str]) -> str:
|
||||
"""去掉重复的 PERSONA 标题,保持正文可安全回写。"""
|
||||
normalized = (body or "").strip()
|
||||
if not normalized:
|
||||
return ""
|
||||
if normalized.startswith("# PERSONA"):
|
||||
_, _, remainder = normalized.partition("\n")
|
||||
return remainder.strip()
|
||||
return normalized
|
||||
|
||||
def _validate_runtime_config(
|
||||
self,
|
||||
*,
|
||||
current_meta: dict[str, Any],
|
||||
persona_path: Path,
|
||||
extra_context_paths: list[Path],
|
||||
persona_text: str,
|
||||
) -> list[str]:
|
||||
warnings: list[str] = []
|
||||
required_paths = [persona_path]
|
||||
duplicates = self._find_duplicate_paths(required_paths + extra_context_paths)
|
||||
if duplicates:
|
||||
warnings.append(
|
||||
"检测到重复引用的根层配置文件: "
|
||||
+ ", ".join(path.as_posix() for path in duplicates)
|
||||
)
|
||||
|
||||
deprecated_phrases = self._normalize_string_list(
|
||||
current_meta.get("deprecated_phrases"), "deprecated_phrases"
|
||||
)
|
||||
if deprecated_phrases:
|
||||
for phrase in deprecated_phrases:
|
||||
if phrase and phrase in persona_text:
|
||||
warnings.append(f"检测到已废弃短语 `{phrase}` 仍出现在 persona 中")
|
||||
return warnings
|
||||
|
||||
@staticmethod
|
||||
def _find_duplicate_paths(paths: Iterable[Path]) -> list[Path]:
|
||||
seen: set[Path] = set()
|
||||
duplicates: list[Path] = []
|
||||
for path in paths:
|
||||
resolved = path.resolve()
|
||||
if resolved in seen and resolved not in duplicates:
|
||||
duplicates.append(resolved)
|
||||
seen.add(resolved)
|
||||
return duplicates
|
||||
|
||||
@staticmethod
|
||||
def _render_current_persona_document(
|
||||
*,
|
||||
active_persona: str,
|
||||
extra_context_files: list[str],
|
||||
deprecated_phrases: list[str],
|
||||
) -> str:
|
||||
"""统一生成 CURRENT_PERSONA.md,避免手写时结构漂移。"""
|
||||
metadata = {
|
||||
"version": CURRENT_PERSONA_SCHEMA_VERSION,
|
||||
"active_persona": active_persona,
|
||||
"extra_context_files": extra_context_files,
|
||||
"deprecated_phrases": deprecated_phrases,
|
||||
}
|
||||
body_lines = [
|
||||
"# CURRENT_PERSONA",
|
||||
"",
|
||||
f"当前激活人格:`{active_persona}`",
|
||||
"",
|
||||
"运行时加载顺序固定如下:",
|
||||
"",
|
||||
"1. 核心系统提示词(程序内置,不可运行时覆盖)",
|
||||
"2. `personas/<active_persona>/PERSONA.md`",
|
||||
"3. `extra_context_files`",
|
||||
"4. `memory/*.md`",
|
||||
"5. `activity/*.md`",
|
||||
"",
|
||||
"`memory` 中的长期偏好可以细化回复方式,但不应覆盖系统核心身份、目标和安全边界。",
|
||||
]
|
||||
frontmatter = yaml.safe_dump(
|
||||
metadata,
|
||||
sort_keys=False,
|
||||
allow_unicode=True,
|
||||
).strip()
|
||||
return f"---\n{frontmatter}\n---\n" + "\n".join(body_lines) + "\n"
|
||||
|
||||
@staticmethod
|
||||
def _render_persona_document(
|
||||
*,
|
||||
persona_id: str,
|
||||
label: str,
|
||||
description: str,
|
||||
aliases: list[str],
|
||||
body: str,
|
||||
) -> str:
|
||||
"""统一生成人格定义文件,避免手写 frontmatter 漂移。"""
|
||||
metadata = {
|
||||
"version": PERSONA_SCHEMA_VERSION,
|
||||
"persona_id": persona_id,
|
||||
"label": label,
|
||||
"description": description,
|
||||
"aliases": aliases,
|
||||
}
|
||||
frontmatter = yaml.safe_dump(
|
||||
metadata,
|
||||
sort_keys=False,
|
||||
allow_unicode=True,
|
||||
).strip()
|
||||
normalized_body = AgentRuntimeManager._normalize_persona_body(body)
|
||||
return f"---\n{frontmatter}\n---\n# PERSONA\n\n{normalized_body}\n"
|
||||
|
||||
|
||||
agent_runtime_manager = AgentRuntimeManager()
|
||||
0
app/agent/tools/__init__.py
Normal file
0
app/agent/tools/__init__.py
Normal file
361
app/agent/tools/base.py
Normal file
361
app/agent/tools/base.py
Normal file
@@ -0,0 +1,361 @@
|
||||
import asyncio
|
||||
import json
|
||||
import threading
|
||||
from abc import ABCMeta, abstractmethod
|
||||
from concurrent.futures import ThreadPoolExecutor
|
||||
from functools import partial
|
||||
from typing import Any, Callable, Optional
|
||||
|
||||
from langchain_core.tools import BaseTool
|
||||
from pydantic import PrivateAttr
|
||||
|
||||
from app.agent import StreamingHandler
|
||||
from app.chain import ChainBase
|
||||
from app.core.config import settings
|
||||
from app.db.user_oper import UserOper
|
||||
from app.helper.service import ServiceConfigHelper
|
||||
from app.log import logger
|
||||
from app.schemas import Notification
|
||||
from app.schemas.types import MessageChannel
|
||||
|
||||
|
||||
class ToolChain(ChainBase):
    """Thin ChainBase subclass used by agent tools to reach chain facilities."""
    pass
|
||||
|
||||
|
||||
# 将常见的阻塞调用按能力域拆分到独立线程池,避免外部慢 IO 抢占同一批 worker。
|
||||
_BLOCKING_BUCKET_LIMITS = {
|
||||
"default": 4,
|
||||
"config": 2,
|
||||
"db": 4,
|
||||
"downloader": 4,
|
||||
"mediaserver": 4,
|
||||
"plugin": 2,
|
||||
"rule": 2,
|
||||
"site": 4,
|
||||
"storage": 4,
|
||||
"subscribe": 2,
|
||||
"workflow": 2,
|
||||
}
|
||||
_blocking_semaphores = {
|
||||
bucket: asyncio.Semaphore(limit)
|
||||
for bucket, limit in _BLOCKING_BUCKET_LIMITS.items()
|
||||
}
|
||||
_blocking_executors: dict[str, ThreadPoolExecutor] = {}
|
||||
_blocking_executor_lock = threading.Lock()
|
||||
|
||||
|
||||
def _get_blocking_executor(bucket: str) -> ThreadPoolExecutor:
    """Lazily create the per-bucket executor so import time stays cheap."""
    with _blocking_executor_lock:
        existing = _blocking_executors.get(bucket)
        if existing is not None:
            return existing

        pool = ThreadPoolExecutor(
            max_workers=_BLOCKING_BUCKET_LIMITS[bucket],
            thread_name_prefix=f"agent-tool-{bucket}",
        )
        _blocking_executors[bucket] = pool
        return pool
|
||||
|
||||
|
||||
class MoviePilotTool(BaseTool, metaclass=ABCMeta):
|
||||
"""
|
||||
MoviePilot专用工具基类(LangChain v1 / langchain_core)
|
||||
"""
|
||||
|
||||
_session_id: str = PrivateAttr()
|
||||
_user_id: str = PrivateAttr()
|
||||
_channel: Optional[str] = PrivateAttr(default=None)
|
||||
_source: Optional[str] = PrivateAttr(default=None)
|
||||
_username: Optional[str] = PrivateAttr(default=None)
|
||||
_stream_handler: Optional[StreamingHandler] = PrivateAttr(default=None)
|
||||
_require_admin: bool = PrivateAttr(default=False)
|
||||
_agent_context: dict = PrivateAttr(default_factory=dict)
|
||||
|
||||
def __init__(self, session_id: str, user_id: str, **kwargs):
|
||||
super().__init__(**kwargs)
|
||||
self._session_id = session_id
|
||||
self._user_id = user_id
|
||||
self._require_admin = getattr(self.__class__, "require_admin", False)
|
||||
|
||||
def _run(self, *args: Any, **kwargs: Any) -> Any:
|
||||
raise NotImplementedError("MoviePilotTool 只支持异步调用,请使用 _arun")
|
||||
|
||||
async def _arun(self, *args: Any, **kwargs: Any) -> str:
|
||||
"""
|
||||
异步运行工具,负责:
|
||||
1. 在工具调用前将流式消息推送给用户
|
||||
2. 持久化工具调用记录到会话记忆
|
||||
3. 调用具体工具逻辑(子类实现的 execute 方法)
|
||||
4. 持久化工具结果到会话记忆
|
||||
5. 权限检查
|
||||
"""
|
||||
|
||||
permission_result = await self._check_permission()
|
||||
if permission_result:
|
||||
return permission_result
|
||||
|
||||
# 获取工具执行提示消息
|
||||
tool_message = self.get_tool_message(**kwargs)
|
||||
if not tool_message:
|
||||
explanation = kwargs.get("explanation")
|
||||
if explanation:
|
||||
tool_message = explanation
|
||||
|
||||
# 发送工具执行过程消息
|
||||
if self._stream_handler and self._stream_handler.is_streaming:
|
||||
if settings.AI_AGENT_VERBOSE:
|
||||
if self._stream_handler.is_auto_flushing:
|
||||
# 渠道支持编辑:工具消息追加到 buffer,由定时刷新推送
|
||||
if tool_message:
|
||||
self._stream_handler.emit(f"\n\n⚙️ => {tool_message}\n\n")
|
||||
else:
|
||||
allow_dispatch_without_context = self._agent_context.get(
|
||||
"should_dispatch_reply", False
|
||||
)
|
||||
if self._channel and self._source:
|
||||
# 渠道不支持编辑:取出 Agent 文字 + 工具消息合并独立发送
|
||||
agent_message = await self._stream_handler.take()
|
||||
messages = []
|
||||
if agent_message:
|
||||
messages.append(agent_message)
|
||||
if tool_message:
|
||||
messages.append(f"⚙️ => {tool_message}")
|
||||
if messages:
|
||||
merged_message = "\n\n".join(messages)
|
||||
await self.send_tool_message(merged_message)
|
||||
elif allow_dispatch_without_context:
|
||||
agent_message = await self._stream_handler.take()
|
||||
messages = []
|
||||
if agent_message:
|
||||
messages.append(agent_message)
|
||||
if tool_message:
|
||||
messages.append(f"⚙️ => {tool_message}")
|
||||
if messages:
|
||||
merged_message = "\n\n".join(messages)
|
||||
await self.send_tool_message(merged_message)
|
||||
else:
|
||||
# 后台 capture 流程没有渠道上下文,不能把工具提示回灌到默认通知渠道。
|
||||
self._stream_handler.record_tool_call(
|
||||
tool_name=self.name,
|
||||
tool_message=tool_message,
|
||||
tool_kwargs=kwargs,
|
||||
)
|
||||
else:
|
||||
# 非VERBOSE:不逐条回显工具调用,转为在下一段文本前补一句聚合摘要
|
||||
self._stream_handler.record_tool_call(
|
||||
tool_name=self.name,
|
||||
tool_message=tool_message,
|
||||
tool_kwargs=kwargs,
|
||||
)
|
||||
else:
|
||||
# 未启用流式传输,不发送任何工具消息内容
|
||||
pass
|
||||
|
||||
logger.debug(f"Executing tool {self.name} with args: {kwargs}")
|
||||
|
||||
# 执行具体工具逻辑
|
||||
try:
|
||||
result = await self.run(**kwargs)
|
||||
logger.debug(f"Tool {self.name} executed with result: {result}")
|
||||
except Exception as e:
|
||||
error_message = f"工具执行异常 ({type(e).__name__}): {str(e)}"
|
||||
logger.error(f"Tool {self.name} execution failed: {e}", exc_info=True)
|
||||
result = error_message
|
||||
|
||||
# 格式化结果
|
||||
if isinstance(result, str):
|
||||
formatted_result = result
|
||||
elif isinstance(result, (int, float)):
|
||||
formatted_result = str(result)
|
||||
else:
|
||||
formatted_result = json.dumps(result, ensure_ascii=False, indent=2)
|
||||
|
||||
return formatted_result
|
||||
|
||||
    def get_tool_message(self, **kwargs) -> Optional[str]:
        """
        Return a user-friendly progress message for this tool invocation.

        Subclasses may override this to build a personalised message from the
        actual call arguments. When this returns None or an empty string, the
        caller falls back to the ``explanation`` argument instead.

        Args:
            **kwargs: All tool arguments (including ``explanation``).

        Returns:
            str: Friendly message, or None / empty string to use ``explanation``.
        """
        return None
|
||||
|
||||
    @abstractmethod
    async def run(self, **kwargs) -> str:
        """Execute the concrete tool logic; must be implemented by subclasses."""
        raise NotImplementedError
|
||||
|
||||
    @staticmethod
    async def run_blocking(
        bucket: str, func: Callable[..., Any], *args: Any, **kwargs: Any
    ) -> Any:
        """
        Run blocking synchronous code in a bounded thread pool so it cannot
        stall the main FastAPI event loop.

        Args:
            bucket: Concurrency bucket name; unknown names fall back to
                the "default" bucket.
            func: The blocking callable to execute.
            *args: Positional arguments forwarded to ``func``.
            **kwargs: Keyword arguments forwarded to ``func``.

        Returns:
            Whatever ``func`` returns.
        """
        # Unknown buckets share the "default" semaphore/executor pair.
        bucket_name = bucket if bucket in _BLOCKING_BUCKET_LIMITS else "default"
        semaphore = _blocking_semaphores[bucket_name]
        # Bind arguments now; run_in_executor only accepts a zero-arg callable.
        bound_call = partial(func, *args, **kwargs)

        # The semaphore caps concurrent blocking calls per bucket before any
        # thread is even requested from the executor.
        async with semaphore:
            loop = asyncio.get_running_loop()
            return await loop.run_in_executor(
                _get_blocking_executor(bucket_name), bound_call
            )
|
||||
|
||||
def set_message_attr(self, channel: str, source: str, username: str):
|
||||
"""
|
||||
设置消息属性
|
||||
"""
|
||||
self._channel = channel
|
||||
self._source = source
|
||||
self._username = username
|
||||
|
||||
    def set_stream_handler(self, stream_handler: StreamingHandler):
        """
        Attach the streaming callback handler used to relay tool output.
        """
        self._stream_handler = stream_handler
|
||||
|
||||
def set_agent_context(self, agent_context: Optional[dict]):
|
||||
"""
|
||||
设置与当前 Agent 共享的上下文。
|
||||
"""
|
||||
self._agent_context = agent_context or {}
|
||||
|
||||
    async def _check_permission(self) -> Optional[str]:
        """
        Check whether the current user may run this tool.

        Order of checks:
        1. Tools that do not require admin rights are always allowed.
        2. Otherwise, check whether the user is a channel administrator.
        3. If the channel defines no admin list, check whether the user is a
           system superuser.
        4. Failing that, compare the user ID with the channel's configured
           user ID.
        5. Otherwise return a permission-denied message.

        Returns:
            None when access is allowed, otherwise a user-facing denial text.
        """
        if not self._require_admin:
            return None

        # Without channel context there is nothing to check against.
        if not self._channel or not self._source:
            return None

        # Channel configuration comes from the SystemConfigOper in-memory
        # cache and can be read directly; only the user record needs an
        # async database lookup.
        user_id_str = str(self._user_id) if self._user_id else None

        channel_type_map = {
            MessageChannel.Telegram: "telegram",
            MessageChannel.Discord: "discord",
            MessageChannel.Wechat: "wechat",
            MessageChannel.Slack: "slack",
            MessageChannel.VoceChat: "vocechat",
            MessageChannel.SynologyChat: "synologychat",
            MessageChannel.QQ: "qqbot",
        }

        channel_type = None
        for key, value in channel_type_map.items():
            if self._channel == key.value:
                channel_type = value
                break

        # Unknown channels are not subject to the admin check.
        if not channel_type:
            return None

        admin_key_map = {
            "telegram": "TELEGRAM_ADMINS",
            "discord": "DISCORD_ADMINS",
            "wechat": "WECHAT_ADMINS",
            "slack": "SLACK_ADMINS",
            "vocechat": "VOCECHAT_ADMINS",
            "synologychat": "SYNOLOGYCHAT_ADMINS",
            "qqbot": "QQBOT_ADMINS",
        }

        user_id_key_map = {
            "telegram": "TELEGRAM_CHAT_ID",
            "vocechat": "VOCECHAT_CHANNEL_ID",
            "wechat": "WECHAT_BOT_CHAT_ID",
        }

        admin_key = admin_key_map.get(channel_type)
        user_id_key = user_id_key_map.get(channel_type)

        try:
            configs = ServiceConfigHelper.get_notification_configs()
            for config in configs:
                # Only the notification config matching this message source
                # is consulted; the loop returns on the first match.
                if config.name == self._source and config.config:
                    channel_admins = config.config.get(admin_key) if admin_key else None
                    if channel_admins:
                        # The channel defines its own admin list
                        # (comma-separated user IDs).
                        admin_list = [
                            aid.strip()
                            for aid in str(channel_admins).split(",")
                            if aid.strip()
                        ]
                        if user_id_str and user_id_str in admin_list:
                            return None

                        # Not a channel admin — fall back to superuser check.
                        user = (
                            await UserOper().async_get_by_name(self._username)
                            if self._username
                            else None
                        )
                        if user and user.is_superuser:
                            return None

                        return (
                            "抱歉,您没有执行此工具的权限。"
                            "只有渠道管理员或系统管理员才能执行工具操作。"
                            "如需执行工具,请联系渠道管理员将您的用户ID添加到渠道管理员列表中,"
                            "或联系系统管理员为您设置权限。"
                        )
                    else:
                        # No channel admin list configured: superuser first.
                        user = (
                            await UserOper().async_get_by_name(self._username)
                            if self._username
                            else None
                        )
                        if user and user.is_superuser:
                            return None

                        # Last resort: the channel's configured user ID.
                        if user_id_key:
                            config_user_id = config.config.get(user_id_key)
                            if config_user_id and str(config_user_id) == user_id_str:
                                return None

                        return (
                            "抱歉,您没有执行此工具的权限。"
                            "只有系统管理员才能执行工具操作。"
                            "如需执行工具,请联系系统管理员为您设置权限。"
                        )
        except Exception as e:
            # Never block tool execution just because the permission lookup
            # itself failed; log and allow.
            logger.error(f"检查权限失败: {e}")

        # No matching notification config found: allow by default.
        return None
|
||||
|
||||
    async def send_tool_message(
        self, message: str, title: str = "", image: Optional[str] = None
    ):
        """
        Send a tool progress/result message back to the originating channel.

        Args:
            message: Message body text.
            title: Optional message title.
            image: Optional image URL to attach.
        """
        await ToolChain().async_post_message(
            Notification(
                channel=self._channel,
                source=self._source,
                userid=self._user_id,
                username=self._username,
                title=title,
                text=message,
                image=image,
            )
        )
|
||||
281
app/agent/tools/factory.py
Normal file
281
app/agent/tools/factory.py
Normal file
@@ -0,0 +1,281 @@
|
||||
from typing import List, Callable
|
||||
|
||||
from app.agent.tools.impl.add_download import AddDownloadTool
|
||||
from app.agent.tools.impl.add_subscribe import AddSubscribeTool
|
||||
from app.agent.tools.impl.update_subscribe import UpdateSubscribeTool
|
||||
from app.agent.tools.impl.search_subscribe import SearchSubscribeTool
|
||||
from app.agent.tools.impl.get_recommendations import GetRecommendationsTool
|
||||
from app.agent.tools.impl.query_downloaders import QueryDownloadersTool
|
||||
from app.agent.tools.impl.query_download_tasks import QueryDownloadTasksTool
|
||||
from app.agent.tools.impl.query_library_exists import QueryLibraryExistsTool
|
||||
from app.agent.tools.impl.query_library_latest import QueryLibraryLatestTool
|
||||
from app.agent.tools.impl.query_sites import QuerySitesTool
|
||||
from app.agent.tools.impl.update_site import UpdateSiteTool
|
||||
from app.agent.tools.impl.query_site_userdata import QuerySiteUserdataTool
|
||||
from app.agent.tools.impl.test_site import TestSiteTool
|
||||
from app.agent.tools.impl.query_subscribes import QuerySubscribesTool
|
||||
from app.agent.tools.impl.query_subscribe_shares import QuerySubscribeSharesTool
|
||||
from app.agent.tools.impl.query_rule_groups import QueryRuleGroupsTool
|
||||
from app.agent.tools.impl.query_builtin_filter_rules import QueryBuiltinFilterRulesTool
|
||||
from app.agent.tools.impl.query_custom_filter_rules import QueryCustomFilterRulesTool
|
||||
from app.agent.tools.impl.add_custom_filter_rule import AddCustomFilterRuleTool
|
||||
from app.agent.tools.impl.update_custom_filter_rule import UpdateCustomFilterRuleTool
|
||||
from app.agent.tools.impl.delete_custom_filter_rule import DeleteCustomFilterRuleTool
|
||||
from app.agent.tools.impl.add_rule_group import AddRuleGroupTool
|
||||
from app.agent.tools.impl.update_rule_group import UpdateRuleGroupTool
|
||||
from app.agent.tools.impl.delete_rule_group import DeleteRuleGroupTool
|
||||
from app.agent.tools.impl.query_popular_subscribes import QueryPopularSubscribesTool
|
||||
from app.agent.tools.impl.query_subscribe_history import QuerySubscribeHistoryTool
|
||||
from app.agent.tools.impl.delete_subscribe import DeleteSubscribeTool
|
||||
from app.agent.tools.impl.search_media import SearchMediaTool
|
||||
from app.agent.tools.impl.search_person import SearchPersonTool
|
||||
from app.agent.tools.impl.search_person_credits import SearchPersonCreditsTool
|
||||
from app.agent.tools.impl.recognize_media import RecognizeMediaTool
|
||||
from app.agent.tools.impl.scrape_metadata import ScrapeMetadataTool
|
||||
from app.agent.tools.impl.query_episode_schedule import QueryEpisodeScheduleTool
|
||||
from app.agent.tools.impl.query_media_detail import QueryMediaDetailTool
|
||||
from app.agent.tools.impl.search_torrents import SearchTorrentsTool
|
||||
from app.agent.tools.impl.get_search_results import GetSearchResultsTool
|
||||
from app.agent.tools.impl.search_web import SearchWebTool
|
||||
from app.agent.tools.impl.send_message import SendMessageTool
|
||||
from app.agent.tools.impl.ask_user_choice import AskUserChoiceTool
|
||||
from app.agent.tools.impl.send_local_file import SendLocalFileTool
|
||||
from app.agent.tools.impl.send_voice_message import SendVoiceMessageTool
|
||||
from app.agent.tools.impl.query_schedulers import QuerySchedulersTool
|
||||
from app.agent.tools.impl.run_scheduler import RunSchedulerTool
|
||||
from app.agent.tools.impl.query_workflows import QueryWorkflowsTool
|
||||
from app.agent.tools.impl.run_workflow import RunWorkflowTool
|
||||
from app.agent.tools.impl.query_personas import QueryPersonasTool
|
||||
from app.agent.tools.impl.switch_persona import SwitchPersonaTool
|
||||
from app.agent.tools.impl.update_persona_definition import UpdatePersonaDefinitionTool
|
||||
from app.agent.tools.impl.update_site_cookie import UpdateSiteCookieTool
|
||||
from app.agent.tools.impl.delete_download import DeleteDownloadTool
|
||||
from app.agent.tools.impl.delete_download_history import DeleteDownloadHistoryTool
|
||||
from app.agent.tools.impl.delete_transfer_history import DeleteTransferHistoryTool
|
||||
from app.agent.tools.impl.modify_download import ModifyDownloadTool
|
||||
from app.agent.tools.impl.query_directory_settings import QueryDirectorySettingsTool
|
||||
from app.agent.tools.impl.list_directory import ListDirectoryTool
|
||||
from app.agent.tools.impl.query_transfer_history import QueryTransferHistoryTool
|
||||
from app.agent.tools.impl.transfer_file import TransferFileTool
|
||||
from app.agent.tools.impl.execute_command import ExecuteCommandTool
|
||||
from app.agent.tools.impl.edit_file import EditFileTool
|
||||
from app.agent.tools.impl.write_file import WriteFileTool
|
||||
from app.agent.tools.impl.read_file import ReadFileTool
|
||||
from app.agent.tools.impl.browse_webpage import BrowseWebpageTool
|
||||
from app.agent.tools.impl.query_installed_plugins import QueryInstalledPluginsTool
|
||||
from app.agent.tools.impl.query_market_plugins import QueryMarketPluginsTool
|
||||
from app.agent.tools.impl.query_plugin_capabilities import QueryPluginCapabilitiesTool
|
||||
from app.agent.tools.impl.query_plugin_config import QueryPluginConfigTool
|
||||
from app.agent.tools.impl.update_plugin_config import UpdatePluginConfigTool
|
||||
from app.agent.tools.impl.reload_plugin import ReloadPluginTool
|
||||
from app.agent.tools.impl.query_plugin_data import QueryPluginDataTool
|
||||
from app.agent.tools.impl.install_plugin import InstallPluginTool
|
||||
from app.agent.tools.impl.uninstall_plugin import UninstallPluginTool
|
||||
from app.agent.tools.impl.run_slash_command import RunSlashCommandTool
|
||||
from app.agent.tools.impl.list_slash_commands import ListSlashCommandsTool
|
||||
from app.agent.tools.impl.query_custom_identifiers import QueryCustomIdentifiersTool
|
||||
from app.agent.tools.impl.update_custom_identifiers import UpdateCustomIdentifiersTool
|
||||
from app.core.plugin import PluginManager
|
||||
from app.log import logger
|
||||
from app.schemas.message import ChannelCapabilityManager
|
||||
from app.schemas.types import MessageChannel
|
||||
from .base import MoviePilotTool
|
||||
|
||||
|
||||
class MoviePilotToolFactory:
    """
    Factory that assembles the full set of MoviePilot agent tools
    (built-in tools plus tools contributed by plugins).
    """

    # These general-purpose tools must always survive tool-set pruning so the
    # agent never loses basic filesystem, command-execution or interactive
    # confirmation abilities. AskUserChoiceTool is only injected on channels
    # that support buttons, hence the intersection with loaded tools below.
    TOOL_SELECTOR_ALWAYS_INCLUDE_NAMES = (
        "list_directory",
        "write_file",
        "read_file",
        "edit_file",
        "execute_command",
        "ask_user_choice",
    )

    @staticmethod
    def _should_enable_choice_tool(channel: str = None) -> bool:
        # The choice tool needs both buttons and callback support.
        if not channel:
            return False
        try:
            message_channel = MessageChannel(channel)
        except ValueError:
            # Unknown channel identifier: no interactive choices.
            return False
        return ChannelCapabilityManager.supports_buttons(
            message_channel
        ) and ChannelCapabilityManager.supports_callbacks(message_channel)

    @classmethod
    def get_tool_selector_always_include_names(
        cls, tools: List[MoviePilotTool]
    ) -> List[str]:
        """
        Return the names of tools that are actually loaded AND should bypass
        tool selection.

        `LLMToolSelectorMiddleware` validates that every name in
        `always_include` exists in the current request, so the constant list
        must be intersected with the runtime tool list here.
        """
        available_tool_names = {
            tool.name for tool in tools if getattr(tool, "name", None)
        }
        return [
            tool_name
            for tool_name in cls.TOOL_SELECTOR_ALWAYS_INCLUDE_NAMES
            if tool_name in available_tool_names
        ]

    @staticmethod
    def create_tools(
        session_id: str,
        user_id: str,
        channel: str = None,
        source: str = None,
        username: str = None,
        stream_handler: Callable = None,
        agent_context: dict = None,
        allow_message_tools: bool = True,
    ) -> List[MoviePilotTool]:
        """
        Build the list of MoviePilot tools for one agent session.

        Args:
            session_id: Agent session identifier propagated to every tool.
            user_id: User identifier propagated to every tool.
            channel: Originating message channel (controls optional tools).
            source: Notification source name.
            username: Originating username.
            stream_handler: Streaming callback handler shared by all tools.
            agent_context: Context dict shared with the current agent.
            allow_message_tools: When False, tools flagged ``sends_message``
                are skipped entirely.

        Returns:
            The instantiated, fully wired tool list.
        """
        tools = []
        tool_definitions = [
            SearchMediaTool,
            SearchPersonTool,
            SearchPersonCreditsTool,
            RecognizeMediaTool,
            ScrapeMetadataTool,
            QueryEpisodeScheduleTool,
            QueryMediaDetailTool,
            AddSubscribeTool,
            UpdateSubscribeTool,
            SearchSubscribeTool,
            SearchTorrentsTool,
            GetSearchResultsTool,
            SearchWebTool,
            AddDownloadTool,
            QuerySubscribesTool,
            QuerySubscribeSharesTool,
            QueryPopularSubscribesTool,
            QueryBuiltinFilterRulesTool,
            QueryCustomFilterRulesTool,
            QueryRuleGroupsTool,
            AddCustomFilterRuleTool,
            UpdateCustomFilterRuleTool,
            DeleteCustomFilterRuleTool,
            AddRuleGroupTool,
            UpdateRuleGroupTool,
            DeleteRuleGroupTool,
            QuerySubscribeHistoryTool,
            DeleteSubscribeTool,
            QueryDownloadTasksTool,
            DeleteDownloadTool,
            DeleteDownloadHistoryTool,
            DeleteTransferHistoryTool,
            ModifyDownloadTool,
            QueryDownloadersTool,
            QuerySitesTool,
            UpdateSiteTool,
            QuerySiteUserdataTool,
            TestSiteTool,
            UpdateSiteCookieTool,
            GetRecommendationsTool,
            QueryLibraryExistsTool,
            QueryLibraryLatestTool,
            QueryDirectorySettingsTool,
            ListDirectoryTool,
            QueryTransferHistoryTool,
            TransferFileTool,
            SendMessageTool,
            QuerySchedulersTool,
            RunSchedulerTool,
            QueryWorkflowsTool,
            RunWorkflowTool,
            QueryPersonasTool,
            SwitchPersonaTool,
            UpdatePersonaDefinitionTool,
            ExecuteCommandTool,
            EditFileTool,
            WriteFileTool,
            ReadFileTool,
            BrowseWebpageTool,
            QueryInstalledPluginsTool,
            QueryMarketPluginsTool,
            QueryPluginCapabilitiesTool,
            QueryPluginConfigTool,
            UpdatePluginConfigTool,
            ReloadPluginTool,
            QueryPluginDataTool,
            InstallPluginTool,
            UninstallPluginTool,
            RunSlashCommandTool,
            ListSlashCommandsTool,
            QueryCustomIdentifiersTool,
            UpdateCustomIdentifiersTool,
        ]
        # The interactive choice tool is only useful on channels that support
        # buttons and callbacks.
        if MoviePilotToolFactory._should_enable_choice_tool(channel):
            tool_definitions.append(AskUserChoiceTool)
        # NOTE(review): reconstructed from a formatting-mangled source — it is
        # assumed the file/voice tools are added unconditionally (outside the
        # button-capability check); confirm against the original file.
        tool_definitions.extend(
            [
                SendLocalFileTool,
                SendVoiceMessageTool,
            ]
        )
        # Instantiate the built-in tools.
        for ToolClass in tool_definitions:
            tool = ToolClass(session_id=session_id, user_id=user_id)
            # Honor the caller's restriction on message-sending tools.
            if not allow_message_tools and getattr(tool, "sends_message", False):
                continue
            tool.set_message_attr(channel=channel, source=source, username=username)
            tool.set_stream_handler(stream_handler=stream_handler)
            tool.set_agent_context(agent_context=agent_context)
            tools.append(tool)

        # Load tools contributed by plugins.
        plugin_tools_count = 0
        plugin_tools_info = PluginManager().get_plugin_agent_tools()
        for plugin_info in plugin_tools_info:
            plugin_id = plugin_info.get("plugin_id")
            plugin_name = plugin_info.get("plugin_name")
            tool_classes = plugin_info.get("tools", [])
            for ToolClass in tool_classes:
                try:
                    # Plugin tool classes must derive from MoviePilotTool.
                    if not issubclass(ToolClass, MoviePilotTool):
                        logger.warning(
                            f"插件 {plugin_name}({plugin_id}) 提供的工具类 {ToolClass.__name__} 未继承自 MoviePilotTool,已跳过"
                        )
                        continue
                    # Instantiate and wire the plugin tool like a built-in one.
                    tool = ToolClass(session_id=session_id, user_id=user_id)
                    if not allow_message_tools and getattr(tool, "sends_message", False):
                        continue
                    tool.set_message_attr(
                        channel=channel, source=source, username=username
                    )
                    tool.set_stream_handler(stream_handler=stream_handler)
                    tool.set_agent_context(agent_context=agent_context)
                    tools.append(tool)
                    plugin_tools_count += 1
                    logger.debug(
                        f"成功加载插件 {plugin_name}({plugin_id}) 的工具: {ToolClass.__name__}"
                    )
                except Exception as e:
                    # A broken plugin tool must not abort tool creation.
                    logger.error(
                        f"加载插件 {plugin_name}({plugin_id}) 的工具 {ToolClass.__name__} 失败: {str(e)}"
                    )

        builtin_tools_count = len(tool_definitions)
        if plugin_tools_count > 0:
            logger.info(
                f"成功创建 {len(tools)} 个MoviePilot工具(内置工具: {builtin_tools_count} 个,插件工具: {plugin_tools_count} 个)"
            )
        else:
            logger.info(f"成功创建 {len(tools)} 个MoviePilot工具")
        return tools
|
||||
0
app/agent/tools/impl/__init__.py
Normal file
0
app/agent/tools/impl/__init__.py
Normal file
540
app/agent/tools/impl/_filter_rule_utils.py
Normal file
540
app/agent/tools/impl/_filter_rule_utils.py
Normal file
@@ -0,0 +1,540 @@
|
||||
"""过滤规则 Agent 工具共用的校验、查询和引用处理逻辑。"""
|
||||
|
||||
import copy
|
||||
import re
|
||||
from typing import Any, Dict, Iterable, Optional
|
||||
|
||||
from app.core.event import eventmanager
|
||||
from app.db import AsyncSessionFactory
|
||||
from app.db.models.subscribe import Subscribe
|
||||
from app.db.systemconfig_oper import SystemConfigOper
|
||||
from app.helper.rule import RuleHelper
|
||||
from app.modules.filter.RuleParser import RuleParser
|
||||
from app.modules.filter.builtin_rules import BUILTIN_RULE_SET
|
||||
from app.schemas import CustomRule, FilterRuleGroup
|
||||
from app.schemas.event import ConfigChangeEventData
|
||||
from app.schemas.types import EventType, SystemConfigKey
|
||||
|
||||
RULE_ID_PATTERN = re.compile(r"^[A-Za-z0-9]+$")
|
||||
RULE_TOKEN_PATTERN = re.compile(r"[A-Za-z][A-Za-z0-9]*|[0-9][A-Za-z0-9]+")
|
||||
NUMERIC_RANGE_PATTERN = re.compile(
|
||||
r"^\d+(?:\.\d+)?(?:\s*-\s*\d+(?:\.\d+)?)?$"
|
||||
)
|
||||
|
||||
MEDIA_TYPE_ALIASES = {
|
||||
"movie": "电影",
|
||||
"film": "电影",
|
||||
"tv": "电视剧",
|
||||
"series": "电视剧",
|
||||
"show": "电视剧",
|
||||
"电影": "电影",
|
||||
"电视剧": "电视剧",
|
||||
}
|
||||
|
||||
RULE_STRING_SYNTAX = {
|
||||
"level_separator": ">",
|
||||
"and_operator": "&",
|
||||
"not_operator": "!",
|
||||
"supported_grouping": "Parentheses are supported inside a single level.",
|
||||
"spacing_note": "Prefer spaces around '&', and '>' for readability; use '!RULE' for negation.",
|
||||
"match_order": "Levels are evaluated from left to right. The first matched level wins and stops further matching.",
|
||||
"match_result": "If no level matches, the torrent is filtered out. If a level matches, the torrent is kept.",
|
||||
"writing_workflow": [
|
||||
"First query built-in rules and custom rules to learn valid rule IDs.",
|
||||
"Compose one priority level with '&', '!' and optional parentheses.",
|
||||
"Join multiple priority levels with '>' from highest priority to lowest priority.",
|
||||
"Use spaces around '&', and '>' for readability.",
|
||||
],
|
||||
"examples": [
|
||||
{
|
||||
"description": "Prefer torrents with special subtitles and Chinese dubbing at 4K, otherwise fall back to Chinese subtitles and Chinese dubbing at 4K.",
|
||||
"rule_string": "SPECSUB & CNVOI & 4K & !BLU & !REMUX & !WEBDL > CNSUB & CNVOI & 4K & !BLU & !REMUX & !WEBDL",
|
||||
},
|
||||
{
|
||||
"description": "Inside one level, require 4K and reject Blu-ray source.",
|
||||
"rule_string": "4K & !BLU",
|
||||
},
|
||||
{
|
||||
"description": "Inside one level, accept either special subtitles or Chinese subtitles, then also require 1080P.",
|
||||
"rule_string": "(SPECSUB | CNSUB) & 1080P",
|
||||
},
|
||||
],
|
||||
}
|
||||
|
||||
|
||||
def normalize_optional_text(value: Optional[str]) -> Optional[str]:
    """Collapse blank strings to None so meaningless empty values are never stored."""
    if value is None:
        return None
    stripped = str(value).strip()
    if stripped:
        return stripped
    return None
|
||||
|
||||
|
||||
def normalize_media_type(value: Optional[str]) -> Optional[str]:
    """Accept English/Chinese media-type spellings and normalize to the backend's Chinese value."""
    cleaned = normalize_optional_text(value)
    if not cleaned:
        return None
    mapped = MEDIA_TYPE_ALIASES.get(cleaned.lower(), cleaned)
    if mapped in ("电影", "电视剧"):
        return mapped
    raise ValueError(
        "media_type 仅支持 '电影'、'电视剧'、'movie' 或 'tv'"
    )
|
||||
|
||||
|
||||
def validate_numeric_range(
    field_name: str, value: Optional[str]
) -> Optional[str]:
    """Validate single-value or range fields such as size_range / publish_time."""
    cleaned = normalize_optional_text(value)
    if not cleaned:
        return None
    if NUMERIC_RANGE_PATTERN.match(cleaned) is None:
        raise ValueError(
            f"{field_name} 格式无效,支持 '1000' 或 '1000-5000' 这类数字区间格式"
        )

    # For a range, the lower bound must not exceed the upper bound.
    bounds = [float(part.strip()) for part in cleaned.split("-")]
    if len(bounds) == 2 and bounds[0] > bounds[1]:
        raise ValueError(f"{field_name} 区间起始值不能大于结束值")
    return cleaned
|
||||
|
||||
|
||||
def validate_seeders(value: Optional[str]) -> Optional[str]:
    """seeders is later parsed with int(), so reject anything non-numeric up front."""
    cleaned = normalize_optional_text(value)
    if not cleaned:
        return None
    if cleaned.isdigit():
        return cleaned
    raise ValueError("seeders 必须是非负整数")
|
||||
|
||||
|
||||
def get_builtin_rules() -> Dict[str, dict]:
    """Return a deep copy of the builtin rules so callers cannot mutate the shared constant."""
    return copy.deepcopy(BUILTIN_RULE_SET)
|
||||
|
||||
|
||||
def get_custom_rules() -> list[CustomRule]:
    """Return all user-defined custom filter rules."""
    return RuleHelper().get_custom_rules()
|
||||
|
||||
|
||||
def get_rule_groups() -> list[FilterRuleGroup]:
    """Return all configured filter rule groups."""
    return RuleHelper().get_rule_groups()
|
||||
|
||||
|
||||
def build_custom_rule_map(rules: Optional[Iterable[CustomRule]] = None) -> Dict[str, CustomRule]:
    """Index custom rules by id, dropping entries whose id is empty."""
    mapping: Dict[str, CustomRule] = {}
    for rule in rules or get_custom_rules():
        if rule.id:
            mapping[rule.id] = rule
    return mapping
|
||||
|
||||
|
||||
def build_rule_group_map(
    groups: Optional[Iterable[FilterRuleGroup]] = None,
) -> Dict[str, FilterRuleGroup]:
    """Index rule groups by name, dropping unnamed entries."""
    mapping: Dict[str, FilterRuleGroup] = {}
    for group in groups or get_rule_groups():
        if group.name:
            mapping[group.name] = group
    return mapping
|
||||
|
||||
|
||||
def extract_rule_tokens(rule_string: Optional[str]) -> list[str]:
    """Pull rule IDs out of a rule string for reference analysis and unknown-rule checks."""
    if not rule_string:
        return []
    seen: list[str] = []
    # De-duplicate while preserving first-seen order, which keeps error
    # messages and displays stable.
    for token in RULE_TOKEN_PATTERN.findall(rule_string):
        if token not in seen:
            seen.append(token)
    return seen
|
||||
|
||||
|
||||
def parse_rule_string(rule_string: str) -> dict:
    """
    Parse a rule string with the backend's own RuleParser and break out
    per-level metadata.

    Args:
        rule_string: Priority levels joined by '>', each level combining rule
            IDs with '&', '!' and optional parentheses.

    Returns:
        dict with the normalized "rule_string", the parsed "levels"
        (priority, expression, referenced_rules) and the overall
        "referenced_rules".

    Raises:
        ValueError: If the string is empty, contains an empty level, or a
            level fails to parse.
    """
    normalized = normalize_optional_text(rule_string)
    if not normalized:
        raise ValueError("rule_string 不能为空")

    parser = RuleParser()
    # '>' separates priority levels, highest priority first.
    levels = [level.strip() for level in normalized.split(">")]
    if any(not level for level in levels):
        raise ValueError("rule_string 不能包含空层级,请检查 '>' 两侧内容")

    parsed_levels = []
    for index, level in enumerate(levels, start=1):
        try:
            # Syntax check only; the parse tree itself is not needed here.
            parser.parse(level)
        except Exception as exc:  # pragma: no cover - depends on pyparsing's concrete exceptions
            raise ValueError(f"规则串第 {index} 层语法错误: {exc}") from exc

        parsed_levels.append(
            {
                "priority": index,
                "expression": level,
                "referenced_rules": extract_rule_tokens(level),
            }
        )

    return {
        "rule_string": " > ".join(levels),
        "levels": parsed_levels,
        "referenced_rules": extract_rule_tokens(normalized),
    }
|
||||
|
||||
|
||||
def validate_rule_string(rule_string: str, available_rule_ids: Iterable[str]) -> dict:
    """Validate rule-string syntax and that every referenced rule id exists."""
    parsed = parse_rule_string(rule_string)
    known = set(available_rule_ids)
    missing = sorted(
        {
            rule_id
            for rule_id in parsed["referenced_rules"]
            if rule_id not in known
        }
    )
    if missing:
        raise ValueError(
            f"rule_string 引用了不存在的规则: {', '.join(missing)}"
        )
    return parsed
|
||||
|
||||
|
||||
def serialize_builtin_rule(rule_id: str, payload: dict) -> dict:
    """Shape a builtin rule into a structure the agent can read directly."""
    result = copy.deepcopy(payload)
    result.update({"id": rule_id, "source": "builtin"})
    return result
|
||||
|
||||
|
||||
def serialize_custom_rule(rule: CustomRule, group_refs: Optional[list[str]] = None) -> dict:
    """Dump a custom rule plus the names of rule groups that reference it."""
    payload = rule.model_dump(exclude_none=True)
    payload["source"] = "custom"
    if group_refs:
        payload["referenced_by_rule_groups"] = group_refs
    else:
        payload["referenced_by_rule_groups"] = []
    return payload
|
||||
|
||||
|
||||
def serialize_rule_group(group: FilterRuleGroup, usage: Optional[dict] = None) -> dict:
    """
    Serialize a rule group, attaching parse results where possible so the
    agent can understand the priority levels.

    Args:
        group: The rule group to serialize.
        usage: Optional pre-collected usage record; falls back to a blank one.

    Returns:
        dict with the group fields plus "levels", "referenced_rules",
        "syntax_valid"/"syntax_error" and "usage".
    """
    data = group.model_dump(exclude_none=True)
    if group.rule_string:
        try:
            parsed = parse_rule_string(group.rule_string)
            data["levels"] = parsed["levels"]
            data["referenced_rules"] = parsed["referenced_rules"]
            data["syntax_valid"] = True
        except ValueError as exc:
            # Even when the syntax is broken, still expose the raw tokens so
            # the agent can reason about what the group tried to reference.
            data["syntax_valid"] = False
            data["syntax_error"] = str(exc)
            data["referenced_rules"] = extract_rule_tokens(group.rule_string)
    else:
        data["syntax_valid"] = False
        data["syntax_error"] = "rule_string 为空"
        data["referenced_rules"] = []
    data["usage"] = usage or default_rule_group_usage()
    return data
|
||||
|
||||
|
||||
def default_rule_group_usage() -> dict:
    """Blank usage record for a rule group that is not referenced anywhere yet."""
    usage = dict.fromkeys(
        (
            "used_in_global_search",
            "used_in_global_subscribe",
            "used_in_global_best_version",
        ),
        False,
    )
    usage["subscribes"] = []
    return usage
|
||||
|
||||
|
||||
async def collect_rule_group_usages(
    group_names: Optional[Iterable[str]] = None,
) -> Dict[str, dict]:
    """
    Collect how rule groups are referenced by global settings and subscriptions.

    Args:
        group_names: When given, only these groups are reported; otherwise
            every referenced group gets a usage record.

    Returns:
        Mapping of group name to its usage record (see
        ``default_rule_group_usage`` for the shape).
    """
    target_names = set(group_names or [])
    search_groups = set(
        SystemConfigOper().get(SystemConfigKey.SearchFilterRuleGroups) or []
    )
    subscribe_groups = set(
        SystemConfigOper().get(SystemConfigKey.SubscribeFilterRuleGroups) or []
    )
    best_version_groups = set(
        SystemConfigOper().get(SystemConfigKey.BestVersionFilterRuleGroups) or []
    )

    # Pre-seed records for the explicitly requested names so they appear in
    # the result even when they are referenced nowhere.
    usage_map = {
        name: default_rule_group_usage()
        for name in target_names
    }

    def ensure_usage(name: str) -> dict:
        # Lazily create a record for names found in configs/subscriptions.
        if name not in usage_map:
            usage_map[name] = default_rule_group_usage()
        return usage_map[name]

    for name in search_groups:
        if target_names and name not in target_names:
            continue
        ensure_usage(name)["used_in_global_search"] = True
    for name in subscribe_groups:
        if target_names and name not in target_names:
            continue
        ensure_usage(name)["used_in_global_subscribe"] = True
    for name in best_version_groups:
        if target_names and name not in target_names:
            continue
        ensure_usage(name)["used_in_global_best_version"] = True

    # Subscription references require an async database pass.
    async with AsyncSessionFactory() as db:
        subscribes = await Subscribe.async_list(db)
        for subscribe in subscribes:
            filter_groups = subscribe.filter_groups or []
            for name in filter_groups:
                if target_names and name not in target_names:
                    continue
                ensure_usage(name)["subscribes"].append(
                    {
                        "subscribe_id": subscribe.id,
                        "name": subscribe.name,
                        "season": subscribe.season,
                        "type": subscribe.type,
                        "username": subscribe.username,
                        "best_version": bool(subscribe.best_version),
                    }
                )

    return usage_map
|
||||
|
||||
|
||||
def collect_custom_rule_group_refs(
    rule_groups: Iterable[FilterRuleGroup],
    rule_ids: Optional[Iterable[str]] = None,
) -> Dict[str, list[str]]:
    """Work out which rule groups reference which custom rules."""
    wanted = set(rule_ids or [])
    # Requested IDs always appear in the result, even with no references.
    refs: Dict[str, list[str]] = {rule_id: [] for rule_id in wanted}

    for group in rule_groups:
        if not (group.name and group.rule_string):
            continue
        for token in set(extract_rule_tokens(group.rule_string)):
            if wanted and token not in wanted:
                continue
            refs.setdefault(token, []).append(group.name)

    # Sorted group names make the output deterministic.
    for group_names in refs.values():
        group_names.sort()
    return refs
|
||||
|
||||
|
||||
def normalize_custom_rule(
    rule_id: str,
    name: str,
    include: Optional[str],
    exclude: Optional[str],
    size_range: Optional[str],
    seeders: Optional[str],
    publish_time: Optional[str],
    existing_rules: Iterable[CustomRule],
    original_rule_id: Optional[str] = None,
) -> CustomRule:
    """
    Single entry point for validating custom-rule create/update input, so the
    checks are not scattered across call sites.

    Args:
        rule_id: Proposed rule ID (ASCII letters/digits only).
        name: Display name of the rule.
        include: Optional include keywords.
        exclude: Optional exclude keywords.
        size_range: Optional size limit, "1000" or "1000-5000".
        seeders: Optional minimum seeder count (non-negative integer string).
        publish_time: Optional publish-time limit, same format as size_range.
        existing_rules: Current rules used for conflict checks.
        original_rule_id: When updating, the rule's previous ID, which is
            exempt from conflict checks against itself.

    Returns:
        The validated, normalized CustomRule.

    Raises:
        ValueError: On empty/invalid fields or ID/name conflicts.
    """
    normalized_rule_id = normalize_optional_text(rule_id)
    normalized_name = normalize_optional_text(name)
    if not normalized_rule_id:
        raise ValueError("rule_id 不能为空")
    if not normalized_name:
        raise ValueError("name 不能为空")
    if not RULE_ID_PATTERN.match(normalized_rule_id):
        raise ValueError("rule_id 仅支持英文字母和数字")
    # Builtin rule IDs can never be shadowed by custom rules.
    if (
        normalized_rule_id in BUILTIN_RULE_SET
        and normalized_rule_id != original_rule_id
    ):
        raise ValueError(
            f"rule_id '{normalized_rule_id}' 与内置规则冲突,不能覆盖内置规则"
        )

    # Reject ID/name collisions with other rules; the rule being updated may
    # keep its own ID and name.
    for existing_rule in existing_rules:
        if (
            existing_rule.id == normalized_rule_id
            and existing_rule.id != original_rule_id
        ):
            raise ValueError(f"rule_id '{normalized_rule_id}' 已存在")
        if (
            existing_rule.name == normalized_name
            and existing_rule.id != original_rule_id
        ):
            raise ValueError(f"规则名称 '{normalized_name}' 已存在")

    return CustomRule(
        id=normalized_rule_id,
        name=normalized_name,
        include=normalize_optional_text(include),
        exclude=normalize_optional_text(exclude),
        size_range=validate_numeric_range("size_range", size_range),
        seeders=validate_seeders(seeders),
        publish_time=validate_numeric_range("publish_time", publish_time),
    )
|
||||
|
||||
|
||||
def normalize_rule_group(
    name: str,
    rule_string: str,
    media_type: Optional[str],
    category: Optional[str],
    existing_groups: Iterable[FilterRuleGroup],
    available_rule_ids: Iterable[str],
    original_name: Optional[str] = None,
) -> tuple[FilterRuleGroup, dict]:
    """
    Single entry point for validating rule-group create/update input: name,
    applicable scope, and the rule string itself.

    Args:
        name: Proposed group name.
        rule_string: Priority-level rule expression (see parse_rule_string).
        media_type: Optional media-type scope ('电影'/'电视剧' or aliases).
        category: Optional category scope; requires media_type.
        existing_groups: Current groups used for name-conflict checks.
        available_rule_ids: All rule IDs the rule string may reference.
        original_name: When updating, the group's previous name, exempt from
            the conflict check against itself.

    Returns:
        (group, parsed): the normalized FilterRuleGroup plus the parse result
        of its rule string.

    Raises:
        ValueError: On empty/duplicate name, category without media_type, or
            an invalid rule string.
    """
    normalized_name = normalize_optional_text(name)
    if not normalized_name:
        raise ValueError("规则组名称不能为空")

    # A group may keep its own name while being updated.
    for group in existing_groups:
        if group.name == normalized_name and group.name != original_name:
            raise ValueError(f"规则组名称 '{normalized_name}' 已存在")

    normalized_media_type = normalize_media_type(media_type)
    normalized_category = normalize_optional_text(category)
    # A category only makes sense when scoped to a media type.
    if normalized_category and not normalized_media_type:
        raise ValueError("设置 category 时必须同时设置 media_type")

    parsed = validate_rule_string(rule_string, available_rule_ids)
    return (
        FilterRuleGroup(
            name=normalized_name,
            rule_string=parsed["rule_string"],
            media_type=normalized_media_type,
            category=normalized_category,
        ),
        parsed,
    )
|
||||
|
||||
|
||||
async def save_system_config(
    key: SystemConfigKey, value: Any
) -> Optional[bool]:
    """Persist a config value through the shared oper and emit ConfigChanged.

    List values are stripped of None/empty entries first; an emptied list is
    stored as None.
    """
    cleaned = value
    if isinstance(cleaned, list):
        kept = [entry for entry in cleaned if entry is not None and entry != ""]
        cleaned = kept or None

    saved = await SystemConfigOper().async_set(key, cleaned)
    if saved:
        # Mirror the change to listeners so dependent components can react.
        await eventmanager.async_send_event(
            etype=EventType.ConfigChanged,
            data=ConfigChangeEventData(
                key=key,
                value=cleaned,
                change_type="update",
            ),
        )
    return saved
|
||||
|
||||
|
||||
def replace_rule_id_in_rule_string(
    rule_string: str, old_rule_id: str, new_rule_id: str
) -> str:
    """Replace only whole-token occurrences of a rule ID.

    Alphanumeric look-arounds keep rule names that merely contain
    ``old_rule_id`` as a substring untouched.
    """
    token = re.escape(old_rule_id)
    return re.sub(
        rf"(?<![A-Za-z0-9]){token}(?![A-Za-z0-9])",
        new_rule_id,
        rule_string,
    )
|
||||
|
||||
|
||||
def replace_group_name_in_list(
    values: Optional[Iterable[str]], old_name: str, new_name: str
) -> list[str]:
    """Rewrite references to a renamed rule group, deduplicating in order."""
    renamed = (new_name if item == old_name else item for item in values or [])
    # dict.fromkeys preserves first-seen order while dropping duplicates.
    return list(dict.fromkeys(renamed))
|
||||
|
||||
|
||||
async def rename_rule_group_references(old_name: str, new_name: str) -> dict:
    """After a rule group is renamed, update global settings and subscribe references.

    Returns a summary dict:
        ``global_settings`` -- config key value -> updated group-name list
        ``subscribes`` -- one entry per subscribe whose filter_groups changed
    """
    changed = {
        "global_settings": {},
        "subscribes": [],
    }

    # All three global filter-group settings may reference the old name.
    for config_key in (
        SystemConfigKey.SearchFilterRuleGroups,
        SystemConfigKey.SubscribeFilterRuleGroups,
        SystemConfigKey.BestVersionFilterRuleGroups,
    ):
        original = SystemConfigOper().get(config_key) or []
        updated = replace_group_name_in_list(original, old_name, new_name)
        # Only persist (and report) keys whose value actually changed.
        if updated != original:
            await save_system_config(config_key, updated)
            changed["global_settings"][config_key.value] = updated

    # Subscribes keep their own filter_groups list; patch each one that
    # references the renamed group.
    async with AsyncSessionFactory() as db:
        subscribes = await Subscribe.async_list(db)
        for subscribe in subscribes:
            original = subscribe.filter_groups or []
            updated = replace_group_name_in_list(original, old_name, new_name)
            if updated == original:
                continue
            await subscribe.async_update(db, {"filter_groups": updated})
            changed["subscribes"].append(
                {
                    "subscribe_id": subscribe.id,
                    "name": subscribe.name,
                    "season": subscribe.season,
                    "filter_groups": updated,
                }
            )

    return changed
|
||||
|
||||
|
||||
async def remove_rule_group_references(group_name: str) -> dict:
    """Strip dangling references after a rule group has been deleted.

    Cleans the three global filter-group settings and every subscribe's
    ``filter_groups`` list, returning a summary of what changed.
    """
    changed: dict = {"global_settings": {}, "subscribes": []}

    global_keys = (
        SystemConfigKey.SearchFilterRuleGroups,
        SystemConfigKey.SubscribeFilterRuleGroups,
        SystemConfigKey.BestVersionFilterRuleGroups,
    )
    for config_key in global_keys:
        current = SystemConfigOper().get(config_key) or []
        remaining = [name for name in current if name != group_name]
        if remaining == current:
            continue
        await save_system_config(config_key, remaining)
        changed["global_settings"][config_key.value] = remaining

    async with AsyncSessionFactory() as db:
        for subscribe in await Subscribe.async_list(db):
            current = subscribe.filter_groups or []
            remaining = [name for name in current if name != group_name]
            if remaining == current:
                continue
            await subscribe.async_update(db, {"filter_groups": remaining})
            changed["subscribes"].append(
                {
                    "subscribe_id": subscribe.id,
                    "name": subscribe.name,
                    "season": subscribe.season,
                    "filter_groups": remaining,
                }
            )

    return changed
|
||||
290
app/agent/tools/impl/_plugin_tool_utils.py
Normal file
290
app/agent/tools/impl/_plugin_tool_utils.py
Normal file
@@ -0,0 +1,290 @@
|
||||
"""插件 Agent 工具共享辅助方法"""
|
||||
|
||||
import json
|
||||
import shutil
|
||||
from typing import Any, Optional
|
||||
|
||||
from app.core.config import settings
|
||||
from app.core.plugin import PluginManager
|
||||
from app.db.systemconfig_oper import SystemConfigOper
|
||||
from app.helper.plugin import PluginHelper
|
||||
from app.schemas.types import SystemConfigKey
|
||||
|
||||
# The agent only ever receives a readable preview so that oversized plugin
# data cannot blow up the context window.
DEFAULT_PLUGIN_DATA_PREVIEW_CHARS = 12_000
# Hard cap for caller-requested preview sizes.
MAX_PLUGIN_DATA_PREVIEW_CHARS = 50_000
# NOTE(review): presumably caps how many data keys a preview lists -- not
# used in this chunk, confirm at the use site.
PLUGIN_DATA_KEY_PREVIEW_LIMIT = 50
# Suffix appended to a preview when the serialized payload was truncated.
PLUGIN_DATA_TRUNCATION_SUFFIX = "\n...(插件数据内容过长,已截断)"
# Default maximum number of plugin candidates returned by a search.
DEFAULT_PLUGIN_CANDIDATE_LIMIT = 500
|
||||
|
||||
|
||||
def get_plugin_snapshot(plugin_id: str) -> Optional[dict[str, Any]]:
    """
    Return a compact info snapshot for an installed plugin, or None when
    no local plugin carries the given ID.
    """
    matched = next(
        (
            plugin
            for plugin in PluginManager().get_local_plugins()
            if plugin.id == plugin_id
        ),
        None,
    )
    if matched is None:
        return None
    return {
        "plugin_id": matched.id,
        "plugin_name": matched.plugin_name,
        "plugin_version": matched.plugin_version,
        "state": matched.state,
    }
|
||||
|
||||
|
||||
def clamp_preview_chars(max_chars: Optional[int]) -> int:
    """
    Clamp the requested preview length into the supported window.

    None falls back to the default; explicit values are forced into
    [512, MAX_PLUGIN_DATA_PREVIEW_CHARS].
    """
    if max_chars is None:
        return DEFAULT_PLUGIN_DATA_PREVIEW_CHARS
    requested = int(max_chars)
    return min(max(requested, 512), MAX_PLUGIN_DATA_PREVIEW_CHARS)
|
||||
|
||||
|
||||
def serialize_for_agent(value: Any) -> str:
    """
    Serialize a result to a stable JSON string; objects that are not natively
    JSON-serializable degrade to their str() form.
    """
    return json.dumps(
        value,
        ensure_ascii=False,
        indent=2,
        default=str,
    )
|
||||
|
||||
|
||||
def build_preview_payload(value: Any, max_chars: Optional[int]) -> tuple[bool, int, int, str]:
    """
    Build a (possibly truncated) preview for potentially large plugin data.

    Returns ``(truncated, full_len, preview_len, preview_text)`` where
    ``truncated`` tells whether the serialized payload exceeded the clamped
    preview limit.
    """
    serialized = serialize_for_agent(value)
    # Compute the clamped limit once (the original called clamp twice on the
    # same argument for no benefit).
    preview_limit = clamp_preview_chars(max_chars)
    if len(serialized) <= preview_limit:
        return False, len(serialized), len(serialized), serialized

    preview = serialized[:preview_limit] + PLUGIN_DATA_TRUNCATION_SUFFIX
    return True, len(serialized), len(preview), preview
|
||||
|
||||
|
||||
def reload_plugin_runtime(plugin_id: str) -> None:
    """
    Reload a plugin and re-register its commands, scheduled jobs and APIs.
    """
    # Imported lazily so that plain query tools do not pay the initialization
    # cost of the command/scheduler/API subsystems.
    from app.api.endpoints.plugin import register_plugin_api
    from app.command import Command
    from app.scheduler import Scheduler

    plugin_manager = PluginManager()
    plugin_manager.reload_plugin(plugin_id)
    # Refresh every runtime registration that depends on the plugin instance.
    Scheduler().update_plugin_job(plugin_id)
    Command().init_commands(plugin_id)
    register_plugin_api(plugin_id)
|
||||
|
||||
|
||||
def summarize_plugin(plugin: Any) -> dict[str, Any]:
    """
    Extract the summary fields of a plugin object that matter to the agent.
    """
    repo = getattr(plugin, "repo_url", None)
    summary: dict[str, Any] = {
        field: getattr(plugin, field, None)
        for field in (
            "id",
            "plugin_name",
            "plugin_desc",
            "plugin_version",
            "plugin_author",
        )
    }
    summary["installed"] = bool(getattr(plugin, "installed", False))
    summary["has_update"] = bool(getattr(plugin, "has_update", False))
    summary["state"] = bool(getattr(plugin, "state", False))
    summary["repo_url"] = repo
    summary["source"] = "local_repo" if PluginHelper.is_local_repo_url(repo) else "market"
    return summary
|
||||
|
||||
|
||||
async def load_market_plugins(force_refresh: bool = False) -> list[Any]:
    """
    Aggregate candidate plugins from the plugin market and local repos.
    """
    manager = PluginManager()
    online = await manager.async_get_online_plugins(force=force_refresh)
    local = manager.get_local_repo_plugins()
    if online or local:
        return manager.process_plugins_list(online + local, [])
    return []
|
||||
|
||||
|
||||
def list_installed_plugins() -> list[Any]:
    """
    Return the plugins that are currently installed locally.
    """
    local_plugins = PluginManager().get_local_plugins()
    return list(filter(lambda plugin: plugin.installed, local_plugins))
|
||||
|
||||
|
||||
def _normalize_text(value: Optional[str]) -> str:
|
||||
return (value or "").strip().lower()
|
||||
|
||||
|
||||
def is_exact_plugin_match(plugin: Any, query: str) -> bool:
    """
    Exact (case-insensitive) match on plugin ID or display name, used to
    auto-select a candidate safely.
    """
    needle = _normalize_text(query)
    candidates = (
        _normalize_text(getattr(plugin, "id", None)),
        _normalize_text(getattr(plugin, "plugin_name", None)),
    )
    return needle in candidates
|
||||
|
||||
|
||||
def search_plugin_candidates(query: str, plugins: list[Any]) -> list[dict[str, Any]]:
    """
    Search candidates by plugin ID, name, description and author; return scored matches.

    Each match is ``{"plugin": obj, "score": int, "exact": bool}``. Results are
    sorted by score descending, with exact matches, updatable/installed plugins
    and newer plugins preferred as tie-breakers.
    """
    normalized_query = _normalize_text(query)
    if not normalized_query:
        return []

    # Tokens power the looser "all words appear somewhere" fallback matches.
    tokens = [token for token in normalized_query.replace("-", " ").split() if token]
    matches: list[dict[str, Any]] = []

    for plugin in plugins:
        plugin_id = _normalize_text(getattr(plugin, "id", None))
        plugin_name = _normalize_text(getattr(plugin, "plugin_name", None))
        plugin_desc = _normalize_text(getattr(plugin, "plugin_desc", None))
        plugin_author = _normalize_text(getattr(plugin, "plugin_author", None))
        haystack = "\n".join([plugin_id, plugin_name, plugin_desc, plugin_author])

        # Scoring ladder: exact > prefix > substring > token matches on the
        # identifier fields, then description/author hits. Branch order
        # matters -- the first matching branch wins.
        score = 0
        if normalized_query == plugin_id:
            score = 100
        elif normalized_query == plugin_name:
            score = 95
        elif plugin_id.startswith(normalized_query):
            score = 85
        elif plugin_name.startswith(normalized_query):
            score = 80
        elif normalized_query in plugin_id:
            score = 75
        elif normalized_query in plugin_name:
            score = 70
        elif tokens and all(token in plugin_name for token in tokens):
            score = 68
        elif tokens and all(token in plugin_id for token in tokens):
            score = 66
        elif normalized_query in plugin_desc:
            score = 45
        elif normalized_query in plugin_author:
            score = 40
        elif tokens and all(token in haystack for token in tokens):
            score = 35

        if score <= 0:
            continue

        matches.append(
            {
                "plugin": plugin,
                "score": score,
                "exact": is_exact_plugin_match(plugin, normalized_query),
            }
        )

    # Sort: score desc, exact first, then has_update / installed / add_time
    # as successive tie-breakers.
    return sorted(
        matches,
        key=lambda item: (
            -item["score"],
            not item["exact"],
            -int(bool(getattr(item["plugin"], "has_update", False))),
            -int(bool(getattr(item["plugin"], "installed", False))),
            -int(getattr(item["plugin"], "add_time", 0) or 0),
        ),
    )
|
||||
|
||||
|
||||
def summarize_candidates(matches: list[dict[str, Any]], limit: int = DEFAULT_PLUGIN_CANDIDATE_LIMIT) -> list[dict[str, Any]]:
    """
    Compact the candidate list so the agent never receives the raw market data.
    """
    summaries: list[dict[str, Any]] = []
    for item in matches[:limit]:
        summary = summarize_plugin(item["plugin"])
        summary["score"] = item["score"]
        summary["exact"] = item["exact"]
        summaries.append(summary)
    return summaries
|
||||
|
||||
|
||||
async def install_plugin_runtime(
    plugin_id: str, repo_url: Optional[str], force: bool = False
) -> tuple[bool, str, bool]:
    """
    Install a plugin with the same behavior as the existing plugin API,
    then refresh its runtime registrations.

    Returns ``(success, message, refreshed_only)`` where ``refreshed_only``
    is True when the plugin already existed and was merely re-registered.
    """
    install_plugins = SystemConfigOper().get(SystemConfigKey.UserInstalledPlugins) or []
    plugin_manager = PluginManager()
    plugin_helper = PluginHelper()

    refreshed_only = False
    if not force and plugin_id in plugin_manager.get_plugin_ids():
        # Already present: only refresh the install registration.
        refreshed_only = True
        await plugin_helper.async_install_reg(pid=plugin_id, repo_url=repo_url)
        message = "插件已存在,已刷新加载"
    else:
        # A fresh (or forced) install needs a repository to pull from.
        if not repo_url:
            return False, "没有传入仓库地址,无法正确安装插件,请检查配置", False
        state, message = await plugin_helper.async_install(
            pid=plugin_id,
            repo_url=repo_url,
            force_install=force,
        )
        if not state:
            return False, message, False

    # Record the plugin as user-installed so the choice survives restarts.
    if plugin_id not in install_plugins:
        install_plugins.append(plugin_id)
        await SystemConfigOper().async_set(
            SystemConfigKey.UserInstalledPlugins, install_plugins
        )

    reload_plugin_runtime(plugin_id)
    return True, message or "插件安装成功", refreshed_only
|
||||
|
||||
|
||||
async def uninstall_plugin_runtime(plugin_id: str) -> dict[str, Any]:
    """
    Remove a plugin following the existing uninstall logic, cleaning up its
    runtime registrations and folder grouping.

    Returns ``{"was_clone": ..., "clone_files_removed": ...}`` describing
    whether the plugin was a clone and whether its files were deleted.
    """
    # Imported lazily to avoid pulling the API/scheduler layers into tools
    # that never uninstall anything.
    from app.api.endpoints.plugin import _remove_plugin_from_folders, remove_plugin_api
    from app.scheduler import Scheduler

    # Drop the plugin from the persisted user-installed list.
    config_oper = SystemConfigOper()
    install_plugins = config_oper.get(SystemConfigKey.UserInstalledPlugins) or []
    if plugin_id in install_plugins:
        install_plugins = [plugin for plugin in install_plugins if plugin != plugin_id]
        await config_oper.async_set(SystemConfigKey.UserInstalledPlugins, install_plugins)

    # Unregister runtime hooks before touching plugin state.
    remove_plugin_api(plugin_id)
    Scheduler().remove_plugin_job(plugin_id)

    plugin_manager = PluginManager()
    plugin_class = plugin_manager.plugins.get(plugin_id)
    was_clone = bool(getattr(plugin_class, "is_clone", False))
    clone_files_removed = False

    # Cloned plugins own their config/data and on-disk files, so those are
    # deleted too; regular plugins keep their files for reinstallation.
    if was_clone:
        plugin_manager.delete_plugin_config(plugin_id)
        plugin_manager.delete_plugin_data(plugin_id)
        plugin_base_dir = settings.ROOT_PATH / "app" / "plugins" / plugin_id.lower()
        if plugin_base_dir.exists():
            try:
                shutil.rmtree(plugin_base_dir)
                plugin_manager.plugins.pop(plugin_id, None)
                clone_files_removed = True
            except Exception:
                # Best-effort deletion: report failure via the flag instead
                # of aborting the uninstall.
                clone_files_removed = False

    _remove_plugin_from_folders(plugin_id)
    plugin_manager.remove_plugin(plugin_id)

    return {
        "was_clone": was_clone,
        "clone_files_removed": clone_files_removed,
    }
|
||||
176
app/agent/tools/impl/_torrent_search_utils.py
Normal file
176
app/agent/tools/impl/_torrent_search_utils.py
Normal file
@@ -0,0 +1,176 @@
|
||||
"""种子搜索工具辅助函数"""
|
||||
|
||||
import re
|
||||
from typing import List, Optional
|
||||
|
||||
from app.core.context import Context
|
||||
from app.utils.crypto import HashUtils
|
||||
from app.utils.string import StringUtils
|
||||
|
||||
# NOTE(review): presumably the cache key under which the latest search
# results are stored -- not referenced in this chunk, confirm at use site.
SEARCH_RESULT_CACHE_FILE = "__search_result__"
# NOTE(review): presumably caps how many torrent entries are returned to the
# agent -- confirm at use site.
TORRENT_RESULT_LIMIT = 50
||||
|
||||
|
||||
def build_torrent_ref(context: Optional[Context]) -> str:
    """Short reference used to validate download requests against the cache."""
    torrent = context.torrent_info if context else None
    if not torrent:
        return ""
    return HashUtils.sha1(torrent.enclosure or "")[:7]
|
||||
|
||||
|
||||
def sort_season_options(options: List[str]) -> List[str]:
    """Order season/episode options the same way the frontend does.

    Whole-season entries come first (newest season first), then episode
    entries sorted by season/episode descending; unparsable values sort as
    season-0 episodes at the end, keeping their relative order.
    """
    if len(options) <= 1:
        return options

    pattern = re.compile(r"^S(\d+)(?:-S(\d+))?\s*(?:E(\d+)(?:-E(\d+))?)?$")
    whole_seasons = []
    episodes = []
    for position, option in enumerate(options):
        matched = pattern.match(option or "")
        if not matched:
            # Unparsable values behave like season 0 / episode 0 entries.
            episodes.append((0, 0, 0, position, option))
            continue
        season = int(matched.group(1))
        episode = int(matched.group(3)) if matched.group(3) else 0
        last_episode = int(matched.group(4)) if matched.group(4) else episode
        if matched.group(3):
            episodes.append((season, episode, last_episode, position, option))
        else:
            whole_seasons.append((season, position, option))

    whole_seasons.sort(key=lambda entry: (-entry[0], entry[1]))
    episodes.sort(
        key=lambda entry: (
            -entry[0],
            -(entry[2] or entry[1]),
            -entry[1],
            entry[3],
        )
    )
    return [entry[-1] for entry in whole_seasons] + [entry[-1] for entry in episodes]
|
||||
|
||||
|
||||
def append_option(options: List[str], value: Optional[str]) -> None:
    """Collect a non-empty value into *options*, skipping duplicates."""
    if not value:
        return
    if value in options:
        return
    options.append(value)
|
||||
|
||||
|
||||
def build_filter_options(items: List[Context]) -> dict:
    """Aggregate the frontend-style filter options from search results."""
    filter_options: dict = {
        key: []
        for key in (
            "site",
            "season",
            "freeState",
            "edition",
            "resolution",
            "videoCode",
            "releaseGroup",
        )
    }

    for item in items:
        torrent = item.torrent_info
        meta = item.meta_info
        append_option(filter_options["site"], getattr(torrent, "site_name", None))
        append_option(filter_options["season"], getattr(meta, "season_episode", None))
        append_option(filter_options["freeState"], getattr(torrent, "volume_factor", None))
        append_option(filter_options["edition"], getattr(meta, "edition", None))
        append_option(filter_options["resolution"], getattr(meta, "resource_pix", None))
        append_option(filter_options["videoCode"], getattr(meta, "video_encode", None))
        append_option(filter_options["releaseGroup"], getattr(meta, "resource_team", None))

    # Seasons get the frontend's dedicated ordering.
    filter_options["season"] = sort_season_options(filter_options["season"])
    return filter_options
|
||||
|
||||
|
||||
def match_filter(filter_values: Optional[List[str]], value: Optional[str]) -> bool:
    """Frontend multi-select semantics: an empty filter matches everything."""
    if not filter_values:
        return True
    return bool(value) and value in filter_values
|
||||
|
||||
|
||||
def filter_contexts(items: List[Context],
                    site: Optional[List[str]] = None,
                    season: Optional[List[str]] = None,
                    free_state: Optional[List[str]] = None,
                    video_code: Optional[List[str]] = None,
                    edition: Optional[List[str]] = None,
                    resolution: Optional[List[str]] = None,
                    release_group: Optional[List[str]] = None) -> List[Context]:
    """Filter search results by the same dimensions the frontend exposes."""

    def _accepts(item: Context) -> bool:
        torrent = item.torrent_info
        meta = item.meta_info
        checks = (
            (site, getattr(torrent, "site_name", None)),
            (free_state, getattr(torrent, "volume_factor", None)),
            (season, getattr(meta, "season_episode", None)),
            (release_group, getattr(meta, "resource_team", None)),
            (video_code, getattr(meta, "video_encode", None)),
            (resolution, getattr(meta, "resource_pix", None)),
            (edition, getattr(meta, "edition", None)),
        )
        return all(match_filter(values, value) for values, value in checks)

    return [item for item in items if _accepts(item)]
|
||||
|
||||
|
||||
def simplify_search_result(context: Context, index: int) -> dict:
    """Reduce one search result to the fields the agent needs.

    ``index`` is the 1-based position in the cached result list; it is
    embedded into ``torrent_info.torrent_url`` as ``<ref-hash>:<index>`` so
    the download tool can validate the reference later.
    """
    simplified = {}
    torrent_info = context.torrent_info
    meta_info = context.meta_info
    media_info = context.media_info

    if torrent_info:
        simplified["torrent_info"] = {
            "title": torrent_info.title,
            "size": StringUtils.format_size(torrent_info.size),
            "seeders": torrent_info.seeders,
            "peers": torrent_info.peers,
            "site_name": torrent_info.site_name,
            # Internal short ref consumed by add_download, not a real URL.
            "torrent_url": f"{build_torrent_ref(context)}:{index}",
            "page_url": torrent_info.page_url,
            "volume_factor": torrent_info.volume_factor,
            "freedate_diff": torrent_info.freedate_diff,
            "pubdate": torrent_info.pubdate,
        }

    if media_info:
        simplified["media_info"] = {
            "title": media_info.title,
            "en_title": media_info.en_title,
            "year": media_info.year,
            "type": media_info.type.value if media_info.type else None,
            "season": media_info.season,
            "tmdb_id": media_info.tmdb_id,
        }

    if meta_info:
        simplified["meta_info"] = {
            "name": meta_info.name,
            "cn_name": meta_info.cn_name,
            "en_name": meta_info.en_name,
            "year": meta_info.year,
            "type": meta_info.type.value if meta_info.type else None,
            "begin_season": meta_info.begin_season,
            "season_episode": meta_info.season_episode,
            "resource_team": meta_info.resource_team,
            "video_encode": meta_info.video_encode,
            "edition": meta_info.edition,
            "resource_pix": meta_info.resource_pix,
        }

    return simplified
|
||||
111
app/agent/tools/impl/add_custom_filter_rule.py
Normal file
111
app/agent/tools/impl/add_custom_filter_rule.py
Normal file
@@ -0,0 +1,111 @@
|
||||
"""新增自定义过滤规则工具。"""
|
||||
|
||||
import json
|
||||
from typing import Optional, Type
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from app.agent.tools.base import MoviePilotTool
|
||||
from app.agent.tools.impl._filter_rule_utils import (
|
||||
get_custom_rules,
|
||||
normalize_custom_rule,
|
||||
save_system_config,
|
||||
serialize_custom_rule,
|
||||
)
|
||||
from app.log import logger
|
||||
from app.schemas.types import SystemConfigKey
|
||||
|
||||
|
||||
class AddCustomFilterRuleInput(BaseModel):
    """Input schema for the add-custom-filter-rule tool."""

    # Required by the agent framework: the model must state why it calls the tool.
    explanation: str = Field(
        ...,
        description="Clear explanation of why this tool is being used in the current context",
    )
    rule_id: str = Field(
        ...,
        description="Unique custom rule ID. Only letters and numbers are allowed.",
    )
    name: str = Field(..., description="Display name of the custom rule.")
    include: Optional[str] = Field(
        None, description="Optional include regex for the rule."
    )
    exclude: Optional[str] = Field(
        None, description="Optional exclude regex for the rule."
    )
    size_range: Optional[str] = Field(
        None, description="Optional size range in MB, for example '1000-5000'."
    )
    seeders: Optional[str] = Field(
        None, description="Optional minimum seeder count as a non-negative integer."
    )
    publish_time: Optional[str] = Field(
        None,
        description="Optional publish-time filter in minutes, for example '60' or '60-1440'.",
    )
|
||||
|
||||
|
||||
class AddCustomFilterRuleTool(MoviePilotTool):
    """Agent tool that appends a new custom rule to CustomFilterRules."""

    name: str = "add_custom_filter_rule"
    description: str = (
        "Add a custom filter rule to CustomFilterRules. "
        "The new rule can then be referenced by rule ID inside filter rule groups."
    )
    args_schema: Type[BaseModel] = AddCustomFilterRuleInput
    # Mutating global filter configuration is restricted to admins.
    require_admin: bool = True

    def get_tool_message(self, **kwargs) -> Optional[str]:
        """Progress message shown to the user while the tool runs."""
        return f"新增自定义过滤规则 {kwargs.get('rule_id', '')}"

    async def run(
        self,
        rule_id: str,
        name: str,
        include: Optional[str] = None,
        exclude: Optional[str] = None,
        size_range: Optional[str] = None,
        seeders: Optional[str] = None,
        publish_time: Optional[str] = None,
        **kwargs,
    ) -> str:
        """Validate, append and persist the new rule; returns a JSON result string."""
        logger.info(f"执行工具: {self.name}, rule_id={rule_id}")

        try:
            custom_rules = get_custom_rules()
            # Raises ValueError for duplicate IDs/names or invalid field values.
            new_rule = normalize_custom_rule(
                rule_id=rule_id,
                name=name,
                include=include,
                exclude=exclude,
                size_range=size_range,
                seeders=seeders,
                publish_time=publish_time,
                existing_rules=custom_rules,
            )

            custom_rules.append(new_rule)
            await save_system_config(
                SystemConfigKey.CustomFilterRules,
                [rule.model_dump(exclude_none=True) for rule in custom_rules],
            )

            return json.dumps(
                {
                    "success": True,
                    "message": f"已新增自定义过滤规则 {new_rule.id}",
                    "custom_rule": serialize_custom_rule(new_rule),
                    "count": len(custom_rules),
                },
                ensure_ascii=False,
                indent=2,
            )
        except Exception as exc:
            # Report the failure back to the agent instead of raising.
            logger.error(f"新增自定义过滤规则失败: {exc}", exc_info=True)
            return json.dumps(
                {
                    "success": False,
                    "message": f"新增自定义过滤规则失败: {exc}",
                },
                ensure_ascii=False,
            )
|
||||
336
app/agent/tools/impl/add_download.py
Normal file
336
app/agent/tools/impl/add_download.py
Normal file
@@ -0,0 +1,336 @@
|
||||
"""添加下载工具"""
|
||||
|
||||
import re
|
||||
from pathlib import Path
|
||||
from typing import List, Optional, Type
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from app.agent.tools.base import MoviePilotTool, ToolChain
|
||||
from app.chain.search import SearchChain
|
||||
from app.chain.download import DownloadChain
|
||||
from app.core.config import settings
|
||||
from app.core.context import Context
|
||||
from app.core.metainfo import MetaInfo
|
||||
from app.db.site_oper import SiteOper
|
||||
from app.helper.directory import DirectoryHelper
|
||||
from app.log import logger
|
||||
from app.schemas import TorrentInfo, FileURI
|
||||
from app.utils.crypto import HashUtils
|
||||
|
||||
|
||||
class AddDownloadInput(BaseModel):
    """Input schema for the add-download tool."""
    # Required by the agent framework: the model must state why it calls the tool.
    explanation: str = Field(..., description="Clear explanation of why this tool is being used in the current context")
    torrent_url: List[str] = Field(
        ...,
        description="One or more torrent_url values. Supports refs from get_search_results (`hash:id`) and magnet links."
    )
    downloader: Optional[str] = Field(None,
                                      description="Name of the downloader to use (optional, uses default if not specified)")
    save_path: Optional[str] = Field(None,
                                     description="Directory path where the downloaded files should be saved. Using `<storage>:<path>` for remote storage. e.g. rclone:/MP, smb:/server/share/Movies. (optional, uses default path if not specified)")
    labels: Optional[str] = Field(None,
                                  description="Comma-separated list of labels/tags to assign to the download (optional, e.g., 'movie,hd,bluray')")
|
||||
|
||||
class AddDownloadTool(MoviePilotTool):
|
||||
name: str = "add_download"
|
||||
description: str = "Add torrent download tasks using refs from get_search_results or magnet links."
|
||||
args_schema: Type[BaseModel] = AddDownloadInput
|
||||
|
||||
def get_tool_message(self, **kwargs) -> Optional[str]:
|
||||
"""根据下载参数生成友好的提示消息"""
|
||||
torrent_urls = self._normalize_torrent_urls(kwargs.get("torrent_url"))
|
||||
downloader = kwargs.get("downloader")
|
||||
|
||||
if torrent_urls:
|
||||
if len(torrent_urls) == 1:
|
||||
if self._is_torrent_ref(torrent_urls[0]):
|
||||
message = f"添加下载任务: 资源 {torrent_urls[0]}"
|
||||
else:
|
||||
message = "添加下载任务: 磁力链接"
|
||||
else:
|
||||
message = f"批量添加下载任务: 共 {len(torrent_urls)} 个资源"
|
||||
else:
|
||||
message = "添加下载任务"
|
||||
if downloader:
|
||||
message += f" [下载器: {downloader}]"
|
||||
|
||||
return message
|
||||
|
||||
@staticmethod
|
||||
def _build_torrent_ref(context: Context) -> str:
|
||||
"""生成用于校验缓存项的短引用"""
|
||||
if not context or not context.torrent_info:
|
||||
return ""
|
||||
return HashUtils.sha1(context.torrent_info.enclosure or "")[:7]
|
||||
|
||||
@staticmethod
|
||||
def _is_torrent_ref(torrent_ref: Optional[str]) -> bool:
|
||||
"""判断是否为内部搜索结果引用"""
|
||||
if not torrent_ref:
|
||||
return False
|
||||
return bool(re.fullmatch(r"[0-9a-f]{7}:\d+", str(torrent_ref).strip()))
|
||||
|
||||
@staticmethod
|
||||
def _is_magnet_link_input(torrent_url: Optional[str]) -> bool:
|
||||
"""判断输入是否为允许直接添加的磁力链接"""
|
||||
if not torrent_url:
|
||||
return False
|
||||
value = str(torrent_url).strip()
|
||||
return value.startswith("magnet:")
|
||||
|
||||
    @classmethod
    def _resolve_cached_context(cls, torrent_ref: str) -> Optional[Context]:
        """Resolve a torrent Context from the latest search cache.

        Only the ``hash:id`` reference format is supported; the hash must
        match the cached entry so stale or forged references are rejected.
        """
        ref = str(torrent_ref).strip()
        if ":" not in ref:
            return None
        try:
            ref_hash, ref_index = ref.split(":", 1)
            index = int(ref_index)
        except (TypeError, ValueError):
            return None

        # References are 1-based.
        if index < 1:
            return None

        results = SearchChain().last_search_results() or []
        if index > len(results):
            return None
        context = results[index - 1]
        # Reject refs whose hash does not match the cached torrent.
        if not ref_hash or cls._build_torrent_ref(context) != ref_hash:
            return None
        return context
|
||||
|
||||
@classmethod
|
||||
async def _async_resolve_cached_context(cls, torrent_ref: str) -> Optional[Context]:
|
||||
"""异步读取最近搜索缓存,避免在协程里直接访问同步文件缓存。"""
|
||||
ref = str(torrent_ref).strip()
|
||||
if ":" not in ref:
|
||||
return None
|
||||
try:
|
||||
ref_hash, ref_index = ref.split(":", 1)
|
||||
index = int(ref_index)
|
||||
except (TypeError, ValueError):
|
||||
return None
|
||||
|
||||
if index < 1:
|
||||
return None
|
||||
|
||||
results = await SearchChain().async_last_search_results() or []
|
||||
if index > len(results):
|
||||
return None
|
||||
context = results[index - 1]
|
||||
if not ref_hash or cls._build_torrent_ref(context) != ref_hash:
|
||||
return None
|
||||
return context
|
||||
|
||||
@staticmethod
|
||||
def _merge_labels_with_system_tag(labels: Optional[str]) -> Optional[str]:
|
||||
"""合并用户标签与系统默认标签,确保任务可被系统管理"""
|
||||
system_tag = (settings.TORRENT_TAG or "").strip()
|
||||
user_labels = [item.strip() for item in (labels or "").split(",") if item.strip()]
|
||||
|
||||
if system_tag and system_tag not in user_labels:
|
||||
user_labels.append(system_tag)
|
||||
|
||||
return ",".join(user_labels) if user_labels else None
|
||||
|
||||
@staticmethod
|
||||
def _format_failed_result(failed_messages: List[str]) -> str:
|
||||
"""统一格式化失败结果"""
|
||||
return ", ".join([message for message in failed_messages if message])
|
||||
|
||||
    @staticmethod
    def _build_failure_message(torrent_ref: str, error_msg: Optional[str] = None) -> str:
        """Build a concise failure line for one torrent reference.

        Strips the generic failure prefix, discards error text that is just
        the magnet link echoed back, and falls back to the bare ref (for
        internal refs) or an empty string.
        """
        normalized_error = (error_msg or "").strip()
        prefix = "添加种子任务失败:"
        if normalized_error.startswith(prefix):
            normalized_error = normalized_error[len(prefix):].lstrip()
        # A magnet link as the "error" carries no information -- drop it.
        if AddDownloadTool._is_magnet_link_input(normalized_error):
            normalized_error = ""
        if normalized_error:
            return f"{torrent_ref} {normalized_error}"
        if AddDownloadTool._is_torrent_ref(torrent_ref):
            return torrent_ref
        return ""
|
||||
|
||||
@classmethod
|
||||
def _normalize_torrent_urls(cls, torrent_url: Optional[List[str] | str]) -> List[str]:
|
||||
"""统一规范 torrent_url 输入,保留所有非空值"""
|
||||
if torrent_url is None:
|
||||
return []
|
||||
|
||||
if isinstance(torrent_url, str):
|
||||
candidates = torrent_url.split(",")
|
||||
else:
|
||||
candidates = torrent_url
|
||||
|
||||
return [str(item).strip() for item in candidates if item and str(item).strip()]
|
||||
|
||||
@staticmethod
|
||||
def _resolve_direct_download_dir(save_path: Optional[str]) -> Optional[Path]:
|
||||
"""解析直接下载使用的目录,优先使用 save_path,其次使用默认下载目录"""
|
||||
if save_path:
|
||||
return Path(save_path)
|
||||
|
||||
download_dirs = DirectoryHelper().get_download_dirs()
|
||||
if not download_dirs:
|
||||
return None
|
||||
|
||||
dir_conf = download_dirs[0]
|
||||
if not dir_conf.download_path:
|
||||
return None
|
||||
|
||||
return Path(FileURI(storage=dir_conf.storage or "local", path=dir_conf.download_path).uri)
|
||||
|
||||
    @staticmethod
    def _download_direct_sync(
        torrent_input: str,
        download_dir: Path,
        merged_labels: Optional[str],
        downloader: Optional[str],
    ) -> tuple[Optional[str], Optional[str]]:
        """Add a magnet download synchronously so downloader calls do not
        block the event loop.

        Returns ``(download_id, error_message)``; ``download_id`` is None on
        failure.
        """
        result = DownloadChain().download(
            content=torrent_input,
            download_dir=download_dir,
            cookie=None,
            label=merged_labels,
            downloader=downloader,
        )
        if result:
            # download() yields a 4-tuple; only the id and error are needed.
            _, did, _, error_msg = result
        else:
            did, error_msg = None, "未找到下载器"
        return did, error_msg
|
||||
|
||||
@staticmethod
def _download_single_sync(
    context: Context,
    downloader: Optional[str],
    save_path: Optional[str],
    merged_labels: Optional[str],
) -> tuple[Optional[str], Optional[str]]:
    """Synchronously submit a context-based download task.

    Runs in a worker thread so site fetching and downloader calls do not
    block the event loop. Returns (task_id, error_message) because
    return_detail is requested.
    """
    chain = DownloadChain()
    return chain.download_single(
        context=context,
        downloader=downloader,
        save_path=save_path,
        label=merged_labels,
        return_detail=True,
    )
async def run(self, torrent_url: Optional[List[str]] = None,
              downloader: Optional[str] = None, save_path: Optional[str] = None,
              labels: Optional[str] = None, **kwargs) -> str:
    """Add one or more download tasks.

    Each input is either a cached search-result reference (hash:id) or a
    magnet link. References are resolved back into site/torrent context
    and submitted via the full download chain; magnet links are sent
    directly to the downloader. Returns a human-readable summary string.
    """
    logger.info(
        f"执行工具: {self.name}, 参数: torrent_url={torrent_url}, downloader={downloader}, save_path={save_path}, labels={labels}")

    try:
        torrent_inputs = self._normalize_torrent_urls(torrent_url)
        if not torrent_inputs:
            return "错误:torrent_url 不能为空。"

        # Merge caller labels with the system tag once for all tasks.
        merged_labels = self._merge_labels_with_system_tag(labels)
        success_count = 0
        failed_messages = []

        for torrent_input in torrent_inputs:
            if self._is_torrent_ref(torrent_input):
                # Cached search-result reference: rebuild the download context.
                cached_context = await self._async_resolve_cached_context(torrent_input)
                if not cached_context or not cached_context.torrent_info:
                    failed_messages.append(f"{torrent_input} 引用无效,请重新使用 get_search_results 查看搜索结果")
                    continue

                cached_torrent = cached_context.torrent_info
                site_name = cached_torrent.site_name
                torrent_title = cached_torrent.title or torrent_input
                torrent_description = cached_torrent.description
                enclosure = cached_torrent.enclosure

                if not site_name:
                    failed_messages.append(f"{torrent_input} 缺少站点名称")
                    continue

                # Site credentials (cookie/UA/proxy) are required to fetch the torrent.
                siteinfo = await SiteOper().async_get_by_name(site_name)
                if not siteinfo:
                    failed_messages.append(f"{torrent_input} 未找到站点信息 {site_name}")
                    continue

                torrent_info = TorrentInfo(
                    title=torrent_title,
                    description=torrent_description,
                    enclosure=enclosure,
                    site_name=site_name,
                    site_ua=siteinfo.ua,
                    site_cookie=siteinfo.cookie,
                    site_proxy=siteinfo.proxy,
                    site_order=siteinfo.pri,
                    site_downloader=siteinfo.downloader
                )
                meta_info = MetaInfo(title=torrent_title, subtitle=torrent_description)
                # Prefer the media info cached with the search result; fall back to recognition.
                media_info = cached_context.media_info if cached_context.media_info else None
                if not media_info:
                    media_info = await ToolChain().async_recognize_media(meta=meta_info)
                if not media_info:
                    failed_messages.append(f"{torrent_input} 无法识别媒体信息")
                    continue

                context = Context(
                    torrent_info=torrent_info,
                    meta_info=meta_info,
                    media_info=media_info
                )
            else:
                # Direct input path: only magnet links are accepted here.
                if not self._is_magnet_link_input(torrent_input):
                    failed_messages.append(
                        f"{torrent_input} 不是有效的下载内容,非 hash:id 时仅支持 magnet: 开头"
                    )
                    continue
                download_dir = await self.run_blocking(
                    "storage", self._resolve_direct_download_dir, save_path
                )
                if not download_dir:
                    failed_messages.append(f"{torrent_input} 缺少保存路径,且系统未配置可用下载目录")
                    continue
                did, error_msg = await self.run_blocking(
                    "downloader",
                    self._download_direct_sync,
                    torrent_input,
                    download_dir,
                    merged_labels,
                    downloader,
                )
                if did:
                    success_count += 1
                else:
                    failed_messages.append(self._build_failure_message(torrent_input, error_msg))
                # Magnet path is fully handled; skip the context-based submission below.
                continue

            # Reference path: submit the rebuilt context through the download chain.
            did, error_msg = await self.run_blocking(
                "downloader",
                self._download_single_sync,
                context,
                downloader,
                save_path,
                merged_labels,
            )
            if did:
                success_count += 1
            else:
                failed_messages.append(self._build_failure_message(torrent_input, error_msg))

        if success_count and not failed_messages:
            return "任务添加成功"

        if success_count:
            return f"部分任务添加失败:{self._format_failed_result(failed_messages)}"

        return f"任务添加失败:{self._format_failed_result(failed_messages)}"
    except Exception as e:
        logger.error(f"添加下载任务失败: {e}", exc_info=True)
        return f"添加下载任务时发生错误: {str(e)}"
115
app/agent/tools/impl/add_rule_group.py
Normal file
115
app/agent/tools/impl/add_rule_group.py
Normal file
@@ -0,0 +1,115 @@
|
||||
"""新增过滤规则组工具。"""
|
||||
|
||||
import json
|
||||
from typing import Optional, Type
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from app.agent.tools.base import MoviePilotTool
|
||||
from app.agent.tools.impl._filter_rule_utils import (
|
||||
build_custom_rule_map,
|
||||
collect_rule_group_usages,
|
||||
get_builtin_rules,
|
||||
get_custom_rules,
|
||||
get_rule_groups,
|
||||
normalize_rule_group,
|
||||
save_system_config,
|
||||
serialize_rule_group,
|
||||
)
|
||||
from app.log import logger
|
||||
from app.schemas.types import SystemConfigKey
|
||||
|
||||
|
||||
class AddRuleGroupInput(BaseModel):
    """Input schema for the add-rule-group tool."""

    # Required free-text justification the agent supplies for auditability.
    explanation: str = Field(
        ...,
        description="Clear explanation of why this tool is being used in the current context",
    )
    name: str = Field(..., description="New rule group name.")
    rule_string: str = Field(
        ...,
        description=(
            "Rule expression using built-in/custom rule IDs. "
            "Use '&', '!' inside one level, and use '>' between priority levels. "
            "Example: 'SPECSUB & CNVOI & 4K & !BLU > CNSUB & CNVOI & 4K & !BLU'."
        ),
    )
    # Optional scoping: restricts the group to a media type and, within it, a category.
    media_type: Optional[str] = Field(
        None,
        description="Optional media type scope: '电影', '电视剧', 'movie', or 'tv'.",
    )
    category: Optional[str] = Field(
        None,
        description="Optional media category. Only valid when media_type is set.",
    )
class AddRuleGroupTool(MoviePilotTool):
    """Tool that appends a new filter rule group to the system configuration."""

    name: str = "add_rule_group"
    description: str = (
        "Add a new filter rule group to UserFilterRuleGroups. "
        "Rule groups are matched level by level from left to right and can be linked to search/subscription flows. "
        "Before calling this tool, first use query_builtin_filter_rules and query_custom_filter_rules to confirm valid rule IDs, "
        "and optionally use query_rule_groups to imitate existing rule_string patterns."
    )
    args_schema: Type[BaseModel] = AddRuleGroupInput
    # Modifying global filter configuration requires admin privileges.
    require_admin: bool = True

    def get_tool_message(self, **kwargs) -> Optional[str]:
        """Return a short progress message shown to the user while the tool runs."""
        return f"新增规则组 {kwargs.get('name', '')}"

    async def run(
        self,
        name: str,
        rule_string: str,
        media_type: Optional[str] = None,
        category: Optional[str] = None,
        **kwargs,
    ) -> str:
        """Validate, persist, and report a new rule group.

        Returns a JSON string with success flag, message, the serialized
        group, and the new total group count; on failure returns a JSON
        error payload instead of raising.
        """
        logger.info(f"执行工具: {self.name}, name={name}")

        try:
            custom_rules = get_custom_rules()
            # Valid rule IDs are the union of built-in IDs and custom rule IDs.
            available_rule_ids = set(get_builtin_rules().keys()) | set(
                build_custom_rule_map(custom_rules).keys()
            )
            rule_groups = get_rule_groups()
            # normalize_rule_group validates the name/expression against existing
            # groups and known rule IDs before anything is persisted.
            new_group, _ = normalize_rule_group(
                name=name,
                rule_string=rule_string,
                media_type=media_type,
                category=category,
                existing_groups=rule_groups,
                available_rule_ids=available_rule_ids,
            )

            rule_groups.append(new_group)
            await save_system_config(
                SystemConfigKey.UserFilterRuleGroups,
                [group.model_dump(exclude_none=True) for group in rule_groups],
            )
            # Report where the new group is referenced (search/subscription flows).
            usage = await collect_rule_group_usages([new_group.name])

            return json.dumps(
                {
                    "success": True,
                    "message": f"已新增规则组 {new_group.name}",
                    "rule_group": serialize_rule_group(
                        new_group, usage.get(new_group.name)
                    ),
                    "count": len(rule_groups),
                },
                ensure_ascii=False,
                indent=2,
            )
        except Exception as exc:
            logger.error(f"新增规则组失败: {exc}", exc_info=True)
            return json.dumps(
                {
                    "success": False,
                    "message": f"新增规则组失败: {exc}",
                },
                ensure_ascii=False,
            )
236
app/agent/tools/impl/add_subscribe.py
Normal file
236
app/agent/tools/impl/add_subscribe.py
Normal file
@@ -0,0 +1,236 @@
|
||||
"""添加订阅工具"""
|
||||
|
||||
from typing import List, Optional, Type
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from app.agent.tools.base import MoviePilotTool
|
||||
from app.chain.subscribe import SubscribeChain
|
||||
from app.db.user_oper import UserOper
|
||||
from app.log import logger
|
||||
from app.schemas.types import MediaType, MessageChannel
|
||||
|
||||
|
||||
class AddSubscribeInput(BaseModel):
    """Input schema for the add-subscribe tool."""

    # Required free-text justification the agent supplies for auditability.
    explanation: str = Field(
        ...,
        description="Clear explanation of why this tool is being used in the current context",
    )
    title: str = Field(
        ...,
        description="The title of the media to subscribe to (e.g., 'The Matrix', 'Breaking Bad')",
    )
    year: str = Field(
        ...,
        description="Release year of the media (required for accurate identification)",
    )
    media_type: str = Field(..., description="Allowed values: movie, tv")
    season: Optional[int] = Field(
        None,
        description=(
            "Season number for TV shows (optional). If omitted, the subscription defaults to season 1 only. "
            "To subscribe multiple seasons or the full series, call this tool separately for each season."
        ),
    )
    # Either tmdb_id or douban_id may be supplied for precise identification.
    tmdb_id: Optional[int] = Field(
        None,
        description="TMDB database ID for precise media identification (optional, can be obtained from search_media tool)",
    )
    douban_id: Optional[str] = Field(
        None,
        description="Douban ID for precise media identification (optional, alternative to tmdb_id)",
    )
    start_episode: Optional[int] = Field(
        None,
        description="Starting episode number for TV shows (optional, defaults to 1 if not specified)",
    )
    total_episode: Optional[int] = Field(
        None,
        description="Total number of episodes for TV shows (optional, will be auto-detected from TMDB if not specified)",
    )
    # The following three filters are regular expressions applied to releases.
    quality: Optional[str] = Field(
        None,
        description="Quality filter as regular expression (optional, e.g., 'BluRay|WEB-DL|HDTV')",
    )
    resolution: Optional[str] = Field(
        None,
        description="Resolution filter as regular expression (optional, e.g., '1080p|720p|2160p')",
    )
    effect: Optional[str] = Field(
        None,
        description="Effect filter as regular expression (optional, e.g., 'HDR|DV|SDR')",
    )
    filter_groups: Optional[List[str]] = Field(
        None,
        description="List of filter rule group names to apply (optional, can be obtained from query_rule_groups tool)",
    )
    sites: Optional[List[int]] = Field(
        None,
        description="List of site IDs to search from (optional, can be obtained from query_sites tool)",
    )
class AddSubscribeTool(MoviePilotTool):
    """Tool that creates an automated download subscription for a movie or TV show."""

    name: str = "add_subscribe"
    description: str = (
        "Add media subscription to create automated download rules for movies and TV shows. "
        "The system will automatically search and download new episodes or releases based on the subscription criteria. "
        "For TV shows, omitting `season` subscribes season 1 only by default; to subscribe multiple seasons or "
        "the full series, call this tool once per season. Supports advanced filtering options like quality, "
        "resolution, and effect filters using regular expressions."
    )
    args_schema: Type[BaseModel] = AddSubscribeInput

    def get_tool_message(self, **kwargs) -> Optional[str]:
        """Build a user-facing progress message from the subscription parameters."""
        title = kwargs.get("title", "")
        year = kwargs.get("year", "")
        media_type = kwargs.get("media_type", "")
        season = kwargs.get("season")

        message = f"添加订阅: {title}"
        if year:
            message += f" ({year})"
        if media_type:
            message += f" [{media_type}]"
        if season:
            message += f" 第{season}季"
        elif media_type == "tv":
            # NOTE(review): this compares the raw string to "tv" only — a caller
            # passing '电视剧' will not get the default-season hint; display-only.
            message += " 第1季(默认)"

        return message

    async def _resolve_subscribe_username(self) -> Optional[str]:
        """Map the channel user to a system username, falling back to the channel username.

        Looks up the user binding for the current message channel; if the
        channel is unknown or no binding exists, returns self._username.
        """
        resolved_username = self._username
        if not self._channel or not self._user_id:
            return resolved_username

        try:
            channel = MessageChannel(self._channel)
        except ValueError:
            # Unknown channel value: keep the channel-supplied username.
            return resolved_username

        # Per-channel DB columns that may hold the channel user id binding.
        binding_keys = {
            MessageChannel.Telegram: ("telegram_userid",),
            MessageChannel.Discord: ("discord_userid",),
            MessageChannel.Wechat: ("wechat_userid",),
            MessageChannel.Slack: ("slack_userid",),
            MessageChannel.VoceChat: ("vocechat_userid",),
            MessageChannel.SynologyChat: ("synologychat_userid",),
            MessageChannel.QQ: ("qq_userid", "qq_openid"),
        }.get(channel)
        if not binding_keys:
            return resolved_username

        # DB lookup runs in a worker thread so it does not block the event loop.
        mapped_username = await self.run_blocking(
            "db",
            UserOper().get_name,
            **{key: self._user_id for key in binding_keys},
        )
        return mapped_username or resolved_username

    async def run(
        self,
        title: str,
        year: str,
        media_type: str,
        season: Optional[int] = None,
        tmdb_id: Optional[int] = None,
        douban_id: Optional[str] = None,
        start_episode: Optional[int] = None,
        total_episode: Optional[int] = None,
        quality: Optional[str] = None,
        resolution: Optional[str] = None,
        effect: Optional[str] = None,
        filter_groups: Optional[List[str]] = None,
        sites: Optional[List[int]] = None,
        **kwargs,
    ) -> str:
        """Create the subscription and return a human-readable result summary."""
        logger.info(
            f"执行工具: {self.name}, 参数: title={title}, year={year}, media_type={media_type}, "
            f"season={season}, tmdb_id={tmdb_id}, douban_id={douban_id}, start_episode={start_episode}, "
            f"total_episode={total_episode}, quality={quality}, resolution={resolution}, "
            f"effect={effect}, filter_groups={filter_groups}, sites={sites}"
        )

        try:
            subscribe_chain = SubscribeChain()
            media_type_enum = MediaType.from_agent(media_type)
            if not media_type_enum:
                return f"错误:无效的媒体类型 '{media_type}',支持的类型:'movie', 'tv'"
            # For display purposes: TV with no season reported as season 1.
            # NOTE(review): the actual call below passes the raw `season` (possibly
            # None); the chain presumably applies the same default — confirm.
            effective_season = (
                season
                if season is not None
                else 1
                if media_type_enum == MediaType.TV
                else None
            )
            subscribe_username = await self._resolve_subscribe_username()

            # Collect only the optional parameters that were actually provided.
            subscribe_kwargs = {}
            if start_episode is not None:
                subscribe_kwargs["start_episode"] = start_episode
            if total_episode is not None:
                subscribe_kwargs["total_episode"] = total_episode
            if quality:
                subscribe_kwargs["quality"] = quality
            if resolution:
                subscribe_kwargs["resolution"] = resolution
            if effect:
                subscribe_kwargs["effect"] = effect
            if filter_groups:
                subscribe_kwargs["filter_groups"] = filter_groups
            if sites:
                subscribe_kwargs["sites"] = sites

            sid, message = await subscribe_chain.async_add(
                mtype=media_type_enum,
                title=title,
                year=year,
                tmdbid=tmdb_id,
                doubanid=douban_id,
                season=season,
                username=subscribe_username,
                **subscribe_kwargs,
            )
            if sid:
                if message and "已存在" in message:
                    # The chain reports an existing subscription via its message text.
                    result_msg = f"订阅已存在:{title} ({year})"
                    if effective_season is not None:
                        result_msg += f" 第{effective_season}季"
                    result_msg += "。如需修改参数请先删除旧订阅。"
                    return result_msg

                result_msg = f"成功添加订阅:{title} ({year})"
                if effective_season is not None:
                    result_msg += f" 第{effective_season}季"
                    if season is None:
                        result_msg += "(未指定季号,默认按第一季订阅)"
                if subscribe_kwargs:
                    # Echo the applied optional parameters back to the user.
                    params = []
                    if start_episode is not None:
                        params.append(f"开始集数: {start_episode}")
                    if total_episode is not None:
                        params.append(f"总集数: {total_episode}")
                    if quality:
                        params.append(f"质量过滤: {quality}")
                    if resolution:
                        params.append(f"分辨率过滤: {resolution}")
                    if effect:
                        params.append(f"特效过滤: {effect}")
                    if filter_groups:
                        params.append(f"规则组: {', '.join(filter_groups)}")
                    if sites:
                        params.append(f"站点: {', '.join(map(str, sites))}")
                    if params:
                        result_msg += f"\n配置参数: {', '.join(params)}"
                return result_msg
            else:
                return f"添加订阅失败:{message}"
        except Exception as e:
            logger.error(f"添加订阅失败: {e}", exc_info=True)
            return f"添加订阅时发生错误: {str(e)}"
174
app/agent/tools/impl/ask_user_choice.py
Normal file
174
app/agent/tools/impl/ask_user_choice.py
Normal file
@@ -0,0 +1,174 @@
|
||||
"""让用户通过按钮进行选择的工具。"""
|
||||
|
||||
from typing import List, Optional, Type
|
||||
|
||||
from pydantic import BaseModel, Field, model_validator
|
||||
|
||||
from app.agent.tools.base import MoviePilotTool, ToolChain
|
||||
from app.helper.interaction import (
|
||||
AgentInteractionOption,
|
||||
agent_interaction_manager,
|
||||
)
|
||||
from app.log import logger
|
||||
from app.schemas import Notification, NotificationType
|
||||
from app.schemas.message import ChannelCapabilityManager
|
||||
from app.schemas.types import MessageChannel
|
||||
|
||||
|
||||
class UserChoiceOptionInput(BaseModel):
    """A single button option: label shown to the user, value echoed back on click."""

    label: str = Field(..., description="Text shown on the button")
    value: str = Field(
        ...,
        description="The exact content that will be sent back to the agent after the user clicks this button",
    )

    @model_validator(mode="after")
    def validate_option(self):
        """Reject options whose label or value is empty or whitespace-only."""
        if not self.label.strip():
            raise ValueError("label 不能为空")
        if not self.value.strip():
            raise ValueError("value 不能为空")
        return self
class AskUserChoiceInput(BaseModel):
    """Input schema for the button-choice tool."""

    # Required free-text justification the agent supplies for auditability.
    explanation: str = Field(
        ...,
        description="Clear explanation of why the agent needs the user to choose from buttons",
    )
    message: str = Field(
        ...,
        description="Question or prompt shown to the user together with the buttons",
    )
    title: Optional[str] = Field(
        None,
        description="Optional short title displayed above the question",
    )
    options: List[UserChoiceOptionInput] = Field(
        ...,
        description="Button options to show to the user",
    )

    @model_validator(mode="after")
    def validate_payload(self):
        """Require a non-blank message and at least one option."""
        if not self.message.strip():
            raise ValueError("message 不能为空")
        if not self.options:
            raise ValueError("options 至少需要提供一个")
        return self
class AskUserChoiceTool(MoviePilotTool):
    """Tool that presents button choices to the user on channels supporting them."""

    name: str = "ask_user_choice"
    # This tool sends a message to the user itself rather than returning text only.
    sends_message: bool = True
    description: str = (
        "Ask the user to choose from button options on channels that support interactive buttons. "
        "After the user clicks a button, the selected value will come back as the user's next message."
    )
    args_schema: Type[BaseModel] = AskUserChoiceInput
    require_admin: bool = False

    def get_tool_message(self, **kwargs) -> Optional[str]:
        """Return a short progress message, truncating long prompts to 40 chars."""
        message = kwargs.get("message", "") or ""
        if len(message) > 40:
            message = message[:40] + "..."
        return f"发送按钮选择: {message}"

    @staticmethod
    def _truncate_button_text(text: str, max_length: int) -> str:
        """Truncate button text to max_length, adding '...' when there is room.

        A max_length <= 0 disables truncation entirely.
        """
        if max_length <= 0 or len(text) <= max_length:
            return text
        if max_length <= 3:
            # Too short for an ellipsis — hard cut.
            return text[:max_length]
        return text[: max_length - 3] + "..."

    async def run(
        self,
        message: str,
        options: List[UserChoiceOptionInput],
        title: Optional[str] = None,
        **kwargs,
    ) -> str:
        """Send the button prompt and register a pending interaction request.

        Validates channel capabilities and option count, creates an
        interaction request so the callback can be matched later, then
        posts the notification with one button per row.
        """
        if not self._channel or not self._source:
            return "当前不在可回传消息的会话中,无法发起按钮选择"

        try:
            channel = MessageChannel(self._channel)
        except ValueError:
            return f"不支持的消息渠道: {self._channel}"

        # The channel must support both rendering buttons and receiving callbacks.
        if not (
            ChannelCapabilityManager.supports_buttons(channel)
            and ChannelCapabilityManager.supports_callbacks(channel)
        ):
            return f"当前渠道 {channel.value} 不支持按钮选择"

        # Layout: one button per row, capped by the channel's row limit.
        max_per_row = 1
        max_rows = ChannelCapabilityManager.get_max_button_rows(channel)
        max_text_length = ChannelCapabilityManager.get_max_button_text_length(channel)
        max_options = max_per_row * max_rows
        if len(options) > max_options:
            return f"当前渠道最多支持 {max_options} 个按钮选项"

        choice_options = [
            AgentInteractionOption(
                label=option.label.strip(), value=option.value.strip()
            )
            for option in options
        ]
        # Persist the pending request so the button callback can be resolved
        # back to this session and option list.
        request = agent_interaction_manager.create_request(
            session_id=self._session_id,
            user_id=str(self._user_id),
            channel=channel.value,
            source=self._source,
            username=self._username,
            title=title,
            prompt=message.strip(),
            options=choice_options,
        )

        # Build the button grid; callback_data encodes the request id and the
        # 1-based option index.
        buttons = []
        current_row = []
        for index, option in enumerate(choice_options, start=1):
            current_row.append(
                {
                    "text": self._truncate_button_text(option.label, max_text_length),
                    "callback_data": (
                        f"agent_interaction:choice:{request.request_id}:{index}"
                    ),
                }
            )
            if len(current_row) >= max_per_row:
                buttons.append(current_row)
                current_row = []
        if current_row:
            buttons.append(current_row)

        logger.info(
            "执行工具: %s, channel=%s, session_id=%s, options=%s",
            self.name,
            channel.value,
            self._session_id,
            len(choice_options),
        )

        await ToolChain().async_post_message(
            Notification(
                channel=channel,
                source=self._source,
                mtype=NotificationType.Agent,
                userid=self._user_id,
                username=self._username,
                title=title,
                text=message.strip(),
                buttons=buttons,
            )
        )

        # Mark the conversation so the framework knows a reply was already sent
        # and further replies should wait for the button callback.
        self._agent_context["user_reply_sent"] = True
        self._agent_context["reply_mode"] = "button_choice"
        return f"已发送 {len(choice_options)} 个按钮选项,等待用户选择"
539
app/agent/tools/impl/browse_webpage.py
Normal file
539
app/agent/tools/impl/browse_webpage.py
Normal file
@@ -0,0 +1,539 @@
|
||||
"""浏览器操作工具 - 让Agent能够通过Playwright控制浏览器进行网页交互"""
|
||||
|
||||
import asyncio
|
||||
import base64
|
||||
import json
|
||||
from enum import Enum
|
||||
from typing import Optional, Type
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from app.agent.tools.base import MoviePilotTool
|
||||
from app.core.config import settings
|
||||
from app.log import logger
|
||||
|
||||
# Maximum number of characters of page text returned to the agent.
MAX_CONTENT_LENGTH = 8000
# Default timeout for browser actions, in seconds.
DEFAULT_TIMEOUT = 30
# Viewport / screenshot maximum width, in pixels.
SCREENSHOT_MAX_WIDTH = 1280
# Viewport / screenshot maximum height, in pixels.
SCREENSHOT_MAX_HEIGHT = 720
class BrowserAction(str, Enum):
    """Supported browser actions, matching the `action` string accepted by the tool."""

    GOTO = "goto"                # navigate to a URL
    GET_CONTENT = "get_content"  # read current page content
    SCREENSHOT = "screenshot"    # capture the current page
    CLICK = "click"              # click an element by selector
    FILL = "fill"                # fill an input by selector
    SELECT = "select"            # choose a dropdown option
    EVALUATE = "evaluate"        # run JavaScript on the page
    WAIT = "wait"                # wait for an element to appear
class BrowseWebpageInput(BaseModel):
    """Input schema for the browser-control tool."""

    # Required free-text justification the agent supplies for auditability.
    explanation: str = Field(
        ...,
        description="Clear explanation of why this browser action is being performed",
    )
    action: str = Field(
        ...,
        description=(
            "The browser action to perform. Available actions:\n"
            "- 'goto': Navigate to a URL, returns page title and text summary\n"
            "- 'get_content': Get current page content (text or HTML)\n"
            "- 'screenshot': Take a screenshot of the current page, returns base64 image\n"
            "- 'click': Click on an element specified by selector\n"
            "- 'fill': Fill text into an input element specified by selector\n"
            "- 'select': Select an option from a dropdown element\n"
            "- 'evaluate': Execute JavaScript code on the page and return the result\n"
            "- 'wait': Wait for an element to appear on the page"
        ),
    )
    url: Optional[str] = Field(
        None, description="URL to navigate to (required for 'goto' action)"
    )
    selector: Optional[str] = Field(
        None,
        description="CSS selector or text selector for the target element (for 'click', 'fill', 'select', 'wait' actions). "
        "Supports CSS selectors like '#id', '.class', 'tag', and Playwright text selectors like 'text=Click me'",
    )
    value: Optional[str] = Field(
        None,
        description="Value to fill into input or option value to select (for 'fill' and 'select' actions)",
    )
    script: Optional[str] = Field(
        None,
        description="JavaScript code to execute on the page (for 'evaluate' action). "
        "The script should return a value that can be serialized to JSON.",
    )
    content_type: Optional[str] = Field(
        "text",
        description="Content type for 'get_content' action: 'text' for readable text, 'html' for raw HTML",
    )
    timeout: Optional[int] = Field(
        DEFAULT_TIMEOUT, description="Timeout in seconds for the action (default: 30)"
    )
    cookies: Optional[str] = Field(
        None,
        description="Cookies to set for the browser context, format: 'name1=value1; name2=value2'",
    )
    user_agent: Optional[str] = Field(
        None, description="Custom User-Agent string for the browser context"
    )
class BrowseWebpageTool(MoviePilotTool):
|
||||
name: str = "browse_webpage"
|
||||
description: str = (
|
||||
"Control a real browser (Playwright) to interact with web pages. "
|
||||
"Supports navigating to URLs, reading page content, taking screenshots, "
|
||||
"clicking elements, filling forms, selecting dropdown options, executing JavaScript, and waiting for elements. "
|
||||
"Use this tool when you need to interact with dynamic web pages, "
|
||||
"fill in forms, click buttons, or extract content from JavaScript-rendered pages. "
|
||||
"The browser session persists across multiple calls within the same conversation - "
|
||||
"first call 'goto' to open a page, then use other actions to interact with it."
|
||||
)
|
||||
args_schema: Type[BaseModel] = BrowseWebpageInput
|
||||
|
||||
def get_tool_message(self, **kwargs) -> Optional[str]:
    """Build a user-friendly progress message for the requested browser action."""
    requested_action = kwargs.get("action", "")
    target_url = kwargs.get("url", "")
    target_selector = kwargs.get("selector", "")
    # One message template per known action; anything else gets a generic line.
    messages_by_action = {
        "goto": f"打开网页: {target_url}",
        "get_content": "获取页面内容",
        "screenshot": "截取页面截图",
        "click": f"点击元素: {target_selector}",
        "fill": f"填写表单: {target_selector}",
        "select": f"选择选项: {target_selector}",
        "evaluate": "执行 JavaScript",
        "wait": f"等待元素: {target_selector}",
    }
    return messages_by_action.get(requested_action, f"执行浏览器操作: {requested_action}")
async def run(
    self,
    action: str,
    url: Optional[str] = None,
    selector: Optional[str] = None,
    value: Optional[str] = None,
    script: Optional[str] = None,
    content_type: Optional[str] = "text",
    timeout: Optional[int] = DEFAULT_TIMEOUT,
    cookies: Optional[str] = None,
    user_agent: Optional[str] = None,
    **kwargs,
) -> str:
    """Validate the requested browser action and execute it off the event loop.

    Returns a human-readable result string; validation errors and
    execution failures are reported as error strings rather than raised.
    """
    logger.info(
        f"执行工具: {self.name}, 动作: {action}, URL: {url}, 选择器: {selector}"
    )

    try:
        # Validate the action name against the BrowserAction enum.
        try:
            browser_action = BrowserAction(action)
        except ValueError:
            valid_actions = ", ".join([a.value for a in BrowserAction])
            return f"错误: 不支持的操作类型 '{action}',支持的操作: {valid_actions}"

        # Per-action required-parameter checks.
        if browser_action == BrowserAction.GOTO and not url:
            return "错误: 'goto' 操作需要提供 url 参数"
        if (
            browser_action
            in (
                BrowserAction.CLICK,
                BrowserAction.FILL,
                BrowserAction.SELECT,
                BrowserAction.WAIT,
            )
            and not selector
        ):
            return f"错误: '{action}' 操作需要提供 selector 参数"
        # value is checked against None (not truthiness) so an empty string
        # is a valid fill value.
        if browser_action == BrowserAction.FILL and value is None:
            return "错误: 'fill' 操作需要提供 value 参数"
        if browser_action == BrowserAction.EVALUATE and not script:
            return "错误: 'evaluate' 操作需要提供 script 参数"

        # Playwright's sync API must not run on the event loop thread, so
        # the whole browser interaction is pushed to the default executor.
        loop = asyncio.get_running_loop()
        result = await loop.run_in_executor(
            None,
            lambda: self._execute_browser_action(
                browser_action=browser_action,
                url=url,
                selector=selector,
                value=value,
                script=script,
                content_type=content_type,
                timeout=timeout,
                cookies=cookies,
                user_agent=user_agent,
            ),
        )
        return result

    except Exception as e:
        logger.error(f"浏览器操作失败: {e}", exc_info=True)
        return f"浏览器操作失败: {str(e)}"
def _execute_browser_action(
    self,
    browser_action: BrowserAction,
    url: Optional[str],
    selector: Optional[str],
    value: Optional[str],
    script: Optional[str],
    content_type: Optional[str],
    timeout: int,
    cookies: Optional[str],
    user_agent: Optional[str],
) -> str:
    """Run one Playwright browser action in a synchronous context.

    Launches a fresh headless browser per call, performs the action via
    _do_action, and always closes page/context/browser afterwards.
    Returns the action result string, or an error string on failure.
    """
    # Imported lazily so the module loads even when playwright is absent.
    from playwright.sync_api import sync_playwright

    try:
        with sync_playwright() as playwright:
            browser = None
            context = None
            page = None
            try:
                # Launch the configured browser engine (default: chromium), headless.
                browser_type = settings.PLAYWRIGHT_BROWSER_TYPE or "chromium"
                browser = playwright[browser_type].launch(headless=True)

                # Build the browsing context: optional UA override plus a fixed viewport.
                context_kwargs = {}
                if user_agent:
                    context_kwargs["user_agent"] = user_agent
                context_kwargs["viewport"] = {
                    "width": SCREENSHOT_MAX_WIDTH,
                    "height": SCREENSHOT_MAX_HEIGHT,
                }

                context = browser.new_context(**context_kwargs)
                page = context.new_page()
                page.set_default_timeout(timeout * 1000)

                # Cookies are injected as a raw Cookie request header.
                # NOTE(review): unlike context.add_cookies, this sends the header on
                # every request regardless of domain/path — confirm this is intended.
                if cookies:
                    page.set_extra_http_headers({"cookie": cookies})

                # For non-goto actions, navigate first when a URL was supplied,
                # waiting for DOM content and then network idle.
                if url and browser_action != BrowserAction.GOTO:
                    page.goto(
                        url, wait_until="domcontentloaded", timeout=timeout * 1000
                    )
                    page.wait_for_load_state("networkidle", timeout=timeout * 1000)

                # Dispatch to the per-action handler.
                result = self._do_action(
                    page,
                    browser_action,
                    url,
                    selector,
                    value,
                    script,
                    content_type,
                    timeout,
                )
                return result

            finally:
                # Always release browser resources, even on failure.
                if page:
                    page.close()
                if context:
                    context.close()
                if browser:
                    browser.close()

    except Exception as e:
        logger.error(f"Playwright 执行失败: {e}", exc_info=True)
        return f"Playwright 执行失败: {str(e)}"
def _do_action(
    self,
    page,
    browser_action: BrowserAction,
    url: Optional[str],
    selector: Optional[str],
    value: Optional[str],
    script: Optional[str],
    content_type: Optional[str],
    timeout: int,
) -> str:
    """Dispatch a validated browser action to its dedicated handler.

    Returns the handler's result string, or an "unknown action" message
    for an unrecognized action value.
    """
    # Map each action to a zero-argument thunk so handlers are only
    # invoked for the selected action.
    handlers = {
        BrowserAction.GOTO: lambda: self._action_goto(page, url, timeout),
        BrowserAction.GET_CONTENT: lambda: self._action_get_content(page, content_type),
        BrowserAction.SCREENSHOT: lambda: self._action_screenshot(page),
        BrowserAction.CLICK: lambda: self._action_click(page, selector, timeout),
        BrowserAction.FILL: lambda: self._action_fill(page, selector, value, timeout),
        BrowserAction.SELECT: lambda: self._action_select(page, selector, value, timeout),
        BrowserAction.EVALUATE: lambda: self._action_evaluate(page, script),
        BrowserAction.WAIT: lambda: self._action_wait(page, selector, timeout),
    }
    handler = handlers.get(browser_action)
    if handler is None:
        return f"未知操作: {browser_action}"
    return handler()
@staticmethod
def _action_goto(page, url: str, timeout: int) -> str:
    """Navigate *page* to *url* and return a JSON summary of the landed page.

    The summary contains the HTTP status, final URL, title, truncated body
    text, up to 30 links, and up to 30 identifiable form elements.
    """
    response = page.goto(url, wait_until="domcontentloaded", timeout=timeout * 1000)
    try:
        # Best-effort wait for network quiet, capped at 15 seconds.
        page.wait_for_load_state("networkidle", timeout=min(timeout, 15) * 1000)
    except Exception:
        # A networkidle timeout is not fatal — the page is likely already usable.
        pass

    # goto() may return None (e.g. same-document navigation).
    status = response.status if response else "unknown"
    title = page.title()
    page_url = page.url

    # Extract a readable text digest of the page body, truncated to the cap.
    text_content = page.inner_text("body")
    if text_content and len(text_content) > MAX_CONTENT_LENGTH:
        text_content = text_content[:MAX_CONTENT_LENGTH] + "\n\n...(内容已截断)"

    # Collect up to 30 links, skipping javascript: pseudo-links.
    links = page.evaluate("""
        () => {
            const links = [];
            document.querySelectorAll('a[href]').forEach(a => {
                const text = a.innerText.trim();
                const href = a.href;
                if (text && href && !href.startsWith('javascript:')) {
                    links.push({text: text.substring(0, 80), href: href});
                }
            });
            return links.slice(0, 30);
        }
    """)

    # Collect up to 30 form controls that carry some identifying attribute.
    forms = page.evaluate("""
        () => {
            const forms = [];
            document.querySelectorAll('input, textarea, select, button').forEach(el => {
                const info = {
                    tag: el.tagName.toLowerCase(),
                    type: el.type || '',
                    name: el.name || '',
                    id: el.id || '',
                    placeholder: el.placeholder || '',
                    value: el.tagName.toLowerCase() === 'select' ? '' : (el.value || '').substring(0, 50),
                    text: el.innerText ? el.innerText.trim().substring(0, 50) : ''
                };
                // 只保留有标识信息的元素
                if (info.name || info.id || info.placeholder || info.text) {
                    forms.push(info);
                }
            });
            return forms.slice(0, 30);
        }
    """)

    result = {
        "status": status,
        "url": page_url,
        "title": title,
        "text_content": text_content,
    }
    # Links/forms are only included when present to keep the payload small.
    if links:
        result["links"] = links
    if forms:
        result["form_elements"] = forms

    return json.dumps(result, ensure_ascii=False, indent=2)
|
||||
|
||||
@staticmethod
def _action_get_content(page, content_type: Optional[str]) -> str:
    """Return the current page's content (HTML or visible text) as JSON."""
    # Raw HTML only when explicitly requested; otherwise the visible body text.
    content = page.content() if content_type == "html" else page.inner_text("body")

    if content and len(content) > MAX_CONTENT_LENGTH:
        content = content[:MAX_CONTENT_LENGTH] + "\n\n...(内容已截断)"

    payload = {
        "url": page.url,
        "title": page.title(),
        "content_type": content_type,
        "content": content,
    }
    return json.dumps(payload, ensure_ascii=False, indent=2)
|
||||
|
||||
@staticmethod
def _action_screenshot(page) -> str:
    """Capture a viewport JPEG screenshot and return it base64-encoded in JSON."""

    def _capture(jpeg_quality: int) -> str:
        # Viewport-only capture keeps the payload bounded.
        raw = page.screenshot(
            full_page=False,
            type="jpeg",
            quality=jpeg_quality,
        )
        return base64.b64encode(raw).decode("utf-8")

    screenshot_b64 = _capture(60)

    # base64 inflates size by ~33%; cap the encoded payload at ~200KB
    # (~150KB of raw image) and retry at lower quality when exceeded.
    if len(screenshot_b64) > 200 * 1024:
        screenshot_b64 = _capture(30)

    payload = {
        "url": page.url,
        "title": page.title(),
        "screenshot_base64": screenshot_b64,
        "format": "jpeg",
        "note": "截图已以 base64 编码返回",
    }
    return json.dumps(payload, ensure_ascii=False, indent=2)
|
||||
|
||||
@staticmethod
def _action_click(page, selector: str, timeout: int) -> str:
    """Click the element matched by *selector* and report the resulting page state."""
    page.click(selector, timeout=timeout * 1000)

    # The click may trigger navigation; give the page up to 5s to settle.
    try:
        page.wait_for_load_state("networkidle", timeout=5000)
    except Exception:
        pass

    payload = {
        "success": True,
        "message": f"成功点击元素: {selector}",
        "current_url": page.url,
        "current_title": page.title(),
    }
    return json.dumps(payload, ensure_ascii=False, indent=2)
|
||||
|
||||
@staticmethod
def _action_fill(page, selector: str, value: str, timeout: int) -> str:
    """Fill the form element matched by *selector* with *value*."""
    page.fill(selector, value, timeout=timeout * 1000)

    payload = {
        "success": True,
        "message": f"成功填写元素 '{selector}' 的值为 '{value}'",
    }
    return json.dumps(payload, ensure_ascii=False, indent=2)
|
||||
|
||||
@staticmethod
def _action_select(page, selector: str, value: Optional[str], timeout: int) -> str:
    """Select an option of the element matched by *selector*; *value* is required."""
    # Guard clause: the select action is meaningless without a value.
    if not value:
        return "错误: 'select' 操作需要提供 value 参数"

    page.select_option(selector, value=value, timeout=timeout * 1000)

    return json.dumps(
        {
            "success": True,
            "message": f"成功选择元素 '{selector}' 的选项 '{value}'",
        },
        ensure_ascii=False,
        indent=2,
    )
|
||||
|
||||
@staticmethod
def _action_evaluate(page, script: str) -> str:
    """Run *script* in the page context and return its result as JSON."""
    raw = page.evaluate(script)

    # Normalise the JavaScript return value into a string form.
    if raw is None:
        rendered = "null"
    elif isinstance(raw, (dict, list)):
        rendered = json.dumps(raw, ensure_ascii=False, indent=2)
    else:
        rendered = str(raw)

    # Keep the payload bounded.
    if len(rendered) > MAX_CONTENT_LENGTH:
        rendered = rendered[:MAX_CONTENT_LENGTH] + "\n\n...(结果已截断)"

    return json.dumps(
        {
            "success": True,
            "result": rendered,
        },
        ensure_ascii=False,
        indent=2,
    )
|
||||
|
||||
@staticmethod
def _action_wait(page, selector: str, timeout: int) -> str:
    """Wait for *selector* to appear and report its visibility and a text snippet."""
    element = page.wait_for_selector(selector, timeout=timeout * 1000)

    if not element:
        # Defensive: wait_for_selector normally raises on timeout, but a
        # falsy return is reported as a failure as well.
        return json.dumps(
            {
                "success": False,
                "message": f"等待元素 '{selector}' 超时",
            },
            ensure_ascii=False,
            indent=2,
        )

    is_visible = element.is_visible()
    snippet = element.inner_text()
    if snippet and len(snippet) > 200:
        snippet = snippet[:200] + "..."

    return json.dumps(
        {
            "success": True,
            "message": f"元素 '{selector}' 已出现",
            "visible": is_visible,
            "text": snippet,
        },
        ensure_ascii=False,
        indent=2,
    )
|
||||
97
app/agent/tools/impl/delete_custom_filter_rule.py
Normal file
97
app/agent/tools/impl/delete_custom_filter_rule.py
Normal file
@@ -0,0 +1,97 @@
|
||||
"""删除自定义过滤规则工具。"""
|
||||
|
||||
import json
|
||||
from typing import Optional, Type
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from app.agent.tools.base import MoviePilotTool
|
||||
from app.agent.tools.impl._filter_rule_utils import (
|
||||
collect_custom_rule_group_refs,
|
||||
get_custom_rules,
|
||||
get_rule_groups,
|
||||
save_system_config,
|
||||
)
|
||||
from app.log import logger
|
||||
from app.schemas.types import SystemConfigKey
|
||||
|
||||
|
||||
class DeleteCustomFilterRuleInput(BaseModel):
    """Input schema for the delete-custom-filter-rule tool."""

    # Rationale the agent must supply for invoking the tool.
    explanation: str = Field(
        ...,
        description="Clear explanation of why this tool is being used in the current context",
    )
    # ID of the custom filter rule to remove.
    rule_id: str = Field(..., description="Custom rule ID to delete.")
|
||||
|
||||
|
||||
class DeleteCustomFilterRuleTool(MoviePilotTool):
    """Delete a custom filter rule, refusing while rule groups still reference it."""

    name: str = "delete_custom_filter_rule"
    description: str = (
        "Delete a custom filter rule from CustomFilterRules. "
        "If the rule is still referenced by rule groups, the deletion is blocked to avoid breaking rule_string expressions."
    )
    args_schema: Type[BaseModel] = DeleteCustomFilterRuleInput
    # Destructive configuration change — restricted to administrators.
    require_admin: bool = True

    def get_tool_message(self, **kwargs) -> Optional[str]:
        """Build the user-facing progress message."""
        return f"删除自定义过滤规则 {kwargs.get('rule_id', '')}"

    async def run(self, rule_id: str, **kwargs) -> str:
        """Delete the rule identified by *rule_id*; returns a JSON status string."""
        logger.info(f"执行工具: {self.name}, rule_id={rule_id}")

        try:
            custom_rules = get_custom_rules()
            # Locate the target rule; report cleanly when it does not exist.
            target_rule = next((rule for rule in custom_rules if rule.id == rule_id), None)
            if not target_rule:
                return json.dumps(
                    {
                        "success": False,
                        "message": f"自定义过滤规则 '{rule_id}' 不存在",
                    },
                    ensure_ascii=False,
                )

            # Block deletion while any rule group still references the rule;
            # deleting would break those groups' rule_string expressions.
            refs = collect_custom_rule_group_refs(get_rule_groups(), [rule_id]).get(
                rule_id, []
            )
            if refs:
                return json.dumps(
                    {
                        "success": False,
                        "message": (
                            f"自定义过滤规则 '{rule_id}' 仍被规则组引用,无法删除。"
                        ),
                        "referenced_by_rule_groups": refs,
                    },
                    ensure_ascii=False,
                    indent=2,
                )

            # Persist the rule list without the deleted entry.
            remaining_rules = [
                rule for rule in custom_rules if rule.id != rule_id
            ]
            await save_system_config(
                SystemConfigKey.CustomFilterRules,
                [rule.model_dump(exclude_none=True) for rule in remaining_rules],
            )

            return json.dumps(
                {
                    "success": True,
                    "message": f"已删除自定义过滤规则 {rule_id}",
                    "count": len(remaining_rules),
                },
                ensure_ascii=False,
                indent=2,
            )
        except Exception as exc:
            logger.error(f"删除自定义过滤规则失败: {exc}", exc_info=True)
            return json.dumps(
                {
                    "success": False,
                    "message": f"删除自定义过滤规则失败: {exc}",
                },
                ensure_ascii=False,
            )
|
||||
94
app/agent/tools/impl/delete_download.py
Normal file
94
app/agent/tools/impl/delete_download.py
Normal file
@@ -0,0 +1,94 @@
|
||||
"""删除下载任务工具"""
|
||||
|
||||
from typing import Optional, Type
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from app.agent.tools.base import MoviePilotTool
|
||||
from app.chain.download import DownloadChain
|
||||
from app.log import logger
|
||||
|
||||
|
||||
class DeleteDownloadInput(BaseModel):
    """Input schema for the delete-download tool."""

    # Rationale the agent must supply for invoking the tool.
    explanation: str = Field(
        ...,
        description="Clear explanation of why this tool is being used in the current context",
    )
    # Torrent hash identifying the task to delete.
    hash: str = Field(
        ..., description="Task hash (can be obtained from query_download_tasks tool)"
    )
    # Optional downloader name; all downloaders are searched when omitted.
    downloader: Optional[str] = Field(
        None,
        description="Name of specific downloader (optional, if not provided will search all downloaders)",
    )
    # Whether to delete the downloaded files along with the task.
    delete_files: Optional[bool] = Field(
        False,
        description="Whether to delete downloaded files along with the task (default: False, only removes the task from downloader)",
    )
|
||||
|
||||
|
||||
class DeleteDownloadTool(MoviePilotTool):
    """Remove a download task (optionally with its files) identified by torrent hash."""

    name: str = "delete_download"
    description: str = "Delete a download task from the downloader by task hash only. Optionally specify the downloader name and whether to delete downloaded files."
    args_schema: Type[BaseModel] = DeleteDownloadInput
    # Destructive operation — restricted to administrators.
    require_admin: bool = True

    def get_tool_message(self, **kwargs) -> Optional[str]:
        """Build a user-facing progress message from the delete parameters."""
        hash_value = kwargs.get("hash", "")
        downloader = kwargs.get("downloader")
        delete_files = kwargs.get("delete_files", False)

        message = f"删除下载任务: {hash_value}"
        if downloader:
            message += f" [下载器: {downloader}]"
        if delete_files:
            message += " (包含文件)"

        return message

    @staticmethod
    def _is_valid_hash(hash_value: str) -> bool:
        """Validate a torrent infohash.

        Accepts both the 40-hex-char SHA-1 infohash (BitTorrent v1) and the
        64-hex-char truncated SHA-256 infohash (BitTorrent v2 / hybrid,
        BEP 52); the original 40-only check rejected valid v2 hashes.
        """
        return len(hash_value) in (40, 64) and all(
            c in "0123456789abcdefABCDEF" for c in hash_value
        )

    @staticmethod
    def _delete_download_sync(
        hash_value: str, downloader: Optional[str] = None, delete_files: bool = False
    ) -> bool:
        """Synchronously remove the torrent; downloader clients block, so this
        runs off the event loop (see run_blocking below)."""
        return DownloadChain().remove_torrents(
            hashs=[hash_value], downloader=downloader, delete_file=delete_files
        )

    async def run(
        self,
        hash: str,
        downloader: Optional[str] = None,
        delete_files: Optional[bool] = False,
        **kwargs,
    ) -> str:
        """Delete the task identified by *hash*; returns a human-readable status."""
        logger.info(
            f"执行工具: {self.name}, 参数: hash={hash}, downloader={downloader}, delete_files={delete_files}"
        )

        try:
            # Tasks can only be deleted by hash; reject malformed values early.
            if not self._is_valid_hash(hash):
                return "参数错误:hash 格式无效,请先使用 query_download_tasks 工具获取正确的 hash。"

            # remove_torrents supports delete_file to control whether the
            # downloaded files are removed along with the task.
            result = await self.run_blocking(
                "downloader",
                self._delete_download_sync,
                hash,
                downloader,
                bool(delete_files),
            )

            if result:
                files_info = "(包含文件)" if delete_files else "(不包含文件)"
                return f"成功删除下载任务:{hash} {files_info}"
            else:
                return f"删除下载任务失败:{hash},请检查任务是否存在或下载器是否可用"
        except Exception as e:
            logger.error(f"删除下载任务失败: {e}", exc_info=True)
            return f"删除下载任务时发生错误: {str(e)}"
|
||||
44
app/agent/tools/impl/delete_download_history.py
Normal file
44
app/agent/tools/impl/delete_download_history.py
Normal file
@@ -0,0 +1,44 @@
|
||||
"""删除下载历史记录工具"""
|
||||
|
||||
from typing import Optional, Type
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from app.agent.tools.base import MoviePilotTool
|
||||
from app.db import AsyncSessionFactory
|
||||
from app.db.models.downloadhistory import DownloadHistory
|
||||
from app.log import logger
|
||||
|
||||
|
||||
class DeleteDownloadHistoryInput(BaseModel):
    """Input schema for the delete-download-history tool."""

    # Rationale the agent must supply for invoking the tool.
    explanation: str = Field(
        ...,
        description="Clear explanation of why this tool is being used in the current context",
    )
    # Primary key of the download-history row to delete.
    history_id: int = Field(
        ..., description="The ID of the download history record to delete"
    )
|
||||
|
||||
|
||||
class DeleteDownloadHistoryTool(MoviePilotTool):
    """Remove a single download-history row from the database by primary key."""

    name: str = "delete_download_history"
    description: str = "Delete a download history record by ID. This only removes the record from the database, does not delete any actual files."
    args_schema: Type[BaseModel] = DeleteDownloadHistoryInput
    require_admin: bool = True

    def get_tool_message(self, **kwargs) -> Optional[str]:
        """Build the user-facing progress message."""
        return f"删除下载历史记录 ID: {kwargs.get('history_id')}"

    async def run(self, history_id: int, **kwargs) -> str:
        """Delete the history row; only the database record is touched, never files."""
        logger.info(f"执行工具: {self.name}, 参数: history_id={history_id}")

        try:
            async with AsyncSessionFactory() as session:
                await DownloadHistory.async_delete(session, history_id)
            return f"下载历史记录 ID: {history_id} 已成功删除"
        except Exception as e:
            logger.error(f"删除下载历史记录失败: {e}", exc_info=True)
            return f"删除下载历史记录时发生错误: {str(e)}"
|
||||
81
app/agent/tools/impl/delete_rule_group.py
Normal file
81
app/agent/tools/impl/delete_rule_group.py
Normal file
@@ -0,0 +1,81 @@
|
||||
"""删除过滤规则组工具。"""
|
||||
|
||||
import json
|
||||
from typing import Optional, Type
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from app.agent.tools.base import MoviePilotTool
|
||||
from app.agent.tools.impl._filter_rule_utils import (
|
||||
get_rule_groups,
|
||||
remove_rule_group_references,
|
||||
save_system_config,
|
||||
)
|
||||
from app.log import logger
|
||||
from app.schemas.types import SystemConfigKey
|
||||
|
||||
|
||||
class DeleteRuleGroupInput(BaseModel):
    """Input schema for the delete-rule-group tool."""

    # Rationale the agent must supply for invoking the tool.
    explanation: str = Field(
        ...,
        description="Clear explanation of why this tool is being used in the current context",
    )
    # Name of the rule group to delete.
    name: str = Field(..., description="Rule group name to delete.")
|
||||
|
||||
|
||||
class DeleteRuleGroupTool(MoviePilotTool):
    """Delete a filter rule group and clean up dangling references to it."""

    name: str = "delete_rule_group"
    description: str = (
        "Delete a filter rule group from UserFilterRuleGroups. "
        "The tool also removes dangling references from global settings and subscriptions."
    )
    args_schema: Type[BaseModel] = DeleteRuleGroupInput
    # Destructive configuration change — restricted to administrators.
    require_admin: bool = True

    def get_tool_message(self, **kwargs) -> Optional[str]:
        """Build the user-facing progress message."""
        return f"删除规则组 {kwargs.get('name', '')}"

    async def run(self, name: str, **kwargs) -> str:
        """Delete the group named *name*; returns a JSON status string."""
        logger.info(f"执行工具: {self.name}, name={name}")

        try:
            rule_groups = get_rule_groups()
            # Report cleanly when no group with this name exists.
            if not any(group.name == name for group in rule_groups):
                return json.dumps(
                    {
                        "success": False,
                        "message": f"规则组 '{name}' 不存在",
                    },
                    ensure_ascii=False,
                )

            # Persist the group list without the deleted entry.
            remaining_groups = [
                group for group in rule_groups if group.name != name
            ]
            await save_system_config(
                SystemConfigKey.UserFilterRuleGroups,
                [group.model_dump(exclude_none=True) for group in remaining_groups],
            )
            # Scrub references to the removed group from settings/subscriptions.
            reference_changes = await remove_rule_group_references(name)

            return json.dumps(
                {
                    "success": True,
                    "message": f"已删除规则组 {name}",
                    "count": len(remaining_groups),
                    "reference_updates": reference_changes,
                },
                ensure_ascii=False,
                indent=2,
            )
        except Exception as exc:
            logger.error(f"删除规则组失败: {exc}", exc_info=True)
            return json.dumps(
                {
                    "success": False,
                    "message": f"删除规则组失败: {exc}",
                },
                ensure_ascii=False,
            )
|
||||
67
app/agent/tools/impl/delete_subscribe.py
Normal file
67
app/agent/tools/impl/delete_subscribe.py
Normal file
@@ -0,0 +1,67 @@
|
||||
"""删除订阅工具"""
|
||||
|
||||
from typing import Optional, Type
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from app.agent.tools.base import MoviePilotTool
|
||||
from app.core.event import eventmanager
|
||||
from app.db.subscribe_oper import SubscribeOper
|
||||
from app.helper.subscribe import SubscribeHelper
|
||||
from app.log import logger
|
||||
from app.schemas.types import EventType
|
||||
|
||||
|
||||
class DeleteSubscribeInput(BaseModel):
    """Input schema for the delete-subscribe tool."""

    # Rationale the agent must supply for invoking the tool.
    explanation: str = Field(
        ...,
        description="Clear explanation of why this tool is being used in the current context",
    )
    # Primary key of the subscription to delete.
    subscribe_id: int = Field(
        ...,
        description="The ID of the subscription to delete (can be obtained from query_subscribes tool)",
    )
|
||||
|
||||
|
||||
class DeleteSubscribeTool(MoviePilotTool):
    """Delete a media subscription and broadcast the SubscribeDeleted event."""

    name: str = "delete_subscribe"
    description: str = "Delete a media subscription by its ID. This will remove the subscription and stop automatic downloads for that media."
    args_schema: Type[BaseModel] = DeleteSubscribeInput
    # Destructive operation — restricted to administrators.
    require_admin: bool = True

    def get_tool_message(self, **kwargs) -> Optional[str]:
        """Build a user-facing progress message from the delete parameters."""
        subscribe_id = kwargs.get("subscribe_id")
        return f"删除订阅 (ID: {subscribe_id})"

    async def run(self, subscribe_id: int, **kwargs) -> str:
        """Delete the subscription; returns a human-readable status string."""
        logger.info(f"执行工具: {self.name}, 参数: subscribe_id={subscribe_id}")

        try:
            subscribe_oper = SubscribeOper()
            # Fetch the subscription first so a missing ID is reported cleanly.
            subscribe = await subscribe_oper.async_get(subscribe_id)
            if not subscribe:
                return f"订阅 ID {subscribe_id} 不存在"

            # Snapshot the record before deletion (used in the event payload).
            subscribe_info = subscribe.to_dict()

            await subscribe_oper.async_delete(subscribe_id)
            # The shared-subscription statistics refresh is itself asynchronous;
            # it only needs to be triggered after the deletion.
            SubscribeHelper().sub_done_async(
                {"tmdbid": subscribe.tmdbid, "doubanid": subscribe.doubanid}
            )

            # Notify listeners that the subscription was removed.
            await eventmanager.async_send_event(
                EventType.SubscribeDeleted,
                {"subscribe_id": subscribe_id, "subscribe_info": subscribe_info},
            )

            return f"成功删除订阅:{subscribe.name} ({subscribe.year})"
        except Exception as e:
            logger.error(f"删除订阅失败: {e}", exc_info=True)
            return f"删除订阅时发生错误: {str(e)}"
|
||||
53
app/agent/tools/impl/delete_transfer_history.py
Normal file
53
app/agent/tools/impl/delete_transfer_history.py
Normal file
@@ -0,0 +1,53 @@
|
||||
"""删除整理历史记录工具"""
|
||||
|
||||
from typing import Optional, Type
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from app.agent.tools.base import MoviePilotTool
|
||||
from app.db.transferhistory_oper import TransferHistoryOper
|
||||
from app.log import logger
|
||||
|
||||
|
||||
class DeleteTransferHistoryInput(BaseModel):
    """Input schema for the delete-transfer-history tool."""

    # Rationale the agent must supply for invoking the tool.
    explanation: str = Field(
        ...,
        description="Clear explanation of why this tool is being used in the current context",
    )
    # Primary key of the transfer-history row to delete.
    history_id: int = Field(
        ..., description="The ID of the transfer history record to delete"
    )
|
||||
|
||||
|
||||
class DeleteTransferHistoryTool(MoviePilotTool):
    """Delete one transfer-history row so a previously failed transfer can be retried."""

    name: str = "delete_transfer_history"
    description: str = "Delete a specific transfer history record by its ID. This is useful when you need to remove a failed transfer record before retrying the transfer, as the system skips files that already have transfer history."
    args_schema: Type[BaseModel] = DeleteTransferHistoryInput
    require_admin: bool = True

    def get_tool_message(self, **kwargs) -> Optional[str]:
        """Build the user-facing progress message."""
        return f"删除整理历史记录: ID={kwargs.get('history_id')}"

    async def run(self, history_id: int, **kwargs) -> str:
        """Delete the record; returns a human-readable status string."""
        logger.info(f"执行工具: {self.name}, 参数: history_id={history_id}")

        try:
            oper = TransferHistoryOper()
            record = await oper.async_get(history_id)
            if not record:
                return f"错误:整理历史记录不存在,ID={history_id}"

            # Capture display fields before the row disappears.
            title = record.title or "未知"
            src = record.src or "未知"
            status = "成功" if record.status else "失败"

            await oper.async_delete(history_id)
            return (
                f"已删除整理历史记录:ID={history_id},标题={title},源路径={src},状态={status}"
            )
        except Exception as e:
            logger.error(f"删除整理历史记录失败: {e}", exc_info=True)
            return f"删除整理历史记录时发生错误: {str(e)}"
|
||||
74
app/agent/tools/impl/edit_file.py
Normal file
74
app/agent/tools/impl/edit_file.py
Normal file
@@ -0,0 +1,74 @@
|
||||
"""文件编辑工具"""
|
||||
|
||||
from pathlib import Path
|
||||
from typing import Optional, Type
|
||||
|
||||
from anyio import Path as AsyncPath
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from app.agent.tools.base import MoviePilotTool
|
||||
from app.log import logger
|
||||
|
||||
|
||||
class EditFileInput(BaseModel):
    """Input parameters for edit file tool."""

    # Absolute path of the target file.
    file_path: str = Field(..., description="The absolute path of the file to edit")
    # Exact text to find (whitespace-sensitive).
    old_text: str = Field(..., description="The exact old text to be replaced")
    # Replacement text.
    new_text: str = Field(..., description="The new text to replace with")
|
||||
|
||||
|
||||
class EditFileTool(MoviePilotTool):
    """Edit a text file by exact-match replacement, or create it when *old_text* is empty."""

    name: str = "edit_file"
    description: str = "Edit a file by replacing specific old text with new text. Useful for modifying configuration files, code, or scripts."
    args_schema: Type[BaseModel] = EditFileInput
    # File-system writes are restricted to administrators.
    require_admin: bool = True

    def get_tool_message(self, **kwargs) -> Optional[str]:
        """Build a user-facing progress message from the target file name."""
        file_path = kwargs.get("file_path", "")
        file_name = Path(file_path).name if file_path else "未知文件"
        return f"编辑文件: {file_name}"

    async def run(self, file_path: str, old_text: str, new_text: str, **kwargs) -> str:
        """Replace *old_text* with *new_text* in *file_path*.

        If the file does not exist and *old_text* is empty, the file is
        created with *new_text*. Returns a human-readable status string.
        """
        logger.info(f"执行工具: {self.name}, 参数: file_path={file_path}")

        try:
            path = AsyncPath(file_path)
            exists = await path.exists()

            # Replacing specific text requires an existing file.
            if not exists and old_text:
                return f"错误:文件 {file_path} 不存在,无法进行内容替换。"

            if exists and not await path.is_file():
                return f"错误:{file_path} 不是一个文件"

            if exists:
                # Bug fix: an empty old_text on an existing file would make
                # str.replace("") interleave new_text between every character,
                # corrupting the file. Require a non-empty old_text instead.
                if not old_text:
                    return f"错误:文件 {file_path} 已存在,必须提供 old_text 才能进行替换。"
                content = await path.read_text(encoding="utf-8")
                if old_text not in content:
                    logger.warning(f"编辑文件 {file_path} 失败:未找到指定的旧文本块")
                    # Fixed mixed-language message ("缩进 and 换行符" -> "缩进和换行符").
                    return f"错误:在文件 {file_path} 中未找到指定的旧文本。请确保包含所有的空格、缩进和换行符。"
                occurrences = content.count(old_text)
                new_content = content.replace(old_text, new_text)
            else:
                # File absent and old_text empty: initialise a new file.
                new_content = new_text
                occurrences = 1

            # Create parent directories as needed.
            await path.parent.mkdir(parents=True, exist_ok=True)

            await path.write_text(new_content, encoding="utf-8")

            logger.info(f"成功编辑文件 {file_path},替换了 {occurrences} 处内容")
            return f"成功编辑文件 {file_path} (替换了 {occurrences} 处匹配内容)"

        except PermissionError:
            return f"错误:没有访问/修改 {file_path} 的权限"
        except UnicodeDecodeError:
            return f"错误:{file_path} 不是文本文件,无法编辑"
        except Exception as e:
            logger.error(f"编辑文件 {file_path} 时发生错误: {str(e)}", exc_info=True)
            return f"操作失败: {str(e)}"
|
||||
345
app/agent/tools/impl/execute_command.py
Normal file
345
app/agent/tools/impl/execute_command.py
Normal file
@@ -0,0 +1,345 @@
|
||||
"""执行Shell命令工具"""
|
||||
|
||||
import asyncio
|
||||
import os
|
||||
import signal
|
||||
import subprocess
|
||||
from dataclasses import dataclass, field
|
||||
from tempfile import NamedTemporaryFile
|
||||
from typing import Optional, TextIO, Type
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from app.agent.tools.base import MoviePilotTool
|
||||
from app.log import logger
|
||||
|
||||
|
||||
# Limits governing shell-command execution.
DEFAULT_TIMEOUT_SECONDS = 60          # applied when the caller omits or garbles timeout
MAX_TIMEOUT_SECONDS = 300             # hard upper bound on any single command
MAX_OUTPUT_PREVIEW_BYTES = 10 * 1024  # only the first 10KB of output is returned inline
READ_CHUNK_SIZE = 4096                # stream read granularity
KILL_GRACE_SECONDS = 3                # grace period after SIGTERM before escalating
COMMAND_CONCURRENCY_LIMIT = 2         # commands allowed to run concurrently

# Module-level gate limiting how many commands run at once.
_command_semaphore = asyncio.Semaphore(COMMAND_CONCURRENCY_LIMIT)
|
||||
|
||||
|
||||
@dataclass
class _CommandOutput:
    """Keep the first 10KB of command output as an inline preview and spill the
    full output to a temporary file once that limit is exceeded."""

    preview_limit_bytes: int  # max bytes retained in the inline preview
    preview_entries: list[tuple[str, str]] = field(default_factory=list)  # (stream_name, text) chunks
    captured_bytes: int = 0  # bytes currently held by the preview
    preview_truncated: bool = False  # True once output exceeded the preview limit
    temp_file_path: Optional[str] = None  # spill-file path, if one was created
    temp_file_handle: Optional[TextIO] = None  # open handle to the spill file
    last_written_stream: Optional[str] = None  # stream that last wrote to the spill file

    @staticmethod
    def _clip_text_to_bytes(text: str, byte_limit: int) -> str:
        """Return the longest prefix of *text* that fits in *byte_limit* UTF-8 bytes.

        errors="ignore" drops a trailing partially-cut multi-byte character.
        """
        if byte_limit <= 0:
            return ""
        return text.encode("utf-8")[:byte_limit].decode("utf-8", errors="ignore")

    def _write_chunk(self, stream_name: str, text: str) -> None:
        """Append *text* to the spill file, inserting a stream header on change."""
        if not self.temp_file_handle or not text:
            return

        # When the stream switches, emit a "[标准输出]"/"[错误输出]" section header.
        if self.last_written_stream != stream_name:
            if self.temp_file_handle.tell() > 0:
                self.temp_file_handle.write("\n")
            title = "标准输出" if stream_name == "stdout" else "错误输出"
            self.temp_file_handle.write(f"[{title}]\n")
            self.last_written_stream = stream_name

        self.temp_file_handle.write(text)

    def _ensure_temp_file(self) -> None:
        """Lazily create the spill file and replay the preview into it."""
        if self.temp_file_handle:
            return

        # delete=False: the path is handed back to the caller after close().
        temp_file = NamedTemporaryFile(
            mode="w",
            encoding="utf-8",
            suffix=".log",
            prefix="moviepilot-command-",
            delete=False,
        )
        self.temp_file_path = temp_file.name
        self.temp_file_handle = temp_file
        # Seed the spill file with everything captured so far.
        for stream_name, chunk in self.preview_entries:
            self._write_chunk(stream_name, chunk)

    def close(self) -> None:
        """Flush and close the spill file, if one was opened."""
        if not self.temp_file_handle:
            return
        self.temp_file_handle.flush()
        self.temp_file_handle.close()
        self.temp_file_handle = None

    def append(self, stream_name: str, text: str) -> None:
        """Record a chunk of output from *stream_name* ("stdout"/"stderr")."""
        if not text:
            return

        # Once spilling started, everything goes straight to the file.
        if self.temp_file_handle:
            self._write_chunk(stream_name, text)
            return

        chunk_bytes = len(text.encode("utf-8"))
        remaining = self.preview_limit_bytes - self.captured_bytes
        if chunk_bytes <= remaining:
            # Still within the inline-preview budget.
            self.preview_entries.append((stream_name, text))
            self.captured_bytes += chunk_bytes
            return

        # Budget exceeded: start spilling (full chunk goes to the file),
        # and keep only the clipped remainder in the preview.
        self.preview_truncated = True
        self._ensure_temp_file()
        self._write_chunk(stream_name, text)

        preview = self._clip_text_to_bytes(text, remaining)
        if preview:
            self.preview_entries.append((stream_name, preview))
            self.captured_bytes += len(preview.encode("utf-8"))

    @property
    def stdout(self) -> str:
        """Concatenated stdout preview, stripped of surrounding whitespace."""
        return "".join(
            text for stream_name, text in self.preview_entries if stream_name == "stdout"
        ).strip()

    @property
    def stderr(self) -> str:
        """Concatenated stderr preview, stripped of surrounding whitespace."""
        return "".join(
            text for stream_name, text in self.preview_entries if stream_name == "stderr"
        ).strip()
|
||||
|
||||
|
||||
class ExecuteCommandInput(BaseModel):
    """Input schema for the execute-command tool."""

    # Rationale the agent must supply for running the command.
    explanation: str = Field(
        ..., description="Clear explanation of why this command is being executed"
    )
    # The shell command line to run.
    command: str = Field(..., description="The shell command to execute")
    # Requested timeout; clamped to [1, 300] seconds elsewhere.
    timeout: Optional[int] = Field(
        60, description="Max execution time in seconds (default: 60)"
    )
|
||||
|
||||
|
||||
class ExecuteCommandTool(MoviePilotTool):
    """Run a shell command under timeout, concurrency and output-size limits."""

    name: str = "execute_command"
    description: str = (
        "Safely execute shell commands on the server. Useful for system "
        "maintenance, checking status, or running custom scripts. Includes "
        "timeout, concurrency, and output preview limits."
    )
    args_schema: Type[BaseModel] = ExecuteCommandInput
    # Shell access is restricted to administrators.
    require_admin: bool = True
|
||||
|
||||
def get_tool_message(self, **kwargs) -> Optional[str]:
    """Build the user-facing progress message from the command string."""
    return f"执行系统命令: {kwargs.get('command', '')}"
|
||||
|
||||
@staticmethod
def _normalize_timeout(timeout: Optional[int]) -> tuple[int, Optional[str]]:
    """Clamp the requested timeout so the agent cannot run a command forever.

    Returns the effective timeout plus an optional note describing any
    adjustment that was applied.
    """
    try:
        requested = int(timeout or DEFAULT_TIMEOUT_SECONDS)
    except (TypeError, ValueError):
        requested = DEFAULT_TIMEOUT_SECONDS

    # Non-positive values fall back to the default with an explanatory note.
    if requested <= 0:
        return DEFAULT_TIMEOUT_SECONDS, "timeout 参数无效,已使用默认 60 秒"

    # Oversized values are clamped to the hard ceiling.
    if requested > MAX_TIMEOUT_SECONDS:
        note = f"timeout 参数超过上限,已从 {requested} 秒限制为 {MAX_TIMEOUT_SECONDS} 秒"
        return MAX_TIMEOUT_SECONDS, note

    return requested, None
|
||||
|
||||
@staticmethod
def _subprocess_kwargs() -> dict:
    """Build subprocess kwargs that place the child in its own process group,
    so an entire process tree can be cleaned up on timeout."""
    kwargs = {
        "stdin": subprocess.DEVNULL,
        "stdout": asyncio.subprocess.PIPE,
        "stderr": asyncio.subprocess.PIPE,
    }
    if os.name == "posix":
        # POSIX: a new session implies a new process group (killable via killpg).
        kwargs["start_new_session"] = True
    elif os.name == "nt":
        # Windows equivalent: a dedicated process group for the child.
        kwargs["creationflags"] = subprocess.CREATE_NEW_PROCESS_GROUP
    return kwargs
|
||||
|
||||
@staticmethod
async def _read_stream(
    stream: asyncio.StreamReader,
    stream_name: str,
    output: _CommandOutput,
) -> None:
    """Drain *stream* into *output* in chunks; only the first 10KB is kept inline."""
    # Walrus loop: read until EOF (empty chunk).
    while chunk := await stream.read(READ_CHUNK_SIZE):
        output.append(stream_name, chunk.decode("utf-8", errors="replace"))
|
||||
|
||||
@staticmethod
def _terminate_process(process: asyncio.subprocess.Process, sig: int):
    """Send *sig* to the child's process group; fall back to single-process
    termination on platforms without process-group support."""
    try:
        if os.name == "posix":
            # start_new_session made the child its own group leader, so its
            # pid doubles as the process-group id.
            os.killpg(process.pid, sig)
        elif sig == getattr(signal, "SIGKILL", None):
            process.kill()
        else:
            process.terminate()
    except ProcessLookupError:
        # Process already exited — nothing left to signal.
        pass
|
||||
|
||||
@classmethod
async def _cleanup_process(
    cls,
    process: asyncio.subprocess.Process,
    wait_task: asyncio.Task,
) -> None:
    """Terminate gently first, then force-kill, so a timed-out shell does not
    leave orphaned child processes behind."""
    if wait_task.done():
        return

    # Phase 1: polite SIGTERM, then wait up to the grace period.
    cls._terminate_process(process, signal.SIGTERM)
    try:
        # shield() keeps wait_task alive even if this wait_for times out.
        await asyncio.wait_for(
            asyncio.shield(wait_task), timeout=KILL_GRACE_SECONDS
        )
        return
    except asyncio.TimeoutError:
        pass

    # Phase 2: escalate to SIGKILL (SIGTERM again where SIGKILL is unavailable).
    kill_signal = getattr(signal, "SIGKILL", signal.SIGTERM)
    cls._terminate_process(process, kill_signal)
    try:
        await asyncio.wait_for(
            asyncio.shield(wait_task), timeout=KILL_GRACE_SECONDS
        )
    except asyncio.TimeoutError:
        logger.warning("命令进程强制清理超时: pid=%s", process.pid)
|
||||
|
||||
@staticmethod
|
||||
async def _finish_reader_tasks(reader_tasks: list[asyncio.Task]) -> None:
|
||||
"""等待输出读取任务退出,异常只记录不影响工具返回。"""
|
||||
if not reader_tasks:
|
||||
return
|
||||
done, pending = await asyncio.wait(reader_tasks, timeout=1)
|
||||
for task in pending:
|
||||
task.cancel()
|
||||
results = await asyncio.gather(*done, *pending, return_exceptions=True)
|
||||
for result in results:
|
||||
if isinstance(result, Exception) and not isinstance(
|
||||
result, asyncio.CancelledError
|
||||
):
|
||||
logger.debug("命令输出读取任务异常: %s", result)
|
||||
|
||||
@staticmethod
|
||||
def _format_result(
|
||||
*,
|
||||
exit_code: Optional[int],
|
||||
output: _CommandOutput,
|
||||
timeout: int,
|
||||
timed_out: bool,
|
||||
timeout_note: Optional[str],
|
||||
) -> str:
|
||||
if timed_out:
|
||||
result = f"命令执行超时 (限制: {timeout}秒,已终止进程)"
|
||||
else:
|
||||
result = f"命令执行完成 (退出码: {exit_code})"
|
||||
|
||||
if timeout_note:
|
||||
result += f"\n\n提示:\n{timeout_note}"
|
||||
if output.temp_file_path:
|
||||
file_note = (
|
||||
"截至命令终止前的完整输出"
|
||||
if timed_out
|
||||
else "完整输出"
|
||||
)
|
||||
result += (
|
||||
"\n\n提示:\n"
|
||||
f"命令输出超过 10KB,仅返回前 {MAX_OUTPUT_PREVIEW_BYTES} 字节内容。\n"
|
||||
f"{file_note}已写入临时文件: {output.temp_file_path}\n"
|
||||
"如需完整内容,请继续读取该文件。"
|
||||
)
|
||||
if output.stdout:
|
||||
result += f"\n\n标准输出:\n{output.stdout}"
|
||||
if output.stderr:
|
||||
result += f"\n\n错误输出:\n{output.stderr}"
|
||||
if output.preview_truncated:
|
||||
result += "\n\n...(仅展示前 10KB 内容)"
|
||||
if not output.stdout and not output.stderr:
|
||||
result += "\n\n(无输出内容)"
|
||||
return result
|
||||
|
||||
async def run(self, command: str, timeout: Optional[int] = 60, **kwargs) -> str:
    """Run a shell command asynchronously and return a formatted report.

    Applies a keyword deny-list, clamps the timeout, streams output through
    `_read_stream` (preview kept in memory, rest spilled by `_CommandOutput`),
    and escalates termination on timeout via `_cleanup_process`.
    """
    logger.info(
        f"执行工具: {self.name}, 参数: command={command}, timeout={timeout}"
    )

    # Basic safety filter: substring match against a deny-list. This is
    # best-effort only — it is NOT a sandbox and obfuscated variants of
    # these commands will pass through.
    forbidden_keywords = [
        "rm -rf /",
        ":(){ :|:& };:",
        "dd if=/dev/zero",
        "mkfs",
        "reboot",
        "shutdown",
    ]
    for keyword in forbidden_keywords:
        if keyword in command:
            return f"错误:命令包含禁止使用的关键字 '{keyword}'"

    normalized_timeout, timeout_note = self._normalize_timeout(timeout)

    try:
        # Module-level semaphore bounds how many shells run concurrently.
        async with _command_semaphore:
            # Output can be very large: stream it incrementally to disk
            # instead of collecting it all at once with communicate().
            process = await asyncio.create_subprocess_shell(
                command, **self._subprocess_kwargs()
            )
            output = _CommandOutput(preview_limit_bytes=MAX_OUTPUT_PREVIEW_BYTES)
            wait_task = asyncio.create_task(process.wait())
            reader_tasks = [
                asyncio.create_task(
                    self._read_stream(process.stdout, "stdout", output)
                ),
                asyncio.create_task(
                    self._read_stream(process.stderr, "stderr", output)
                ),
            ]

            timed_out = False
            try:
                # shield() keeps wait_task usable after wait_for cancels.
                await asyncio.wait_for(
                    asyncio.shield(wait_task), timeout=normalized_timeout
                )
            except asyncio.TimeoutError:
                timed_out = True
                await self._cleanup_process(process, wait_task)

            try:
                # Let readers drain remaining pipe data before closing
                # the output collector (which finalizes the temp file).
                await self._finish_reader_tasks(reader_tasks)
            finally:
                output.close()

            return self._format_result(
                exit_code=process.returncode,
                output=output,
                timeout=normalized_timeout,
                timed_out=timed_out,
                timeout_note=timeout_note,
            )

    except Exception as e:
        logger.error(f"执行命令失败: {e}", exc_info=True)
        return f"执行命令时发生错误: {str(e)}"
|
||||
229
app/agent/tools/impl/get_recommendations.py
Normal file
229
app/agent/tools/impl/get_recommendations.py
Normal file
@@ -0,0 +1,229 @@
|
||||
"""获取推荐工具"""
|
||||
|
||||
import json
|
||||
from typing import Optional, Type
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from app.agent.tools.base import MoviePilotTool
|
||||
from app.chain.recommend import RecommendChain
|
||||
from app.log import logger
|
||||
from app.schemas.types import MediaType, media_type_to_agent
|
||||
|
||||
|
||||
class GetRecommendationsInput(BaseModel):
    """Input schema for the get_recommendations tool."""

    # Required free-text rationale the agent must supply with every call.
    explanation: str = Field(
        ...,
        description="Clear explanation of why this tool is being used in the current context",
    )
    # Which recommendation feed to query; defaults to TMDB trending.
    source: Optional[str] = Field(
        "tmdb_trending",
        description="Recommendation source: "
        "'tmdb_trending' for TMDB trending content, "
        "'tmdb_movies' for TMDB popular movies, "
        "'tmdb_tvs' for TMDB popular TV shows, "
        "'douban_hot' for Douban popular content, "
        "'douban_movie_hot' for Douban hot movies, "
        "'douban_tv_hot' for Douban hot TV shows, "
        "'douban_movie_showing' for Douban movies currently showing, "
        "'douban_movies' for Douban latest movies, "
        "'douban_tvs' for Douban latest TV shows, "
        "'douban_movie_top250' for Douban movie TOP250, "
        "'douban_tv_weekly_chinese' for Douban Chinese TV weekly chart, "
        "'douban_tv_weekly_global' for Douban global TV weekly chart, "
        "'douban_tv_animation' for Douban popular animation, "
        "'bangumi_calendar' for Bangumi anime calendar",
    )
    # Optional media-type filter; only meaningful for sources that mix types.
    media_type: Optional[str] = Field(
        "all", description="Allowed values: movie, tv, all"
    )
    # 1-based page number; the tool returns 20 items per page.
    page: Optional[int] = Field(
        1, description="Page number for pagination (default: 1, 20 items per page)"
    )
|
||||
|
||||
|
||||
class GetRecommendationsTool(MoviePilotTool):
    """Agent tool that fetches trending/popular media lists via RecommendChain."""

    name: str = "get_recommendations"
    description: str = "Get trending and popular media recommendations from various sources. Returns curated lists of popular movies, TV shows, and anime based on different criteria like trending, ratings, or calendar schedules. Supports pagination with 20 items per page."
    args_schema: Type[BaseModel] = GetRecommendationsInput

    def get_tool_message(self, **kwargs) -> Optional[str]:
        """Build a friendly progress message from the recommendation arguments."""
        source = kwargs.get("source", "tmdb_trending")
        media_type = kwargs.get("media_type", "all")
        page = kwargs.get("page", 1)

        # Map machine source keys to Chinese display labels; unknown keys
        # fall back to the raw key via dict.get below.
        source_map = {
            "tmdb_trending": "TMDB流行趋势",
            "tmdb_movies": "TMDB热门电影",
            "tmdb_tvs": "TMDB热门电视剧",
            "douban_hot": "豆瓣热门",
            "douban_movie_hot": "豆瓣热门电影",
            "douban_tv_hot": "豆瓣热门电视剧",
            "douban_movie_showing": "豆瓣热映",
            "douban_movies": "豆瓣最新电影",
            "douban_tvs": "豆瓣最新电视剧",
            "douban_movie_top250": "豆瓣电影TOP250",
            "douban_tv_weekly_chinese": "豆瓣国产剧集榜",
            "douban_tv_weekly_global": "豆瓣全球剧集榜",
            "douban_tv_animation": "豆瓣热门动漫",
            "bangumi_calendar": "番组计划",
        }
        source_desc = source_map.get(source, source)

        message = f"获取推荐: {source_desc}"
        if media_type != "all":
            message += f" [{media_type}]"
        message += f" (第{page}页)"

        return message

    async def run(
        self,
        source: Optional[str] = "tmdb_trending",
        media_type: Optional[str] = "all",
        page: Optional[int] = 1,
        **kwargs,
    ) -> str:
        """Fetch one page of recommendations from the requested source.

        Returns a JSON list of simplified media entries prefixed with a
        pagination summary, or a Chinese error/empty message string.
        """
        page = max(1, page or 1)
        page_size = 20
        logger.info(
            f"执行工具: {self.name}, 参数: source={source}, media_type={media_type}, page={page}"
        )
        try:
            if media_type != "all":
                media_type_enum = MediaType.from_agent(media_type)
                if not media_type_enum:
                    return f"错误:无效的媒体类型 '{media_type}',支持的类型:'movie', 'tv', 'all'"
                media_type = media_type_enum.to_agent()  # normalize to "movie"/"tv"

            recommend_chain = RecommendChain()
            results = []
            # Dispatch to the matching RecommendChain async fetcher.
            if source == "tmdb_trending":
                results = await recommend_chain.async_tmdb_trending(page=page)
            elif source == "tmdb_movies":
                results = await recommend_chain.async_tmdb_movies(page=page)
            elif source == "tmdb_tvs":
                results = await recommend_chain.async_tmdb_tvs(page=page)
            elif source == "douban_hot":
                # "douban_hot" is a composite source: filter by media_type,
                # or merge hot movies + hot TV when "all" is requested.
                if media_type == "movie":
                    results = await recommend_chain.async_douban_movie_hot(
                        page=page, count=page_size
                    )
                elif media_type == "tv":
                    results = await recommend_chain.async_douban_tv_hot(
                        page=page, count=page_size
                    )
                else:  # all
                    results.extend(
                        await recommend_chain.async_douban_movie_hot(
                            page=page, count=page_size
                        )
                    )
                    results.extend(
                        await recommend_chain.async_douban_tv_hot(
                            page=page, count=page_size
                        )
                    )
            elif source == "douban_movie_hot":
                results = await recommend_chain.async_douban_movie_hot(
                    page=page, count=page_size
                )
            elif source == "douban_tv_hot":
                results = await recommend_chain.async_douban_tv_hot(
                    page=page, count=page_size
                )
            elif source == "douban_movie_showing":
                results = await recommend_chain.async_douban_movie_showing(
                    page=page, count=page_size
                )
            elif source == "douban_movies":
                results = await recommend_chain.async_douban_movies(
                    page=page, count=page_size
                )
            elif source == "douban_tvs":
                results = await recommend_chain.async_douban_tvs(
                    page=page, count=page_size
                )
            elif source == "douban_movie_top250":
                results = await recommend_chain.async_douban_movie_top250(
                    page=page, count=page_size
                )
            elif source == "douban_tv_weekly_chinese":
                results = await recommend_chain.async_douban_tv_weekly_chinese(
                    page=page, count=page_size
                )
            elif source == "douban_tv_weekly_global":
                results = await recommend_chain.async_douban_tv_weekly_global(
                    page=page, count=page_size
                )
            elif source == "douban_tv_animation":
                results = await recommend_chain.async_douban_tv_animation(
                    page=page, count=page_size
                )
            elif source == "bangumi_calendar":
                results = await recommend_chain.async_bangumi_calendar(
                    page=page, count=page_size
                )
            else:
                # Unsupported source: echo back the full supported list so the
                # agent can self-correct.
                supported_sources = [
                    "tmdb_trending",
                    "tmdb_movies",
                    "tmdb_tvs",
                    "douban_hot",
                    "douban_movie_hot",
                    "douban_tv_hot",
                    "douban_movie_showing",
                    "douban_movies",
                    "douban_tvs",
                    "douban_movie_top250",
                    "douban_tv_weekly_chinese",
                    "douban_tv_weekly_global",
                    "douban_tv_animation",
                    "bangumi_calendar",
                ]
                return f"不支持的推荐来源: {source}。支持的来源包括: {', '.join(supported_sources)}"

            if results:
                # TMDB sources already return per-page data; truncate to one
                # page in case a composite source produced more.
                total_count = len(results)
                page_results = results[:page_size]
                # Keep only the key fields to save agent context space.
                simplified_results = []
                for r in page_results:
                    # r should be a dict (result of to_dict); check defensively.
                    if not isinstance(r, dict):
                        logger.warning(f"推荐结果格式异常,跳过: {type(r)}")
                        continue

                    simplified = {
                        "title": r.get("title"),
                        "en_title": r.get("en_title"),
                        "year": r.get("year"),
                        "type": media_type_to_agent(r.get("type")),
                        "season": r.get("season"),
                        "tmdb_id": r.get("tmdb_id"),
                        "imdb_id": r.get("imdb_id"),
                        "douban_id": r.get("douban_id"),
                        "vote_average": r.get("vote_average"),
                        "poster_path": r.get("poster_path"),
                        "detail_link": r.get("detail_link"),
                    }
                    simplified_results.append(simplified)
                result_json = json.dumps(
                    simplified_results, ensure_ascii=False, indent=2
                )
                has_more = total_count > page_size
                payload_msg = f"第 {page} 页,当前页 {len(simplified_results)} 条结果。"
                if has_more:
                    payload_msg += (
                        f" 可能有更多数据,可使用 page={page + 1} 获取下一页。"
                    )
                return f"{payload_msg}\n\n{result_json}"
            return "未找到推荐内容。"
        except Exception as e:
            logger.error(f"获取推荐失败: {e}", exc_info=True)
            return f"获取推荐时发生错误: {str(e)}"
|
||||
155
app/agent/tools/impl/get_search_results.py
Normal file
155
app/agent/tools/impl/get_search_results.py
Normal file
@@ -0,0 +1,155 @@
|
||||
"""获取搜索结果工具"""
|
||||
|
||||
import json
|
||||
import re
|
||||
from typing import List, Optional, Type
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from app.agent.tools.base import MoviePilotTool
|
||||
from app.chain.search import SearchChain
|
||||
from app.log import logger
|
||||
from ._torrent_search_utils import (
|
||||
TORRENT_RESULT_LIMIT,
|
||||
build_filter_options,
|
||||
filter_contexts,
|
||||
simplify_search_result,
|
||||
)
|
||||
|
||||
|
||||
class GetSearchResultsInput(BaseModel):
    """Input schema for the get_search_results tool."""

    # Required free-text rationale the agent must supply with every call.
    explanation: str = Field(
        ...,
        description="Clear explanation of why this tool is being used in the current context",
    )
    # The following list filters are all optional; None means "no filter".
    site: Optional[List[str]] = Field(None, description="Site name filters")
    season: Optional[List[str]] = Field(None, description="Season or episode filters")
    free_state: Optional[List[str]] = Field(None, description="Promotion state filters")
    video_code: Optional[List[str]] = Field(None, description="Video codec filters")
    edition: Optional[List[str]] = Field(None, description="Edition filters")
    resolution: Optional[List[str]] = Field(None, description="Resolution filters")
    release_group: Optional[List[str]] = Field(
        None, description="Release group filters"
    )
    # Case-insensitive regex applied to torrent titles after list filters.
    title_pattern: Optional[str] = Field(
        None,
        description="Regular expression pattern to filter torrent titles (e.g., '4K|2160p|UHD', '1080p.*BluRay')",
    )
    # When True the tool returns only the available filter options,
    # not the results themselves.
    show_filter_options: Optional[bool] = Field(
        False,
        description="Whether to return only optional filter options for re-checking available conditions",
    )
    page: Optional[int] = Field(
        1,
        description="Page number for pagination (default: 1, each page returns up to 50 results)",
    )
|
||||
|
||||
|
||||
class GetSearchResultsTool(MoviePilotTool):
    """Agent tool that pages through cached torrent search results with filters."""

    name: str = "get_search_results"
    description: str = "Get cached torrent search results from search_torrents with optional filters. Supports pagination with up to 50 results per page."
    args_schema: Type[BaseModel] = GetSearchResultsInput

    def get_tool_message(self, **kwargs) -> Optional[str]:
        """Static progress message; the filters are too numerous to summarize."""
        return "获取搜索结果"

    async def run(
        self,
        site: Optional[List[str]] = None,
        season: Optional[List[str]] = None,
        free_state: Optional[List[str]] = None,
        video_code: Optional[List[str]] = None,
        edition: Optional[List[str]] = None,
        resolution: Optional[List[str]] = None,
        release_group: Optional[List[str]] = None,
        title_pattern: Optional[str] = None,
        show_filter_options: bool = False,
        page: Optional[int] = 1,
        **kwargs,
    ) -> str:
        """Filter and paginate the last cached search results.

        Returns a JSON payload with total/page counts and simplified result
        entries, or a Chinese message string on empty/invalid input.
        """
        page = max(1, page or 1)
        logger.info(
            f"执行工具: {self.name}, 参数: site={site}, season={season}, free_state={free_state}, video_code={video_code}, edition={edition}, resolution={resolution}, release_group={release_group}, title_pattern={title_pattern}, show_filter_options={show_filter_options}, page={page}"
        )

        try:
            # Results come from the cache written by search_torrents.
            items = await SearchChain().async_last_search_results() or []
            if not items:
                return "没有可用的搜索结果,请先使用 search_torrents 搜索"

            if show_filter_options:
                # Option-discovery mode: report what filter values exist.
                payload = {
                    "total_count": len(items),
                    "filter_options": build_filter_options(items),
                }
                return json.dumps(payload, ensure_ascii=False, indent=2)

            # Compile the title regex up front so a bad pattern is reported
            # before any filtering happens.
            regex_pattern = None
            if title_pattern:
                try:
                    regex_pattern = re.compile(title_pattern, re.IGNORECASE)
                except re.error as e:
                    logger.warning(f"正则表达式编译失败: {title_pattern}, 错误: {e}")
                    return f"正则表达式格式错误: {str(e)}"

            filtered_items = filter_contexts(
                items=items,
                site=site,
                season=season,
                free_state=free_state,
                video_code=video_code,
                edition=edition,
                resolution=resolution,
                release_group=release_group,
            )
            if regex_pattern:
                filtered_items = [
                    item
                    for item in filtered_items
                    if item.torrent_info
                    and item.torrent_info.title
                    and regex_pattern.search(item.torrent_info.title)
                ]
            if not filtered_items:
                return "没有符合筛选条件的搜索结果,请调整筛选条件"

            total_count = len(filtered_items)
            # Recover each filtered item's 1-based position in the original
            # cache order; identity (id()) is used because filtering returns
            # the same objects, not copies.
            filtered_ids = {id(item) for item in filtered_items}
            matched_indices = [
                index
                for index, item in enumerate(items, start=1)
                if id(item) in filtered_ids
            ]

            # Pagination over the filtered list.
            page_size = TORRENT_RESULT_LIMIT
            start = (page - 1) * page_size
            end = start + page_size
            page_items = filtered_items[start:end]
            page_indices = matched_indices[start:end]

            if not page_items:
                return f"第 {page} 页没有数据,共 {total_count} 条结果,共 {(total_count + page_size - 1) // page_size} 页。"

            results = [
                simplify_search_result(item, index)
                for item, index in zip(page_items, page_indices)
            ]
            total_pages = (total_count + page_size - 1) // page_size
            payload = {
                "total_count": total_count,
                "page": page,
                "total_pages": total_pages,
                "results": results,
            }
            if page < total_pages:
                payload["message"] = (
                    f"搜索结果共 {total_count} 条,当前第 {page}/{total_pages} 页,可使用 page={page + 1} 获取下一页。"
                )
            return json.dumps(payload, ensure_ascii=False, indent=2)
        except Exception as e:
            error_message = f"获取搜索结果失败: {str(e)}"
            logger.error(f"获取搜索结果失败: {e}", exc_info=True)
            return error_message
|
||||
118
app/agent/tools/impl/install_plugin.py
Normal file
118
app/agent/tools/impl/install_plugin.py
Normal file
@@ -0,0 +1,118 @@
|
||||
"""安装插件工具"""
|
||||
|
||||
import json
|
||||
from typing import Optional, Type
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from app.agent.tools.base import MoviePilotTool
|
||||
from app.agent.tools.impl._plugin_tool_utils import (
|
||||
get_plugin_snapshot,
|
||||
install_plugin_runtime,
|
||||
load_market_plugins,
|
||||
summarize_plugin,
|
||||
)
|
||||
from app.log import logger
|
||||
|
||||
|
||||
class InstallPluginInput(BaseModel):
    """Input schema for the install_plugin tool."""

    # Required free-text rationale the agent must supply with every call.
    explanation: str = Field(
        ...,
        description="Clear explanation of why this tool is being used in the current context",
    )
    # Exact market plugin identifier; discovery happens elsewhere.
    plugin_id: str = Field(
        ...,
        description="Exact plugin ID to install. Use query_market_plugins first to find the correct plugin_id.",
    )
    # When True, reinstall/upgrade even if the plugin is already present.
    force: bool = Field(
        False,
        description="Whether to force reinstall or upgrade the specified plugin.",
    )
    # When True, bypass the cached market list before resolving plugin_id.
    force_refresh_market: bool = Field(
        False,
        description="Whether to refresh plugin market caches before reading the market list.",
    )
|
||||
|
||||
|
||||
class InstallPluginTool(MoviePilotTool):
    """Admin-only agent tool that installs a market plugin by exact ID."""

    name: str = "install_plugin"
    description: str = (
        "Install a plugin by exact plugin_id from the plugin market or local plugin repositories. "
        "Use query_market_plugins first when you need filtering or discovery."
    )
    # Installation changes the running system, so restrict to admins.
    require_admin: bool = True
    args_schema: Type[BaseModel] = InstallPluginInput

    def get_tool_message(self, **kwargs) -> Optional[str]:
        """Build the progress message shown while installing."""
        plugin_id = kwargs.get("plugin_id")
        return f"安装插件: {plugin_id or '未知插件'}"

    async def run(
        self,
        plugin_id: str,
        force: bool = False,
        force_refresh_market: bool = False,
        **kwargs,
    ) -> str:
        """Resolve plugin_id against the market list and install it.

        Returns a JSON string with success flag, messages, and (on success)
        the plugin summary plus its post-install runtime snapshot.
        """
        logger.info(
            f"执行工具: {self.name}, 参数: plugin_id={plugin_id}, force={force}"
        )

        try:
            plugins = await load_market_plugins(force_refresh=force_refresh_market)
            if not plugins:
                return json.dumps(
                    {"success": False, "message": "当前插件市场没有可用插件"},
                    ensure_ascii=False,
                )

            # Exact-ID lookup only; fuzzy discovery belongs to query_market_plugins.
            candidate = next((plugin for plugin in plugins if plugin.id == plugin_id), None)
            if not candidate:
                return json.dumps(
                    {
                        "success": False,
                        "message": f"未在插件市场中找到插件: {plugin_id}。请先调用 query_market_plugins 确认 plugin_id。",
                    },
                    ensure_ascii=False,
                )

            success, message, refreshed_only = await install_plugin_runtime(
                candidate.id,
                getattr(candidate, "repo_url", None),
                force=force,
            )
            if not success:
                return json.dumps(
                    {
                        "success": False,
                        "plugin": summarize_plugin(candidate),
                        "message": message,
                    },
                    ensure_ascii=False,
                    indent=2,
                )

            plugin_snapshot = get_plugin_snapshot(candidate.id)
            # Already installed + update available + not forced: the runtime
            # only reloaded the existing version, so tell the agent how to
            # actually upgrade.
            if refreshed_only and getattr(candidate, "has_update", False) and not force:
                message = "插件已安装,当前仅刷新加载;如需升级到市场新版本,请设置 force=true"

            return json.dumps(
                {
                    "success": True,
                    "message": message,
                    "force": force,
                    "refreshed_only": refreshed_only,
                    "plugin": summarize_plugin(candidate),
                    "runtime": plugin_snapshot,
                },
                ensure_ascii=False,
                indent=2,
            )
        except Exception as e:
            logger.error(f"安装插件失败: {e}", exc_info=True)
            return json.dumps(
                {"success": False, "message": f"安装插件时发生错误: {str(e)}"},
                ensure_ascii=False,
            )
|
||||
118
app/agent/tools/impl/list_directory.py
Normal file
118
app/agent/tools/impl/list_directory.py
Normal file
@@ -0,0 +1,118 @@
|
||||
"""查询文件系统目录内容工具"""
|
||||
|
||||
import json
|
||||
from datetime import datetime
|
||||
from pathlib import Path
|
||||
from typing import Optional, Type
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from app.agent.tools.base import MoviePilotTool
|
||||
from app.chain.storage import StorageChain
|
||||
from app.log import logger
|
||||
from app.schemas.file import FileItem
|
||||
from app.utils.string import StringUtils
|
||||
|
||||
|
||||
class ListDirectoryInput(BaseModel):
    """Input schema for the list_directory tool."""
    # Required free-text rationale the agent must supply with every call.
    explanation: str = Field(..., description="Clear explanation of why this tool is being used in the current context")
    # Absolute or relative directory path; normalization happens in the tool.
    path: str = Field(..., description="Directory path to list contents (e.g., '/home/user/downloads' or 'C:/Downloads')")
    # Storage backend name; anything non-"local" is treated as remote.
    storage: Optional[str] = Field("local", description="Storage type (default: 'local' for local file system, can be 'smb', 'alist', etc.)")
    sort_by: Optional[str] = Field("name", description="Sort order: 'name' for alphabetical sorting, 'time' for modification time sorting (default: 'name')")
|
||||
|
||||
|
||||
class ListDirectoryTool(MoviePilotTool):
    """Agent tool that lists a directory's contents via StorageChain."""

    name: str = "list_directory"
    description: str = "List actual files and folders in a file system directory (NOT configuration). Shows files and subdirectories with their names, types, sizes, and modification times. Returns up to 20 items and the total count if there are more items. Use 'query_directory_settings' to query directory configuration settings."
    args_schema: Type[BaseModel] = ListDirectoryInput

    def get_tool_message(self, **kwargs) -> Optional[str]:
        """Build a friendly progress message from the directory arguments."""
        path = kwargs.get("path", "")
        storage = kwargs.get("storage", "local")

        message = f"查询目录: {path}"
        if storage != "local":
            message += f" [存储: {storage}]"

        return message

    @staticmethod
    def _list_directory_sync(
        path: str, storage: Optional[str] = "local", sort_by: Optional[str] = "name"
    ) -> str:
        """
        List a directory synchronously and format the result as JSON text.

        Directory traversal may hit the local disk or a remote storage
        backend, so this runs in a worker thread (see run()).
        """
        if not path:
            return "错误:路径不能为空"

        # Path normalization: for local storage, resolve relative paths
        # (anything not starting with "/" and not a Windows drive path);
        # for remote storages, ensure a leading "/".
        if storage == "local":
            if not path.startswith("/") and not (len(path) > 1 and path[1] == ":"):
                path = str(Path(path).resolve())
        elif not path.startswith("/"):
            path = "/" + path

        fileitem = FileItem(storage=storage or "local", path=path, type="dir")
        file_list = StorageChain().list_files(fileitem, recursion=False)

        # None means the backend could not access the path; an empty list
        # means the directory exists but is empty.
        if file_list is None:
            return f"无法访问目录:{path},请检查路径是否正确或存储是否可用"
        if not file_list:
            return f"目录 {path} 为空"

        if sort_by == "time":
            # Newest first; missing timestamps sort last via the 0 fallback.
            file_list.sort(key=lambda x: x.modify_time or 0, reverse=True)
        else:
            # Directories first, then natural (human) name order.
            file_list.sort(
                key=lambda x: (
                    0 if x.type == "dir" else 1,
                    StringUtils.natural_sort_key(x.name or ""),
                )
            )

        # Cap at 20 items to keep the agent context small.
        total_count = len(file_list)
        limited_list = file_list[:20]
        simplified_items = []
        for item in limited_list:
            size_str = StringUtils.str_filesize(item.size) if item.size else None
            modify_time_str = None
            if item.modify_time:
                try:
                    modify_time_str = datetime.fromtimestamp(item.modify_time).strftime(
                        "%Y-%m-%d %H:%M:%S"
                    )
                except (ValueError, OSError):
                    # Out-of-range/invalid timestamp: fall back to raw value.
                    modify_time_str = str(item.modify_time)

            simplified = {
                "name": item.name,
                "type": item.type,
                "path": item.path,
                "size": size_str,
                "modify_time": modify_time_str,
            }
            if item.type == "file" and item.extension:
                simplified["extension"] = item.extension
            simplified_items.append(simplified)

        result_json = json.dumps(simplified_items, ensure_ascii=False, indent=2)
        if total_count > 20:
            return (
                f"注意:目录中共有 {total_count} 个项目,为节省上下文空间,仅显示前 20 个项目。\n\n"
                f"{result_json}"
            )
        return result_json

    async def run(self, path: str, storage: Optional[str] = "local",
                  sort_by: Optional[str] = "name", **kwargs) -> str:
        """Run the blocking directory listing in the 'storage' worker pool."""
        logger.info(f"执行工具: {self.name}, 参数: path={path}, storage={storage}, sort_by={sort_by}")

        try:
            return await self.run_blocking(
                "storage", self._list_directory_sync, path, storage, sort_by
            )
        except Exception as e:
            logger.error(f"查询目录内容失败: {e}", exc_info=True)
            return f"查询目录内容时发生错误: {str(e)}"
|
||||
79
app/agent/tools/impl/list_slash_commands.py
Normal file
79
app/agent/tools/impl/list_slash_commands.py
Normal file
@@ -0,0 +1,79 @@
|
||||
"""查询所有可用斜杠命令工具(系统命令 + 插件命令)"""
|
||||
|
||||
import json
|
||||
from typing import Optional, Type
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from app.agent.tools.base import MoviePilotTool
|
||||
from app.log import logger
|
||||
|
||||
|
||||
class ListSlashCommandsInput(BaseModel):
    """Input schema for the list_slash_commands tool (rationale only)."""

    # Required free-text rationale the agent must supply with every call.
    explanation: str = Field(
        ...,
        description="Clear explanation of why this tool is being used in the current context",
    )
|
||||
|
||||
|
||||
class ListSlashCommandsTool(MoviePilotTool):
    """Admin-only agent tool that enumerates all registered slash commands."""

    name: str = "list_slash_commands"
    description: str = (
        "List all available slash commands in the system, including system preset commands "
        "(e.g. /cookiecloud, /sites, /subscribes, /downloading, /transfer, /restart, etc.) "
        "and plugin-registered commands. "
        "Use this tool to discover what slash commands are available before executing them with run_slash_command. "
        "This is especially useful when the user describes an action in natural language and you need to "
        "find the matching command to fulfill their request."
    )
    args_schema: Type[BaseModel] = ListSlashCommandsInput
    require_admin: bool = True

    def get_tool_message(self, **kwargs) -> Optional[str]:
        """Static progress message; the tool takes no meaningful arguments."""
        return "查询所有可用命令"

    async def run(self, **kwargs) -> str:
        """Collect all registered commands and return them as a JSON listing."""
        logger.info(f"执行工具: {self.name}")

        try:
            # Imported locally to avoid a circular import at module load time
            # (presumably — TODO confirm against app.command's imports).
            from app.command import Command

            command_obj = Command()
            all_commands = command_obj.get_commands()

            if not all_commands:
                return "当前没有可用的命令"

            commands_list = []
            for cmd, info in all_commands.items():
                cmd_info = {
                    "command": cmd,
                    "description": info.get("description", ""),
                }
                if info.get("category"):
                    cmd_info["category"] = info["category"]
                # Classify the command: scheduler job, plugin-registered
                # (has a plugin id "pid"), otherwise a system preset.
                if info.get("type") == "scheduler":
                    cmd_info["type"] = "scheduler"
                elif info.get("pid"):
                    cmd_info["type"] = "plugin"
                    cmd_info["plugin_id"] = info["pid"]
                else:
                    cmd_info["type"] = "system"
                commands_list.append(cmd_info)

            result = {
                "total": len(commands_list),
                "commands": commands_list,
            }
            return json.dumps(result, ensure_ascii=False, indent=2)

        except Exception as e:
            logger.error(f"查询可用命令失败: {e}", exc_info=True)
            return json.dumps(
                {"success": False, "message": f"查询可用命令时发生错误: {str(e)}"},
                ensure_ascii=False,
            )
|
||||
139
app/agent/tools/impl/modify_download.py
Normal file
139
app/agent/tools/impl/modify_download.py
Normal file
@@ -0,0 +1,139 @@
|
||||
"""修改下载任务工具"""
|
||||
|
||||
from typing import Optional, Type, List
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from app.agent.tools.base import MoviePilotTool
|
||||
from app.chain.download import DownloadChain
|
||||
from app.log import logger
|
||||
|
||||
|
||||
class ModifyDownloadInput(BaseModel):
    """Input schema for the modify_download tool."""

    # Required free-text rationale the agent must supply with every call.
    explanation: str = Field(
        ...,
        description="Clear explanation of why this tool is being used in the current context",
    )
    # 40-char torrent hash identifying the download task.
    hash: str = Field(
        ..., description="Task hash (can be obtained from query_download_tasks tool)"
    )
    # Optional start/stop operation; None means no state change.
    action: Optional[str] = Field(
        None,
        description="Action to perform on the task: 'start' to resume downloading, 'stop' to pause downloading. "
        "If not provided, no start/stop action will be performed.",
    )
    tags: Optional[List[str]] = Field(
        None,
        description="List of tags to set on the download task. If provided, these tags will be added to the task. "
        "Example: ['movie', 'hd']",
    )
    downloader: Optional[str] = Field(
        None,
        description="Name of specific downloader (optional, if not provided will search all downloaders)",
    )
|
||||
|
||||
|
||||
class ModifyDownloadTool(MoviePilotTool):
|
||||
"""修改下载任务工具"""
|
||||
|
||||
name: str = "modify_download"
|
||||
description: str = (
|
||||
"Modify a download task in the downloader by task hash. "
|
||||
"Supports: 1) Setting tags on a download task, "
|
||||
"2) Starting (resuming) a paused download task, "
|
||||
"3) Stopping (pausing) a downloading task. "
|
||||
"Multiple operations can be performed in a single call."
|
||||
)
|
||||
args_schema: Type[BaseModel] = ModifyDownloadInput
|
||||
require_admin: bool = True
|
||||
|
||||
def get_tool_message(self, **kwargs) -> Optional[str]:
    """Summarize the requested download modification for display to the user."""
    segments = [f"修改下载任务: {kwargs.get('hash', '')}"]

    action = kwargs.get("action")
    if action == "start":
        segments.append("操作: 开始下载")
    elif action == "stop":
        segments.append("操作: 暂停下载")

    tag_list = kwargs.get("tags")
    if tag_list:
        segments.append(f"标签: {', '.join(tag_list)}")

    target = kwargs.get("downloader")
    if target:
        segments.append(f"下载器: {target}")

    return " | ".join(segments)
|
||||
|
||||
@staticmethod
|
||||
def _modify_download_sync(
|
||||
hash_value: str,
|
||||
action: Optional[str] = None,
|
||||
tags: Optional[List[str]] = None,
|
||||
downloader: Optional[str] = None,
|
||||
) -> List[str]:
|
||||
"""同步修改下载任务状态和标签,避免下载器 SDK 阻塞事件循环。"""
|
||||
download_chain = DownloadChain()
|
||||
results = []
|
||||
|
||||
if tags:
|
||||
tag_result = download_chain.set_torrents_tag(
|
||||
hashs=[hash_value], tags=tags, downloader=downloader
|
||||
)
|
||||
if tag_result:
|
||||
results.append(f"成功设置标签:{', '.join(tags)}")
|
||||
else:
|
||||
results.append("设置标签失败,请检查任务是否存在或下载器是否可用")
|
||||
|
||||
if action:
|
||||
action_result = download_chain.set_downloading(
|
||||
hash_str=hash_value, oper=action, name=downloader
|
||||
)
|
||||
action_desc = "开始" if action == "start" else "暂停"
|
||||
if action_result:
|
||||
results.append(f"成功{action_desc}下载任务")
|
||||
else:
|
||||
results.append(f"{action_desc}下载任务失败,请检查任务是否存在或下载器是否可用")
|
||||
|
||||
return results
|
||||
|
||||
async def run(
|
||||
self,
|
||||
hash: str,
|
||||
action: Optional[str] = None,
|
||||
tags: Optional[List[str]] = None,
|
||||
downloader: Optional[str] = None,
|
||||
**kwargs,
|
||||
) -> str:
|
||||
logger.info(
|
||||
f"执行工具: {self.name}, 参数: hash={hash}, action={action}, tags={tags}, downloader={downloader}"
|
||||
)
|
||||
|
||||
try:
|
||||
# 校验 hash 格式
|
||||
if len(hash) != 40 or not all(c in "0123456789abcdefABCDEF" for c in hash):
|
||||
return "参数错误:hash 格式无效,请先使用 query_download_tasks 工具获取正确的 hash。"
|
||||
|
||||
# 校验参数:至少需要一个操作
|
||||
if not action and not tags:
|
||||
return "参数错误:至少需要指定 action(start/stop)或 tags 中的一个。"
|
||||
|
||||
# 校验 action 参数
|
||||
if action and action not in ("start", "stop"):
|
||||
return f"参数错误:action 只支持 'start'(开始下载)或 'stop'(暂停下载),收到: '{action}'。"
|
||||
|
||||
results = await self.run_blocking(
|
||||
"downloader",
|
||||
self._modify_download_sync,
|
||||
hash,
|
||||
action,
|
||||
tags,
|
||||
downloader,
|
||||
)
|
||||
|
||||
return f"下载任务 {hash}:" + ";".join(results)
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"修改下载任务失败: {e}", exc_info=True)
|
||||
return f"修改下载任务时发生错误: {str(e)}"
|
||||
85
app/agent/tools/impl/query_builtin_filter_rules.py
Normal file
85
app/agent/tools/impl/query_builtin_filter_rules.py
Normal file
@@ -0,0 +1,85 @@
|
||||
"""查询内置过滤规则工具。"""
|
||||
|
||||
import json
|
||||
from typing import Optional, Type, List
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from app.agent.tools.base import MoviePilotTool
|
||||
from app.agent.tools.impl._filter_rule_utils import (
|
||||
get_builtin_rules,
|
||||
serialize_builtin_rule,
|
||||
RULE_STRING_SYNTAX,
|
||||
)
|
||||
from app.log import logger
|
||||
|
||||
|
||||
class QueryBuiltinFilterRulesInput(BaseModel):
    """Input schema for the built-in filter rule query tool."""

    # Rationale the agent must supply for invoking the tool.
    explanation: str = Field(..., description="Clear explanation of why this tool is being used in the current context")
    # Optional subset of built-in rule IDs; omit to fetch every rule.
    rule_ids: Optional[List[str]] = Field(None, description="Optional list of built-in rule IDs to query. If omitted, return all built-in rules.")
|
||||
|
||||
|
||||
class QueryBuiltinFilterRulesTool(MoviePilotTool):
    """Tool that lists the filter rules built into the backend filter module."""

    name: str = "query_builtin_filter_rules"
    description: str = (
        "Query built-in filter rules defined by the backend filter module. "
        "These rule IDs can be used directly inside rule_string expressions for filter rule groups. "
        "Use this tool before add_rule_group or update_rule_group to learn valid built-in rule IDs."
    )
    args_schema: Type[BaseModel] = QueryBuiltinFilterRulesInput

    def get_tool_message(self, **kwargs) -> Optional[str]:
        """Describe the query in a short, user-facing message."""
        requested = kwargs.get("rule_ids") or []
        if not requested:
            return "查询所有内置过滤规则"
        return f"查询内置过滤规则: {', '.join(requested)}"

    async def run(
        self,
        rule_ids: Optional[List[str]] = None,
        **kwargs,
    ) -> str:
        """Serialize the (optionally filtered) built-in rules as a JSON payload."""
        logger.info(f"执行工具: {self.name}")

        try:
            rules = get_builtin_rules()
            if rule_ids:
                # Narrow the mapping down to the explicitly requested IDs.
                wanted = set(rule_ids)
                rules = {rid: body for rid, body in rules.items() if rid in wanted}

            payload = []
            for rid, body in rules.items():
                payload.append(serialize_builtin_rule(rid, body))

            return json.dumps(
                {
                    "success": True,
                    "count": len(payload),
                    "rule_string_syntax": RULE_STRING_SYNTAX,
                    "rules": payload,
                },
                ensure_ascii=False,
                indent=2,
            )
        except Exception as exc:
            logger.error(f"查询内置过滤规则失败: {exc}", exc_info=True)
            return json.dumps(
                {
                    "success": False,
                    "message": f"查询内置过滤规则失败: {exc}",
                    "rules": [],
                },
                ensure_ascii=False,
            )
|
||||
95
app/agent/tools/impl/query_custom_filter_rules.py
Normal file
95
app/agent/tools/impl/query_custom_filter_rules.py
Normal file
@@ -0,0 +1,95 @@
|
||||
"""查询自定义过滤规则工具。"""
|
||||
|
||||
import json
|
||||
from typing import Optional, Type, List
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from app.agent.tools.base import MoviePilotTool
|
||||
from app.agent.tools.impl._filter_rule_utils import (
|
||||
collect_custom_rule_group_refs,
|
||||
get_custom_rules,
|
||||
get_rule_groups,
|
||||
serialize_custom_rule,
|
||||
)
|
||||
from app.log import logger
|
||||
|
||||
|
||||
class QueryCustomFilterRulesInput(BaseModel):
    """Input schema for the custom filter rule query tool."""

    # Rationale the agent must supply for invoking the tool.
    explanation: str = Field(..., description="Clear explanation of why this tool is being used in the current context")
    # Optional subset of custom rule IDs; omit to fetch every rule.
    rule_ids: Optional[List[str]] = Field(None, description="Optional list of custom rule IDs to query. If omitted, return all custom rules.")
    # When True, the response notes which rule groups reference each rule.
    include_group_refs: bool = Field(True, description="Whether to include which rule groups reference each custom rule.")
|
||||
|
||||
|
||||
class QueryCustomFilterRulesTool(MoviePilotTool):
    """Tool that lists user-defined custom filter rules."""

    name: str = "query_custom_filter_rules"
    description: str = (
        "Query custom filter rules stored in CustomFilterRules. "
        "Custom rules can be referenced from rule_string expressions in filter rule groups. "
        "Use this tool before add_rule_group or update_rule_group to learn valid custom rule IDs."
    )
    args_schema: Type[BaseModel] = QueryCustomFilterRulesInput

    def get_tool_message(self, **kwargs) -> Optional[str]:
        """Describe the query in a short, user-facing message."""
        requested = kwargs.get("rule_ids") or []
        if not requested:
            return "查询所有自定义过滤规则"
        return f"查询自定义过滤规则: {', '.join(requested)}"

    async def run(
        self,
        rule_ids: Optional[List[str]] = None,
        include_group_refs: bool = True,
        **kwargs,
    ) -> str:
        """Serialize the (optionally filtered) custom rules, with optional group references."""
        logger.info(f"执行工具: {self.name}")

        try:
            rules = get_custom_rules()
            if rule_ids:
                # Keep only the rules whose ID was explicitly requested.
                wanted = set(rule_ids)
                rules = [item for item in rules if item.id in wanted]

            group_refs = {}
            if include_group_refs:
                # Map each rule ID to the rule groups that reference it.
                group_refs = collect_custom_rule_group_refs(
                    get_rule_groups(),
                    [item.id for item in rules if item.id],
                )

            payload = [
                serialize_custom_rule(item, group_refs.get(item.id))
                for item in rules
            ]
            return json.dumps(
                {
                    "success": True,
                    "count": len(payload),
                    "rules": payload,
                },
                ensure_ascii=False,
                indent=2,
            )
        except Exception as exc:
            logger.error(f"查询自定义过滤规则失败: {exc}", exc_info=True)
            return json.dumps(
                {
                    "success": False,
                    "message": f"查询自定义过滤规则失败: {exc}",
                    "rules": [],
                },
                ensure_ascii=False,
            )
|
||||
70
app/agent/tools/impl/query_custom_identifiers.py
Normal file
70
app/agent/tools/impl/query_custom_identifiers.py
Normal file
@@ -0,0 +1,70 @@
|
||||
"""查询自定义识别词工具"""
|
||||
|
||||
import json
|
||||
from typing import Optional, Type
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from app.agent.tools.base import MoviePilotTool
|
||||
from app.db.systemconfig_oper import SystemConfigOper
|
||||
from app.log import logger
|
||||
from app.schemas.types import SystemConfigKey
|
||||
|
||||
|
||||
class QueryCustomIdentifiersInput(BaseModel):
    """Input schema for the custom identifier query tool."""

    # Rationale the agent must supply for invoking the tool.
    explanation: str = Field(..., description="Clear explanation of why this tool is being used in the current context")
|
||||
|
||||
|
||||
class QueryCustomIdentifiersTool(MoviePilotTool):
    """Tool that returns the configured custom identifiers (自定义识别词)."""

    name: str = "query_custom_identifiers"
    description: str = (
        "Query all currently configured custom identifiers (自定义识别词). "
        "Returns the list of identifier rules used for preprocessing torrent/file names before media recognition. "
        "Use this tool to check existing rules before adding new ones to avoid duplicates."
    )
    args_schema: Type[BaseModel] = QueryCustomIdentifiersInput

    def get_tool_message(self, **kwargs) -> Optional[str]:
        """Return a short, user-facing description of the query."""
        return "查询自定义识别词"

    @staticmethod
    def _load_custom_identifiers():
        """Read the custom identifiers from the in-memory system config cache."""
        return SystemConfigOper().get(SystemConfigKey.CustomIdentifiers)

    async def run(self, **kwargs) -> str:
        """Serialize the configured identifiers as JSON.

        Returns:
            A JSON string with ``success``/``count``/``identifiers`` keys; when
            nothing is configured, an empty list plus an explanatory message.
            On failure, a JSON error payload.
        """
        logger.info(f"执行工具: {self.name}")
        try:
            identifiers = self._load_custom_identifiers()
            if identifiers:
                return json.dumps(
                    {
                        "success": True,
                        "count": len(identifiers),
                        "identifiers": identifiers,
                    },
                    ensure_ascii=False,
                    indent=2,
                )
            return json.dumps(
                {
                    "success": True,
                    "count": 0,
                    "identifiers": [],
                    "message": "当前没有配置自定义识别词",
                },
                ensure_ascii=False,
                indent=2,
            )
        except Exception as e:
            # exc_info=True to capture the full traceback, consistent with
            # error logging in the sibling agent tools.
            logger.error(f"查询自定义识别词失败: {e}", exc_info=True)
            return json.dumps(
                {"success": False, "message": f"查询自定义识别词时发生错误: {str(e)}"},
                ensure_ascii=False,
            )
|
||||
139
app/agent/tools/impl/query_directory_settings.py
Normal file
139
app/agent/tools/impl/query_directory_settings.py
Normal file
@@ -0,0 +1,139 @@
|
||||
"""查询系统目录设置工具"""
|
||||
|
||||
import json
|
||||
from typing import Optional, Type
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from app.agent.tools.base import MoviePilotTool
|
||||
from app.helper.directory import DirectoryHelper
|
||||
from app.log import logger
|
||||
|
||||
|
||||
class QueryDirectorySettingsInput(BaseModel):
    """Input schema for the directory settings query tool."""

    # Rationale the agent must supply for invoking the tool.
    explanation: str = Field(
        ...,
        description="Clear explanation of why this tool is being used in the current context",
    )
    # Restrict results to download dirs, library dirs, or both.
    directory_type: Optional[str] = Field(
        "all",
        description="Filter directories by type: 'download' for download directories, 'library' for media library directories, 'all' for all directories",
    )
    # Restrict results by where the directory is stored.
    storage_type: Optional[str] = Field(
        "all",
        description="Filter directories by storage type: 'local' for local storage, 'remote' for remote storage, 'all' for all storage types",
    )
    # Case-insensitive substring match on the directory name.
    name: Optional[str] = Field(
        None,
        description="Filter directories by name (partial match, optional)",
    )
|
||||
|
||||
|
||||
class QueryDirectorySettingsTool(MoviePilotTool):
    """Query configured directory settings (download/library paths and per-directory options)."""

    name: str = "query_directory_settings"
    description: str = "Query system directory configuration settings (NOT file listings). Returns configured directory paths, storage types, transfer modes, and other directory-related settings. Use 'list_directory' to list actual files and folders in a directory."
    args_schema: Type[BaseModel] = QueryDirectorySettingsInput

    def get_tool_message(self, **kwargs) -> Optional[str]:
        """Generate a friendly progress message from the query parameters."""
        directory_type = kwargs.get("directory_type", "all")
        storage_type = kwargs.get("storage_type", "all")
        name = kwargs.get("name")

        parts = ["查询目录配置"]

        if directory_type != "all":
            type_map = {"download": "下载目录", "library": "媒体库目录"}
            parts.append(f"类型: {type_map.get(directory_type, directory_type)}")

        if storage_type != "all":
            storage_map = {"local": "本地存储", "remote": "远程存储"}
            parts.append(f"存储: {storage_map.get(storage_type, storage_type)}")

        if name:
            parts.append(f"名称: {name}")

        return " | ".join(parts) if len(parts) > 1 else parts[0]

    @staticmethod
    def _query_directory_settings(
        directory_type: Optional[str] = "all",
        storage_type: Optional[str] = "all",
        name: Optional[str] = None,
    ) -> str:
        """
        Filter and serialize the configured directories.

        Directory configuration comes entirely from the in-memory config
        cache; this method only performs local filtering and serialization.
        """
        directory_helper = DirectoryHelper()

        # Pick the source list according to the requested directory type.
        if directory_type == "download":
            dirs = directory_helper.get_download_dirs()
        elif directory_type == "library":
            dirs = directory_helper.get_library_dirs()
        else:
            dirs = directory_helper.get_dirs()

        filtered_dirs = []
        for d in dirs:
            # Storage filtering. For directory_type == "all" a directory is
            # kept only if every side it actually configures (download path
            # and/or library path) matches the requested storage kind.
            if storage_type == "local":
                if directory_type == "download" and d.storage != "local":
                    continue
                if directory_type == "library" and d.library_storage != "local":
                    continue
                if directory_type == "all":
                    if d.download_path and d.storage != "local":
                        continue
                    if d.library_path and d.library_storage != "local":
                        continue
            elif storage_type == "remote":
                if directory_type == "download" and d.storage == "local":
                    continue
                if directory_type == "library" and d.library_storage == "local":
                    continue
                if directory_type == "all":
                    if d.download_path and d.storage == "local":
                        continue
                    if d.library_path and d.library_storage == "local":
                        continue

            # Case-insensitive partial match on the directory name.
            if name and d.name and name.lower() not in d.name.lower():
                continue
            filtered_dirs.append(d)

        if not filtered_dirs:
            return "未找到相关目录配置"

        # Project each directory object down to a plain dict of key settings.
        simplified_dirs = []
        for d in filtered_dirs:
            simplified_dirs.append(
                {
                    "name": d.name,
                    "priority": d.priority,
                    "storage": d.storage,
                    "download_path": d.download_path,
                    "library_path": d.library_path,
                    "library_storage": d.library_storage,
                    "media_type": d.media_type,
                    "media_category": d.media_category,
                    "monitor_type": d.monitor_type,
                    "monitor_mode": d.monitor_mode,
                    "transfer_type": d.transfer_type,
                    "overwrite_mode": d.overwrite_mode,
                    "renaming": d.renaming,
                    "scraping": d.scraping,
                    "notify": d.notify,
                    "download_type_folder": d.download_type_folder,
                    "download_category_folder": d.download_category_folder,
                    "library_type_folder": d.library_type_folder,
                    "library_category_folder": d.library_category_folder,
                }
            )

        return json.dumps(simplified_dirs, ensure_ascii=False, indent=2)

    async def run(self, directory_type: Optional[str] = "all",
                  storage_type: Optional[str] = "all",
                  name: Optional[str] = None, **kwargs) -> str:
        """Run the cache-backed query; any failure is returned as an error string."""
        logger.info(f"执行工具: {self.name}, 参数: directory_type={directory_type}, storage_type={storage_type}, name={name}")

        try:
            return self._query_directory_settings(
                directory_type=directory_type,
                storage_type=storage_type,
                name=name,
            )
        except Exception as e:
            logger.error(f"查询系统目录设置失败: {e}", exc_info=True)
            return f"查询系统目录设置时发生错误: {str(e)}"
|
||||
282
app/agent/tools/impl/query_download_tasks.py
Normal file
282
app/agent/tools/impl/query_download_tasks.py
Normal file
@@ -0,0 +1,282 @@
|
||||
"""查询下载工具"""
|
||||
|
||||
import json
|
||||
from typing import Any, Dict, List, Optional, Type, Union
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from app.agent.tools.base import MoviePilotTool
|
||||
from app.chain.download import DownloadChain
|
||||
from app.db.downloadhistory_oper import DownloadHistoryOper
|
||||
from app.log import logger
|
||||
from app.schemas import TransferTorrent, DownloadingTorrent
|
||||
from app.schemas.types import TorrentStatus, media_type_to_agent
|
||||
|
||||
|
||||
class QueryDownloadTasksInput(BaseModel):
    """Input schema for the download task query tool."""

    # Rationale the agent must supply for invoking the tool.
    explanation: str = Field(..., description="Clear explanation of why this tool is being used in the current context")
    # Limit the query to one downloader; None queries all of them.
    downloader: Optional[str] = Field(None, description="Name of specific downloader to query (optional, if not provided queries all configured downloaders)")
    # Coarse status filter applied when no hash/title lookup is given.
    status: Optional[str] = Field("all", description="Filter downloads by status: 'downloading' for active downloads, 'completed' for finished downloads, 'paused' for paused downloads, 'all' for all downloads")
    # Exact task lookup by info hash, bypassing the status filter.
    hash: Optional[str] = Field(None, description="Query specific download task by hash (optional, if provided will search for this specific task regardless of status)")
    # Partial-match lookup by task title/name.
    title: Optional[str] = Field(None, description="Query download tasks by title/name (optional, supports partial match, searches all tasks if provided)")
    # Partial-match filter on task tags.
    tag: Optional[str] = Field(None, description="Filter download tasks by tag (optional, supports partial match, e.g. 'movie' will match tasks with tag 'movie' or 'movie_2024')")
|
||||
|
||||
|
||||
class QueryDownloadTasksTool(MoviePilotTool):
    """List and search download tasks across the configured downloaders."""

    name: str = "query_download_tasks"
    description: str = "Query download status and list download tasks. Can query all active downloads, or search for specific tasks by hash, title, or tag. Shows download progress, completion status, tags, and task details from configured downloaders."
    args_schema: Type[BaseModel] = QueryDownloadTasksInput

    @staticmethod
    def _get_all_torrents(download_chain: DownloadChain, downloader: Optional[str] = None) -> List[Union[TransferTorrent, DownloadingTorrent]]:
        """
        Query tasks in all states: both actively downloading tasks and
        completed (transfer-ready) tasks.
        """
        all_torrents = []
        # Tasks that are currently downloading.
        downloading_torrents = download_chain.list_torrents(
            downloader=downloader,
            status=TorrentStatus.DOWNLOADING
        ) or []
        all_torrents.extend(downloading_torrents)

        # Completed tasks (transfer-ready state).
        transfer_torrents = download_chain.list_torrents(
            downloader=downloader,
            status=TorrentStatus.TRANSFER
        ) or []
        all_torrents.extend(transfer_torrents)

        return all_torrents

    @staticmethod
    def _format_progress(progress: Optional[float]) -> Optional[str]:
        """
        Format a download progress value as a percentage string with one
        decimal place; returns None for missing or non-numeric input.
        """
        try:
            if progress is None:
                return None
            return f"{float(progress):.1f}%"
        except (TypeError, ValueError):
            return None

    @staticmethod
    def _apply_download_history(
        torrent: Union[TransferTorrent, DownloadingTorrent], history: Any
    ) -> None:
        """Backfill supplemental fields from the download history onto the task result (mutates torrent in place)."""
        if not history:
            return
        if hasattr(torrent, "media"):
            torrent.media = {
                "tmdbid": history.tmdbid,
                "type": history.type,
                "title": history.title,
                "season": history.seasons,
                "episode": history.episodes,
                "image": history.image,
            }
        if hasattr(torrent, "username"):
            torrent.username = history.username
            torrent.userid = history.userid

    @classmethod
    def _load_history_map(
        cls, torrents: List[Union[TransferTorrent, DownloadingTorrent]]
    ) -> Dict[str, Any]:
        """Bulk-load download history keyed by hash, avoiding per-task N+1 queries."""
        hashes = [torrent.hash for torrent in torrents if getattr(torrent, "hash", None)]
        if not hashes:
            return {}
        return DownloadHistoryOper().get_by_hashes(hashes)

    @classmethod
    def _query_downloads_sync(
        cls,
        downloader: Optional[str] = None,
        status: Optional[str] = "all",
        hash_value: Optional[str] = None,
        title: Optional[str] = None,
        tag: Optional[str] = None,
    ) -> Dict[str, Any]:
        """
        Query the downloaders and download history synchronously; the whole
        chain is executed inside a worker thread.
        Returns either {"downloads": [...]} or {"message": "..."} on no match.
        """
        download_chain = DownloadChain()

        if hash_value:
            # Exact lookup by info hash, regardless of status.
            torrents = (
                download_chain.list_torrents(downloader=downloader, hashs=[hash_value])
                or []
            )
            if not torrents:
                return {
                    "message": f"未找到hash为 {hash_value} 的下载任务(该任务可能已完成、已删除或不存在)"
                }

            history_map = cls._load_history_map(torrents)
            for torrent in torrents:
                cls._apply_download_history(torrent, history_map.get(torrent.hash))
            filtered_downloads = list(torrents)
        elif title:
            # Partial-title search over torrent title/name and history title.
            all_torrents = cls._get_all_torrents(download_chain, downloader)
            history_map = cls._load_history_map(all_torrents)
            filtered_downloads = []
            title_lower = title.lower()

            for torrent in all_torrents:
                history = history_map.get(torrent.hash)
                matched = title_lower in (torrent.title or "").lower() or title_lower in (
                    getattr(torrent, "name", None) or ""
                ).lower()
                if not matched and history and history.title:
                    matched = title_lower in history.title.lower()

                if not matched:
                    continue

                cls._apply_download_history(torrent, history)
                filtered_downloads.append(torrent)

            if not filtered_downloads:
                return {"message": f"未找到标题包含 '{title}' 的下载任务"}
        else:
            # Status-driven listing (no hash/title lookup).
            if status == "downloading":
                downloads = download_chain.downloading(name=downloader) or []
                filtered_downloads = [
                    dl
                    for dl in downloads
                    if not downloader or dl.downloader == downloader
                ]
            else:
                all_torrents = cls._get_all_torrents(download_chain, downloader)
                filtered_downloads = []
                for torrent in all_torrents:
                    if downloader and torrent.downloader != downloader:
                        continue
                    if status == "completed" and torrent.state not in [
                        "seeding",
                        "completed",
                    ]:
                        continue
                    if status == "paused" and torrent.state != "paused":
                        continue
                    filtered_downloads.append(torrent)

            history_map = cls._load_history_map(filtered_downloads)
            for torrent in filtered_downloads:
                cls._apply_download_history(torrent, history_map.get(torrent.hash))

        # Tag filtering applies after all of the lookups above.
        if tag and filtered_downloads:
            tag_lower = tag.lower()
            filtered_downloads = [
                d for d in filtered_downloads if d.tags and tag_lower in d.tags.lower()
            ]
            if not filtered_downloads:
                return {"message": f"未找到标签包含 '{tag}' 的下载任务"}

        if not filtered_downloads:
            return {"message": "未找到相关下载任务"}

        return {"downloads": filtered_downloads}

    def get_tool_message(self, **kwargs) -> Optional[str]:
        """Generate a friendly progress message from the query parameters."""
        downloader = kwargs.get("downloader")
        status = kwargs.get("status", "all")
        hash_value = kwargs.get("hash")
        title = kwargs.get("title")

        parts = ["查询下载任务"]

        if downloader:
            parts.append(f"下载器: {downloader}")

        if status != "all":
            status_map = {"downloading": "下载中", "completed": "已完成", "paused": "已暂停"}
            parts.append(f"状态: {status_map.get(status, status)}")

        if hash_value:
            parts.append(f"Hash: {hash_value[:8]}...")
        elif title:
            parts.append(f"标题: {title}")

        tag = kwargs.get("tag")
        if tag:
            parts.append(f"标签: {tag}")

        return " | ".join(parts) if len(parts) > 1 else parts[0]

    async def run(self, downloader: Optional[str] = None,
                  status: Optional[str] = "all",
                  hash: Optional[str] = None,
                  title: Optional[str] = None,
                  tag: Optional[str] = None, **kwargs) -> str:
        """Run the query in a worker thread and render at most 20 simplified results as JSON."""
        logger.info(f"执行工具: {self.name}, 参数: downloader={downloader}, status={status}, hash={hash}, title={title}, tag={tag}")
        try:
            payload = await self.run_blocking(
                "downloader",
                self._query_downloads_sync,
                downloader,
                status,
                hash,
                title,
                tag,
            )
            if payload.get("message"):
                return payload["message"]

            filtered_downloads = payload.get("downloads") or []
            if filtered_downloads:
                # Cap the output at 20 results.
                total_count = len(filtered_downloads)
                limited_downloads = filtered_downloads[:20]
                # Trim each result down to the key fields only.
                simplified_downloads = []
                for d in limited_downloads:
                    simplified = {
                        "downloader": d.downloader,
                        "hash": d.hash,
                        "title": d.title,
                        "name": getattr(d, "name", None),
                        "year": getattr(d, "year", None),
                        "season_episode": getattr(d, "season_episode", None),
                        "size": d.size,
                        "progress": self._format_progress(d.progress),
                        "state": d.state,
                        "upspeed": getattr(d, "upspeed", None),
                        "dlspeed": getattr(d, "dlspeed", None),
                        "tags": d.tags,
                        "left_time": getattr(d, "left_time", None)
                    }
                    # Trim the media field as well.
                    media = getattr(d, "media", None)
                    if media:
                        simplified["media"] = {
                            "tmdbid": media.get("tmdbid"),
                            "type": media_type_to_agent(media.get("type")),
                            "title": media.get("title"),
                            "season": media.get("season"),
                            "episode": media.get("episode")
                        }
                    simplified_downloads.append(simplified)
                result_json = json.dumps(simplified_downloads, ensure_ascii=False, indent=2)
                # If the result set was truncated, say so up front.
                if total_count > 20:
                    return f"注意:查询结果共找到 {total_count} 条,为节省上下文空间,仅显示前 20 条结果。\n\n{result_json}"

                # For hash/title lookups, add an explicit status preamble.
                if hash:
                    return f"找到hash为 {hash} 的下载任务:\n\n{result_json}"
                elif title:
                    return f"找到 {total_count} 个标题包含 '{title}' 的下载任务:\n\n{result_json}"

                return result_json
            return "未找到相关下载任务"
        except Exception as e:
            logger.error(f"查询下载失败: {e}", exc_info=True)
            return f"查询下载时发生错误: {str(e)}"
|
||||
42
app/agent/tools/impl/query_downloaders.py
Normal file
42
app/agent/tools/impl/query_downloaders.py
Normal file
@@ -0,0 +1,42 @@
|
||||
"""查询下载器工具"""
|
||||
|
||||
import json
|
||||
from typing import Optional, Type
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from app.agent.tools.base import MoviePilotTool
|
||||
from app.db.systemconfig_oper import SystemConfigOper
|
||||
from app.log import logger
|
||||
from app.schemas.types import SystemConfigKey
|
||||
|
||||
|
||||
class QueryDownloadersInput(BaseModel):
    """Input schema for the downloader configuration query tool."""

    # Rationale the agent must supply for invoking the tool.
    explanation: str = Field(..., description="Clear explanation of why this tool is being used in the current context")
|
||||
|
||||
|
||||
class QueryDownloadersTool(MoviePilotTool):
    """Tool that returns the configured downloaders from the system config cache."""

    name: str = "query_downloaders"
    description: str = "Query downloader configuration and list all available downloaders. Shows downloader status, connection details, and configuration settings."
    args_schema: Type[BaseModel] = QueryDownloadersInput

    def get_tool_message(self, **kwargs) -> Optional[str]:
        """Return a short, user-facing description of the query."""
        return "查询下载器配置"

    @staticmethod
    def _load_downloaders_config():
        """Read the downloader configuration from the in-memory config cache."""
        return SystemConfigOper().get(SystemConfigKey.Downloaders)

    async def run(self, **kwargs) -> str:
        """Serialize the downloader configuration as JSON, or report that none exists."""
        logger.info(f"执行工具: {self.name}")
        try:
            config = self._load_downloaders_config()
            if not config:
                return "未配置下载器。"
            return json.dumps(config, ensure_ascii=False, indent=2)
        except Exception as e:
            logger.error(f"查询下载器失败: {e}")
            return f"查询下载器时发生错误: {str(e)}"
|
||||
103
app/agent/tools/impl/query_episode_schedule.py
Normal file
103
app/agent/tools/impl/query_episode_schedule.py
Normal file
@@ -0,0 +1,103 @@
|
||||
"""查询剧集上映时间工具"""
|
||||
|
||||
import json
|
||||
from typing import Optional, Type
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from app.agent.tools.base import MoviePilotTool
|
||||
from app.chain.tmdb import TmdbChain
|
||||
from app.log import logger
|
||||
|
||||
|
||||
class QueryEpisodeScheduleInput(BaseModel):
    """Input schema for the episode schedule query tool."""

    # Rationale the agent must supply for invoking the tool.
    explanation: str = Field(..., description="Clear explanation of why this tool is being used in the current context")
    # TMDB series identifier (see the search_media tool).
    tmdb_id: int = Field(..., description="TMDB ID of the TV series (can be obtained from search_media tool)")
    # Which season's schedule to fetch.
    season: int = Field(..., description="Season number to query")
    # Optional TMDB episode-group override.
    episode_group: Optional[str] = Field(None, description="Episode group ID (optional)")
|
||||
|
||||
|
||||
class QueryEpisodeScheduleTool(MoviePilotTool):
    """Tool that reports per-episode air dates for one season of a TV series."""

    name: str = "query_episode_schedule"
    description: str = "Query TV series episode air dates and schedule. Returns non-duplicated schedule fields, including episode list, air-date statistics, and per-episode metadata. Filters out episodes without air dates."
    args_schema: Type[BaseModel] = QueryEpisodeScheduleInput

    def get_tool_message(self, **kwargs) -> Optional[str]:
        """Describe the query in a short, user-facing message."""
        base = f"查询剧集上映时间: TMDB ID {kwargs.get('tmdb_id')} 第{kwargs.get('season')}季"
        group = kwargs.get("episode_group")
        if group:
            return base + f" (剧集组: {group})"
        return base

    async def run(self, tmdb_id: int, season: int, episode_group: Optional[str] = None, **kwargs) -> str:
        """Fetch the season's episodes from TMDB and return a JSON schedule."""
        logger.info(f"执行工具: {self.name}, 参数: tmdb_id={tmdb_id}, season={season}, episode_group={episode_group}")

        try:
            # Fetch the episode list for the requested season.
            episodes = await TmdbChain().async_tmdb_episodes(
                tmdbid=tmdb_id,
                season=season,
                episode_group=episode_group,
            )

            if not episodes:
                return json.dumps(
                    {
                        "success": False,
                        "message": f"未找到 TMDB ID {tmdb_id} 第{season}季的集信息",
                    },
                    ensure_ascii=False,
                )

            # Keep only episodes that have an air date; collect per-episode metadata.
            episode_list = [
                {
                    "episode_number": ep.episode_number,
                    "name": ep.name,
                    "air_date": ep.air_date,
                    "runtime": ep.runtime,
                    "vote_average": ep.vote_average,
                    "still_path": ep.still_path,
                    "episode_type": ep.episode_type,
                    "season_number": ep.season_number,
                }
                for ep in episodes
                if ep.air_date
            ]

            if not episode_list:
                return json.dumps(
                    {
                        "success": False,
                        "message": f"未找到 TMDB ID {tmdb_id} 第{season}季的播出时间信息(所有集都没有播出日期)",
                    },
                    ensure_ascii=False,
                )

            # Order chronologically, then by episode number.
            episode_list.sort(key=lambda x: (x["air_date"] or "", x["episode_number"] or 0))

            return json.dumps(
                {
                    "season": season,
                    "total_episodes": len(episodes),
                    "episodes_with_air_date": len(episode_list),
                    "episodes": episode_list,
                },
                ensure_ascii=False,
                indent=2,
            )

        except Exception as e:
            logger.error(f"查询剧集上映时间失败: {e}", exc_info=True)
            return json.dumps(
                {
                    "success": False,
                    "message": f"查询剧集上映时间失败: {str(e)}",
                    "tmdb_id": tmdb_id,
                    "season": season,
                },
                ensure_ascii=False,
            )
|
||||
108
app/agent/tools/impl/query_installed_plugins.py
Normal file
108
app/agent/tools/impl/query_installed_plugins.py
Normal file
@@ -0,0 +1,108 @@
|
||||
"""查询已安装插件工具"""
|
||||
|
||||
import json
|
||||
from typing import Optional, Type
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from app.agent.tools.base import MoviePilotTool
|
||||
from app.agent.tools.impl._plugin_tool_utils import (
|
||||
DEFAULT_PLUGIN_CANDIDATE_LIMIT,
|
||||
list_installed_plugins,
|
||||
search_plugin_candidates,
|
||||
summarize_candidates,
|
||||
summarize_plugin,
|
||||
)
|
||||
from app.log import logger
|
||||
|
||||
|
||||
class QueryInstalledPluginsInput(BaseModel):
    """Input schema for the query-installed-plugins tool."""

    # Required free-text justification supplied by the calling agent.
    explanation: str = Field(
        ...,
        description="Clear explanation of why this tool is being used in the current context",
    )
    # Optional filter keyword; when None, all installed plugins are returned.
    query: Optional[str] = Field(
        None,
        description="Optional keyword to filter installed plugins by plugin ID, name, description, or author.",
    )
    # Upper bound on returned plugins; clamped by the tool at run time.
    max_results: Optional[int] = Field(
        DEFAULT_PLUGIN_CANDIDATE_LIMIT,
        description="Maximum number of plugins to return. Defaults to 10.",
    )
|
||||
|
||||
|
||||
class QueryInstalledPluginsTool(MoviePilotTool):
    """Tool that lists installed plugins, optionally filtered by a keyword.

    Returns a JSON string: either the (possibly truncated) full list of
    installed plugins, or only the subset matching ``query``.
    """

    name: str = "query_installed_plugins"
    description: str = (
        "Query installed plugins in MoviePilot. Returns all installed plugins or filters them by keywords. "
        "Use this tool to find the exact plugin_id before uninstall_plugin or other plugin management tools are used."
    )
    require_admin: bool = True
    args_schema: Type[BaseModel] = QueryInstalledPluginsInput

    def get_tool_message(self, **kwargs) -> Optional[str]:
        """Build a user-friendly progress message for this invocation."""
        query = kwargs.get("query")
        if query:
            return f"查询已安装插件: {query}"
        return "查询已安装插件"

    @staticmethod
    def _clamp_results(max_results: Optional[int]) -> int:
        """Clamp the requested result count to the inclusive range [1, 200]."""
        if max_results is None:
            return DEFAULT_PLUGIN_CANDIDATE_LIMIT
        return max(1, min(int(max_results), 200))

    async def run(
        self,
        query: Optional[str] = None,
        max_results: Optional[int] = DEFAULT_PLUGIN_CANDIDATE_LIMIT,
        **kwargs,
    ) -> str:
        """Return installed-plugin information as a JSON string.

        Args:
            query: Optional keyword used to filter plugins.
            max_results: Upper bound on returned plugins (clamped to 1..200).

        Returns:
            JSON text; ``success`` is False when no plugins are installed or
            an unexpected error occurs.
        """
        logger.info(f"执行工具: {self.name}, 参数: query={query}")
        try:
            installed_plugins = list_installed_plugins()
            if not installed_plugins:
                return json.dumps(
                    {"success": False, "message": "当前没有已安装的插件"},
                    ensure_ascii=False,
                )

            limit = self._clamp_results(max_results)
            if query:
                # Keyword path: report total, match count, and whether the
                # returned list was truncated by the limit.
                matches = search_plugin_candidates(query, installed_plugins)
                return json.dumps(
                    {
                        "success": True,
                        "query": query,
                        "total_installed": len(installed_plugins),
                        "match_count": len(matches),
                        "truncated": len(matches) > limit,
                        "plugins": summarize_candidates(matches, limit=limit),
                    },
                    ensure_ascii=False,
                    indent=2,
                )

            # No keyword: return summaries of the first ``limit`` plugins.
            plugin_summaries = [
                summarize_plugin(plugin) for plugin in installed_plugins[:limit]
            ]
            return json.dumps(
                {
                    "success": True,
                    "total_installed": len(installed_plugins),
                    "returned_count": len(plugin_summaries),
                    "truncated": len(installed_plugins) > limit,
                    "plugins": plugin_summaries,
                },
                ensure_ascii=False,
                indent=2,
            )
        except Exception as e:
            logger.error(f"查询已安装插件失败: {e}", exc_info=True)
            return json.dumps(
                {"success": False, "message": f"查询已安装插件时发生错误: {str(e)}"},
                ensure_ascii=False,
            )
|
||||
200
app/agent/tools/impl/query_library_exists.py
Normal file
200
app/agent/tools/impl/query_library_exists.py
Normal file
@@ -0,0 +1,200 @@
|
||||
"""查询媒体库工具"""
|
||||
|
||||
import asyncio
|
||||
import json
|
||||
from collections import OrderedDict
|
||||
from typing import Optional, Type, Any
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from app.agent.tools.base import MoviePilotTool
|
||||
from app.chain.mediaserver import MediaServerChain
|
||||
from app.helper.mediaserver import MediaServerHelper
|
||||
from app.log import logger
|
||||
from app.schemas.types import MediaType, media_type_to_agent
|
||||
|
||||
|
||||
def _sort_seasons(seasons: Optional[dict]) -> dict:
|
||||
"""按季号、集号升序整理季集信息,保证输出稳定。"""
|
||||
if not seasons:
|
||||
return {}
|
||||
|
||||
def _sort_key(value):
|
||||
try:
|
||||
return int(value)
|
||||
except (TypeError, ValueError):
|
||||
return str(value)
|
||||
|
||||
return OrderedDict(
|
||||
(season, sorted(episodes, key=_sort_key))
|
||||
for season, episodes in sorted(seasons.items(), key=lambda item: _sort_key(item[0]))
|
||||
)
|
||||
|
||||
|
||||
def _filter_regular_seasons(seasons: Optional[dict]) -> OrderedDict:
    """Keep only regular seasons (number > 0), dropping specials such as season 0."""
    result = OrderedDict()
    for key, episode_list in _sort_seasons(seasons).items():
        try:
            number = int(key)
        except (TypeError, ValueError):
            # Non-numeric season keys can never be regular seasons; skip them.
            continue
        if number >= 1:
            result[number] = episode_list
    return result
|
||||
|
||||
|
||||
def _build_tv_server_result(existing_seasons: OrderedDict, total_seasons: OrderedDict) -> dict[str, Any]:
|
||||
"""构建单个服务器的电视剧存在性结果。"""
|
||||
seasons_result = OrderedDict()
|
||||
missing_seasons = []
|
||||
all_seasons = sorted(set(total_seasons.keys()) | set(existing_seasons.keys()))
|
||||
|
||||
for season in all_seasons:
|
||||
existing_episodes = existing_seasons.get(season, [])
|
||||
total_episodes = total_seasons.get(season)
|
||||
if total_episodes is not None:
|
||||
missing_episodes = [episode for episode in total_episodes if episode not in existing_episodes]
|
||||
total_episode_count = len(total_episodes)
|
||||
else:
|
||||
missing_episodes = None
|
||||
total_episode_count = None
|
||||
seasons_result[str(season)] = {
|
||||
"existing_episodes": existing_episodes,
|
||||
"total_episodes": total_episode_count,
|
||||
"missing_episodes": missing_episodes
|
||||
}
|
||||
if total_episodes is not None and not existing_episodes:
|
||||
missing_seasons.append(season)
|
||||
|
||||
return {
|
||||
"seasons": seasons_result,
|
||||
"missing_seasons": missing_seasons
|
||||
}
|
||||
|
||||
|
||||
class QueryLibraryExistsInput(BaseModel):
    """Input schema for the query-library-exists tool."""
    # Required free-text justification supplied by the calling agent.
    explanation: str = Field(..., description="Clear explanation of why this tool is being used in the current context")
    # At least one of tmdb_id / douban_id is required (enforced in run()).
    tmdb_id: Optional[int] = Field(None, description="TMDB ID (can be obtained from search_media tool). Either tmdb_id or douban_id must be provided.")
    douban_id: Optional[str] = Field(None, description="Douban ID (can be obtained from search_media tool). Either tmdb_id or douban_id must be provided.")
    media_type: Optional[str] = Field(None, description="Allowed values: movie, tv")
|
||||
|
||||
|
||||
class QueryLibraryExistsTool(MoviePilotTool):
    """Tool that checks whether a media item already exists on the media servers.

    Recognizes the media from a TMDB/Douban ID, then queries every loaded
    media server in parallel; TV results include per-season existing/total/
    missing episode breakdowns.
    """

    name: str = "query_library_exists"
    description: str = "Check whether media already exists in Plex, Emby, or Jellyfin by media ID. Results are grouped by media server; TV results include existing episodes, total episodes, and missing episodes/seasons. Requires tmdb_id or douban_id from search_media."
    args_schema: Type[BaseModel] = QueryLibraryExistsInput

    def get_tool_message(self, **kwargs) -> Optional[str]:
        """Build a user-friendly progress message from the query arguments."""
        tmdb_id = kwargs.get("tmdb_id")
        douban_id = kwargs.get("douban_id")
        media_type = kwargs.get("media_type")

        if tmdb_id:
            message = f"查询媒体库: TMDB={tmdb_id}"
        elif douban_id:
            message = f"查询媒体库: 豆瓣={douban_id}"
        else:
            message = "查询媒体库"
        if media_type:
            message += f" [{media_type}]"
        return message

    @staticmethod
    def _get_media_server_names() -> list[str]:
        """Synchronously read the names of the loaded media servers."""
        return sorted(MediaServerHelper().get_services().keys())

    @staticmethod
    def _query_media_exists(mediainfo, server: Optional[str] = None):
        """Synchronously query one media server (or un-scoped when server is None) for existence info."""
        return MediaServerChain().media_exists(mediainfo=mediainfo, server=server)

    async def run(self, tmdb_id: Optional[int] = None, douban_id: Optional[str] = None,
                  media_type: Optional[str] = None, **kwargs) -> str:
        """Check media existence across all media servers.

        Returns:
            A JSON list with one result dict (title, year, type, per-server
            results) on success, or a plain error/status string otherwise.
        """
        logger.info(f"执行工具: {self.name}, 参数: tmdb_id={tmdb_id}, douban_id={douban_id}, media_type={media_type}")
        try:
            if not tmdb_id and not douban_id:
                return "参数错误:tmdb_id 和 douban_id 至少需要提供一个,请先使用 search_media 工具获取媒体 ID。"

            media_type_enum = None
            if media_type:
                media_type_enum = MediaType.from_agent(media_type)
                if not media_type_enum:
                    return f"错误:无效的媒体类型 '{media_type}',支持的类型:'movie', 'tv'"

            # 1. Recognize the media item from the supplied IDs.
            media_chain = MediaServerChain()
            mediainfo = await media_chain.async_recognize_media(
                tmdbid=tmdb_id,
                doubanid=douban_id,
                mtype=media_type_enum,
            )
            if not mediainfo:
                media_id = f"TMDB={tmdb_id}" if tmdb_id else f"豆瓣={douban_id}"
                return f"未识别到媒体信息: {media_id}"

            # 2. Query every media server for existence info, in parallel.
            server_results = OrderedDict()
            total_seasons = _filter_regular_seasons(mediainfo.seasons)
            service_names = self._get_media_server_names()

            server_checks = await asyncio.gather(
                *[
                    self.run_blocking(
                        "mediaserver",
                        self._query_media_exists,
                        mediainfo,
                        service_name,
                    )
                    for service_name in service_names
                ]
            )

            # asyncio.gather preserves input order, so zip pairs each result
            # with the server it was queried from.
            for service_name, existsinfo in zip(service_names, server_checks):
                if not existsinfo:
                    continue

                if existsinfo.type == MediaType.TV:
                    existing_seasons = _filter_regular_seasons(existsinfo.seasons)
                    server_results[service_name] = _build_tv_server_result(
                        existing_seasons=existing_seasons,
                        total_seasons=total_seasons
                    )
                else:
                    server_results[service_name] = {
                        "exists": True
                    }

            if not server_results:
                # Fallback: one un-scoped query when no per-server hit was found.
                global_existsinfo = await self.run_blocking(
                    "mediaserver", self._query_media_exists, mediainfo, None
                )
                if not global_existsinfo:
                    return "媒体库中未找到相关媒体"

                fallback_server_name = global_existsinfo.server or "local"
                if global_existsinfo.type == MediaType.TV:
                    server_results[fallback_server_name] = _build_tv_server_result(
                        existing_seasons=_filter_regular_seasons(global_existsinfo.seasons),
                        total_seasons=total_seasons
                    )
                else:
                    server_results[fallback_server_name] = {
                        "exists": True
                    }

            # 3. Assemble the unified existence result (no extra detail queries).
            result_dict = {
                "title": mediainfo.title,
                "year": mediainfo.year,
                "type": media_type_to_agent(mediainfo.type),
                "servers": server_results
            }

            return json.dumps([result_dict], ensure_ascii=False)
        except Exception as e:
            logger.error(f"查询媒体库失败: {e}", exc_info=True)
            return f"查询媒体库时发生错误: {str(e)}"
|
||||
141
app/agent/tools/impl/query_library_latest.py
Normal file
141
app/agent/tools/impl/query_library_latest.py
Normal file
@@ -0,0 +1,141 @@
|
||||
"""查询媒体服务器最近入库影片工具"""
|
||||
|
||||
import asyncio
|
||||
import json
|
||||
from typing import Optional, Type
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from app.agent.tools.base import MoviePilotTool
|
||||
from app.chain.mediaserver import MediaServerChain
|
||||
from app.helper.service import ServiceConfigHelper
|
||||
from app.log import logger
|
||||
|
||||
PAGE_SIZE = 20
|
||||
|
||||
|
||||
class QueryLibraryLatestInput(BaseModel):
    """Input schema for the query-library-latest tool."""

    # Required free-text justification supplied by the calling agent.
    explanation: str = Field(
        ...,
        description="Clear explanation of why this tool is being used in the current context",
    )
    # When None, all enabled media servers are queried.
    server: Optional[str] = Field(
        None,
        description="Media server name (optional, if not specified queries all enabled media servers)",
    )
    # 1-based page number; page size is fixed at 20 (PAGE_SIZE).
    page: Optional[int] = Field(
        1, description="Page number for pagination (default: 1, 20 items per page)"
    )
|
||||
|
||||
|
||||
class QueryLibraryLatestTool(MoviePilotTool):
    """Tool that lists the most recently added media-server items, paginated."""

    name: str = "query_library_latest"
    description: str = "Query the latest media items added to the media server (Plex, Emby, Jellyfin). Returns recently added movies and TV series with their titles, images, links, and other metadata. Supports pagination with 20 items per page."
    args_schema: Type[BaseModel] = QueryLibraryLatestInput

    def get_tool_message(self, **kwargs) -> Optional[str]:
        """Build a user-friendly progress message from the query arguments."""
        server = kwargs.get("server")
        page = kwargs.get("page", 1)

        parts = ["查询媒体服务器最近入库影片"]

        if server:
            parts.append(f"服务器: {server}")
        else:
            parts.append("所有服务器")

        parts.append(f"第{page}页")

        return " | ".join(parts)

    @staticmethod
    def _get_enabled_servers() -> list[str]:
        """Synchronously read the names of the enabled media servers."""
        mediaservers = ServiceConfigHelper.get_mediaserver_configs()
        return [ms.name for ms in mediaservers if ms.enabled]

    @staticmethod
    def _load_latest_items(
        server_name: str, count: int, username: Optional[str] = None
    ) -> list[dict]:
        """
        Media server SDK and requests calls are synchronous; this runs in a
        thread pool and converts results into serializable dicts, tagging
        each item with the server it came from.
        """
        latest_items = MediaServerChain().latest(
            server=server_name, count=count, username=username
        )
        if not latest_items:
            return []
        return [
            {
                **item.model_dump(exclude_none=True),
                "server": server_name,
            }
            for item in latest_items
        ]

    async def run(
        self, server: Optional[str] = None, page: Optional[int] = 1, **kwargs
    ) -> str:
        """Return the requested page of recently added items as a message plus JSON."""
        page = max(1, page or 1)
        # To support pagination, fetch enough data up front and slice afterwards.
        fetch_count = page * PAGE_SIZE
        logger.info(f"执行工具: {self.name}, 参数: server={server}, page={page}")
        try:
            # Without an explicit server, query every enabled media server.
            if not server:
                enabled_servers = self._get_enabled_servers()
                if not enabled_servers:
                    return "未找到启用的媒体服务器"
                # NOTE(review): self._username is presumably supplied by the
                # tool base class / session context — confirm.
                server_results = await asyncio.gather(
                    *[
                        self.run_blocking(
                            "mediaserver",
                            self._load_latest_items,
                            server_name,
                            fetch_count,
                            self._username,
                        )
                        for server_name in enabled_servers
                    ]
                )
                # Flatten the per-server lists into one result list.
                results = [
                    item for items in server_results for item in items if items
                ]
            else:
                results = await self.run_blocking(
                    "mediaserver",
                    self._load_latest_items,
                    server,
                    fetch_count,
                    self._username,
                )

            if not results:
                server_info = f"服务器 {server}" if server else "所有服务器"
                return f"未找到 {server_info} 的最近入库影片"

            # Pagination over the combined result list.
            total_count = len(results)
            start = (page - 1) * PAGE_SIZE
            end = start + PAGE_SIZE
            page_results = results[start:end]

            if not page_results:
                total_pages = (total_count + PAGE_SIZE - 1) // PAGE_SIZE
                return f"第 {page} 页没有数据,共 {total_count} 条结果,共 {total_pages} 页。"

            total_pages = (total_count + PAGE_SIZE - 1) // PAGE_SIZE
            payload_msg = f"第 {page}/{total_pages} 页,当前页 {len(page_results)} 条结果,共 {total_count} 条。"
            if page < total_pages:
                payload_msg += f" 可使用 page={page + 1} 获取下一页。"

            result_json = json.dumps(page_results, ensure_ascii=False, indent=2)
            return f"{payload_msg}\n\n{result_json}"

        except Exception as e:
            logger.error(f"查询媒体服务器最近入库影片失败: {e}", exc_info=True)
            return f"查询媒体服务器最近入库影片时发生错误: {str(e)}"
|
||||
113
app/agent/tools/impl/query_market_plugins.py
Normal file
113
app/agent/tools/impl/query_market_plugins.py
Normal file
@@ -0,0 +1,113 @@
|
||||
"""查询插件市场工具"""
|
||||
|
||||
import json
|
||||
from typing import Optional, Type
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from app.agent.tools.base import MoviePilotTool
|
||||
from app.agent.tools.impl._plugin_tool_utils import (
|
||||
DEFAULT_PLUGIN_CANDIDATE_LIMIT,
|
||||
load_market_plugins,
|
||||
search_plugin_candidates,
|
||||
summarize_candidates,
|
||||
summarize_plugin,
|
||||
)
|
||||
from app.log import logger
|
||||
|
||||
|
||||
class QueryMarketPluginsInput(BaseModel):
    """Input schema for the query-market-plugins tool."""

    # Required free-text justification supplied by the calling agent.
    explanation: str = Field(
        ...,
        description="Clear explanation of why this tool is being used in the current context",
    )
    # Optional filter keyword; when None, the full market list is returned.
    query: Optional[str] = Field(
        None,
        description="Optional keyword to filter plugin market results by plugin ID, name, description, or author.",
    )
    # Upper bound on returned plugins; clamped by the tool at run time.
    max_results: Optional[int] = Field(
        DEFAULT_PLUGIN_CANDIDATE_LIMIT,
        description="Maximum number of plugins to return. Defaults to 10.",
    )
    # When True, market caches are refreshed before the query.
    force_refresh: Optional[bool] = Field(
        False,
        description="Whether to refresh plugin market caches before querying.",
    )
|
||||
|
||||
|
||||
class QueryMarketPluginsTool(MoviePilotTool):
    """Tool that lists plugins available in the plugin market, optionally filtered."""

    name: str = "query_market_plugins"
    description: str = (
        "Query available plugins from the plugin market and local plugin repositories. "
        "Can return the full plugin list or filter by keywords before install_plugin is used."
    )
    require_admin: bool = True
    args_schema: Type[BaseModel] = QueryMarketPluginsInput

    def get_tool_message(self, **kwargs) -> Optional[str]:
        """Build a user-friendly progress message for this invocation."""
        query = kwargs.get("query")
        if query:
            return f"查询插件市场: {query}"
        return "查询插件市场全部插件"

    @staticmethod
    def _clamp_results(max_results: Optional[int]) -> int:
        """Clamp the requested result count to the inclusive range [1, 200]."""
        if max_results is None:
            return DEFAULT_PLUGIN_CANDIDATE_LIMIT
        return max(1, min(int(max_results), 200))

    async def run(
        self,
        query: Optional[str] = None,
        max_results: Optional[int] = DEFAULT_PLUGIN_CANDIDATE_LIMIT,
        force_refresh: bool = False,
        **kwargs,
    ) -> str:
        """Return market-plugin information as a JSON string.

        Args:
            query: Optional keyword used to filter market plugins.
            max_results: Upper bound on returned plugins (clamped to 1..200).
            force_refresh: Refresh market caches before querying.

        Returns:
            JSON text; ``success`` is False when the market is empty or an
            unexpected error occurs.
        """
        logger.info(
            f"执行工具: {self.name}, 参数: query={query}, force_refresh={force_refresh}"
        )

        try:
            plugins = await load_market_plugins(force_refresh=force_refresh)
            if not plugins:
                return json.dumps(
                    {"success": False, "message": "当前插件市场没有可用插件"},
                    ensure_ascii=False,
                )

            limit = self._clamp_results(max_results)
            if query:
                # Keyword path: report total, match count, and truncation flag.
                matches = search_plugin_candidates(query, plugins)
                return json.dumps(
                    {
                        "success": True,
                        "query": query,
                        "total_available": len(plugins),
                        "match_count": len(matches),
                        "truncated": len(matches) > limit,
                        "plugins": summarize_candidates(matches, limit=limit),
                    },
                    ensure_ascii=False,
                    indent=2,
                )

            # No keyword: return summaries of the first ``limit`` plugins.
            plugin_summaries = [summarize_plugin(plugin) for plugin in plugins[:limit]]
            return json.dumps(
                {
                    "success": True,
                    "total_available": len(plugins),
                    "returned_count": len(plugin_summaries),
                    "truncated": len(plugins) > limit,
                    "plugins": plugin_summaries,
                },
                ensure_ascii=False,
                indent=2,
            )
        except Exception as e:
            logger.error(f"查询插件市场失败: {e}", exc_info=True)
            return json.dumps(
                {"success": False, "message": f"查询插件市场时发生错误: {str(e)}"},
                ensure_ascii=False,
            )
|
||||
126
app/agent/tools/impl/query_media_detail.py
Normal file
126
app/agent/tools/impl/query_media_detail.py
Normal file
@@ -0,0 +1,126 @@
|
||||
"""查询媒体详情工具"""
|
||||
|
||||
import json
|
||||
from typing import Optional, Type
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from app.agent.tools.base import MoviePilotTool
|
||||
from app.chain.media import MediaChain
|
||||
from app.log import logger
|
||||
from app.schemas.types import MediaType
|
||||
|
||||
|
||||
class QueryMediaDetailInput(BaseModel):
    """Input schema for the query-media-detail tool."""
    # Required free-text justification supplied by the calling agent.
    explanation: str = Field(..., description="Clear explanation of why this tool is being used in the current context")
    # At least one of tmdb_id / douban_id is required (enforced in run()).
    tmdb_id: Optional[int] = Field(None, description="TMDB ID of the media (movie or TV series, can be obtained from search_media tool)")
    douban_id: Optional[str] = Field(None, description="Douban ID of the media (alternative to tmdb_id)")
    media_type: str = Field(..., description="Allowed values: movie, tv")
|
||||
|
||||
|
||||
class QueryMediaDetailTool(MoviePilotTool):
    """Tool that fetches supplementary media details (status, genres, credits, seasons)."""

    name: str = "query_media_detail"
    description: str = "Query supplementary media details from TMDB by ID and media_type. Accepts tmdb_id or douban_id (at least one required). media_type accepts 'movie' or 'tv'. Returns non-duplicated detail fields such as status, genres, directors, actors, and season info for TV series."
    args_schema: Type[BaseModel] = QueryMediaDetailInput

    def get_tool_message(self, **kwargs) -> Optional[str]:
        """Build a user-friendly progress message from the query arguments."""
        tmdb_id = kwargs.get("tmdb_id")
        douban_id = kwargs.get("douban_id")
        if tmdb_id:
            return f"查询媒体详情: TMDB ID {tmdb_id}"
        # NOTE(review): with neither ID provided this renders "豆瓣 ID None";
        # run() rejects that combination before doing any work.
        return f"查询媒体详情: 豆瓣 ID {douban_id}"

    async def run(self, media_type: str, tmdb_id: Optional[int] = None, douban_id: Optional[str] = None, **kwargs) -> str:
        """Return trimmed media details as a JSON string.

        Returns:
            JSON text with status/genres/directors/actors (plus season data
            for TV); on failure a JSON object with ``success: False``.
        """
        logger.info(f"执行工具: {self.name}, 参数: tmdb_id={tmdb_id}, douban_id={douban_id}, media_type={media_type}")

        if tmdb_id is None and douban_id is None:
            return json.dumps({
                "success": False,
                "message": "必须提供 tmdb_id 或 douban_id 之一"
            }, ensure_ascii=False)

        try:
            media_chain = MediaChain()

            media_type_enum = MediaType.from_agent(media_type)
            if not media_type_enum:
                return json.dumps({
                    "success": False,
                    "message": f"无效的媒体类型 '{media_type}',支持的类型:'movie', 'tv'"
                }, ensure_ascii=False)

            mediainfo = await media_chain.async_recognize_media(tmdbid=tmdb_id, doubanid=douban_id, mtype=media_type_enum)

            if not mediainfo:
                id_info = f"TMDB ID {tmdb_id}" if tmdb_id else f"豆瓣 ID {douban_id}"
                return json.dumps({
                    "success": False,
                    "message": f"未找到 {id_info} 的媒体信息"
                }, ensure_ascii=False)

            # Trim genres down to their names only.
            genres = [g.get("name") for g in (mediainfo.genres or []) if g.get("name")]

            # Trim directors down to name and job.
            directors = [
                {
                    "name": d.get("name"),
                    "job": d.get("job")
                }
                for d in (mediainfo.directors or [])
                if d.get("name")
            ]

            # Trim actors down to name and character.
            actors = [
                {
                    "name": a.get("name"),
                    "character": a.get("character")
                }
                for a in (mediainfo.actors or [])
                if a.get("name")
            ]

            # Base detail payload shared by movies and TV series.
            result = {
                "status": mediainfo.status,
                "genres": genres,
                "directors": directors,
                "actors": actors
            }

            # TV series additionally get season-level information.
            if mediainfo.type == MediaType.TV:
                # Keep only a basic summary per season.
                season_info = [
                    {
                        "season_number": s.get("season_number"),
                        "name": s.get("name"),
                        "episode_count": s.get("episode_count"),
                        "air_date": s.get("air_date")
                    }
                    for s in (mediainfo.season_info or [])
                    if s.get("season_number") is not None
                ]

                result.update({
                    "number_of_seasons": mediainfo.number_of_seasons,
                    "number_of_episodes": mediainfo.number_of_episodes,
                    "first_air_date": mediainfo.first_air_date,
                    "last_air_date": mediainfo.last_air_date,
                    "season_info": season_info
                })

            return json.dumps(result, ensure_ascii=False, indent=2)

        except Exception as e:
            error_message = f"查询媒体详情失败: {str(e)}"
            logger.error(f"查询媒体详情失败: {e}", exc_info=True)
            return json.dumps({
                "success": False,
                "message": error_message,
                "tmdb_id": tmdb_id,
                "douban_id": douban_id
            }, ensure_ascii=False)
|
||||
75
app/agent/tools/impl/query_personas.py
Normal file
75
app/agent/tools/impl/query_personas.py
Normal file
@@ -0,0 +1,75 @@
|
||||
"""查询可用人格工具。"""
|
||||
|
||||
import json
|
||||
from typing import Optional, Type
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from app.agent.runtime import agent_runtime_manager
|
||||
from app.agent.tools.base import MoviePilotTool
|
||||
from app.log import logger
|
||||
|
||||
|
||||
class QueryPersonasInput(BaseModel):
    """Input schema for the query-personas tool."""

    # Required free-text justification supplied by the calling agent.
    explanation: str = Field(
        ...,
        description="Clear explanation of why this tool is being used in the current context",
    )
    # Optional filter keyword; matched against id/label/description/aliases.
    query: Optional[str] = Field(
        None,
        description=(
            "Optional search keyword for persona_id, label, description, or aliases. "
            "Use this when the user asks for a certain speaking style but the exact persona name is unknown."
        ),
    )
|
||||
|
||||
|
||||
class QueryPersonasTool(MoviePilotTool):
    """List the available personas and report which one is currently active."""

    name: str = "query_personas"
    description: str = (
        "List all available personas (人格) and show which one is currently active. "
        "Use this before switching persona when the user asks for a different speaking style but does not name "
        "an exact persona_id. The result includes persona_id, label, description, aliases, and whether it is active."
    )
    args_schema: Type[BaseModel] = QueryPersonasInput

    def get_tool_message(self, **kwargs) -> Optional[str]:
        """Build a user-friendly progress message for this invocation."""
        keyword = kwargs.get("query")
        return f"查询人格列表: {keyword}" if keyword else "查询人格列表"

    @staticmethod
    def _matches(persona: dict, needle: str) -> bool:
        """Case-insensitive substring match over id, label, description and aliases."""
        if needle in persona["persona_id"].casefold():
            return True
        if needle in persona["label"].casefold():
            return True
        if needle in persona["description"].casefold():
            return True
        return any(needle in alias.casefold() for alias in persona["aliases"])

    async def run(self, query: Optional[str] = None, **kwargs) -> str:
        """Return the persona list (optionally filtered) as a JSON string."""
        logger.info("执行工具: %s, 参数: query=%s", self.name, query)
        try:
            runtime_config = agent_runtime_manager.load_runtime_config()
            personas = runtime_config.list_personas()

            if query:
                needle = query.strip().casefold()
                personas = [p for p in personas if self._matches(p, needle)]

            return json.dumps(
                {
                    "active_persona": runtime_config.active_persona,
                    "count": len(personas),
                    "personas": personas,
                },
                ensure_ascii=False,
                indent=2,
            )
        except Exception as e:  # noqa: BLE001
            logger.error("查询人格列表失败: %s", e, exc_info=True)
            return json.dumps(
                {
                    "success": False,
                    "message": f"查询人格列表时发生错误: {str(e)}",
                },
                ensure_ascii=False,
            )
|
||||
116
app/agent/tools/impl/query_plugin_capabilities.py
Normal file
116
app/agent/tools/impl/query_plugin_capabilities.py
Normal file
@@ -0,0 +1,116 @@
|
||||
"""查询插件能力工具"""
|
||||
|
||||
import json
|
||||
from typing import Optional, Type
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from app.agent.tools.base import MoviePilotTool
|
||||
from app.core.plugin import PluginManager
|
||||
from app.log import logger
|
||||
|
||||
|
||||
class QueryPluginCapabilitiesInput(BaseModel):
    """Input schema for the query-plugin-capabilities tool."""

    # Required free-text justification supplied by the calling agent.
    explanation: str = Field(
        ...,
        description="Clear explanation of why this tool is being used in the current context",
    )
    # When None, capabilities of all running plugins are returned.
    plugin_id: Optional[str] = Field(
        None,
        description="Optional plugin ID to query capabilities for a specific plugin. "
        "If not provided, returns capabilities of all running plugins. "
        "Use query_installed_plugins tool to get the plugin IDs first.",
    )
|
||||
|
||||
|
||||
class QueryPluginCapabilitiesTool(MoviePilotTool):
    """Tool that reports the commands, actions, and scheduled services of running plugins."""

    name: str = "query_plugin_capabilities"
    description: str = (
        "Query the capabilities of installed plugins, including supported commands and scheduled services. "
        "Commands are slash-commands (e.g. /xxx) that can be executed via the run_slash_command tool. "
        "Scheduled services are periodic tasks that can be triggered via the run_scheduler tool. "
        "Optionally specify a plugin_id to query a specific plugin, or omit to query all running plugins."
    )
    require_admin: bool = True
    args_schema: Type[BaseModel] = QueryPluginCapabilitiesInput

    def get_tool_message(self, **kwargs) -> Optional[str]:
        """Build a user-friendly progress message."""
        plugin_id = kwargs.get("plugin_id")
        if plugin_id:
            return f"查询插件 {plugin_id} 的能力"
        return "查询所有插件的能力"

    @staticmethod
    def _load_plugin_capabilities(plugin_id: Optional[str] = None) -> dict:
        """Read in-memory capability info exposed by the running plugin instances.

        Returns a dict with up to three keys — "commands", "actions",
        "services" — each present only when the plugin(s) registered
        something of that kind.
        """
        plugin_manager = PluginManager()
        result = {}

        # Slash-commands registered by the plugin(s); optional "data" is kept
        # only when present.
        commands = plugin_manager.get_plugin_commands(pid=plugin_id)
        if commands:
            result["commands"] = [
                {
                    "cmd": cmd.get("cmd"),
                    "desc": cmd.get("desc"),
                    "plugin_id": cmd.get("pid"),
                    **({"data": cmd.get("data")} if cmd.get("data") else {}),
                }
                for cmd in commands
            ]

        # Actions registered by the plugin(s), grouped per plugin.
        actions = plugin_manager.get_plugin_actions(pid=plugin_id)
        if actions:
            actions_list = []
            for action_group in actions:
                actions_list.append(
                    {
                        "plugin_id": action_group.get("plugin_id"),
                        "plugin_name": action_group.get("plugin_name"),
                        "actions": [
                            {
                                "id": action.get("id"),
                                "name": action.get("name"),
                            }
                            for action in action_group.get("actions", [])
                        ],
                    }
                )
            result["actions"] = actions_list

        # Scheduled services; trigger objects and kwargs are stringified so
        # the result stays JSON-serializable.
        services = plugin_manager.get_plugin_services(pid=plugin_id)
        if services:
            services_list = []
            for svc in services:
                svc_info = {
                    "id": svc.get("id"),
                    "name": svc.get("name"),
                }
                trigger = svc.get("trigger")
                if trigger:
                    svc_info["trigger"] = str(trigger)
                svc_kwargs = svc.get("kwargs")
                if svc_kwargs:
                    svc_info["trigger_kwargs"] = {
                        k: str(v) for k, v in svc_kwargs.items()
                    }
                services_list.append(svc_info)
            result["services"] = services_list

        return result

    async def run(self, plugin_id: Optional[str] = None, **kwargs) -> str:
        """Return the plugin capability summary as JSON, or a status string."""
        logger.info(f"执行工具: {self.name}, 参数: plugin_id={plugin_id}")
        try:
            result = self._load_plugin_capabilities(plugin_id)
            if not result:
                if plugin_id:
                    return f"插件 {plugin_id} 没有注册任何命令、动作或定时服务"
                return "当前没有运行中的插件注册了命令、动作或定时服务"

            return json.dumps(result, ensure_ascii=False, indent=2)
        except Exception as e:
            logger.error(f"查询插件能力失败: {e}", exc_info=True)
            return f"查询插件能力时发生错误: {str(e)}"
|
||||
88
app/agent/tools/impl/query_plugin_config.py
Normal file
88
app/agent/tools/impl/query_plugin_config.py
Normal file
@@ -0,0 +1,88 @@
|
||||
"""查询插件配置工具"""
|
||||
|
||||
import json
|
||||
from typing import Optional, Type
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from app.agent.tools.base import MoviePilotTool
|
||||
from app.agent.tools.impl._plugin_tool_utils import get_plugin_snapshot
|
||||
from app.core.plugin import PluginManager
|
||||
from app.log import logger
|
||||
|
||||
|
||||
class QueryPluginConfigInput(BaseModel):
    """Input schema for the query-plugin-config tool."""

    # Required free-text justification supplied by the calling agent.
    explanation: str = Field(
        ...,
        description="Clear explanation of why this tool is being used in the current context",
    )
    # ID of the plugin whose saved configuration should be returned.
    plugin_id: str = Field(
        ...,
        description="The plugin ID to query. Use query_installed_plugins first to discover valid plugin IDs.",
    )
|
||||
|
||||
|
||||
class QueryPluginConfigTool(MoviePilotTool):
    """Tool that reads an installed plugin's saved configuration."""

    name: str = "query_plugin_config"
    description: str = (
        "Query the saved configuration of an installed plugin. "
        "Returns the current saved config and, when available, the plugin's default config model. "
        "Use this before update_plugin_config so you only change the intended keys."
    )
    # Plugin configuration may contain secrets; restrict to admin users.
    require_admin: bool = True
    args_schema: Type[BaseModel] = QueryPluginConfigInput

    def get_tool_message(self, **kwargs) -> Optional[str]:
        """Build a user-friendly progress message for the tool call."""
        plugin_id = kwargs.get("plugin_id", "")
        return f"查询插件配置: {plugin_id}"

    @staticmethod
    def _query_plugin_config(plugin_id: str) -> str:
        """
        Read the plugin's saved configuration and, when possible, attach its
        default configuration model so later edits can target exact keys.

        Returns:
            A JSON string; on an unknown plugin ID, a JSON error envelope.
        """
        plugin_info = get_plugin_snapshot(plugin_id)
        if not plugin_info:
            return json.dumps(
                {
                    "success": False,
                    "message": f"插件 {plugin_id} 不存在,请先使用 query_installed_plugins 查询有效插件 ID",
                },
                ensure_ascii=False,
            )

        plugin_manager = PluginManager()
        saved_config = plugin_manager.get_plugin_config(plugin_id) or {}
        result = {
            "success": True,
            **plugin_info,
            "config": saved_config,
        }

        # get_form()'s model is usually the exact config structure the plugin
        # expects, which makes it a good key reference before modification.
        plugin_instance = plugin_manager.running_plugins.get(plugin_id)
        if plugin_instance and hasattr(plugin_instance, "get_form"):
            try:
                _form_schema, default_model = plugin_instance.get_form()
                if default_model is not None:
                    result["default_model"] = default_model
            except Exception as err:
                # Best-effort only: a broken form must not fail the config read.
                logger.warning(f"读取插件 {plugin_id} 默认配置模型失败: {err}")

        # default=str keeps non-JSON-serializable values in the default model
        # from breaking serialization.
        return json.dumps(result, ensure_ascii=False, indent=2, default=str)

    async def run(self, plugin_id: str, **kwargs) -> str:
        """Entry point: query config for *plugin_id* and return a JSON string."""
        logger.info(f"执行工具: {self.name}, 参数: plugin_id={plugin_id}")

        try:
            # Plugin config comes from the in-memory config cache and the
            # running plugin instance, so a direct read is sufficient.
            return self._query_plugin_config(plugin_id)
        except Exception as e:
            logger.error(f"查询插件配置失败: {e}", exc_info=True)
            return json.dumps(
                {"success": False, "message": f"查询插件配置时发生错误: {str(e)}"},
                ensure_ascii=False,
            )
|
||||
158
app/agent/tools/impl/query_plugin_data.py
Normal file
158
app/agent/tools/impl/query_plugin_data.py
Normal file
@@ -0,0 +1,158 @@
|
||||
"""查询插件数据工具"""
|
||||
|
||||
import json
|
||||
from typing import Optional, Type
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from app.agent.tools.base import MoviePilotTool
|
||||
from app.agent.tools.impl._plugin_tool_utils import (
|
||||
PLUGIN_DATA_KEY_PREVIEW_LIMIT,
|
||||
build_preview_payload,
|
||||
get_plugin_snapshot,
|
||||
)
|
||||
from app.db.plugindata_oper import PluginDataOper
|
||||
from app.log import logger
|
||||
|
||||
|
||||
class QueryPluginDataInput(BaseModel):
    """Input schema for the query-plugin-data tool."""

    # Rationale the agent must supply for invoking the tool.
    explanation: str = Field(
        ...,
        description="Clear explanation of why this tool is being used in the current context",
    )
    # Target plugin identifier; discoverable via query_installed_plugins.
    plugin_id: str = Field(
        ...,
        description="The plugin ID to query. Use query_installed_plugins first to discover valid plugin IDs.",
    )
    # Single data key; all entries are returned when omitted.
    key: Optional[str] = Field(
        None,
        description="Optional plugin data key. If omitted, returns all plugin data entries for the plugin.",
    )
    # Preview-size cap applied when data is too large to return verbatim.
    max_chars: Optional[int] = Field(
        None,
        description="Maximum number of preview characters to return when plugin data is too large. Default 12000, capped at 50000.",
    )
|
||||
|
||||
|
||||
class QueryPluginDataTool(MoviePilotTool):
    """Tool that reads an installed plugin's persisted data items."""

    name: str = "query_plugin_data"
    description: str = (
        "Query persisted data of an installed plugin. "
        "Optionally specify a key to read a single data item; otherwise all plugin data entries are returned. "
        "When the result is too large, the tool automatically truncates it and returns a preview instead."
    )
    # Plugin data may contain sensitive state; restrict to admin users.
    require_admin: bool = True
    args_schema: Type[BaseModel] = QueryPluginDataInput

    def get_tool_message(self, **kwargs) -> Optional[str]:
        """Build a user-friendly progress message for the tool call."""
        plugin_id = kwargs.get("plugin_id", "")
        key = kwargs.get("key")
        if key:
            return f"查询插件数据: {plugin_id}.{key}"
        return f"查询插件全部数据: {plugin_id}"

    @staticmethod
    async def _query_plugin_data(
        plugin_id: str, key: Optional[str] = None, max_chars: Optional[int] = None
    ) -> str:
        """
        Query plugin data through the async ORM directly, avoiding an extra
        thread-pool hop.

        Args:
            plugin_id: Plugin identifier to read data for.
            key: Optional single data key; all entries are read when omitted.
            max_chars: Optional preview-size cap forwarded to
                build_preview_payload.

        Returns:
            A JSON string describing the data (possibly a truncated preview).
        """
        plugin_info = get_plugin_snapshot(plugin_id)
        if not plugin_info:
            return json.dumps(
                {
                    "success": False,
                    "message": f"插件 {plugin_id} 不存在,请先使用 query_installed_plugins 查询有效插件 ID",
                },
                ensure_ascii=False,
            )

        plugin_data_oper = PluginDataOper()
        if key:
            value = await plugin_data_oper.async_get_data(plugin_id, key)
            if value is None:
                # Reported as "query succeeded, key absent", not as an error.
                return json.dumps(
                    {
                        "success": True,
                        **plugin_info,
                        "key": key,
                        "found": False,
                        "message": f"插件 {plugin_id} 没有数据项 {key}",
                    },
                    ensure_ascii=False,
                    indent=2,
                )

            truncated, total_chars, returned_chars, preview = build_preview_payload(
                value, max_chars
            )
            result = {
                "success": True,
                **plugin_info,
                "key": key,
                "found": True,
                "truncated": truncated,
                "total_chars": total_chars,
                "returned_chars": returned_chars,
            }
            if truncated:
                result["value_preview"] = preview
                result["message"] = "插件数据内容过大,已截断预览"
            else:
                result["value"] = value
            return json.dumps(result, ensure_ascii=False, indent=2, default=str)

        # No key given: list every persisted data row for the plugin.
        rows = await plugin_data_oper.async_get_data_all(plugin_id) or []
        data_map = {row.key: row.value for row in rows}
        keys = list(data_map.keys())
        # Only a bounded number of key names is echoed back to the agent.
        key_preview = keys[:PLUGIN_DATA_KEY_PREVIEW_LIMIT]

        result = {
            "success": True,
            **plugin_info,
            "count": len(data_map),
            "keys": key_preview,
            "keys_truncated": len(keys) > PLUGIN_DATA_KEY_PREVIEW_LIMIT,
        }

        if not data_map:
            result["data"] = {}
            result["truncated"] = False
            return json.dumps(result, ensure_ascii=False, indent=2, default=str)

        truncated, total_chars, returned_chars, preview = build_preview_payload(
            data_map, max_chars
        )
        result["truncated"] = truncated
        result["total_chars"] = total_chars
        result["returned_chars"] = returned_chars
        if truncated:
            result["data_preview"] = preview
            result["message"] = "插件数据内容过大,已截断。请传入 key 精确查询单个数据项。"
        else:
            result["data"] = data_map
        return json.dumps(result, ensure_ascii=False, indent=2, default=str)

    async def run(
        self,
        plugin_id: str,
        key: Optional[str] = None,
        max_chars: Optional[int] = None,
        **kwargs,
    ) -> str:
        """Entry point: query plugin data and return a JSON string."""
        logger.info(
            f"执行工具: {self.name}, 参数: plugin_id={plugin_id}, key={key}"
        )

        try:
            return await self._query_plugin_data(plugin_id, key, max_chars)
        except Exception as e:
            logger.error(f"查询插件数据失败: {e}", exc_info=True)
            return json.dumps(
                {"success": False, "message": f"查询插件数据时发生错误: {str(e)}"},
                ensure_ascii=False,
            )
|
||||
162
app/agent/tools/impl/query_popular_subscribes.py
Normal file
162
app/agent/tools/impl/query_popular_subscribes.py
Normal file
@@ -0,0 +1,162 @@
|
||||
"""查询热门订阅工具"""
|
||||
|
||||
import json
|
||||
from typing import Optional, Type
|
||||
|
||||
import cn2an
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from app.agent.tools.base import MoviePilotTool
|
||||
from app.core.context import MediaInfo
|
||||
from app.helper.subscribe import SubscribeHelper
|
||||
from app.log import logger
|
||||
from app.schemas.types import MediaType, media_type_to_agent
|
||||
|
||||
|
||||
class QueryPopularSubscribesInput(BaseModel):
    """Input schema for the query-popular-subscribes tool."""

    # Rationale the agent must supply for invoking the tool.
    explanation: str = Field(..., description="Clear explanation of why this tool is being used in the current context")
    # Media category to query: "movie" or "tv".
    media_type: str = Field(..., description="Allowed values: movie, tv")
    page: Optional[int] = Field(1, description="Page number for pagination (default: 1)")
    count: Optional[int] = Field(30, description="Number of items per page (default: 30)")
    # Drop entries with fewer subscribers than this (applied client-side).
    min_sub: Optional[int] = Field(None, description="Minimum number of subscribers filter (optional, e.g., 5)")
    genre_id: Optional[int] = Field(None, description="Filter by genre ID (optional)")
    min_rating: Optional[float] = Field(None, description="Minimum rating filter (optional, e.g., 7.5)")
    max_rating: Optional[float] = Field(None, description="Maximum rating filter (optional, e.g., 10.0)")
    sort_type: Optional[str] = Field(None, description="Sort type (optional, e.g., 'count', 'rating')")
|
||||
|
||||
|
||||
class QueryPopularSubscribesTool(MoviePilotTool):
    """Tool that lists popular subscriptions shared by other users."""

    name: str = "query_popular_subscribes"
    description: str = "Query popular subscriptions based on user shared data. Shows media with the most subscribers, supports filtering by genre, rating, minimum subscribers, and pagination."
    args_schema: Type[BaseModel] = QueryPopularSubscribesInput

    def get_tool_message(self, **kwargs) -> Optional[str]:
        """Build a user-friendly progress message from the query arguments."""
        media_type = kwargs.get("media_type", "")
        # `or 1` guards an explicitly-passed page=None, which would make the
        # `page > 1` comparison below raise a TypeError.
        page = kwargs.get("page") or 1
        min_sub = kwargs.get("min_sub")
        min_rating = kwargs.get("min_rating")
        max_rating = kwargs.get("max_rating")

        parts = [f"查询热门订阅 [{media_type}]"]

        if min_sub:
            parts.append(f"最少订阅: {min_sub}")
        if min_rating:
            parts.append(f"最低评分: {min_rating}")
        if max_rating:
            parts.append(f"最高评分: {max_rating}")
        if page > 1:
            parts.append(f"第{page}页")

        return " | ".join(parts) if len(parts) > 1 else parts[0]

    async def run(self, media_type: str,
                  page: Optional[int] = 1,
                  count: Optional[int] = 30,
                  min_sub: Optional[int] = None,
                  genre_id: Optional[int] = None,
                  min_rating: Optional[float] = None,
                  max_rating: Optional[float] = None,
                  sort_type: Optional[str] = None, **kwargs) -> str:
        """Query popular subscriptions and return a simplified JSON listing.

        Args:
            media_type: "movie" or "tv".
            page / count: pagination controls (defaults 1 / 30).
            min_sub: drop entries with fewer subscribers than this.
            genre_id / min_rating / max_rating / sort_type: forwarded to the
                statistics backend as extra filters.

        Returns:
            A pagination header plus a JSON array of media entries, or a
            plain error/empty message.
        """
        logger.info(
            f"执行工具: {self.name}, 参数: media_type={media_type}, page={page}, count={count}, min_sub={min_sub}, "
            f"genre_id={genre_id}, min_rating={min_rating}, max_rating={max_rating}, sort_type={sort_type}")

        try:
            # Normalize pagination inputs to sane values.
            if page is None or page < 1:
                page = 1
            if count is None or count < 1:
                count = 30
            media_type_enum = MediaType.from_agent(media_type)
            if not media_type_enum:
                return f"错误:无效的媒体类型 '{media_type}',支持的类型:'movie', 'tv'"

            subscribe_helper = SubscribeHelper()
            subscribes = await subscribe_helper.async_get_statistic(
                stype=media_type_enum.to_agent(),
                page=page,
                count=count,
                genre_id=genre_id,
                min_rating=min_rating,
                max_rating=max_rating,
                sort_type=sort_type
            )

            if not subscribes:
                return "未找到热门订阅数据(可能订阅统计功能未启用)"

            # Convert raw statistics rows into MediaInfo objects and filter.
            ret_medias = []
            for sub in subscribes:
                # Number of subscribers for this media entry
                subscriber_count = sub.get("count", 0)
                # Apply the minimum-subscribers filter when requested
                if min_sub and subscriber_count < min_sub:
                    continue

                media = MediaInfo()
                raw_type = str(sub.get("type") or "").strip().lower()
                if raw_type in ["movie", "电影"]:
                    media.type = MediaType.MOVIE
                elif raw_type in ["tv", "电视剧"]:
                    media.type = MediaType.TV
                else:
                    # Skip rows with an unrecognized type so one dirty row
                    # cannot fail the whole batch.
                    logger.warning(f"跳过未知媒体类型: {sub.get('type')}")
                    continue
                media.tmdb_id = sub.get("tmdbid")
                # Build the display title, appending a Chinese season suffix
                # for seasons beyond the first.
                title = sub.get("name")
                season = sub.get("season")
                try:
                    season_no = int(season) if season else 0
                except (TypeError, ValueError):
                    # Non-numeric season data: keep the bare title instead of
                    # letting one dirty row abort the whole batch.
                    logger.warning(f"跳过无效季号: {season}")
                    season_no = 0
                if season_no > 1 and media.tmdb_id:
                    # Convert the Arabic numeral into a lowercase Chinese numeral
                    season_str = cn2an.an2cn(season, "low")
                    title = f"{title} 第{season_str}季"
                media.title = title
                media.year = sub.get("year")
                media.douban_id = sub.get("doubanid")
                media.bangumi_id = sub.get("bangumiid")
                media.tvdb_id = sub.get("tvdbid")
                media.imdb_id = sub.get("imdbid")
                media.season = sub.get("season")
                media.vote_average = sub.get("vote")
                media.poster_path = sub.get("poster")
                media.backdrop_path = sub.get("backdrop")
                # popularity is repurposed to carry the subscriber count
                media.popularity = subscriber_count
                ret_medias.append(media)

            if not ret_medias:
                return "未找到符合条件的热门订阅"

            # Reduce each media item to the key fields the agent needs.
            simplified_medias = []
            for media in ret_medias:
                media_dict = media.to_dict()
                simplified = {
                    "type": media_type_to_agent(media_dict.get("type")),
                    "title": media_dict.get("title"),
                    "year": media_dict.get("year"),
                    "tmdb_id": media_dict.get("tmdb_id"),
                    "douban_id": media_dict.get("douban_id"),
                    "bangumi_id": media_dict.get("bangumi_id"),
                    "tvdb_id": media_dict.get("tvdb_id"),
                    "imdb_id": media_dict.get("imdb_id"),
                    "season": media_dict.get("season"),
                    "vote_average": media_dict.get("vote_average"),
                    "poster_path": media_dict.get("poster_path"),
                    "backdrop_path": media_dict.get("backdrop_path"),
                    "popularity": media_dict.get("popularity"),  # subscriber count
                    "subscriber_count": media_dict.get("popularity")  # explicit alias
                }
                simplified_medias.append(simplified)

            result_json = json.dumps(simplified_medias, ensure_ascii=False, indent=2)

            pagination_info = f"第 {page} 页,每页 {count} 条,共 {len(simplified_medias)} 条结果"

            return f"{pagination_info}\n\n{result_json}"
        except Exception as e:
            logger.error(f"查询热门订阅失败: {e}", exc_info=True)
            return f"查询热门订阅时发生错误: {str(e)}"
|
||||
104
app/agent/tools/impl/query_rule_groups.py
Normal file
104
app/agent/tools/impl/query_rule_groups.py
Normal file
@@ -0,0 +1,104 @@
|
||||
"""查询过滤规则组工具。"""
|
||||
|
||||
import json
|
||||
from typing import Optional, Type, List
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from app.agent.tools.base import MoviePilotTool
|
||||
from app.agent.tools.impl._filter_rule_utils import (
|
||||
collect_rule_group_usages,
|
||||
get_rule_groups,
|
||||
serialize_rule_group,
|
||||
RULE_STRING_SYNTAX,
|
||||
)
|
||||
from app.log import logger
|
||||
|
||||
|
||||
class QueryRuleGroupsInput(BaseModel):
|
||||
"""查询规则组工具的输入参数模型"""
|
||||
|
||||
explanation: str = Field(
|
||||
...,
|
||||
description="Clear explanation of why this tool is being used in the current context",
|
||||
)
|
||||
group_names: Optional[List[str]] = Field(
|
||||
None,
|
||||
description="Optional list of rule group names to query. If omitted, return all rule groups.",
|
||||
)
|
||||
include_usage: bool = Field(
|
||||
True,
|
||||
description="Whether to include where each rule group is referenced by global settings or subscriptions.",
|
||||
)
|
||||
|
||||
|
||||
class QueryRuleGroupsTool(MoviePilotTool):
|
||||
name: str = "query_rule_groups"
|
||||
description: str = (
|
||||
"Query filter rule groups (过滤规则组 / 优先级规则组). "
|
||||
"Each rule group contains a rule_string made of built-in rules and/or custom rules. "
|
||||
"Inside one level use '&', '|', '!' and optional parentheses; use '>' between levels. "
|
||||
"Levels are evaluated from left to right, and the first matched level wins. "
|
||||
"The result includes parsed levels and syntax guidance so the agent can learn existing patterns before writing a new rule group."
|
||||
)
|
||||
args_schema: Type[BaseModel] = QueryRuleGroupsInput
|
||||
|
||||
def get_tool_message(self, **kwargs) -> Optional[str]:
|
||||
group_names = kwargs.get("group_names") or []
|
||||
if group_names:
|
||||
return f"查询规则组: {', '.join(group_names)}"
|
||||
return "查询所有规则组"
|
||||
|
||||
async def run(
|
||||
self,
|
||||
group_names: Optional[List[str]] = None,
|
||||
include_usage: bool = True,
|
||||
**kwargs,
|
||||
) -> str:
|
||||
logger.info(f"执行工具: {self.name}")
|
||||
|
||||
try:
|
||||
rule_groups = get_rule_groups()
|
||||
if group_names:
|
||||
target_names = set(group_names)
|
||||
rule_groups = [
|
||||
group for group in rule_groups if group.name in target_names
|
||||
]
|
||||
|
||||
usage_map = {}
|
||||
if include_usage:
|
||||
usage_map = await collect_rule_group_usages(
|
||||
[group.name for group in rule_groups if group.name]
|
||||
)
|
||||
|
||||
serialized = [
|
||||
serialize_rule_group(group, usage_map.get(group.name))
|
||||
for group in rule_groups
|
||||
]
|
||||
message = (
|
||||
f"找到 {len(serialized)} 个规则组"
|
||||
if serialized
|
||||
else "未找到任何规则组"
|
||||
)
|
||||
|
||||
return json.dumps(
|
||||
{
|
||||
"success": True,
|
||||
"message": message,
|
||||
"count": len(serialized),
|
||||
"rule_string_syntax": RULE_STRING_SYNTAX,
|
||||
"rule_groups": serialized,
|
||||
},
|
||||
ensure_ascii=False,
|
||||
indent=2,
|
||||
)
|
||||
except Exception as exc:
|
||||
logger.error(f"查询规则组失败: {exc}", exc_info=True)
|
||||
return json.dumps(
|
||||
{
|
||||
"success": False,
|
||||
"message": f"查询规则组失败: {exc}",
|
||||
"rule_groups": [],
|
||||
},
|
||||
ensure_ascii=False,
|
||||
)
|
||||
55
app/agent/tools/impl/query_schedulers.py
Normal file
55
app/agent/tools/impl/query_schedulers.py
Normal file
@@ -0,0 +1,55 @@
|
||||
"""查询定时服务工具"""
|
||||
|
||||
import json
|
||||
from typing import Optional, Type
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from app.agent.tools.base import MoviePilotTool
|
||||
from app.log import logger
|
||||
|
||||
|
||||
class QuerySchedulersInput(BaseModel):
    """Input schema for the query-schedulers tool."""

    # Rationale the agent must supply for invoking the tool.
    explanation: str = Field(..., description="Clear explanation of why this tool is being used in the current context")
|
||||
|
||||
|
||||
class QuerySchedulersTool(MoviePilotTool):
    """Tool that lists registered scheduler jobs."""

    name: str = "query_schedulers"
    description: str = "Query scheduled tasks and list all available scheduler jobs. Shows job status, next run time, and provider information."
    args_schema: Type[BaseModel] = QuerySchedulersInput

    def get_tool_message(self, **kwargs) -> Optional[str]:
        """Build a user-friendly progress message for the tool call."""
        return "查询定时服务"

    async def run(self, **kwargs) -> str:
        """List scheduler jobs as JSON, capping the output at 30 entries.

        Returns:
            A JSON array of jobs (id, name, provider, status, next_run),
            a not-found message, or an error message.
        """
        logger.info(f"执行工具: {self.name}")
        try:
            # Imported lazily so the scheduler module is only loaded on use.
            from app.scheduler import Scheduler

            schedulers = Scheduler().list()
            if not schedulers:
                return "未找到定时服务"

            # Flatten job objects into JSON-serializable dicts.
            schedulers_list = [
                {
                    "id": s.id,
                    "name": s.name,
                    "provider": s.provider,
                    "status": s.status,
                    "next_run": s.next_run,
                }
                for s in schedulers
            ]
            total_count = len(schedulers_list)
            # Cap at 30 entries to keep the agent context small. Serialize
            # only once, after truncation, instead of dumping the full list
            # first and discarding that work.
            if total_count > 30:
                limited_json = json.dumps(
                    schedulers_list[:30], ensure_ascii=False, indent=2
                )
                return f"注意:查询结果共找到 {total_count} 条,为节省上下文空间,仅显示前 30 条结果。\n\n{limited_json}"
            return json.dumps(schedulers_list, ensure_ascii=False, indent=2)
        except Exception as e:
            logger.error(f"查询定时服务失败: {e}", exc_info=True)
            return f"查询定时服务时发生错误: {str(e)}"
|
||||
169
app/agent/tools/impl/query_site_userdata.py
Normal file
169
app/agent/tools/impl/query_site_userdata.py
Normal file
@@ -0,0 +1,169 @@
|
||||
"""查询站点用户数据工具"""
|
||||
|
||||
import json
|
||||
from typing import Optional, Type
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from app.agent.tools.base import MoviePilotTool
|
||||
from app.db import AsyncSessionFactory
|
||||
from app.db.models.site import Site
|
||||
from app.db.models.siteuserdata import SiteUserData
|
||||
from app.log import logger
|
||||
|
||||
|
||||
class QuerySiteUserdataInput(BaseModel):
    """Input schema for the query-site-userdata tool."""

    # Rationale the agent must supply for invoking the tool.
    explanation: str = Field(
        ...,
        description="Clear explanation of why this tool is being used in the current context",
    )
    # Numeric site ID as stored in the database.
    site_id: int = Field(
        ...,
        description="The ID of the site to query user data for (can be obtained from query_sites tool)",
    )
    # Specific statistics date; latest data is returned when omitted.
    workdate: Optional[str] = Field(
        None,
        description="Work date to query (optional, format: 'YYYY-MM-DD', if not specified returns latest data)",
    )
|
||||
|
||||
|
||||
class QuerySiteUserdataTool(MoviePilotTool):
    """Tool that reads stored per-site user account statistics."""

    name: str = "query_site_userdata"
    description: str = "Query user data for a specific site including username, user level, upload/download statistics, seeding information, bonus points, and other account details. Supports querying data for a specific date or latest data."
    # Site account data is sensitive; restrict to admin users.
    require_admin: bool = True
    args_schema: Type[BaseModel] = QuerySiteUserdataInput

    def get_tool_message(self, **kwargs) -> Optional[str]:
        """Build a user-friendly progress message from the query arguments."""
        site_id = kwargs.get("site_id")
        workdate = kwargs.get("workdate")

        message = f"查询站点 #{site_id} 的用户数据"
        if workdate:
            message += f" (日期: {workdate})"
        else:
            message += " (最新数据)"

        return message

    async def run(self, site_id: int, workdate: Optional[str] = None, **kwargs) -> str:
        """Query a site's user data and return it as a JSON string.

        Args:
            site_id: Database ID of the site (see the query_sites tool).
            workdate: Optional 'YYYY-MM-DD' date; latest data when omitted.

        Returns:
            A JSON envelope with site identity plus at most one (the newest)
            user-data record, or a JSON error envelope.
        """
        logger.info(
            f"执行工具: {self.name}, 参数: site_id={site_id}, workdate={workdate}"
        )

        try:
            # One async database session covers the whole query.
            async with AsyncSessionFactory() as db:
                # Resolve the site record first
                site = await Site.async_get(db, site_id)
                if not site:
                    return json.dumps(
                        {"success": False, "message": f"站点不存在: {site_id}"},
                        ensure_ascii=False,
                    )

                # User-data rows are keyed by the site's domain, not its ID.
                user_data_list = await SiteUserData.async_get_by_domain(
                    db, domain=site.domain, workdate=workdate
                )

                if not user_data_list:
                    return json.dumps(
                        {
                            "success": False,
                            "message": f"站点 {site.name} ({site.domain}) 暂无用户数据",
                            "site_id": site_id,
                            "site_name": site.name,
                            "site_domain": site.domain,
                            "workdate": workdate,
                        },
                        ensure_ascii=False,
                    )

                # Build the response envelope before flattening each record.
                result = {
                    "success": True,
                    "site_id": site_id,
                    "site_name": site.name,
                    "site_domain": site.domain,
                    "workdate": workdate,
                    "data_count": len(user_data_list),
                    "user_data": [],
                }

                for user_data in user_data_list:
                    # Convert byte counters into GB for readability; raw byte
                    # values are still included alongside.
                    upload_gb = user_data.upload / (1024**3) if user_data.upload else 0
                    download_gb = (
                        user_data.download / (1024**3) if user_data.download else 0
                    )
                    seeding_size_gb = (
                        user_data.seeding_size / (1024**3)
                        if user_data.seeding_size
                        else 0
                    )
                    leeching_size_gb = (
                        user_data.leeching_size / (1024**3)
                        if user_data.leeching_size
                        else 0
                    )

                    user_data_dict = {
                        "domain": user_data.domain,
                        "name": user_data.name,
                        "username": user_data.username,
                        "userid": user_data.userid,
                        "user_level": user_data.user_level,
                        "join_at": user_data.join_at,
                        "bonus": user_data.bonus,
                        "upload": user_data.upload,
                        "upload_gb": round(upload_gb, 2),
                        "download": user_data.download,
                        "download_gb": round(download_gb, 2),
                        # None/0 ratio is normalized to 0
                        "ratio": round(user_data.ratio, 2) if user_data.ratio else 0,
                        "seeding": int(user_data.seeding) if user_data.seeding else 0,
                        "leeching": int(user_data.leeching)
                        if user_data.leeching
                        else 0,
                        "seeding_size": user_data.seeding_size,
                        "seeding_size_gb": round(seeding_size_gb, 2),
                        "leeching_size": user_data.leeching_size,
                        "leeching_size_gb": round(leeching_size_gb, 2),
                        "seeding_info": user_data.seeding_info
                        if user_data.seeding_info
                        else [],
                        "message_unread": user_data.message_unread,
                        "message_unread_contents": user_data.message_unread_contents
                        if user_data.message_unread_contents
                        else [],
                        "err_msg": user_data.err_msg,
                        "updated_day": user_data.updated_day,
                        "updated_time": user_data.updated_time,
                    }
                    result["user_data"].append(user_data_dict)

                # When multiple rows exist, keep only the newest one
                # (sorted by update day/time, descending).
                if len(result["user_data"]) > 1:
                    result["user_data"].sort(
                        key=lambda x: (
                            x.get("updated_day", ""),
                            x.get("updated_time", ""),
                        ),
                        reverse=True,
                    )
                    result["message"] = (
                        f"找到 {len(result['user_data'])} 条数据,显示最新的一条"
                    )
                    result["user_data"] = [result["user_data"][0]]

                return json.dumps(result, ensure_ascii=False, indent=2)

        except Exception as e:
            error_message = f"查询站点用户数据失败: {str(e)}"
            logger.error(f"查询站点用户数据失败: {e}", exc_info=True)
            return json.dumps(
                {"success": False, "message": error_message, "site_id": site_id},
                ensure_ascii=False,
            )
|
||||
92
app/agent/tools/impl/query_sites.py
Normal file
92
app/agent/tools/impl/query_sites.py
Normal file
@@ -0,0 +1,92 @@
|
||||
"""查询站点工具"""
|
||||
|
||||
import json
|
||||
from typing import Optional, Type
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from app.agent.tools.base import MoviePilotTool
|
||||
from app.db.site_oper import SiteOper
|
||||
from app.log import logger
|
||||
|
||||
|
||||
class QuerySitesInput(BaseModel):
    """Input schema for the query-sites tool."""

    # Rationale the agent must supply for invoking the tool.
    explanation: str = Field(
        ...,
        description="Clear explanation of why this tool is being used in the current context",
    )
    # Status filter: 'active' / 'inactive' / 'all' (default).
    status: Optional[str] = Field(
        "all",
        description="Filter sites by status: 'active' for enabled sites, 'inactive' for disabled sites, 'all' for all sites",
    )
    # Case-insensitive substring match on the site name.
    name: Optional[str] = Field(
        None, description="Filter sites by name (partial match, optional)"
    )
|
||||
|
||||
|
||||
class QuerySitesTool(MoviePilotTool):
    """Tool that lists configured sites with optional status/name filters."""

    name: str = "query_sites"
    description: str = "Query site status and list all configured sites. Shows site name, domain, status, priority, and basic configuration. Site priority (pri): smaller values have higher priority (e.g., pri=1 has higher priority than pri=10)."
    # Site records include credentials; restrict to admin users.
    require_admin: bool = True
    args_schema: Type[BaseModel] = QuerySitesInput

    def get_tool_message(self, **kwargs) -> Optional[str]:
        """Build a user-friendly progress message from the query arguments."""
        status = kwargs.get("status", "all")
        name = kwargs.get("name")

        parts = ["查询站点"]

        if status != "all":
            status_map = {"active": "已启用", "inactive": "已禁用"}
            parts.append(f"状态: {status_map.get(status, status)}")

        if name:
            parts.append(f"名称: {name}")

        return " | ".join(parts) if len(parts) > 1 else parts[0]

    async def run(
        self, status: Optional[str] = "all", name: Optional[str] = None, **kwargs
    ) -> str:
        """Query configured sites and return a JSON listing.

        Args:
            status: 'active', 'inactive' or 'all' (default).
            name: Optional case-insensitive substring filter on the name.
        """
        logger.info(f"执行工具: {self.name}, 参数: status={status}, name={name}")
        try:
            site_oper = SiteOper()
            # Fetch all sites (already ordered by priority)
            sites = await site_oper.async_list()
            filtered_sites = []
            for site in sites:
                # Status filter
                if status == "active" and not site.is_active:
                    continue
                if status == "inactive" and site.is_active:
                    continue
                # Name filter (case-insensitive partial match)
                if name and name.lower() not in (site.name or "").lower():
                    continue
                filtered_sites.append(site)
            if filtered_sites:
                # Reduce each site record to the key fields.
                simplified_sites = []
                for s in filtered_sites:
                    simplified = {
                        "id": s.id,
                        "name": s.name,
                        "domain": s.domain,
                        "url": s.url,
                        "pri": s.pri,
                        "is_active": s.is_active,
                        # NOTE(review): cookie is a login credential; consider
                        # redacting it before returning site data to the agent.
                        "cookie": s.cookie,
                        "downloader": s.downloader,
                        "proxy": s.proxy,
                        "timeout": s.timeout,
                    }
                    simplified_sites.append(simplified)
                result_json = json.dumps(simplified_sites, ensure_ascii=False, indent=2)
                return result_json
            return "未找到相关站点"
        except Exception as e:
            logger.error(f"查询站点失败: {e}", exc_info=True)
            return f"查询站点时发生错误: {str(e)}"
|
||||
187
app/agent/tools/impl/query_subscribe_history.py
Normal file
187
app/agent/tools/impl/query_subscribe_history.py
Normal file
@@ -0,0 +1,187 @@
|
||||
"""查询订阅历史工具"""
|
||||
|
||||
import json
|
||||
from typing import Optional, Type
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from app.agent.tools.base import MoviePilotTool
|
||||
from app.db import AsyncSessionFactory
|
||||
from app.db.models.subscribehistory import SubscribeHistory
|
||||
from app.log import logger
|
||||
from app.schemas.types import media_type_to_agent
|
||||
|
||||
PAGE_SIZE = 20
|
||||
|
||||
|
||||
class QuerySubscribeHistoryInput(BaseModel):
    """Input schema for the query-subscribe-history tool."""

    # Rationale the agent must supply for invoking the tool.
    explanation: str = Field(
        ...,
        description="Clear explanation of why this tool is being used in the current context",
    )
    # Media category filter: 'movie', 'tv', or 'all' (default).
    media_type: Optional[str] = Field(
        "all", description="Allowed values: movie, tv, all"
    )
    # Case-insensitive substring match on the media name.
    name: Optional[str] = Field(
        None, description="Filter by media name (partial match, optional)"
    )
    # Pagination is only applied when no name filter is given.
    page: Optional[int] = Field(
        1,
        description="Page number for pagination (default: 1, 20 items per page). Ignored when name filter is provided.",
    )
|
||||
|
||||
|
||||
class QuerySubscribeHistoryTool(MoviePilotTool):
|
||||
name: str = "query_subscribe_history"
|
||||
description: str = "Query subscription history records. Shows completed subscriptions with their details including name, type, rating, completion date, and other subscription information. Supports filtering by media type and name. Supports pagination with 20 records per page."
|
||||
args_schema: Type[BaseModel] = QuerySubscribeHistoryInput
|
||||
|
||||
def get_tool_message(self, **kwargs) -> Optional[str]:
    """Build a user-friendly progress message from the query arguments."""
    media_type = kwargs.get("media_type", "all")
    name = kwargs.get("name")
    page = kwargs.get("page", 1)

    parts = ["查询订阅历史"]

    if media_type != "all":
        parts.append(f"类型: {media_type}")
    if name:
        parts.append(f"名称: {name}")
    else:
        # Page info only shown when not filtering by name, since name
        # queries ignore pagination.
        parts.append(f"第{page}页")

    return " | ".join(parts)
|
||||
|
||||
async def run(
|
||||
self,
|
||||
media_type: Optional[str] = "all",
|
||||
name: Optional[str] = None,
|
||||
page: Optional[int] = 1,
|
||||
**kwargs,
|
||||
) -> str:
|
||||
page = max(1, page or 1)
|
||||
logger.info(
|
||||
f"执行工具: {self.name}, 参数: media_type={media_type}, name={name}, page={page}"
|
||||
)
|
||||
|
||||
try:
|
||||
if media_type not in ["all", "movie", "tv"]:
|
||||
return f"错误:无效的媒体类型 '{media_type}',支持的类型:'movie', 'tv', 'all'"
|
||||
|
||||
# 获取数据库会话
|
||||
async with AsyncSessionFactory() as db:
|
||||
if name:
|
||||
# 有名称过滤时,获取足够多的记录在内存中过滤,不分页
|
||||
fetch_count = 500
|
||||
if media_type == "all":
|
||||
movie_history = await SubscribeHistory.async_list_by_type(
|
||||
db, mtype="movie", page=1, count=fetch_count
|
||||
)
|
||||
tv_history = await SubscribeHistory.async_list_by_type(
|
||||
db, mtype="tv", page=1, count=fetch_count
|
||||
)
|
||||
all_history = list(movie_history) + list(tv_history)
|
||||
all_history.sort(key=lambda x: x.date or "", reverse=True)
|
||||
else:
|
||||
all_history = list(
|
||||
await SubscribeHistory.async_list_by_type(
|
||||
db, mtype=media_type, page=1, count=fetch_count
|
||||
)
|
||||
)
|
||||
|
||||
# 按名称过滤
|
||||
name_lower = name.lower()
|
||||
filtered_history = [
|
||||
record
|
||||
for record in all_history
|
||||
if record.name and name_lower in record.name.lower()
|
||||
]
|
||||
|
||||
if not filtered_history:
|
||||
return "未找到相关订阅历史记录"
|
||||
|
||||
# 名称过滤时直接返回所有匹配结果,不分页
|
||||
simplified_records = self._simplify_records(filtered_history)
|
||||
result_json = json.dumps(
|
||||
simplified_records, ensure_ascii=False, indent=2
|
||||
)
|
||||
return result_json
|
||||
else:
|
||||
# 无名称过滤时,直接利用数据库分页
|
||||
if media_type == "all":
|
||||
movie_history = await SubscribeHistory.async_list_by_type(
|
||||
db, mtype="movie", page=1, count=page * PAGE_SIZE
|
||||
)
|
||||
tv_history = await SubscribeHistory.async_list_by_type(
|
||||
db, mtype="tv", page=1, count=page * PAGE_SIZE
|
||||
)
|
||||
all_history = list(movie_history) + list(tv_history)
|
||||
all_history.sort(key=lambda x: x.date or "", reverse=True)
|
||||
filtered_history = all_history
|
||||
else:
|
||||
filtered_history = list(
|
||||
await SubscribeHistory.async_list_by_type(
|
||||
db, mtype=media_type, page=1, count=page * PAGE_SIZE
|
||||
)
|
||||
)
|
||||
|
||||
if not filtered_history:
|
||||
return "未找到相关订阅历史记录"
|
||||
|
||||
# 分页切片
|
||||
total_count = len(filtered_history)
|
||||
start = (page - 1) * PAGE_SIZE
|
||||
end = start + PAGE_SIZE
|
||||
page_records = filtered_history[start:end]
|
||||
|
||||
if not page_records:
|
||||
return f"第 {page} 页没有数据。"
|
||||
|
||||
simplified_records = self._simplify_records(page_records)
|
||||
result_json = json.dumps(
|
||||
simplified_records, ensure_ascii=False, indent=2
|
||||
)
|
||||
|
||||
has_more = total_count > end
|
||||
payload_msg = f"第 {page} 页,当前页 {len(simplified_records)} 条结果。"
|
||||
if has_more:
|
||||
payload_msg += (
|
||||
f" 可能有更多数据,可使用 page={page + 1} 获取下一页。"
|
||||
)
|
||||
|
||||
return f"{payload_msg}\n\n{result_json}"
|
||||
except Exception as e:
|
||||
logger.error(f"查询订阅历史失败: {e}", exc_info=True)
|
||||
return f"查询订阅历史时发生错误: {str(e)}"
|
||||
|
||||
@staticmethod
|
||||
def _simplify_records(records) -> list:
|
||||
"""转换为字典格式,只保留关键信息"""
|
||||
simplified_records = []
|
||||
for record in records:
|
||||
simplified = {
|
||||
"id": record.id,
|
||||
"name": record.name,
|
||||
"year": record.year,
|
||||
"type": media_type_to_agent(record.type),
|
||||
"season": record.season,
|
||||
"tmdbid": record.tmdbid,
|
||||
"doubanid": record.doubanid,
|
||||
"bangumiid": record.bangumiid,
|
||||
"poster": record.poster,
|
||||
"vote": record.vote,
|
||||
"total_episode": record.total_episode,
|
||||
"date": record.date,
|
||||
"username": record.username,
|
||||
}
|
||||
if record.filter:
|
||||
simplified["filter"] = record.filter
|
||||
if record.quality:
|
||||
simplified["quality"] = record.quality
|
||||
if record.resolution:
|
||||
simplified["resolution"] = record.resolution
|
||||
simplified_records.append(simplified)
|
||||
return simplified_records
|
||||
112
app/agent/tools/impl/query_subscribe_shares.py
Normal file
112
app/agent/tools/impl/query_subscribe_shares.py
Normal file
@@ -0,0 +1,112 @@
|
||||
"""查询订阅分享工具"""
|
||||
|
||||
import json
|
||||
from typing import Optional, Type
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from app.agent.tools.base import MoviePilotTool
|
||||
from app.helper.subscribe import SubscribeHelper
|
||||
from app.log import logger
|
||||
|
||||
|
||||
class QuerySubscribeSharesInput(BaseModel):
    """Input parameter model for the subscription-shares query tool."""
    explanation: str = Field(..., description="Clear explanation of why this tool is being used in the current context")
    name: Optional[str] = Field(None, description="Filter shares by media name (partial match, optional)")
    page: Optional[int] = Field(1, description="Page number for pagination (default: 1)")
    count: Optional[int] = Field(30, description="Number of items per page (default: 30)")
    genre_id: Optional[int] = Field(None, description="Filter by genre ID (optional)")
    min_rating: Optional[float] = Field(None, description="Minimum rating filter (optional, e.g., 7.5)")
    max_rating: Optional[float] = Field(None, description="Maximum rating filter (optional, e.g., 10.0)")
    sort_type: Optional[str] = Field(None, description="Sort type (optional, e.g., 'count', 'rating')")
|
||||
|
||||
|
||||
class QuerySubscribeSharesTool(MoviePilotTool):
    """Query subscriptions shared by other users, with filtering and pagination.

    Returns a Chinese pagination header followed by a JSON array of simplified
    share records, or a Chinese error message on failure.
    """
    name: str = "query_subscribe_shares"
    description: str = "Query shared subscriptions from other users. Shows popular subscriptions shared by the community with filtering and pagination support."
    args_schema: Type[BaseModel] = QuerySubscribeSharesInput

    def get_tool_message(self, **kwargs) -> Optional[str]:
        """Build a friendly status message from the query parameters."""
        name = kwargs.get("name")
        page = kwargs.get("page", 1)
        min_rating = kwargs.get("min_rating")
        max_rating = kwargs.get("max_rating")

        parts = ["查询订阅分享"]

        if name:
            parts.append(f"名称: {name}")
        if min_rating:
            parts.append(f"最低评分: {min_rating}")
        if max_rating:
            parts.append(f"最高评分: {max_rating}")
        if page > 1:
            parts.append(f"第{page}页")

        return " | ".join(parts) if len(parts) > 1 else parts[0]

    async def run(self, name: Optional[str] = None,
                  page: Optional[int] = 1,
                  count: Optional[int] = 30,
                  genre_id: Optional[int] = None,
                  min_rating: Optional[float] = None,
                  max_rating: Optional[float] = None,
                  sort_type: Optional[str] = None, **kwargs) -> str:
        """Fetch shared subscriptions via SubscribeHelper and return JSON text.

        :param name: optional partial-match name filter (applied server-side)
        :param page/count: pagination, normalized to sane minimums below
        :param genre_id/min_rating/max_rating/sort_type: optional filters
        """
        logger.info(
            f"执行工具: {self.name}, 参数: name={name}, page={page}, count={count}, genre_id={genre_id}, "
            f"min_rating={min_rating}, max_rating={max_rating}, sort_type={sort_type}")

        try:
            # Normalize pagination parameters.
            if page is None or page < 1:
                page = 1
            if count is None or count < 1:
                count = 30

            subscribe_helper = SubscribeHelper()
            shares = await subscribe_helper.async_get_shares(
                name=name,
                page=page,
                count=count,
                genre_id=genre_id,
                min_rating=min_rating,
                max_rating=max_rating,
                sort_type=sort_type
            )

            if not shares:
                return "未找到订阅分享数据(可能订阅分享功能未启用)"

            # Keep only the key fields to keep the payload compact.
            simplified_shares = []
            for share in shares:
                simplified = {
                    "id": share.get("id"),
                    "name": share.get("name"),
                    "year": share.get("year"),
                    "type": share.get("type"),
                    "season": share.get("season"),
                    "tmdbid": share.get("tmdbid"),
                    "doubanid": share.get("doubanid"),
                    "bangumiid": share.get("bangumiid"),
                    "poster": share.get("poster"),
                    "vote": share.get("vote"),
                    # BUGFIX: "description" was checked below for truncation but
                    # never copied into the simplified dict, leaving the
                    # truncation as dead code. Copy it so long descriptions are
                    # actually trimmed.
                    "description": share.get("description"),
                    "share_title": share.get("share_title"),
                    "share_comment": share.get("share_comment"),
                    "share_user": share.get("share_user"),
                    "fork_count": share.get("fork_count", 0)
                }
                # Truncate overly long descriptions to 200 characters.
                if simplified.get("description") and len(simplified["description"]) > 200:
                    simplified["description"] = simplified["description"][:200] + "..."
                simplified_shares.append(simplified)

            result_json = json.dumps(simplified_shares, ensure_ascii=False, indent=2)

            pagination_info = f"第 {page} 页,每页 {count} 条,共 {len(simplified_shares)} 条结果"

            return f"{pagination_info}\n\n{result_json}"
        except Exception as e:
            logger.error(f"查询订阅分享失败: {e}", exc_info=True)
            return f"查询订阅分享时发生错误: {str(e)}"
|
||||
158
app/agent/tools/impl/query_subscribes.py
Normal file
158
app/agent/tools/impl/query_subscribes.py
Normal file
@@ -0,0 +1,158 @@
|
||||
"""查询订阅工具"""
|
||||
|
||||
import json
|
||||
from typing import Optional, Type
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from app.agent.tools.base import MoviePilotTool
|
||||
from app.db.subscribe_oper import SubscribeOper
|
||||
from app.log import logger
|
||||
from app.schemas.subscribe import Subscribe as SubscribeSchema
|
||||
from app.schemas.types import MediaType
|
||||
|
||||
# Number of subscriptions returned per page by QuerySubscribesTool.
PAGE_SIZE = 100

# Whitelist of Subscribe schema fields included in the tool's JSON output;
# anything not listed here is stripped before serialization.
QUERY_SUBSCRIBE_OUTPUT_FIELDS = [
    "id",
    "name",
    "year",
    "type",
    "season",
    "total_episode",
    "start_episode",
    "lack_episode",
    "filter",
    "include",
    "exclude",
    "quality",
    "resolution",
    "effect",
    "state",
    "last_update",
    "sites",
    "downloader",
    "best_version",
    "save_path",
    "custom_words",
    "media_category",
    "filter_groups",
    "episode_group",
]
|
||||
|
||||
|
||||
class QuerySubscribesInput(BaseModel):
    """Input parameter model for the subscription query tool."""

    explanation: str = Field(
        ...,
        description="Clear explanation of why this tool is being used in the current context",
    )
    status: Optional[str] = Field(
        "all",
        description="Filter subscriptions by status: 'R' for enabled subscriptions, 'S' for paused ones, 'all' for all subscriptions",
    )
    media_type: Optional[str] = Field(
        "all", description="Allowed values: movie, tv, all"
    )
    tmdb_id: Optional[int] = Field(
        None,
        description="Filter by TMDB ID to check if a specific media is already subscribed",
    )
    douban_id: Optional[str] = Field(
        None,
        description="Filter by Douban ID to check if a specific media is already subscribed",
    )
    page: Optional[int] = Field(
        1, description="Page number for pagination (default: 1, 100 items per page)"
    )
|
||||
|
||||
|
||||
class QuerySubscribesTool(MoviePilotTool):
    """List user subscriptions with status/type/ID filters and pagination.

    Returns a Chinese pagination header followed by a JSON array of full
    subscription parameters, or a Chinese error message on failure.
    """
    name: str = "query_subscribes"
    description: str = "Query subscription status and list user subscriptions. Returns full subscription parameters for each matched subscription. Supports pagination with 100 items per page."
    args_schema: Type[BaseModel] = QuerySubscribesInput

    def get_tool_message(self, **kwargs) -> Optional[str]:
        """Build a friendly status message from the query parameters."""
        status = kwargs.get("status", "all")
        media_type = kwargs.get("media_type", "all")
        page = kwargs.get("page", 1)

        parts = ["查询订阅"]

        # Describe the status filter, if any.
        if status != "all":
            status_map = {"R": "已启用", "S": "已暂停"}
            parts.append(f"状态: {status_map.get(status, status)}")

        # Describe the media-type filter, if any.
        if media_type != "all":
            parts.append(f"类型: {media_type}")

        parts.append(f"第{page}页")

        return " | ".join(parts)

    async def run(
        self,
        status: Optional[str] = "all",
        media_type: Optional[str] = "all",
        tmdb_id: Optional[int] = None,
        douban_id: Optional[str] = None,
        page: Optional[int] = 1,
        **kwargs,
    ) -> str:
        """Query subscriptions, filter in memory, paginate, and serialize.

        :param status: 'R' (enabled), 'S' (paused) or 'all'
        :param media_type: 'movie', 'tv' or 'all'
        :param tmdb_id/douban_id: optional exact-match ID filters
        :param page: 1-based page number (PAGE_SIZE items per page)
        """
        # Normalize page: None/0/negative collapse to page 1.
        page = max(1, page or 1)
        logger.info(
            f"执行工具: {self.name}, 参数: status={status}, media_type={media_type}, tmdb_id={tmdb_id}, douban_id={douban_id}, page={page}"
        )
        try:
            # Resolve the media-type filter once, before the loop: the original
            # code re-invoked MediaType.from_agent() for every subscription.
            expected_type_value = None
            if media_type != "all":
                resolved_type = MediaType.from_agent(media_type)
                if not resolved_type:
                    return f"错误:无效的媒体类型 '{media_type}',支持的类型:'movie', 'tv', 'all'"
                expected_type_value = resolved_type.value

            subscribe_oper = SubscribeOper()
            subscribes = await subscribe_oper.async_list()
            filtered_subscribes = []
            for sub in subscribes:
                if status != "all" and sub.state != status:
                    continue
                if expected_type_value is not None and sub.type != expected_type_value:
                    continue
                if tmdb_id is not None and sub.tmdbid != tmdb_id:
                    continue
                if douban_id is not None and sub.doubanid != douban_id:
                    continue
                filtered_subscribes.append(sub)
            if filtered_subscribes:
                total_count = len(filtered_subscribes)
                # Paginate the filtered list.
                start = (page - 1) * PAGE_SIZE
                end = start + PAGE_SIZE
                page_subscribes = filtered_subscribes[start:end]

                if not page_subscribes:
                    total_pages = (total_count + PAGE_SIZE - 1) // PAGE_SIZE
                    return f"第 {page} 页没有数据,共 {total_count} 条结果,共 {total_pages} 页。"

                # Serialize via the Subscribe schema, keeping only whitelisted
                # fields and dropping None values to keep the payload compact.
                full_subscribes = [
                    SubscribeSchema.model_validate(s, from_attributes=True).model_dump(
                        include=set(QUERY_SUBSCRIBE_OUTPUT_FIELDS), exclude_none=True
                    )
                    for s in page_subscribes
                ]
                result_json = json.dumps(full_subscribes, ensure_ascii=False, indent=2)

                total_pages = (total_count + PAGE_SIZE - 1) // PAGE_SIZE
                payload_msg = f"第 {page}/{total_pages} 页,当前页 {len(page_subscribes)} 条结果,共 {total_count} 条。"
                if page < total_pages:
                    payload_msg += f" 可使用 page={page + 1} 获取下一页。"

                return f"{payload_msg}\n\n{result_json}"
            return "未找到相关订阅"
        except Exception as e:
            logger.error(f"查询订阅失败: {e}", exc_info=True)
            return f"查询订阅时发生错误: {str(e)}"
|
||||
134
app/agent/tools/impl/query_transfer_history.py
Normal file
134
app/agent/tools/impl/query_transfer_history.py
Normal file
@@ -0,0 +1,134 @@
|
||||
"""查询整理历史记录工具"""
|
||||
|
||||
import json
|
||||
from typing import Optional, Type
|
||||
|
||||
import jieba
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from app.agent.tools.base import MoviePilotTool
|
||||
from app.db import AsyncSessionFactory
|
||||
from app.db.models.transferhistory import TransferHistory
|
||||
from app.log import logger
|
||||
from app.schemas.types import media_type_to_agent
|
||||
|
||||
|
||||
class QueryTransferHistoryInput(BaseModel):
    """Input parameter model for the transfer-history query tool."""
    explanation: str = Field(..., description="Clear explanation of why this tool is being used in the current context")
    title: Optional[str] = Field(None, description="Search by title (optional, supports partial match)")
    status: Optional[str] = Field("all",
                                  description="Filter by status: 'success' for successful transfers, 'failed' for failed transfers, 'all' for all records (default: 'all')")
    # BUGFIX: the description previously claimed 30 records per page, but
    # QueryTransferHistoryTool.run uses count = 50 — keep the advertised page
    # size in sync with the implementation so the agent paginates correctly.
    page: Optional[int] = Field(1, description="Page number for pagination (default: 1, each page contains 50 records)")
|
||||
|
||||
|
||||
class QueryTransferHistoryTool(MoviePilotTool):
    # Agent tool: expose file-transfer (organize) history to the LLM with
    # title search, status filter, and page-based pagination.
    name: str = "query_transfer_history"
    description: str = "Query file transfer history records. Shows transfer status, source and destination paths, media information, and transfer details. Supports filtering by title and status."
    args_schema: Type[BaseModel] = QueryTransferHistoryInput

    def get_tool_message(self, **kwargs) -> Optional[str]:
        """Build a friendly status message from the query parameters."""
        title = kwargs.get("title")
        status = kwargs.get("status", "all")
        page = kwargs.get("page", 1)

        parts = ["查询整理历史"]

        if title:
            parts.append(f"标题: {title}")
        if status != "all":
            status_map = {"success": "成功", "failed": "失败"}
            parts.append(f"状态: {status_map.get(status, status)}")
        if page > 1:
            parts.append(f"第{page}页")

        return " | ".join(parts) if len(parts) > 1 else parts[0]

    async def run(self, title: Optional[str] = None,
                  status: Optional[str] = "all",
                  page: Optional[int] = 1, **kwargs) -> str:
        """Query transfer history and return a paginated JSON summary.

        :param title: optional title search; segmented with jieba into a
                      SQL-LIKE pattern for fuzzy matching
        :param status: 'success', 'failed' or 'all'
        :param page: 1-based page number
        """
        logger.info(f"执行工具: {self.name}, 参数: title={title}, status={status}, page={page}")

        try:
            # Map the status string to the tri-state DB filter
            # (None = no filter, True = success, False = failed).
            status_bool = None
            if status == "success":
                status_bool = True
            elif status == "failed":
                status_bool = False

            # Normalize the page number.
            if page is None or page < 1:
                page = 1

            # Records per page.
            count = 50

            # Acquire an async database session
            async with AsyncSessionFactory() as db:
                # Title search path
                if title:
                    # Segment the title with jieba (HMM off) and join the words
                    # with '%' to form a fuzzy SQL-LIKE search pattern.
                    words = jieba.cut(title, HMM=False)
                    title_search = "%".join(words)
                    # Query matching records
                    result = await TransferHistory.async_list_by_title(
                        db, title=title_search, page=page, count=count, status=status_bool
                    )
                    total = await TransferHistory.async_count_by_title(
                        db, title=title_search, status=status_bool
                    )
                else:
                    # No title filter: plain paged listing
                    result = await TransferHistory.async_list_by_page(
                        db, page=page, count=count, status=status_bool
                    )
                    total = await TransferHistory.async_count(db, status=status_bool)

                if not result:
                    return "未找到相关整理历史记录"

                # Convert ORM records to dicts, keeping only key fields.
                simplified_records = []
                for record in result:
                    simplified = {
                        "id": record.id,
                        "title": record.title,
                        "year": record.year,
                        # Convert the stored media type to the agent-facing string.
                        "type": media_type_to_agent(record.type),
                        "category": record.category,
                        "seasons": record.seasons,
                        "episodes": record.episodes,
                        "src": record.src,
                        "dest": record.dest,
                        "mode": record.mode,
                        "status": "成功" if record.status else "失败",
                        "date": record.date,
                        "downloader": record.downloader,
                        "download_hash": record.download_hash
                    }
                    # Attach the error message for failed transfers.
                    if not record.status and record.errmsg:
                        simplified["errmsg"] = record.errmsg
                    # Attach media IDs when available.
                    if record.tmdbid:
                        simplified["tmdbid"] = record.tmdbid
                    if record.imdbid:
                        simplified["imdbid"] = record.imdbid
                    if record.doubanid:
                        simplified["doubanid"] = record.doubanid
                    simplified_records.append(simplified)

                result_json = json.dumps(simplified_records, ensure_ascii=False, indent=2)

                # Compute the total page count.
                total_pages = (total + count - 1) // count if total > 0 else 1

                # Build the pagination header.
                pagination_info = f"第 {page}/{total_pages} 页,共 {total} 条记录(每页 {count} 条)"

                return f"{pagination_info}\n\n{result_json}"
        except Exception as e:
            logger.error(f"查询整理历史记录失败: {e}", exc_info=True)
            return f"查询整理历史记录时发生错误: {str(e)}"
|
||||
127
app/agent/tools/impl/query_workflows.py
Normal file
127
app/agent/tools/impl/query_workflows.py
Normal file
@@ -0,0 +1,127 @@
|
||||
"""查询工作流工具"""
|
||||
|
||||
import json
|
||||
from typing import Optional, Type
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from app.agent.tools.base import MoviePilotTool
|
||||
from app.db import AsyncSessionFactory
|
||||
from app.db.workflow_oper import WorkflowOper
|
||||
from app.log import logger
|
||||
|
||||
|
||||
class QueryWorkflowsInput(BaseModel):
    """Input parameter model for the workflow query tool."""
    explanation: str = Field(..., description="Clear explanation of why this tool is being used in the current context")
    state: Optional[str] = Field("all", description="Filter workflows by state: 'W' for waiting, 'R' for running, 'P' for paused, 'S' for success, 'F' for failed, 'all' for all workflows (default: 'all')")
    name: Optional[str] = Field(None, description="Filter workflows by name (partial match, optional)")
    trigger_type: Optional[str] = Field("all", description="Filter workflows by trigger type: 'timer' for scheduled, 'event' for event-triggered, 'manual' for manual, 'all' for all types (default: 'all')")
||||
|
||||
|
||||
class QueryWorkflowsTool(MoviePilotTool):
    """List workflows with state / name / trigger-type filters.

    Returns a JSON array of simplified workflow records, or a Chinese
    error message on failure.
    """
    name: str = "query_workflows"
    description: str = "Query workflow list and status. Shows workflow name, description, trigger type, state, execution count, and other workflow details. Supports filtering by state, name, and trigger type."
    args_schema: Type[BaseModel] = QueryWorkflowsInput

    def get_tool_message(self, **kwargs) -> Optional[str]:
        """Build a friendly status message from the query parameters."""
        state = kwargs.get("state", "all")
        name = kwargs.get("name")
        trigger_type = kwargs.get("trigger_type", "all")

        parts = ["查询工作流"]

        if state != "all":
            state_map = {"W": "等待", "R": "运行中", "P": "暂停", "S": "成功", "F": "失败"}
            parts.append(f"状态: {state_map.get(state, state)}")

        if trigger_type != "all":
            trigger_map = {"timer": "定时触发", "event": "事件触发", "manual": "手动触发"}
            parts.append(f"触发类型: {trigger_map.get(trigger_type, trigger_type)}")

        if name:
            parts.append(f"名称: {name}")

        return " | ".join(parts) if len(parts) > 1 else parts[0]

    async def run(self, state: Optional[str] = "all",
                  name: Optional[str] = None,
                  trigger_type: Optional[str] = "all", **kwargs) -> str:
        """Query workflows, filter them in memory, and serialize to JSON.

        :param state: one-letter workflow state code or 'all'
        :param name: optional case-insensitive substring filter on the name
        :param trigger_type: 'timer', 'event', 'manual' or 'all'
        """
        logger.info(f"执行工具: {self.name}, 参数: state={state}, name={name}, trigger_type={trigger_type}")

        try:
            # Acquire an async database session
            async with AsyncSessionFactory() as db:
                workflow_oper = WorkflowOper(db)
                workflows = await workflow_oper.async_list()

                # Filter workflows in memory.
                filtered_workflows = []
                for wf in workflows:
                    # Filter by state.
                    if state != "all" and wf.state != state:
                        continue

                    # Filter by trigger type; a None trigger_type on the
                    # workflow counts as 'timer' (the default).
                    if trigger_type != "all":
                        if trigger_type == "timer" and wf.trigger_type not in ["timer", None]:
                            continue
                        elif trigger_type == "event" and wf.trigger_type != "event":
                            continue
                        elif trigger_type == "manual" and wf.trigger_type != "manual":
                            continue

                    # Filter by name (case-insensitive partial match).
                    if name and wf.name and name.lower() not in wf.name.lower():
                        continue

                    filtered_workflows.append(wf)

                if not filtered_workflows:
                    return "未找到相关工作流"

                # PERF: these lookup tables are loop-invariant; the original
                # code rebuilt both dict literals on every iteration.
                # Human-readable state descriptions.
                state_map = {
                    "W": "等待",
                    "R": "运行中",
                    "P": "暂停",
                    "S": "成功",
                    "F": "失败"
                }
                # Human-readable trigger-type descriptions.
                trigger_type_map = {
                    "timer": "定时触发",
                    "event": "事件触发",
                    "manual": "手动触发"
                }

                # Convert to dicts, keeping only key fields.
                simplified_workflows = []
                for wf in filtered_workflows:
                    state_desc = state_map.get(wf.state, wf.state)
                    # None falls back to the default "定时触发" label.
                    trigger_type_desc = trigger_type_map.get(wf.trigger_type, wf.trigger_type or "定时触发")

                    simplified = {
                        "id": wf.id,
                        "name": wf.name,
                        "description": wf.description,
                        "trigger_type": trigger_type_desc,
                        "state": state_desc,
                        "run_count": wf.run_count,
                        "timer": wf.timer,
                        "event_type": wf.event_type,
                        "add_time": wf.add_time,
                        "last_time": wf.last_time,
                        "current_action": wf.current_action
                    }
                    # Attach the last result when present.
                    if wf.result:
                        simplified["result"] = wf.result
                    simplified_workflows.append(simplified)

                result_json = json.dumps(simplified_workflows, ensure_ascii=False, indent=2)
                return result_json
        except Exception as e:
            logger.error(f"查询工作流失败: {e}", exc_info=True)
            return f"查询工作流时发生错误: {str(e)}"
|
||||
81
app/agent/tools/impl/read_file.py
Normal file
81
app/agent/tools/impl/read_file.py
Normal file
@@ -0,0 +1,81 @@
|
||||
"""文件读取工具"""
|
||||
|
||||
from pathlib import Path
|
||||
from typing import Optional, Type
|
||||
|
||||
from anyio import Path as AsyncPath
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from app.agent.tools.base import MoviePilotTool
|
||||
from app.log import logger
|
||||
|
||||
# Maximum number of bytes returned per read (50KB); longer content is truncated.
MAX_READ_SIZE = 50 * 1024
|
||||
|
||||
|
||||
class ReadFileInput(BaseModel):
    """Input parameters for read file tool"""
    file_path: str = Field(..., description="The absolute path of the file to read")
    start_line: Optional[int] = Field(None, description="The starting line number (1-based, inclusive). If not provided, reading starts from the beginning of the file.")
    end_line: Optional[int] = Field(None, description="The ending line number (1-based, inclusive). If not provided, reading goes until the end of the file.")
|
||||
|
||||
|
||||
class ReadFileTool(MoviePilotTool):
    """Read a UTF-8 text file, optionally by 1-based line range, capped at 50KB."""
    name: str = "read_file"
    description: str = "Read the content of a text file. Supports reading by line range. Each read is limited to 50KB; content exceeding this limit will be truncated."
    args_schema: Type[BaseModel] = ReadFileInput

    def get_tool_message(self, **kwargs) -> Optional[str]:
        """Produce a short user-facing message naming the file being read."""
        target = kwargs.get("file_path", "")
        display_name = Path(target).name if target else "未知文件"
        return f"读取文件: {display_name}"

    @staticmethod
    def _slice_lines(text: str, start_line: Optional[int], end_line: Optional[int]) -> str:
        """Return the 1-based inclusive [start_line, end_line] portion of text.

        Out-of-range or missing bounds are clamped to the file's extent.
        """
        rows = text.splitlines(keepends=True)
        row_total = len(rows)
        # Convert 1-based line numbers to 0-based slice indices.
        begin = (start_line - 1) if start_line and start_line >= 1 else 0
        stop = end_line if end_line and end_line >= 1 else row_total
        # Clamp to a valid, non-inverted range.
        begin = max(0, min(begin, row_total))
        stop = max(begin, min(stop, row_total))
        return "".join(rows[begin:stop])

    async def run(self, file_path: str, start_line: Optional[int] = None,
                  end_line: Optional[int] = None, **kwargs) -> str:
        logger.info(f"执行工具: {self.name}, 参数: file_path={file_path}, start_line={start_line}, end_line={end_line}")

        try:
            target = AsyncPath(file_path)

            # Guard clauses: path must exist and be a regular file.
            if not await target.exists():
                return f"错误:文件 {file_path} 不存在"
            if not await target.is_file():
                return f"错误:{file_path} 不是一个文件"

            text = await target.read_text(encoding="utf-8")

            # Apply the optional line-range selection.
            if start_line is not None or end_line is not None:
                text = self._slice_lines(text, start_line, end_line)

            # Enforce the 50KB byte cap on whatever remains.
            raw = text.encode("utf-8")
            if len(raw) <= MAX_READ_SIZE:
                return text

            # Truncate on a byte boundary; drop any partial trailing codepoint.
            clipped = raw[:MAX_READ_SIZE].decode("utf-8", errors="ignore")
            return f"{clipped}\n\n[警告:文件内容已超过50KB限制,以上内容已被截断。请使用 start_line/end_line 参数分段读取。]"

        except PermissionError:
            return f"错误:没有权限读取 {file_path}"
        except UnicodeDecodeError:
            return f"错误:{file_path} 不是文本文件,无法读取"
        except Exception as e:
            logger.error(f"读取文件 {file_path} 时发生错误: {str(e)}", exc_info=True)
            return f"操作失败: {str(e)}"
|
||||
163
app/agent/tools/impl/recognize_media.py
Normal file
163
app/agent/tools/impl/recognize_media.py
Normal file
@@ -0,0 +1,163 @@
|
||||
"""识别媒体信息工具"""
|
||||
|
||||
import json
|
||||
from typing import Optional, Type
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from app.agent.tools.base import MoviePilotTool
|
||||
from app.chain.media import MediaChain
|
||||
from app.core.context import Context
|
||||
from app.core.metainfo import MetaInfo
|
||||
from app.log import logger
|
||||
from app.schemas.types import media_type_to_agent
|
||||
|
||||
|
||||
class RecognizeMediaInput(BaseModel):
    """Input parameter model for the media recognition tool."""
    explanation: str = Field(..., description="Clear explanation of why this tool is being used in the current context")
    title: Optional[str] = Field(None, description="The title of the torrent/media to recognize (required for torrent recognition)")
    subtitle: Optional[str] = Field(None, description="The subtitle or description of the torrent (optional, helps improve recognition accuracy)")
    path: Optional[str] = Field(None, description="The file path to recognize (required for file recognition, mutually exclusive with title)")
|
||||
|
||||
|
||||
class RecognizeMediaTool(MoviePilotTool):
    """Extract media information from a torrent title or a local file path.

    Two mutually exclusive modes: title(+subtitle) recognition, or file-path
    recognition. Always returns a JSON string with a "success" flag.
    """
    name: str = "recognize_media"
    description: str = "Extract/identify media information from torrent titles or file paths (NOT database search). Supports two modes: 1) Extract from torrent title and optional subtitle, 2) Extract from file path. Returns detailed media information. Use 'search_media' to search TMDB database, or 'scrape_metadata' to generate metadata files for existing files."
    args_schema: Type[BaseModel] = RecognizeMediaInput

    def get_tool_message(self, **kwargs) -> Optional[str]:
        """Build a friendly status message from the recognition parameters."""
        title = kwargs.get("title")
        subtitle = kwargs.get("subtitle")
        path = kwargs.get("path")

        if path:
            message = f"识别文件媒体信息: {path}"
        elif title:
            message = f"识别种子媒体信息: {title}"
            if subtitle:
                message += f" ({subtitle})"
        else:
            message = "识别媒体信息"

        return message

    async def run(self, title: Optional[str] = None, subtitle: Optional[str] = None,
                  path: Optional[str] = None, **kwargs) -> str:
        """Recognize media info from either a file path or a torrent title.

        :param title: torrent title (title mode)
        :param subtitle: optional torrent subtitle, improves accuracy
        :param path: file path (path mode, takes precedence over title)
        :return: JSON string; "success" is False with a message on failure
        """
        logger.info(f"执行工具: {self.name}, 参数: title={title}, subtitle={subtitle}, path={path}")

        try:
            media_chain = MediaChain()
            # NOTE: removed a dead `if not path:` check that sat inside the
            # `if path:` branch (unreachable) and an unused `context = None`
            # initializer — behavior is unchanged.

            # Choose the recognition mode based on the provided parameters.
            if path:
                # File-path recognition
                context = await media_chain.async_recognize_by_path(path)
                if context:
                    return self._format_context_result(context, "文件")
                return json.dumps({
                    "success": False,
                    "message": f"无法识别文件媒体信息: {path}",
                    "path": path
                }, ensure_ascii=False)

            elif title:
                # Torrent-title recognition
                metainfo = MetaInfo(title, subtitle)
                mediainfo = await media_chain.async_recognize_by_meta(metainfo)
                if mediainfo:
                    context = Context(meta_info=metainfo, media_info=mediainfo)
                    return self._format_context_result(context, "种子")
                return json.dumps({
                    "success": False,
                    "message": f"无法识别种子媒体信息: {title}",
                    "title": title,
                    "subtitle": subtitle
                }, ensure_ascii=False)

            else:
                return json.dumps({
                    "success": False,
                    "message": "必须提供 title(标题)或 path(文件路径)参数之一"
                }, ensure_ascii=False)

        except Exception as e:
            error_message = f"识别媒体信息失败: {str(e)}"
            logger.error(f"识别媒体信息失败: {e}", exc_info=True)
            return json.dumps({
                "success": False,
                "message": error_message
            }, ensure_ascii=False)

    @staticmethod
    def _format_context_result(context: Context, source_type: str) -> str:
        """Format a recognition Context into a simplified JSON string."""
        if not context:
            return json.dumps({
                "success": False,
                "message": "识别结果为空"
            }, ensure_ascii=False)

        context_dict = context.to_dict()
        media_info = context_dict.get("media_info")
        meta_info = context_dict.get("meta_info")

        # Build the simplified result envelope.
        result = {
            "success": True,
            "source_type": source_type,
            "media_info": None,
            "meta_info": None
        }

        # Media (identified title) information.
        if media_info:
            result["media_info"] = {
                "title": media_info.get("title"),
                "en_title": media_info.get("en_title"),
                "year": media_info.get("year"),
                "type": media_type_to_agent(media_info.get("type")),
                "season": media_info.get("season"),
                "tmdb_id": media_info.get("tmdb_id"),
                "imdb_id": media_info.get("imdb_id"),
                "douban_id": media_info.get("douban_id"),
                "bangumi_id": media_info.get("bangumi_id"),
                "overview": media_info.get("overview"),
                "vote_average": media_info.get("vote_average"),
                "poster_path": media_info.get("poster_path"),
                "backdrop_path": media_info.get("backdrop_path"),
                "detail_link": media_info.get("detail_link"),
                "title_year": media_info.get("title_year"),
                "source": media_info.get("source")
            }

        # Parsed metadata (season/episode structure) information.
        if meta_info:
            result["meta_info"] = {
                "name": meta_info.get("name"),
                "title": meta_info.get("title"),
                "year": meta_info.get("year"),
                "type": media_type_to_agent(meta_info.get("type")),
                "begin_season": meta_info.get("begin_season"),
                "end_season": meta_info.get("end_season"),
                "begin_episode": meta_info.get("begin_episode"),
                "end_episode": meta_info.get("end_episode"),
                "total_episode": meta_info.get("total_episode"),
                "part": meta_info.get("part"),
                "season_episode": meta_info.get("season_episode"),
                "episode_list": meta_info.get("episode_list"),
                "tmdbid": meta_info.get("tmdbid"),
                "doubanid": meta_info.get("doubanid")
            }

        return json.dumps(result, ensure_ascii=False, indent=2)
|
||||
84
app/agent/tools/impl/reload_plugin.py
Normal file
84
app/agent/tools/impl/reload_plugin.py
Normal file
@@ -0,0 +1,84 @@
|
||||
"""重载插件工具"""
|
||||
|
||||
import json
|
||||
from typing import Optional, Type
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from app.agent.tools.base import MoviePilotTool
|
||||
from app.agent.tools.impl._plugin_tool_utils import (
|
||||
get_plugin_snapshot,
|
||||
reload_plugin_runtime,
|
||||
)
|
||||
from app.log import logger
|
||||
|
||||
|
||||
class ReloadPluginInput(BaseModel):
    """Input schema for the reload-plugin tool."""

    # Rationale the agent must supply for audit/traceability of tool calls.
    explanation: str = Field(
        ...,
        description="Clear explanation of why this tool is being used in the current context",
    )
    # Identifier of the installed plugin whose runtime should be reloaded.
    plugin_id: str = Field(
        ...,
        description="The plugin ID to reload so the latest saved config takes effect.",
    )
|
||||
|
||||
|
||||
class ReloadPluginTool(MoviePilotTool):
    """Reload an installed plugin so its latest saved configuration takes effect.

    Reloading also refreshes the plugin's registered commands, scheduled
    services, and API routes, mirroring the backend reload endpoint.
    """

    name: str = "reload_plugin"
    description: str = (
        "Reload an installed plugin so its latest saved configuration takes effect. "
        "This also refreshes the plugin's registered commands, scheduled services, and API routes."
    )
    require_admin: bool = True
    args_schema: Type[BaseModel] = ReloadPluginInput

    def get_tool_message(self, **kwargs) -> Optional[str]:
        """Build a user-facing progress message for this invocation."""
        target = kwargs.get("plugin_id", "")
        return f"重载插件: {target}"

    @staticmethod
    def _reload_plugin_sync(plugin_id: str) -> str:
        """
        Reload the plugin through the same flow as the backend API, so the
        latest config and the registration info are refreshed together.
        Returns a JSON string describing the outcome.
        """
        snapshot = get_plugin_snapshot(plugin_id)
        if not snapshot:
            # Unknown plugin ID: point the caller at the discovery tool.
            failure = {
                "success": False,
                "message": f"插件 {plugin_id} 不存在,请先使用 query_installed_plugins 查询有效插件 ID",
            }
            return json.dumps(failure, ensure_ascii=False)

        reload_plugin_runtime(plugin_id)
        # Prefer the post-reload snapshot; fall back to the pre-reload one.
        refreshed = get_plugin_snapshot(plugin_id) or snapshot

        payload = {
            "success": True,
            **refreshed,
            "message": "插件已重载,最新配置已生效",
        }
        # default=str: snapshot values may contain non-JSON-native types.
        return json.dumps(payload, ensure_ascii=False, indent=2, default=str)

    async def run(self, plugin_id: str, **kwargs) -> str:
        logger.info(f"执行工具: {self.name}, 参数: plugin_id={plugin_id}")

        try:
            # Off-load the blocking reload onto the "plugin" executor.
            return await self.run_blocking("plugin", self._reload_plugin_sync, plugin_id)
        except Exception as e:
            logger.error(f"重载插件失败: {e}", exc_info=True)
            return json.dumps(
                {"success": False, "message": f"重载插件时发生错误: {str(e)}"},
                ensure_ascii=False,
            )
|
||||
60
app/agent/tools/impl/run_scheduler.py
Normal file
60
app/agent/tools/impl/run_scheduler.py
Normal file
@@ -0,0 +1,60 @@
|
||||
"""运行定时服务工具"""
|
||||
|
||||
from typing import Optional, Type
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from app.agent.tools.base import MoviePilotTool
|
||||
from app.log import logger
|
||||
|
||||
|
||||
class RunSchedulerInput(BaseModel):
    """Input schema for the run-scheduler tool."""

    # Rationale the agent must supply for audit/traceability of tool calls.
    explanation: str = Field(
        ...,
        description="Clear explanation of why this tool is being used in the current context",
    )
    # Scheduler job identifier; discoverable via the query_schedulers tool.
    job_id: str = Field(
        ...,
        description="The ID of the scheduled job to run (can be obtained from query_schedulers tool)",
    )
|
||||
|
||||
|
||||
class RunSchedulerTool(MoviePilotTool):
    """Manually fire a scheduled job immediately, looked up by its job ID."""

    name: str = "run_scheduler"
    description: str = "Manually trigger a scheduled task to run immediately. This will execute the specified scheduler job by its ID."
    args_schema: Type[BaseModel] = RunSchedulerInput
    require_admin: bool = True

    def get_tool_message(self, **kwargs) -> Optional[str]:
        """Build a user-facing progress message from the call arguments."""
        target = kwargs.get("job_id", "")
        return f"运行定时服务 (ID: {target})"

    @staticmethod
    def _run_scheduler_sync(job_id: str) -> tuple[bool, str]:
        """Trigger the job synchronously so scheduler scanning never blocks the event loop.

        Returns (found, job_name); job_name is "" when the ID is unknown.
        """
        # Local import keeps module import cheap and avoids cycles.
        from app.scheduler import Scheduler

        sched = Scheduler()
        for item in sched.list():
            if item.id != job_id:
                continue
            sched.start(job_id)
            return True, item.name
        return False, ""

    async def run(self, job_id: str, **kwargs) -> str:
        logger.info(f"执行工具: {self.name}, 参数: job_id={job_id}")

        try:
            # Off-load the scheduler scan/trigger onto the "workflow" executor.
            found, job_name = await self.run_blocking(
                "workflow", self._run_scheduler_sync, job_id
            )
        except Exception as e:
            logger.error(f"运行定时服务失败: {e}", exc_info=True)
            return f"运行定时服务时发生错误: {str(e)}"

        if not found:
            return f"定时服务 ID {job_id} 不存在,请使用 query_schedulers 工具查询可用的定时服务"
        return f"成功触发定时服务:{job_name} (ID: {job_id})"
|
||||
115
app/agent/tools/impl/run_slash_command.py
Normal file
115
app/agent/tools/impl/run_slash_command.py
Normal file
@@ -0,0 +1,115 @@
|
||||
"""运行斜杠命令工具(系统命令 + 插件命令)"""
|
||||
|
||||
import json
|
||||
from typing import Optional, Type
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from app.agent.tools.base import MoviePilotTool
|
||||
from app.core.event import eventmanager
|
||||
from app.log import logger
|
||||
from app.schemas.types import EventType, MessageChannel
|
||||
|
||||
|
||||
class RunSlashCommandInput(BaseModel):
    """Input schema for the run-slash-command tool."""

    # Rationale the agent must supply for audit/traceability of tool calls.
    explanation: str = Field(
        ...,
        description="Clear explanation of why this tool is being used in the current context",
    )
    # Full slash command text, optionally followed by arguments.
    command: str = Field(
        ...,
        description="The slash command to execute, e.g. '/cookiecloud'. "
        "Must start with '/'. Can include arguments after the command, e.g. '/command arg1 arg2'. "
        "Use query_plugin_capabilities tool to discover available plugin commands, "
        "or list_slash_commands tool to discover all available commands (including system commands).",
    )
|
||||
|
||||
|
||||
class RunSlashCommandTool(MoviePilotTool):
    """Execute a registered slash command (system preset or plugin) by emitting a CommandExcute event.

    The event is handled asynchronously in the background; this tool only
    validates the command against the global registry and fires the event.
    """

    name: str = "run_slash_command"
    description: str = (
        "Execute a slash command (system or plugin) by sending a CommandExcute event. "
        "This tool supports ALL registered slash commands, including: "
        "1) System preset commands (e.g. /cookiecloud, /sites, /subscribes, /downloading, /transfer, /restart, etc.) "
        "2) Plugin commands registered by installed plugins. "
        "Use the query_plugin_capabilities tool to discover plugin commands, "
        "or the list_slash_commands tool to discover all available commands. "
        "The command will be executed asynchronously. "
        "Note: This tool triggers the command execution but the actual processing happens in the background."
    )
    args_schema: Type[BaseModel] = RunSlashCommandInput
    require_admin: bool = True

    def get_tool_message(self, **kwargs) -> Optional[str]:
        """Build a user-facing progress message for this invocation."""
        cmd = kwargs.get("command", "")
        return f"执行命令: {cmd}"

    async def run(self, command: str, **kwargs) -> str:
        logger.info(f"执行工具: {self.name}, 参数: command={command}")

        try:
            # Normalize: a slash command always starts with "/".
            if not command.startswith("/"):
                command = f"/{command}"

            # Validate against the global Command singleton, which holds
            # system preset commands, plugin commands, and any others.
            from app.command import Command

            cmd_name = command.split()[0]
            registry = Command()
            matched = registry.get(cmd_name)

            if not matched:
                # Unknown command: return the full catalogue so the caller
                # can pick a valid one.
                catalogue = registry.get_commands()
                listing = [
                    f"{cmd} - {info.get('description', '无描述')}"
                    for cmd, info in catalogue.items()
                ]
                failure = {
                    "success": False,
                    "message": f"命令 {cmd_name} 不存在",
                }
                if listing:
                    failure["available_commands"] = listing
                return json.dumps(failure, ensure_ascii=False, indent=2)

            # Resolve the message channel, preferring the current session's
            # channel information when it maps to a known enum member.
            channel = None
            if self._channel:
                try:
                    channel = MessageChannel(self._channel)
                except (ValueError, KeyError):
                    channel = None

            # Fire the command-execution event, mirroring message.py.
            eventmanager.send_event(
                EventType.CommandExcute,
                {
                    "cmd": command,
                    "user": self._user_id,
                    "channel": channel,
                    "source": self._source,
                },
            )

            response = {
                "success": True,
                "message": f"命令 {cmd_name} 已触发执行",
                "command": command,
                "command_desc": matched.get("description", ""),
            }
            # Plugin-registered commands also report their owning plugin ID.
            if matched.get("pid"):
                response["plugin_id"] = matched["pid"]
            return json.dumps(response, ensure_ascii=False, indent=2)

        except Exception as e:
            logger.error(f"执行命令失败: {e}", exc_info=True)
            return json.dumps(
                {"success": False, "message": f"执行命令时发生错误: {str(e)}"},
                ensure_ascii=False,
            )
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user