Compare commits

...

89 Commits

Author SHA1 Message Date
shiyu
4e16de973c feat: add search functionality to fetchFoxelCoreApps and enhance PluginsPage with query handling 2026-01-06 21:18:26 +08:00
shiyu
4dd0a4b1d6 chore: update version to v1.6.0 2026-01-06 18:02:01 +08:00
shiyu
5703825c31 fix: adjust grid column size for better layout in PluginsPage and ai-settings 2026-01-06 17:07:30 +08:00
时雨
24255744df feat: enhance plugin functionality 2026-01-06 16:54:49 +08:00
shiyu
31d97b2968 chore: update version to v1.5.5 2026-01-03 21:19:47 +08:00
shiyu
35abd080be feat: implement file search functionality in FileExplorerPage 2026-01-03 21:16:53 +08:00
shiyu
2fa93a1eeb feat: add vector and file collection constants, update vector index handling 2026-01-03 15:12:20 +08:00
shiyu
ff7eb13187 chore: update Python version in README files to 3.14 2026-01-03 14:09:29 +08:00
shiyu
ed9090c3d0 chore: update version to v1.5.4 2026-01-02 11:42:26 +08:00
shiyu
d430254868 feat: add Foxel adapter support and localization entries 2026-01-01 23:57:27 +08:00
dependabot[bot]
a8870f80da chore(deps): bump fastapi from 0.127.0 to 0.128.0 (#85)
Bumps [fastapi](https://github.com/fastapi/fastapi) from 0.127.0 to 0.128.0.
- [Release notes](https://github.com/fastapi/fastapi/releases)
- [Commits](https://github.com/fastapi/fastapi/compare/0.127.0...0.128.0)

---
updated-dependencies:
- dependency-name: fastapi
  dependency-version: 0.128.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2026-01-01 15:48:21 +08:00
dependabot[bot]
14ef2a4ccc chore(deps): bump antd from 6.1.2 to 6.1.3 in /web (#84)
Bumps [antd](https://github.com/ant-design/ant-design) from 6.1.2 to 6.1.3.
- [Release notes](https://github.com/ant-design/ant-design/releases)
- [Changelog](https://github.com/ant-design/ant-design/blob/master/CHANGELOG.en-US.md)
- [Commits](https://github.com/ant-design/ant-design/compare/6.1.2...6.1.3)

---
updated-dependencies:
- dependency-name: antd
  dependency-version: 6.1.3
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2026-01-01 15:48:14 +08:00
dependabot[bot]
dd41941b04 chore(deps-dev): bump typescript-eslint from 8.50.1 to 8.51.0 in /web (#86)
Bumps [typescript-eslint](https://github.com/typescript-eslint/typescript-eslint/tree/HEAD/packages/typescript-eslint) from 8.50.1 to 8.51.0.
- [Release notes](https://github.com/typescript-eslint/typescript-eslint/releases)
- [Changelog](https://github.com/typescript-eslint/typescript-eslint/blob/main/packages/typescript-eslint/CHANGELOG.md)
- [Commits](https://github.com/typescript-eslint/typescript-eslint/commits/v8.51.0/packages/typescript-eslint)

---
updated-dependencies:
- dependency-name: typescript-eslint
  dependency-version: 8.51.0
  dependency-type: direct:development
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2026-01-01 15:48:02 +08:00
shiyu
01a259bae0 fix: update UI screenshot links in README files for consistency 2026-01-01 15:47:17 +08:00
shiyu
ef5ef2730c feat(audit): enhance client IP extraction logic in request handling 2026-01-01 15:45:38 +08:00
shiyu
8b8772b064 fix: update font stylesheet and family for consistency in design 2025-12-30 20:43:10 +08:00
shiyu
5393a973eb chore: update version to v1.5.3 2025-12-30 17:36:15 +08:00
shiyu
cc1f130099 feat(audit): adjust column widths and alignments in Audit Logs table for improved readability 2025-12-30 17:25:17 +08:00
shiyu
c8b3817805 feat(plugins): remove unused repository-related code and simplify UI for upcoming features 2025-12-30 15:39:09 +08:00
shiyu
b1ea181f96 feat: support dynamic port configuration in Docker Compose and entrypoint scripts 2025-12-30 15:00:22 +08:00
shiyu
078709b871 feat(audit): update log clearing confirmation message and remove redundant checks 2025-12-30 14:41:21 +08:00
shiyu
d788bde44f chore: update version to v1.5.2 2025-12-30 12:54:58 +08:00
shiyu
28ede26801 feat(s3): implement multipart upload functionality and related endpoints 2025-12-30 12:16:18 +08:00
时雨
53130383c1 chore: Update funding URL to remove trailing '.html' 2025-12-30 10:24:49 +08:00
shiyu
036eeb92c2 fix: enhance S3 signature authorization handling and improve error responses 2025-12-30 10:00:25 +08:00
shiyu
5701a13f4f chore: update version to v1.5.1 2025-12-29 16:53:53 +08:00
shiyu
184997deed feat(audit): add audit logs for S3/WebDAV mapping APIs 2025-12-29 16:41:02 +08:00
shiyu
1d5824d498 fix: update ConfigProvider cssVar prop and improve HTTP method tag rendering 2025-12-29 10:57:24 +08:00
shiyu
91ff1860b7 fix: update setConfig function to allow optional value and fix API endpoint 2025-12-29 10:29:44 +08:00
shiyu
56f947d0bf chore(deps): remove unused @ant-design/v5-patch-for-react-19 dependency 2025-12-25 17:42:44 +08:00
shiyu
ad016baaf9 chore(deps): update dependencies and remove unused package 2025-12-25 17:38:27 +08:00
shiyu
ad2e2858da chore: update dependencies in package.json 2025-12-25 17:34:32 +08:00
dependabot[bot]
a69d6c21a6 chore(deps): bump react-router from 7.8.0 to 7.11.0 in /web (#76) 2025-12-25 09:32:15 +00:00
dependabot[bot]
2a4a3c44b9 chore(deps): bump monaco-editor from 0.53.0 to 0.55.1 in /web (#81) 2025-12-25 09:32:10 +00:00
dependabot[bot]
cdb8543370 chore(deps): bump @uiw/react-md-editor from 4.0.8 to 4.0.11 in /web (#80) 2025-12-25 09:29:54 +00:00
dependabot[bot]
2dabe9255f chore(deps): bump react-dom and @types/react-dom in /web (#82) 2025-12-25 09:28:58 +00:00
shiyu
239216e574 chore(docker): update Python base image from 3.13-slim to 3.14-slim 2025-12-25 15:54:53 +08:00
shiyu
09c65bffb7 chore(db): remove patch for aiosqlite in init_db function 2025-12-25 15:49:53 +08:00
shiyu
ff1c06ad18 chore(deps): update Python version requirement and dependencies 2025-12-25 15:48:28 +08:00
shiyu
d88e95a9af feat(dependabot): change update schedule from weekly to monthly for all ecosystems 2025-12-25 15:09:45 +08:00
dependabot[bot]
ae80a751a8 chore(deps): bump bcrypt from 3.2.2 to 5.0.0 (#57)
Bumps [bcrypt](https://github.com/pyca/bcrypt) from 3.2.2 to 5.0.0.
- [Changelog](https://github.com/pyca/bcrypt/blob/main/CHANGELOG.rst)
- [Commits](https://github.com/pyca/bcrypt/compare/3.2.2...5.0.0)

---
updated-dependencies:
- dependency-name: bcrypt
  dependency-version: 5.0.0
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-12-25 15:09:13 +08:00
dependabot[bot]
b40e700a64 chore(deps): bump qdrant-client from 1.15.1 to 1.16.2 (#68)
Bumps [qdrant-client](https://github.com/qdrant/qdrant-client) from 1.15.1 to 1.16.2.
- [Release notes](https://github.com/qdrant/qdrant-client/releases)
- [Commits](https://github.com/qdrant/qdrant-client/compare/v1.15.1...v1.16.2)

---
updated-dependencies:
- dependency-name: qdrant-client
  dependency-version: 1.16.2
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-12-25 15:08:50 +08:00
dependabot[bot]
040d8346b3 chore(deps): bump python-multipart from 0.0.20 to 0.0.21 (#69)
Bumps [python-multipart](https://github.com/Kludex/python-multipart) from 0.0.20 to 0.0.21.
- [Release notes](https://github.com/Kludex/python-multipart/releases)
- [Changelog](https://github.com/Kludex/python-multipart/blob/master/CHANGELOG.md)
- [Commits](https://github.com/Kludex/python-multipart/compare/0.0.20...0.0.21)

---
updated-dependencies:
- dependency-name: python-multipart
  dependency-version: 0.0.21
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-12-25 15:08:30 +08:00
dependabot[bot]
55d062f0a7 chore(deps): bump fastapi from 0.116.1 to 0.127.0 (#70)
Bumps [fastapi](https://github.com/fastapi/fastapi) from 0.116.1 to 0.127.0.
- [Release notes](https://github.com/fastapi/fastapi/releases)
- [Commits](https://github.com/fastapi/fastapi/compare/0.116.1...0.127.0)

---
updated-dependencies:
- dependency-name: fastapi
  dependency-version: 0.127.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-12-25 15:08:15 +08:00
dependabot[bot]
cfaaff8a8c chore(deps-dev): bump vite from 7.1.2 to 7.3.0 in /web (#67)
Bumps [vite](https://github.com/vitejs/vite/tree/HEAD/packages/vite) from 7.1.2 to 7.3.0.
- [Release notes](https://github.com/vitejs/vite/releases)
- [Changelog](https://github.com/vitejs/vite/blob/v7.3.0/packages/vite/CHANGELOG.md)
- [Commits](https://github.com/vitejs/vite/commits/v7.3.0/packages/vite)

---
updated-dependencies:
- dependency-name: vite
  dependency-version: 7.3.0
  dependency-type: direct:development
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-12-25 15:07:57 +08:00
dependabot[bot]
d6d41333fd chore(deps): bump uvicorn from 0.38.0 to 0.40.0 (#72)
Bumps [uvicorn](https://github.com/Kludex/uvicorn) from 0.38.0 to 0.40.0.
- [Release notes](https://github.com/Kludex/uvicorn/releases)
- [Changelog](https://github.com/Kludex/uvicorn/blob/main/docs/release-notes.md)
- [Commits](https://github.com/Kludex/uvicorn/compare/0.38.0...0.40.0)

---
updated-dependencies:
- dependency-name: uvicorn
  dependency-version: 0.40.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-12-25 15:07:45 +08:00
dependabot[bot]
a4efba94d5 chore(deps-dev): bump @eslint/js from 9.33.0 to 9.39.2 in /web (#73)
Bumps [@eslint/js](https://github.com/eslint/eslint/tree/HEAD/packages/js) from 9.33.0 to 9.39.2.
- [Release notes](https://github.com/eslint/eslint/releases)
- [Commits](https://github.com/eslint/eslint/commits/v9.39.2/packages/js)

---
updated-dependencies:
- dependency-name: "@eslint/js"
  dependency-version: 9.39.2
  dependency-type: direct:development
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-12-25 15:07:12 +08:00
dependabot[bot]
00e6419b12 chore(deps): bump react and @types/react in /web (#71)
Bumps [react](https://github.com/facebook/react/tree/HEAD/packages/react) and [@types/react](https://github.com/DefinitelyTyped/DefinitelyTyped/tree/HEAD/types/react). These dependencies needed to be updated together.

Updates `react` from 19.1.1 to 19.2.3
- [Release notes](https://github.com/facebook/react/releases)
- [Changelog](https://github.com/facebook/react/blob/main/CHANGELOG.md)
- [Commits](https://github.com/facebook/react/commits/v19.2.3/packages/react)

Updates `@types/react` from 19.1.10 to 19.2.7
- [Release notes](https://github.com/DefinitelyTyped/DefinitelyTyped/releases)
- [Commits](https://github.com/DefinitelyTyped/DefinitelyTyped/commits/HEAD/types/react)

---
updated-dependencies:
- dependency-name: react
  dependency-version: 19.2.3
  dependency-type: direct:production
  update-type: version-update:semver-minor
- dependency-name: "@types/react"
  dependency-version: 19.2.7
  dependency-type: direct:development
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-12-25 15:05:13 +08:00
dependabot[bot]
bbe8465aa0 chore(deps-dev): bump eslint-plugin-react-hooks in /web (#74)
Bumps [eslint-plugin-react-hooks](https://github.com/facebook/react/tree/HEAD/packages/eslint-plugin-react-hooks) from 5.2.0 to 7.0.1.
- [Release notes](https://github.com/facebook/react/releases)
- [Changelog](https://github.com/facebook/react/blob/main/packages/eslint-plugin-react-hooks/CHANGELOG.md)
- [Commits](https://github.com/facebook/react/commits/HEAD/packages/eslint-plugin-react-hooks)

---
updated-dependencies:
- dependency-name: eslint-plugin-react-hooks
  dependency-version: 7.0.1
  dependency-type: direct:development
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-12-25 15:04:43 +08:00
dependabot[bot]
baadaa70a7 chore(deps-dev): bump typescript from 5.8.3 to 5.9.3 in /web (#75)
Bumps [typescript](https://github.com/microsoft/TypeScript) from 5.8.3 to 5.9.3.
- [Release notes](https://github.com/microsoft/TypeScript/releases)
- [Commits](https://github.com/microsoft/TypeScript/compare/v5.8.3...v5.9.3)

---
updated-dependencies:
- dependency-name: typescript
  dependency-version: 5.9.3
  dependency-type: direct:development
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-12-25 15:04:23 +08:00
shiyu
e7e34cda54 feat(db): add patch for aiosqlite to ensure proper connection handling 2025-12-23 09:53:31 +08:00
shiyu
adb80d0a6c feat(Dropbox): add Dropbox adapter with file management capabilities and localization support 2025-12-22 21:49:46 +08:00
shiyu
bcd4ae7aef feat(S3): update region handling to allow blank input and improve user guidance 2025-12-22 16:03:23 +08:00
shiyu
1ef80a087c feat(adapters): add AList and OpenList adapter types with configuration schemas 2025-12-22 15:10:39 +08:00
shiyu
f503d521e6 feat(cli): add foxel_cli.py for user password management and setup symlink 2025-12-22 12:43:52 +08:00
shiyu
7c38c0045b fix: update error handling to avoid unused catch variables and improve code clarity 2025-12-18 15:51:20 +08:00
shiyu
b582a89d08 fix(ContextMenu): simplify open label by removing app name 2025-12-18 14:21:33 +08:00
dependabot[bot]
4ea0b9884a chore(deps): bump aioboto3 from 15.2.0 to 15.5.0 (#55)
Bumps [aioboto3](https://github.com/terricain/aioboto3) from 15.2.0 to 15.5.0.
- [Changelog](https://github.com/terricain/aioboto3/blob/main/CHANGELOG.rst)
- [Commits](https://github.com/terricain/aioboto3/compare/v15.2.0...v15.5.0)

---
updated-dependencies:
- dependency-name: aioboto3
  dependency-version: 15.5.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-12-18 14:12:47 +08:00
dependabot[bot]
dfeec58ed9 chore(deps): bump pillow from 11.3.0 to 12.0.0 (#59)
Bumps [pillow](https://github.com/python-pillow/Pillow) from 11.3.0 to 12.0.0.
- [Release notes](https://github.com/python-pillow/Pillow/releases)
- [Changelog](https://github.com/python-pillow/Pillow/blob/main/CHANGES.rst)
- [Commits](https://github.com/python-pillow/Pillow/compare/11.3.0...12.0.0)

---
updated-dependencies:
- dependency-name: pillow
  dependency-version: 12.0.0
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-12-18 14:12:33 +08:00
shiyu
e2f0037053 fix(Header): adjust path editor height and improve styling 2025-12-18 14:08:59 +08:00
dependabot[bot]
e34ee6f70d chore(deps-dev): bump eslint from 9.33.0 to 9.39.2 in /web (#63)
Bumps [eslint](https://github.com/eslint/eslint) from 9.33.0 to 9.39.2.
- [Release notes](https://github.com/eslint/eslint/releases)
- [Commits](https://github.com/eslint/eslint/compare/v9.33.0...v9.39.2)

---
updated-dependencies:
- dependency-name: eslint
  dependency-version: 9.39.2
  dependency-type: direct:development
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-12-18 12:42:09 +08:00
dependabot[bot]
0f856bb5b7 chore(deps-dev): bump eslint-plugin-react-refresh in /web (#60)
Bumps [eslint-plugin-react-refresh](https://github.com/ArnaudBarre/eslint-plugin-react-refresh) from 0.4.20 to 0.4.26.
- [Release notes](https://github.com/ArnaudBarre/eslint-plugin-react-refresh/releases)
- [Changelog](https://github.com/ArnaudBarre/eslint-plugin-react-refresh/blob/main/CHANGELOG.md)
- [Commits](https://github.com/ArnaudBarre/eslint-plugin-react-refresh/compare/v0.4.20...v0.4.26)

---
updated-dependencies:
- dependency-name: eslint-plugin-react-refresh
  dependency-version: 0.4.26
  dependency-type: direct:development
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-12-18 09:20:31 +08:00
dependabot[bot]
3b4b01a18d chore(deps): bump artplayer from 5.2.5 to 5.3.0 in /web (#58)
Bumps [artplayer](https://github.com/zhw2590582/ArtPlayer) from 5.2.5 to 5.3.0.
- [Release notes](https://github.com/zhw2590582/ArtPlayer/releases)
- [Changelog](https://github.com/zhw2590582/ArtPlayer/blob/master/CHANGELOG.md)
- [Commits](https://github.com/zhw2590582/ArtPlayer/compare/5.2.5...5.3.0)

---
updated-dependencies:
- dependency-name: artplayer
  dependency-version: 5.3.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-12-18 09:20:18 +08:00
dependabot[bot]
2e1f76d0bc chore(deps-dev): bump @vitejs/plugin-react from 5.0.0 to 5.1.2 in /web (#56)
Bumps [@vitejs/plugin-react](https://github.com/vitejs/vite-plugin-react/tree/HEAD/packages/plugin-react) from 5.0.0 to 5.1.2.
- [Release notes](https://github.com/vitejs/vite-plugin-react/releases)
- [Changelog](https://github.com/vitejs/vite-plugin-react/blob/main/packages/plugin-react/CHANGELOG.md)
- [Commits](https://github.com/vitejs/vite-plugin-react/commits/plugin-react@5.1.2/packages/plugin-react)

---
updated-dependencies:
- dependency-name: "@vitejs/plugin-react"
  dependency-version: 5.1.2
  dependency-type: direct:development
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-12-18 09:20:03 +08:00
dependabot[bot]
18ed7dcee1 chore(deps): bump @ant-design/icons from 5.6.1 to 6.1.0 in /web (#61)
Bumps [@ant-design/icons](https://github.com/ant-design/ant-design-icons) from 5.6.1 to 6.1.0.
- [Commits](https://github.com/ant-design/ant-design-icons/commits)

---
updated-dependencies:
- dependency-name: "@ant-design/icons"
  dependency-version: 6.1.0
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-12-17 15:35:34 +08:00
dependabot[bot]
5c3ab65cee chore(deps): bump telethon from 1.41.2 to 1.42.0 (#62)
Bumps [telethon](https://github.com/LonamiWebs/Telethon) from 1.41.2 to 1.42.0.
- [Release notes](https://github.com/LonamiWebs/Telethon/releases)
- [Commits](https://github.com/LonamiWebs/Telethon/commits/v1.42.0)

---
updated-dependencies:
- dependency-name: telethon
  dependency-version: 1.42.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-12-17 15:34:46 +08:00
dependabot[bot]
1ddd2e464c chore(deps): bump uvicorn from 0.37.0 to 0.38.0 (#64)
Bumps [uvicorn](https://github.com/Kludex/uvicorn) from 0.37.0 to 0.38.0.
- [Release notes](https://github.com/Kludex/uvicorn/releases)
- [Changelog](https://github.com/Kludex/uvicorn/blob/main/docs/release-notes.md)
- [Commits](https://github.com/Kludex/uvicorn/compare/0.37.0...0.38.0)

---
updated-dependencies:
- dependency-name: uvicorn
  dependency-version: 0.38.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-12-17 15:34:00 +08:00
dependabot[bot]
aeb7cf75a1 chore(deps): bump release-drafter/release-drafter from 5 to 6 (#52)
Bumps [release-drafter/release-drafter](https://github.com/release-drafter/release-drafter) from 5 to 6.
- [Release notes](https://github.com/release-drafter/release-drafter/releases)
- [Commits](https://github.com/release-drafter/release-drafter/compare/v5...v6)

---
updated-dependencies:
- dependency-name: release-drafter/release-drafter
  dependency-version: '6'
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-12-17 12:52:07 +08:00
dependabot[bot]
648fd51d26 chore(deps): bump docker/build-push-action from 5 to 6 (#53)
Bumps [docker/build-push-action](https://github.com/docker/build-push-action) from 5 to 6.
- [Release notes](https://github.com/docker/build-push-action/releases)
- [Commits](https://github.com/docker/build-push-action/compare/v5...v6)

---
updated-dependencies:
- dependency-name: docker/build-push-action
  dependency-version: '6'
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-12-17 12:51:48 +08:00
dependabot[bot]
98c7b3af9b chore(deps): bump actions/checkout from 4 to 6 (#54)
Bumps [actions/checkout](https://github.com/actions/checkout) from 4 to 6.
- [Release notes](https://github.com/actions/checkout/releases)
- [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md)
- [Commits](https://github.com/actions/checkout/compare/v4...v6)

---
updated-dependencies:
- dependency-name: actions/checkout
  dependency-version: '6'
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-12-17 12:50:27 +08:00
shiyu
fc3b6a9d70 feat(dependabot): add configuration for automated dependency updates 2025-12-17 12:47:07 +08:00
shiyu
1c0fc24cfa chore(dependencies): update tortoise-orm version to 0.25.2 2025-12-17 11:55:54 +08:00
shiyu
5127d9f0fc feat(audit): add client IP extraction to audit logging 2025-12-17 11:10:37 +08:00
shiyu
ba1feb150b feat(VideoLibrary): update styling for video library header and statistics display 2025-12-17 10:57:59 +08:00
shiyu
6a1ff3afa6 feat(VideoLibrary): enhance cover rendering and loading skeletons 2025-12-17 09:59:48 +08:00
shiyu
724f551b00 feat(video-library): implement video library processing and API integration 2025-12-16 18:02:46 +08:00
shiyu
8cf147bf34 feat(GridView): add mouse down event for selecting multiple entries 2025-12-16 15:57:32 +08:00
shiyu
c2a473fac9 feat(setup): improve installer UX and defaults 2025-12-16 14:31:48 +08:00
shiyu
aaae37e7cb feat: add video transcoding URL retrieval and enhance thumbnail generation logic 2025-12-15 22:01:07 +08:00
shiyu
78de3b46be feat: enhance video thumbnail generation 2025-12-15 18:03:13 +08:00
shiyu
388ddfd869 feat: enhance PluginsPage layout with flexible tab content and improved overflow handling 2025-12-15 15:38:28 +08:00
shiyu
18f59f8d33 fix: update window positioning to prevent overlap with the top of the viewport 2025-12-15 15:14:37 +08:00
shiyu
b319b545fc feat: add video library component 2025-12-15 14:55:14 +08:00
shiyu
0fcb3b8ce0 feat: add support for opening plugins 2025-12-15 14:49:01 +08:00
shiyu
686202a0dd feat: enhance app descriptors with additional metadata and support for various file types 2025-12-12 18:09:44 +08:00
shiyu
1cda987723 fix: persist collapsed state in localStorage for layout 2025-12-12 17:10:36 +08:00
shiyu
49a4300fc3 fix: update SideNav to conditionally render buttons and version status based on collapsed state 2025-12-12 17:05:46 +08:00
shiyu
d7260e8863 fix: update type hint for VectorDBService instance variable 2025-12-10 11:20:25 +08:00
shiyu
62d0316d48 chore: remove unnecessary imports 2025-12-10 11:02:35 +08:00
139 changed files with 7782 additions and 4550 deletions

.github/FUNDING.yml (2 lines changed)

@@ -1 +1 @@
-custom: https://foxel.cc/sponsor.html
+custom: https://foxel.cc/sponsor

.github/dependabot.yml (new file, 16 lines)

@@ -0,0 +1,16 @@
version: 2
updates:
  - package-ecosystem: "github-actions"
    directory: "/"
    schedule:
      interval: "monthly"

  - package-ecosystem: "bun"
    directory: "/web"
    schedule:
      interval: "monthly"

  - package-ecosystem: "uv"
    directory: "/"
    schedule:
      interval: "monthly"

View File

@@ -17,7 +17,7 @@ jobs:
     steps:
       - name: Checkout code
-        uses: actions/checkout@v4
+        uses: actions/checkout@v6
       - name: Set up Docker Buildx
         uses: docker/setup-buildx-action@v3
@@ -45,7 +45,7 @@ jobs:
           password: ${{ secrets.GITHUB_TOKEN }}
       - name: Build and push Docker image (multi arch)
-        uses: docker/build-push-action@v5
+        uses: docker/build-push-action@v6
         with:
           context: .
           platforms: linux/amd64,linux/arm64

View File

@@ -10,7 +10,7 @@ jobs:
       contents: write
       pull-requests: write
     steps:
-      - uses: release-drafter/release-drafter@v5
+      - uses: release-drafter/release-drafter@v6
        with:
          config-name: release-drafter.yml
        env:

.python-version

@@ -1 +1 @@
-3.13
+3.14

Dockerfile

@@ -9,7 +9,7 @@ COPY web/ ./
 RUN bun run build

-FROM python:3.13-slim
+FROM python:3.14-slim

 WORKDIR /app
@@ -33,6 +33,8 @@ COPY . .
 RUN mkdir -p data/db data/mount && \
     chmod 777 data/db data/mount && \
+    chmod +x setup/foxel_cli.py && \
+    ln -sf /app/setup/foxel_cli.py /usr/local/bin/foxel && \
     rm -rf /var/log/apt /var/cache/apt/archives

 EXPOSE 80

README.md

@@ -8,16 +8,17 @@
 **A highly extensible private cloud storage solution for individuals and teams, featuring AI-powered semantic search.**

-![Python Version](https://img.shields.io/badge/Python-3.13+-blue.svg)
+![Python Version](https://img.shields.io/badge/Python-3.14+-blue.svg)
 ![React](https://img.shields.io/badge/React-19.0-blue.svg)
 ![License](https://img.shields.io/badge/license-MIT-green.svg)
 ![GitHub stars](https://img.shields.io/github/stars/DrizzleTime/foxel?style=social)

 ---

 <blockquote>
 <em><strong>The ocean of data is boundless, let the eye of insight guide the voyage, yet its intricate connections lie deep, not fully discernible from the surface.</strong></em>
 </blockquote>

-<img src="https://foxel.cc/image/ad-min.png" alt="UI Screenshot">
+<img src="https://foxel.cc/image/ad-min-en.png" alt="UI Screenshot">
 </div>

 ## 👀 Online Demo
@@ -39,36 +40,37 @@
 Using Docker Compose is the most recommended way to start Foxel.

-1. **Create Data Directories**:
+1. **Create Data Directories**

    Create a `data` folder for persistent data:

    ```bash
    mkdir -p data/db
    mkdir -p data/mount
    chmod 777 data/db data/mount
    ```

-2. **Download Docker Compose File**:
+2. **Download Docker Compose File**

    ```bash
    curl -L -O https://github.com/DrizzleTime/Foxel/raw/main/compose.yaml
    ```

    After downloading, it is **strongly recommended** to modify the environment variables in the `compose.yaml` file to ensure security:

    - Modify `SECRET_KEY` and `TEMP_LINK_SECRET_KEY`: Replace the default keys with randomly generated strong keys.

-3. **Start the Services**:
+3. **Start the Services**

    ```bash
    docker-compose up -d
    ```

-4. **Access the Application**:
+4. **Access the Application**

    Once the services are running, open the page in your browser.

    > On the first launch, please follow the setup guide to initialize the administrator account.

 ## 🤝 How to Contribute
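
The quick start above tells readers to replace the default keys with randomly generated strong ones. A minimal sketch of producing such values with Python's standard library; the 32-byte, base64-encoded shape simply mirrors the defaults visible in `compose.yaml`, not a format Foxel is known to require:

```python
# Print fresh values for SECRET_KEY and TEMP_LINK_SECRET_KEY.
# The 32-byte, base64-encoded shape mirrors the defaults in compose.yaml;
# any sufficiently long random string should also work.
import base64
import secrets

for name in ("SECRET_KEY", "TEMP_LINK_SECRET_KEY"):
    value = base64.b64encode(secrets.token_bytes(32)).decode("ascii")
    print(f"{name}={value}")
```

Paste the two printed lines into the `environment:` section of `compose.yaml` before starting the services.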

View File

@@ -8,17 +8,17 @@
 **一个面向个人和团队的、高度可扩展的私有云盘解决方案,支持 AI 语义搜索。**

-![Python Version](https://img.shields.io/badge/Python-3.13+-blue.svg)
+![Python Version](https://img.shields.io/badge/Python-3.14+-blue.svg)
 ![React](https://img.shields.io/badge/React-19.0-blue.svg)
 ![License](https://img.shields.io/badge/license-MIT-green.svg)
 ![GitHub stars](https://img.shields.io/github/stars/DrizzleTime/foxel?style=social)

 ---

 <blockquote>
 <em><strong>数据之洋浩瀚无涯,当以洞察之目引航,然其脉络深隐,非表象所能尽窥。</strong></em><br>
 <em><strong>The ocean of data is boundless, let the eye of insight guide the voyage, yet its intricate connections lie deep, not fully discernible from the surface.</strong></em>
 </blockquote>

-<img src="https://foxel.cc/image/ad-min.png" alt="UI Screenshot">
+<img src="https://foxel.cc/image/ad-min-zh.png" alt="UI Screenshot">
 </div>

 ## 👀 在线体验
@@ -40,36 +40,37 @@
 使用 Docker Compose 是启动 Foxel 最推荐的方式。

-1. **创建数据目录**:
+1. **创建数据目录**

    新建 `data` 文件夹用于持久化数据:

    ```bash
    mkdir -p data/db
    mkdir -p data/mount
    chmod 777 data/db data/mount
    ```

 2. **下载 Docker Compose 文件**

    ```bash
    curl -L -O https://github.com/DrizzleTime/Foxel/raw/main/compose.yaml
    ```

    下载完成后,**强烈建议**修改 `compose.yaml` 文件中的环境变量以确保安全:

    - 修改 `SECRET_KEY` 和 `TEMP_LINK_SECRET_KEY`:将默认的密钥替换为随机生成的强密钥

-3. **启动服务**:
+3. **启动服务**

    ```bash
    docker-compose up -d
    ```

-4. **访问应用**:
+4. **访问应用**

-   服务启动后,在浏览器中打开页面。
-   > 首次启动,请根据引导页面完成管理员账号的初始化设置
+   服务启动后,在浏览器中打开页面
+   > 首次启动,请根据引导页面完成管理员账号的初始化设置。

 ## 🤝 如何贡献

View File

@@ -19,8 +19,8 @@ from domain.audit import router as audit
 def include_routers(app: FastAPI):
     app.include_router(adapters.router)
+    app.include_router(virtual_fs.router)
     app.include_router(search_api.router)
-    app.include_router(virtual_fs.router)
     app.include_router(auth.router)
     app.include_router(config.router)
     app.include_router(processors.router)

compose.yaml

@@ -5,9 +5,10 @@ services:
     container_name: foxel
     restart: unless-stopped
     ports:
-      - "8088:80"
+      - "${FOXEL_HOST_PORT:-8088}:${FOXEL_PORT:-80}"
     environment:
       - TZ=Asia/Shanghai
+      - FOXEL_PORT=${FOXEL_PORT:-80}
       - SECRET_KEY=EnsRhL9NFPxgFVc+7t96/y70DIOR+9SpntcIqQa90TU=
       - TEMP_LINK_SECRET_KEY=EnsRhL9NFPxgFVc+7t96/y70DIOR+9SpntcIqQa90TU=
     volumes:

View File

@@ -12,7 +12,6 @@ TORTOISE_ORM = {
     },
 }

 async def init_db():
     await Tortoise.init(config=TORTOISE_ORM)
     await Tortoise.generate_schemas()

View File

@@ -0,0 +1,487 @@
import asyncio
import mimetypes
import re
import tempfile
from datetime import datetime, timezone
from pathlib import Path
from typing import Any, AsyncIterator, Dict, List, Tuple
from urllib.parse import quote, urljoin

import httpx
from fastapi import HTTPException
from fastapi.responses import Response, StreamingResponse

from models import StorageAdapter


def _normalize_fs_path(path: str) -> str:
    path = (path or "").replace("\\", "/").strip()
    if not path or path == "/":
        return "/"
    if not path.startswith("/"):
        path = "/" + path
    path = re.sub(r"/{2,}", "/", path)
    if path != "/" and path.endswith("/"):
        path = path.rstrip("/")
    return path or "/"


def _join_fs_path(base: str, rel: str) -> str:
    base = _normalize_fs_path(base)
    rel = (rel or "").replace("\\", "/").lstrip("/")
    if not rel:
        return base
    if base == "/":
        return "/" + rel
    return f"{base}/{rel}"


def _split_parent_and_name(path: str) -> Tuple[str, str]:
    path = _normalize_fs_path(path)
    if path == "/":
        return "/", ""
    parent, _, name = path.rpartition("/")
    if not parent:
        parent = "/"
    return parent, name


def _parse_iso_to_epoch(value: str | None) -> int:
    if not value:
        return 0
    text = str(value).strip()
    if not text:
        return 0
    try:
        if text.endswith("Z"):
            text = text[:-1] + "+00:00"
        m = re.match(r"^(.*?)(\.\d+)([+-]\d\d:\d\d)?$", text)
        if m:
            head, frac, tz = m.group(1), m.group(2), m.group(3) or ""
            digits = frac[1:]
            if len(digits) > 6:
                frac = "." + digits[:6]
            text = head + frac + tz
        dt = datetime.fromisoformat(text)
        if dt.tzinfo is None:
            dt = dt.replace(tzinfo=timezone.utc)
        return int(dt.timestamp())
    except Exception:
        return 0


class AListApiAdapterBase:
    def __init__(self, record: StorageAdapter, *, product_name: str):
        self.record = record
        self.product_name = product_name
        cfg = record.config or {}
        self.base_url: str = str(cfg.get("base_url", "")).rstrip("/")
        if not self.base_url.startswith("http"):
            raise ValueError(f"{product_name} requires base_url http/https")
        self.username: str = str(cfg.get("username") or "")
        self.password: str = str(cfg.get("password") or "")
        if not self.username or not self.password:
            raise ValueError(f"{product_name} requires username and password")
        self.timeout: float = float(cfg.get("timeout", 30))
        self.root_path: str = _normalize_fs_path(str(cfg.get("root") or "/"))
        self.enable_redirect_307: bool = bool(cfg.get("enable_direct_download_307"))
        self._token: str | None = None
        self._login_lock = asyncio.Lock()

    def get_effective_root(self, sub_path: str | None) -> str:
        base = _normalize_fs_path(self.root_path)
        if sub_path:
            return _join_fs_path(base, sub_path)
        return base

    async def _ensure_token(self) -> str:
        if self._token:
            return self._token
        async with self._login_lock:
            if self._token:
                return self._token
            self._token = await self._login()
            return self._token

    async def _login(self) -> str:
        url = self.base_url + "/api/auth/login"
        body = {"username": self.username, "password": self.password}
        async with httpx.AsyncClient(timeout=self.timeout, follow_redirects=True) as client:
            resp = await client.post(url, json=body)
            resp.raise_for_status()
            payload = resp.json()
        if not isinstance(payload, dict):
            raise HTTPException(502, detail=f"{self.product_name} login: invalid response")
        code = payload.get("code")
        if code not in (0, 200):
            raise HTTPException(502, detail=f"{self.product_name} login failed: {payload.get('message')}")
        data = payload.get("data") or {}
        token = (data.get("token") if isinstance(data, dict) else None) or ""
        token = str(token).strip()
        if not token:
            raise HTTPException(502, detail=f"{self.product_name} login: missing token")
        return token

    async def _api_json(
        self,
        method: str,
        endpoint: str,
        *,
        json: Dict[str, Any] | None = None,
        headers: Dict[str, str] | None = None,
        retry: bool = True,
        files: Any = None,
    ) -> Any:
        token = await self._ensure_token()
        url = self.base_url + endpoint
        req_headers: Dict[str, str] = {"Authorization": token}
        if headers:
            req_headers.update(headers)
        async with httpx.AsyncClient(timeout=self.timeout, follow_redirects=True) as client:
            resp = await client.request(method, url, json=json, headers=req_headers, files=files)
            if resp.status_code == 401 and retry:
                self._token = None
                return await self._api_json(method, endpoint, json=json, headers=headers, retry=False, files=files)
            resp.raise_for_status()
            payload = resp.json()
        if not isinstance(payload, dict):
            raise HTTPException(502, detail=f"{self.product_name} api: invalid response")
        code = payload.get("code")
        if code in (0, 200):
            return payload.get("data")
        if code in (401, 403) and retry:
            self._token = None
            return await self._api_json(method, endpoint, json=json, headers=headers, retry=False, files=files)
        if code == 404:
            raise FileNotFoundError(json.get("path") if json else "")
        msg = payload.get("message") or payload.get("msg") or ""
        raise HTTPException(502, detail=f"{self.product_name} api error code={code} msg={msg}")

    def _abs_url(self, url: str) -> str:
        u = (url or "").strip()
        if not u:
            return ""
        if u.startswith("http://") or u.startswith("https://"):
            return u
        return urljoin(self.base_url.rstrip("/") + "/", u.lstrip("/"))

    async def _fs_list(self, path: str) -> Dict[str, Any]:
        body = {"path": path, "password": "", "page": 1, "per_page": 0, "refresh": False}
        data = await self._api_json("POST", "/api/fs/list", json=body)
        return data or {}

    async def _fs_get(self, path: str) -> Dict[str, Any]:
        body = {"path": path, "password": "", "page": 1, "per_page": 0, "refresh": False}
        data = await self._api_json("POST", "/api/fs/get", json=body)
        return data or {}

    async def list_dir(
        self,
        root: str,
        rel: str,
        page_num: int = 1,
        page_size: int = 50,
        sort_by: str = "name",
        sort_order: str = "asc",
    ) -> Tuple[List[Dict], int]:
        path = _join_fs_path(root, rel)
        data = await self._fs_list(path)
        content = data.get("content") or []
        if not isinstance(content, list):
            raise HTTPException(502, detail=f"{self.product_name} list_dir: invalid content")
        entries: List[Dict] = []
        for it in content:
            if not isinstance(it, dict):
                continue
            name = str(it.get("name") or "")
            if not name:
                continue
            is_dir = bool(it.get("is_dir"))
            size = int(it.get("size") or 0) if not is_dir else 0
            mtime = _parse_iso_to_epoch(it.get("modified"))
            entries.append(
                {
                    "name": name,
                    "is_dir": is_dir,
                    "size": size,
                    "mtime": mtime,
                    "type": "dir" if is_dir else "file",
                }
            )
        reverse = sort_order.lower() == "desc"

        def get_sort_key(item: Dict) -> Tuple:
            key = (not item.get("is_dir"),)
            f = sort_by.lower()
            if f == "name":
                key += (str(item.get("name", "")).lower(),)
            elif f == "size":
                key += (int(item.get("size", 0)),)
            elif f == "mtime":
                key += (int(item.get("mtime", 0)),)
            else:
                key += (str(item.get("name", "")).lower(),)
            return key

        entries.sort(key=get_sort_key, reverse=reverse)
        total = len(entries)
        start = (page_num - 1) * page_size
        end = start + page_size
        return entries[start:end], total

    async def stat_file(self, root: str, rel: str):
        path = _join_fs_path(root, rel)
        data = await self._fs_get(path)
        if not data:
            raise FileNotFoundError(rel)
        is_dir = bool(data.get("is_dir"))
        name = str(data.get("name") or (rel.rstrip("/").split("/")[-1] if rel else ""))
        size = int(data.get("size") or 0) if not is_dir else 0
        mtime = _parse_iso_to_epoch(data.get("modified"))
        info = {
            "name": name,
            "is_dir": is_dir,
            "size": size,
            "mtime": mtime,
            "type": "dir" if is_dir else "file",
            "path": path,
        }
        return info

    async def stat_path(self, root: str, rel: str):
        try:
            info = await self.stat_file(root, rel)
            return {"exists": True, "is_dir": bool(info.get("is_dir")), "path": info.get("path")}
        except FileNotFoundError:
            return {"exists": False, "is_dir": None, "path": _join_fs_path(root, rel)}

    async def exists(self, root: str, rel: str) -> bool:
        try:
            await self.stat_file(root, rel)
            return True
        except FileNotFoundError:
            return False
        except Exception:
            return False

    async def get_direct_download_response(self, root: str, rel: str):
        if not self.enable_redirect_307:
            return None
        data = await self._fs_get(_join_fs_path(root, rel))
        if not data:
            raise FileNotFoundError(rel)
        if bool(data.get("is_dir")):
            raise IsADirectoryError(rel)
        raw_url = self._abs_url(str(data.get("raw_url") or ""))
        if not raw_url:
            return None
        return Response(status_code=307, headers={"Location": raw_url})

    async def _get_raw_url_and_meta(self, root: str, rel: str) -> Tuple[str, int, str]:
        data = await self._fs_get(_join_fs_path(root, rel))
        if not data:
            raise FileNotFoundError(rel)
        if bool(data.get("is_dir")):
            raise IsADirectoryError(rel)
        raw_url = self._abs_url(str(data.get("raw_url") or ""))
        if not raw_url:
            raise HTTPException(502, detail=f"{self.product_name} missing raw_url")
        size = int(data.get("size") or 0)
        name = str(data.get("name") or "")
        return raw_url, size, name

    async def read_file(self, root: str, rel: str) -> bytes:
        raw_url, _, _ = await self._get_raw_url_and_meta(root, rel)
        async with httpx.AsyncClient(timeout=self.timeout, follow_redirects=True) as client:
            resp = await client.get(raw_url)
            resp.raise_for_status()
            return resp.content

    async def stream_file(self, root: str, rel: str, range_header: str | None):
        raw_url, file_size, name = await self._get_raw_url_and_meta(root, rel)
        mime, _ = mimetypes.guess_type(name or rel)
        content_type = mime or "application/octet-stream"
        start = 0
        end = max(file_size - 1, 0)
        status = 200
        headers = {
            "Accept-Ranges": "bytes",
            "Content-Type": content_type,
        }
        if file_size >= 0:
            headers["Content-Length"] = str(file_size)
        if range_header and range_header.startswith("bytes="):
            try:
                part = range_header.removeprefix("bytes=")
                s, e = part.split("-", 1)
                if s.strip():
                    start = int(s)
                if e.strip():
                    end = int(e)
                if file_size and start >= file_size:
                    raise HTTPException(416, detail="Requested Range Not Satisfiable")
                if file_size and end >= file_size:
                    end = file_size - 1
                status = 206
            except ValueError:
                raise HTTPException(400, detail="Invalid Range header")
            headers["Content-Range"] = f"bytes {start}-{end}/{file_size}"
            headers["Content-Length"] = str(end - start + 1)

        async def agen():
            async with httpx.AsyncClient(timeout=self.timeout, follow_redirects=True) as client:
                req_headers = {"Range": f"bytes={start}-{end}"} if status == 206 else {}
                async with client.stream("GET", raw_url, headers=req_headers) as resp:
                    resp.raise_for_status()
                    async for chunk in resp.aiter_bytes():
                        if chunk:
                            yield chunk

        return StreamingResponse(agen(), status_code=status, headers=headers, media_type=content_type)

    async def _upload_file(self, full_path: str, file_path: Path) -> Any:
        token = await self._ensure_token()
        headers = {
            "Authorization": token,
            "File-Path": quote(full_path, safe="/"),
        }
        with file_path.open("rb") as f:
            files = {"file": (file_path.name, f, "application/octet-stream")}
            async with httpx.AsyncClient(timeout=self.timeout, follow_redirects=True) as client:
                resp = await client.put(self.base_url + "/api/fs/form", headers=headers, files=files)
        resp.raise_for_status()
        payload = resp.json()
        if not isinstance(payload, dict):
            raise HTTPException(502, detail=f"{self.product_name} upload: invalid response")
        code = payload.get("code")
        if code not in (0, 200):
            msg = payload.get("message") or payload.get("msg") or ""
            raise HTTPException(502, detail=f"{self.product_name} upload failed: {msg}")
        return payload.get("data")

    async def write_file(self, root: str, rel: str, data: bytes):
        full_path = _join_fs_path(root, rel)
        suffix = Path(rel).suffix
        with tempfile.NamedTemporaryFile(delete=False, suffix=suffix) as tf:
            tf.write(data)
            tmp_path = Path(tf.name)
        try:
            await self._upload_file(full_path, tmp_path)
        finally:
            try:
                tmp_path.unlink(missing_ok=True)
            except Exception:
                pass

    async def write_file_stream(self, root: str, rel: str, data_iter: AsyncIterator[bytes]):
        full_path = _join_fs_path(root, rel)
        suffix = Path(rel).suffix
        with tempfile.NamedTemporaryFile(delete=False, suffix=suffix) as tf:
            tmp_path = Path(tf.name)
        size = 0
        try:
            with tmp_path.open("wb") as f:
                async for chunk in data_iter:
                    if not chunk:
                        continue
                    f.write(chunk)
                    size += len(chunk)
            await self._upload_file(full_path, tmp_path)
            return size
        finally:
            try:
                tmp_path.unlink(missing_ok=True)
            except Exception:
                pass

    async def mkdir(self, root: str, rel: str):
        path = _join_fs_path(root, rel)
        await self._api_json("POST", "/api/fs/mkdir", json={"path": path})

    async def delete(self, root: str, rel: str):
        path = _join_fs_path(root, rel)
        parent, name = _split_parent_and_name(path)
        if not name:
            return
        await self._api_json("POST", "/api/fs/remove", json={"dir": parent, "names": [name]})

    async def move(self, root: str, src_rel: str, dst_rel: str):
        src_path = _join_fs_path(root, src_rel)
        dst_path = _join_fs_path(root, dst_rel)
        src_dir, src_name = _split_parent_and_name(src_path)
        dst_dir, dst_name = _split_parent_and_name(dst_path)
        if not src_name or not dst_name:
            raise HTTPException(400, detail="Invalid move path")
        if src_dir == dst_dir:
            if src_name == dst_name:
                return
            await self._api_json("POST", "/api/fs/rename", json={"path": src_path, "name": dst_name})
            return
        await self._api_json("POST", "/api/fs/move", json={"src_dir": src_dir, "dst_dir": dst_dir, "names": [src_name]})
        if src_name != dst_name:
            await self._api_json("POST", "/api/fs/rename", json={"path": _join_fs_path(dst_dir, src_name), "name": dst_name})

    async def rename(self, root: str, src_rel: str, dst_rel: str):
        await self.move(root, src_rel, dst_rel)

    async def copy(self, root: str, src_rel: str, dst_rel: str, overwrite: bool = False):
        src_path = _join_fs_path(root, src_rel)
        dst_path = _join_fs_path(root, dst_rel)
        src_dir, src_name = _split_parent_and_name(src_path)
        dst_dir, dst_name = _split_parent_and_name(dst_path)
        if not src_name or not dst_name:
            raise HTTPException(400, detail="Invalid copy path")
        src_info = await self._fs_get(src_path)
        if not src_info:
            raise FileNotFoundError(src_rel)
        if src_name != dst_name and not bool(src_info.get("is_dir")):
            raw_url, _, _ = await self._get_raw_url_and_meta(root, src_rel)
            async with httpx.AsyncClient(timeout=self.timeout, follow_redirects=True) as client:
                async with client.stream("GET", raw_url) as resp:
                    resp.raise_for_status()

                    async def gen():
                        async for chunk in resp.aiter_bytes():
                            if chunk:
                                yield chunk

                    await self.write_file_stream(root, dst_rel, gen())
            return
        await self._api_json("POST", "/api/fs/copy", json={"src_dir": src_dir, "dst_dir": dst_dir, "names": [src_name]})
        if src_name != dst_name:
            await self._api_json("POST", "/api/fs/rename", json={"path": _join_fs_path(dst_dir, src_name), "name": dst_name})


class AListAdapter(AListApiAdapterBase):
    def __init__(self, record: StorageAdapter):
        super().__init__(record, product_name="AList")


class OpenListAdapter(AListApiAdapterBase):
    def __init__(self, record: StorageAdapter):
        super().__init__(record, product_name="OpenList")


ADAPTER_TYPES = {"alist": AListAdapter, "openlist": OpenListAdapter}

CONFIG_SCHEMA = [
    {"key": "base_url", "label": "基础地址", "type": "string", "required": True, "placeholder": "http://127.0.0.1:5244"},
    {"key": "username", "label": "用户名", "type": "string", "required": True},
    {"key": "password", "label": "密码", "type": "password", "required": True},
    {"key": "root", "label": "根目录", "type": "string", "required": False, "default": "/"},
    {"key": "timeout", "label": "超时(秒)", "type": "number", "required": False, "default": 30},
    {"key": "enable_direct_download_307", "label": "启用 307 直链下载", "type": "boolean", "default": False},
]
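
For orientation, a hedged sketch of exercising the adapter above outside the app. `StorageAdapter` is the ORM model the file imports; since the constructor only reads `record.config`, a `SimpleNamespace` stand-in is used here, and the AList URL and credentials are placeholders:

```python
# Sketch only: drive AListAdapter directly against a test AList instance.
import asyncio
from types import SimpleNamespace


async def demo():
    # Stand-in for a StorageAdapter row; __init__ only reads .config.
    record = SimpleNamespace(config={
        "base_url": "http://127.0.0.1:5244",  # placeholder AList/OpenList URL
        "username": "admin",                  # placeholder credentials
        "password": "secret",
        "root": "/",
    })
    adapter = AListAdapter(record)
    root = adapter.get_effective_root(None)
    entries, total = await adapter.list_dir(root, "", page_num=1, page_size=20)
    for entry in entries:
        print(entry["type"], entry["name"], entry["size"])
    print("total entries:", total)


asyncio.run(demo())
```

The same sketch works for `OpenListAdapter`, which differs only in the `product_name` used in error messages.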

View File

@@ -1,4 +1,3 @@
-from __future__ import annotations
 from typing import List, Dict, Protocol, runtime_checkable, Tuple, AsyncIterator

 from models import StorageAdapter

View File

@@ -0,0 +1,471 @@
import asyncio
import base64
import json
import mimetypes
import re
from datetime import datetime, timezone, timedelta
from typing import AsyncIterator, Dict, List, Tuple
import httpx
from fastapi import HTTPException
from fastapi.responses import Response, StreamingResponse
from models import StorageAdapter
DROPBOX_OAUTH_URL = "https://api.dropboxapi.com/oauth2/token"
DROPBOX_API_URL = "https://api.dropboxapi.com/2"
DROPBOX_CONTENT_URL = "https://content.dropboxapi.com/2"
def _normalize_dbx_path(path: str | None) -> str:
path = (path or "").replace("\\", "/").strip()
if not path or path == "/":
return ""
if not path.startswith("/"):
path = "/" + path
path = re.sub(r"/{2,}", "/", path)
if path.endswith("/"):
path = path.rstrip("/")
return path
def _join_dbx_path(base: str, rel: str) -> str:
base = _normalize_dbx_path(base)
rel = (rel or "").replace("\\", "/").strip("/")
if not rel:
return base
if not base:
return "/" + rel
return f"{base}/{rel}"
def _parse_iso_to_epoch(value: str | None) -> int:
if not value:
return 0
text = str(value).strip()
if not text:
return 0
try:
if text.endswith("Z"):
text = text[:-1] + "+00:00"
dt = datetime.fromisoformat(text)
if dt.tzinfo is None:
dt = dt.replace(tzinfo=timezone.utc)
return int(dt.timestamp())
except Exception:
return 0
class DropboxAdapter:
def __init__(self, record: StorageAdapter):
self.record = record
cfg = record.config or {}
self.app_key: str = str(cfg.get("app_key") or "").strip()
self.app_secret: str = str(cfg.get("app_secret") or "").strip()
self.refresh_token: str = str(cfg.get("refresh_token") or "").strip()
self.root_path: str = _normalize_dbx_path(str(cfg.get("root") or "/"))
self.enable_redirect_307: bool = bool(cfg.get("enable_direct_download_307"))
self.timeout: float = float(cfg.get("timeout", 60))
if not (self.app_key and self.app_secret and self.refresh_token):
raise ValueError("Dropbox 适配器需要 app_key, app_secret, refresh_token")
self._access_token: str | None = None
self._token_expiry: datetime | None = None
self._token_lock = asyncio.Lock()
def get_effective_root(self, sub_path: str | None) -> str:
base = _normalize_dbx_path(self.root_path)
if sub_path:
return _join_dbx_path(base, sub_path)
return base
async def _get_access_token(self) -> str:
if self._access_token and self._token_expiry and datetime.now(timezone.utc) < self._token_expiry:
return self._access_token
async with self._token_lock:
if self._access_token and self._token_expiry and datetime.now(timezone.utc) < self._token_expiry:
return self._access_token
basic = base64.b64encode(f"{self.app_key}:{self.app_secret}".encode("utf-8")).decode("ascii")
headers = {"Authorization": f"Basic {basic}"}
data = {"grant_type": "refresh_token", "refresh_token": self.refresh_token}
async with httpx.AsyncClient(timeout=self.timeout) as client:
resp = await client.post(DROPBOX_OAUTH_URL, data=data, headers=headers)
resp.raise_for_status()
payload = resp.json()
token = str(payload.get("access_token") or "").strip()
if not token:
raise HTTPException(502, detail="Dropbox oauth: missing access_token")
expires_in = int(payload.get("expires_in") or 3600)
self._access_token = token
self._token_expiry = datetime.now(timezone.utc) + timedelta(seconds=max(60, expires_in - 300))
return token
async def _api_json(self, endpoint: str, body: Dict) -> httpx.Response:
token = await self._get_access_token()
headers = {"Authorization": f"Bearer {token}"}
async with httpx.AsyncClient(timeout=self.timeout) as client:
return await client.post(f"{DROPBOX_API_URL}{endpoint}", json=body, headers=headers)
async def _content_request(
self,
endpoint: str,
api_arg: Dict,
*,
content: bytes | None = None,
data_iter: AsyncIterator[bytes] | None = None,
extra_headers: Dict[str, str] | None = None,
) -> httpx.Response:
token = await self._get_access_token()
headers = {
"Authorization": f"Bearer {token}",
"Dropbox-API-Arg": json.dumps(api_arg, separators=(",", ":"), ensure_ascii=False),
}
if extra_headers:
headers.update(extra_headers)
if data_iter is None:
async with httpx.AsyncClient(timeout=self.timeout) as client:
return await client.post(f"{DROPBOX_CONTENT_URL}{endpoint}", headers=headers, content=content or b"")
async with httpx.AsyncClient(timeout=self.timeout) as client:
return await client.post(f"{DROPBOX_CONTENT_URL}{endpoint}", headers=headers, content=data_iter)
@staticmethod
def _raise_dbx_error(resp: httpx.Response, *, rel: str):
try:
payload = resp.json()
except Exception:
payload = None
summary = ""
if isinstance(payload, dict):
summary = str(payload.get("error_summary") or "")
if "not_found" in summary:
raise FileNotFoundError(rel)
if "conflict" in summary or "already_exists" in summary:
raise FileExistsError(rel)
if "is_folder" in summary:
raise IsADirectoryError(rel)
if "not_folder" in summary:
raise NotADirectoryError(rel)
raise HTTPException(502, detail=f"Dropbox API error: {summary or resp.text}")
def _format_entry(self, entry: Dict) -> Dict:
tag = entry.get(".tag")
is_dir = tag == "folder"
mtime = _parse_iso_to_epoch(entry.get("server_modified") if not is_dir else None)
return {
"name": entry.get("name") or "",
"is_dir": is_dir,
"size": 0 if is_dir else int(entry.get("size") or 0),
"mtime": mtime,
"type": "dir" if is_dir else "file",
}
async def list_dir(
self,
root: str,
rel: str,
page_num: int = 1,
page_size: int = 50,
sort_by: str = "name",
sort_order: str = "asc",
) -> Tuple[List[Dict], int]:
path = _join_dbx_path(root, rel)
body = {"path": path, "recursive": False, "include_deleted": False, "limit": 2000}
resp = await self._api_json("/files/list_folder", body)
if resp.status_code == 409:
try:
payload = resp.json()
except Exception:
payload = None
summary = str((payload or {}).get("error_summary") or "")
if "not_found" in summary:
return [], 0
self._raise_dbx_error(resp, rel=rel)
resp.raise_for_status()
payload = resp.json()
all_entries: List[Dict] = []
all_entries.extend(payload.get("entries") or [])
cursor = payload.get("cursor")
has_more = bool(payload.get("has_more"))
while has_more and cursor:
resp2 = await self._api_json("/files/list_folder/continue", {"cursor": cursor})
resp2.raise_for_status()
p2 = resp2.json()
all_entries.extend(p2.get("entries") or [])
cursor = p2.get("cursor")
has_more = bool(p2.get("has_more"))
items = [self._format_entry(e) for e in all_entries if isinstance(e, dict)]
reverse = sort_order.lower() == "desc"
def get_sort_key(item):
key = (not item["is_dir"],)
f = sort_by.lower()
if f == "name":
key += (item["name"].lower(),)
elif f == "size":
key += (item["size"],)
elif f == "mtime":
key += (item["mtime"],)
else:
key += (item["name"].lower(),)
return key
items.sort(key=get_sort_key, reverse=reverse)
total = len(items)
start = (page_num - 1) * page_size
end = start + page_size
return items[start:end], total
async def stat_file(self, root: str, rel: str):
path = _join_dbx_path(root, rel)
resp = await self._api_json("/files/get_metadata", {"path": path, "include_deleted": False})
if resp.status_code == 409:
self._raise_dbx_error(resp, rel=rel)
resp.raise_for_status()
meta = resp.json()
if not isinstance(meta, dict):
raise HTTPException(502, detail="Dropbox metadata: invalid response")
return self._format_entry(meta)
async def exists(self, root: str, rel: str) -> bool:
try:
await self.stat_file(root, rel)
return True
except FileNotFoundError:
return False
except Exception:
return False
async def read_file(self, root: str, rel: str) -> bytes:
path = _join_dbx_path(root, rel)
resp = await self._content_request("/files/download", {"path": path})
if resp.status_code == 409:
self._raise_dbx_error(resp, rel=rel)
resp.raise_for_status()
return resp.content
async def write_file(self, root: str, rel: str, data: bytes):
path = _join_dbx_path(root, rel)
arg = {
"path": path,
"mode": "overwrite",
"autorename": False,
"mute": False,
"strict_conflict": False,
}
resp = await self._content_request(
"/files/upload",
arg,
content=data,
extra_headers={"Content-Type": "application/octet-stream"},
)
if resp.status_code == 409:
self._raise_dbx_error(resp, rel=rel)
resp.raise_for_status()
return True
async def write_file_stream(self, root: str, rel: str, data_iter: AsyncIterator[bytes]):
path = _join_dbx_path(root, rel)
size = 0
session_id: str | None = None
offset = 0
async for chunk in data_iter:
if not chunk:
continue
if session_id is None:
resp = await self._content_request(
"/files/upload_session_start",
{"close": False},
content=chunk,
extra_headers={"Content-Type": "application/octet-stream"},
)
resp.raise_for_status()
payload = resp.json()
session_id = str(payload.get("session_id") or "")
if not session_id:
raise HTTPException(502, detail="Dropbox upload_session_start: missing session_id")
offset += len(chunk)
size += len(chunk)
continue
arg = {"cursor": {"session_id": session_id, "offset": offset}, "close": False}
resp = await self._content_request(
"/files/upload_session_append_v2",
arg,
content=chunk,
extra_headers={"Content-Type": "application/octet-stream"},
)
resp.raise_for_status()
offset += len(chunk)
size += len(chunk)
if session_id is None:
await self.write_file(root, rel, b"")
return 0
finish_arg = {
"cursor": {"session_id": session_id, "offset": offset},
"commit": {
"path": path,
"mode": "overwrite",
"autorename": False,
"mute": False,
"strict_conflict": False,
},
}
resp = await self._content_request(
"/files/upload_session_finish",
finish_arg,
content=b"",
extra_headers={"Content-Type": "application/octet-stream"},
)
if resp.status_code == 409:
self._raise_dbx_error(resp, rel=rel)
resp.raise_for_status()
return size
async def mkdir(self, root: str, rel: str):
path = _join_dbx_path(root, rel)
resp = await self._api_json("/files/create_folder_v2", {"path": path, "autorename": False})
if resp.status_code == 409:
self._raise_dbx_error(resp, rel=rel)
resp.raise_for_status()
return True
async def delete(self, root: str, rel: str):
path = _join_dbx_path(root, rel)
resp = await self._api_json("/files/delete_v2", {"path": path})
if resp.status_code == 409:
try:
payload = resp.json()
except Exception:
payload = None
summary = str((payload or {}).get("error_summary") or "")
if "not_found" in summary:
return
self._raise_dbx_error(resp, rel=rel)
resp.raise_for_status()
return True
async def move(self, root: str, src_rel: str, dst_rel: str):
src = _join_dbx_path(root, src_rel)
dst = _join_dbx_path(root, dst_rel)
resp = await self._api_json(
"/files/move_v2",
{"from_path": src, "to_path": dst, "autorename": False, "allow_shared_folder": True},
)
if resp.status_code == 409:
self._raise_dbx_error(resp, rel=src_rel)
resp.raise_for_status()
return True
async def rename(self, root: str, src_rel: str, dst_rel: str):
return await self.move(root, src_rel, dst_rel)
async def copy(self, root: str, src_rel: str, dst_rel: str, overwrite: bool = False):
src = _join_dbx_path(root, src_rel)
dst = _join_dbx_path(root, dst_rel)
resp = await self._api_json(
"/files/copy_v2",
{"from_path": src, "to_path": dst, "autorename": False, "allow_shared_folder": True},
)
if resp.status_code == 409:
self._raise_dbx_error(resp, rel=dst_rel)
resp.raise_for_status()
return True
async def get_direct_download_response(self, root: str, rel: str):
if not self.enable_redirect_307:
return None
path = _join_dbx_path(root, rel)
resp = await self._api_json("/files/get_temporary_link", {"path": path})
if resp.status_code == 409:
self._raise_dbx_error(resp, rel=rel)
resp.raise_for_status()
payload = resp.json()
link = (payload.get("link") if isinstance(payload, dict) else None) or ""
link = str(link).strip()
if not link:
return None
return Response(status_code=307, headers={"Location": link})
async def stream_file(self, root: str, rel: str, range_header: str | None):
path = _join_dbx_path(root, rel)
token = await self._get_access_token()
headers = {
"Authorization": f"Bearer {token}",
"Dropbox-API-Arg": json.dumps({"path": path}, separators=(",", ":"), ensure_ascii=False),
}
if range_header:
headers["Range"] = range_header
client = httpx.AsyncClient(timeout=None)
stream_cm = client.stream("POST", f"{DROPBOX_CONTENT_URL}/files/download", headers=headers)
try:
resp = await stream_cm.__aenter__()
except Exception:
await client.aclose()
raise
if resp.status_code == 409:
try:
content = await resp.aread()
_ = content
finally:
await stream_cm.__aexit__(None, None, None)
await client.aclose()
self._raise_dbx_error(resp, rel=rel)
if resp.status_code >= 400:
try:
await resp.aread()
finally:
await stream_cm.__aexit__(None, None, None)
await client.aclose()
resp.raise_for_status()
content_type = resp.headers.get("Content-Type") or (mimetypes.guess_type(rel)[0] or "application/octet-stream")
out_headers = {}
for key in ("Accept-Ranges", "Content-Range", "Content-Length"):
value = resp.headers.get(key)
if value:
out_headers[key] = value
async def iterator():
try:
async for chunk in resp.aiter_bytes():
if chunk:
yield chunk
finally:
await stream_cm.__aexit__(None, None, None)
await client.aclose()
return StreamingResponse(iterator(), status_code=resp.status_code, headers=out_headers, media_type=content_type)
ADAPTER_TYPE = "dropbox"
CONFIG_SCHEMA = [
{"key": "app_key", "label": "App Key", "type": "string", "required": True},
{"key": "app_secret", "label": "App Secret", "type": "password", "required": True},
{"key": "refresh_token", "label": "Refresh Token", "type": "password", "required": True},
{"key": "root", "label": "Root Path", "type": "string", "required": False, "default": "/", "placeholder": "/ or /Apps/Foxel"},
{"key": "timeout", "label": "超时(秒)", "type": "number", "required": False, "default": 60},
{"key": "enable_direct_download_307", "label": "Enable 307 redirect download", "type": "boolean", "default": False},
]
def ADAPTER_FACTORY(rec): return DropboxAdapter(rec)
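For reference, a minimal sketch of exercising this module-level contract directly. The record class below is a stand-in for the real StorageAdapter model (whose constructor is not shown in this diff); the config keys come from CONFIG_SCHEMA above, and all values are placeholders:

# Illustrative only: a bare record mimicking StorageAdapter.config.
class _FakeRecord:
    config = {
        "app_key": "<app-key>",
        "app_secret": "<app-secret>",
        "refresh_token": "<refresh-token>",
        "root": "/Apps/Foxel",
        "timeout": 60,
        "enable_direct_download_307": True,
    }

adapter = ADAPTER_FACTORY(_FakeRecord())  # equivalent to DropboxAdapter(_FakeRecord())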

View File

@@ -0,0 +1,411 @@
import asyncio
import mimetypes
import re
import tempfile
from pathlib import Path
from typing import Any, AsyncIterator, Dict, List, Tuple
from urllib.parse import quote
import httpx
from fastapi import HTTPException
from fastapi.responses import StreamingResponse
from models import StorageAdapter
def _normalize_fs_path(path: str) -> str:
path = (path or "").replace("\\", "/").strip()
if not path or path == "/":
return "/"
if not path.startswith("/"):
path = "/" + path
path = re.sub(r"/{2,}", "/", path)
if path != "/" and path.endswith("/"):
path = path.rstrip("/")
return path or "/"
def _join_fs_path(base: str, rel: str | None) -> str:
base = _normalize_fs_path(base)
rel_norm = (rel or "").replace("\\", "/").strip().lstrip("/")
if not rel_norm:
return base
if base == "/":
return "/" + rel_norm
return f"{base}/{rel_norm}"
def _unwrap_success(payload: Any, *, context: str) -> Any:
if not isinstance(payload, dict):
return payload
if "data" not in payload:
return payload
code = payload.get("code")
if code not in (None, 0, 200):
msg = payload.get("msg") or payload.get("message") or ""
raise HTTPException(502, detail=f"Foxel 上游错误({context}): {msg}")
return payload.get("data")
class FoxelAdapter:
def __init__(self, record: StorageAdapter):
self.record = record
cfg = record.config or {}
self.base_url: str = str(cfg.get("base_url", "")).rstrip("/")
if not self.base_url.startswith("http"):
raise ValueError("foxel requires base_url http/https")
self.username: str = str(cfg.get("username") or "")
self.password: str = str(cfg.get("password") or "")
if not self.username or not self.password:
raise ValueError("foxel requires username and password")
self.timeout: float = float(cfg.get("timeout", 15))
self.root_path: str = _normalize_fs_path(str(cfg.get("root") or "/"))
self._token: str | None = None
self._login_lock = asyncio.Lock()
def get_effective_root(self, sub_path: str | None) -> str:
base = _normalize_fs_path(self.root_path)
if sub_path:
return _join_fs_path(base, sub_path)
return base
async def _login(self) -> str:
url = self.base_url + "/api/auth/login"
body = {"username": self.username, "password": self.password}
async with httpx.AsyncClient(timeout=self.timeout, follow_redirects=True) as client:
resp = await client.post(url, data=body)
resp.raise_for_status()
payload = resp.json()
if not isinstance(payload, dict):
raise HTTPException(502, detail="Foxel 登录响应异常")
token = payload.get("access_token")
if not token:
raise HTTPException(502, detail="Foxel 登录失败: 缺少 access_token")
return str(token)
async def _ensure_token(self) -> str:
if self._token:
return self._token
async with self._login_lock:
if self._token:
return self._token
self._token = await self._login()
return self._token
async def _request_json(self, method: str, path: str, *, params: dict | None = None, json: Any = None) -> Any:
url = self.base_url + path
for attempt in range(2):
token = await self._ensure_token()
headers = {"Authorization": f"Bearer {token}"}
async with httpx.AsyncClient(timeout=self.timeout, follow_redirects=True) as client:
resp = await client.request(method, url, headers=headers, params=params, json=json)
if resp.status_code == 401 and attempt == 0:
self._token = None
continue
resp.raise_for_status()
return resp.json()
raise HTTPException(502, detail="Foxel 上游请求失败")
@staticmethod
def _encode_path(full_path: str) -> str:
return quote(full_path.lstrip("/"), safe="/")
def _browse_path(self, full_path: str) -> str:
full_path = _normalize_fs_path(full_path)
if full_path == "/":
return "/api/fs/"
return "/api/fs/" + self._encode_path(full_path)
def _stat_path(self, full_path: str) -> str:
full_path = _normalize_fs_path(full_path)
if full_path == "/":
return "/api/fs/stat/"
return "/api/fs/stat/" + self._encode_path(full_path)
def _file_path(self, full_path: str) -> str:
full_path = _normalize_fs_path(full_path)
if full_path == "/":
return "/api/fs/file/"
return "/api/fs/file/" + self._encode_path(full_path)
def _stream_path(self, full_path: str) -> str:
full_path = _normalize_fs_path(full_path)
if full_path == "/":
return "/api/fs/stream/"
return "/api/fs/stream/" + self._encode_path(full_path)
async def list_dir(
self,
root: str,
rel: str,
page_num: int = 1,
page_size: int = 50,
sort_by: str = "name",
sort_order: str = "asc",
) -> Tuple[List[Dict], int]:
rel = (rel or "").strip("/")
full_path = _join_fs_path(root, rel)
payload = await self._request_json(
"GET",
self._browse_path(full_path),
params={
"page": page_num,
"page_size": page_size,
"sort_by": sort_by,
"sort_order": sort_order,
},
)
data = _unwrap_success(payload, context="list_dir")
if not isinstance(data, dict):
raise HTTPException(502, detail="Foxel 浏览响应异常")
entries = data.get("entries") or []
pagination = data.get("pagination") or {}
total = pagination.get("total")
try:
total_int = int(total) if total is not None else len(entries)
except Exception:
total_int = len(entries)
if not isinstance(entries, list):
entries = []
return entries, total_int
async def stat_file(self, root: str, rel: str):
rel = (rel or "").strip("/")
full_path = _join_fs_path(root, rel)
payload = await self._request_json("GET", self._stat_path(full_path))
data = _unwrap_success(payload, context="stat_file")
if not isinstance(data, dict):
raise HTTPException(502, detail="Foxel stat 响应异常")
return data
async def exists(self, root: str, rel: str) -> bool:
rel = (rel or "").strip("/")
full_path = _join_fs_path(root, rel)
url = self.base_url + self._stat_path(full_path)
for attempt in range(2):
token = await self._ensure_token()
headers = {"Authorization": f"Bearer {token}"}
async with httpx.AsyncClient(timeout=self.timeout, follow_redirects=True) as client:
resp = await client.get(url, headers=headers)
if resp.status_code == 401 and attempt == 0:
self._token = None
continue
return resp.status_code == 200
return False
async def read_file(self, root: str, rel: str) -> bytes:
rel = (rel or "").lstrip("/")
full_path = _join_fs_path(root, rel)
url = self.base_url + self._file_path(full_path)
for attempt in range(2):
token = await self._ensure_token()
headers = {"Authorization": f"Bearer {token}"}
async with httpx.AsyncClient(timeout=self.timeout, follow_redirects=True) as client:
resp = await client.get(url, headers=headers)
if resp.status_code == 401 and attempt == 0:
self._token = None
continue
if resp.status_code == 404:
raise FileNotFoundError(rel)
resp.raise_for_status()
return resp.content
raise HTTPException(502, detail="Foxel 读取失败")
async def _upload_file_path(self, full_path: str, file_path: Path) -> None:
url = self.base_url + self._file_path(full_path)
filename = Path(full_path).name or file_path.name
for attempt in range(2):
token = await self._ensure_token()
headers = {"Authorization": f"Bearer {token}"}
with file_path.open("rb") as f:
files = {"file": (filename, f, "application/octet-stream")}
async with httpx.AsyncClient(timeout=self.timeout, follow_redirects=True) as client:
resp = await client.post(url, headers=headers, files=files)
if resp.status_code == 401 and attempt == 0:
self._token = None
continue
resp.raise_for_status()
return
raise HTTPException(502, detail="Foxel 上传失败")
async def write_file(self, root: str, rel: str, data: bytes):
rel = (rel or "").lstrip("/")
full_path = _join_fs_path(root, rel)
url = self.base_url + self._file_path(full_path)
filename = Path(rel).name or "file"
for attempt in range(2):
token = await self._ensure_token()
headers = {"Authorization": f"Bearer {token}"}
files = {"file": (filename, data, "application/octet-stream")}
async with httpx.AsyncClient(timeout=self.timeout, follow_redirects=True) as client:
resp = await client.post(url, headers=headers, files=files)
if resp.status_code == 401 and attempt == 0:
self._token = None
continue
resp.raise_for_status()
return True
raise HTTPException(502, detail="Foxel 写入失败")
async def write_file_stream(self, root: str, rel: str, data_iter: AsyncIterator[bytes]):
rel = (rel or "").lstrip("/")
full_path = _join_fs_path(root, rel)
suffix = Path(rel).suffix
with tempfile.NamedTemporaryFile(delete=False, suffix=suffix) as tf:
tmp_path = Path(tf.name)
size = 0
try:
with tmp_path.open("wb") as f:
async for chunk in data_iter:
if not chunk:
continue
f.write(chunk)
size += len(chunk)
await self._upload_file_path(full_path, tmp_path)
return size
finally:
try:
tmp_path.unlink(missing_ok=True)
except Exception:
pass
async def mkdir(self, root: str, rel: str):
rel = (rel or "").strip("/")
full_path = _join_fs_path(root, rel)
payload = await self._request_json("POST", "/api/fs/mkdir", json={"path": full_path})
_unwrap_success(payload, context="mkdir")
return True
async def delete(self, root: str, rel: str):
rel = (rel or "").strip("/")
full_path = _join_fs_path(root, rel)
url = self.base_url + self._browse_path(full_path)
for attempt in range(2):
token = await self._ensure_token()
headers = {"Authorization": f"Bearer {token}"}
async with httpx.AsyncClient(timeout=self.timeout, follow_redirects=True) as client:
resp = await client.delete(url, headers=headers)
if resp.status_code == 401 and attempt == 0:
self._token = None
continue
if resp.status_code == 404:
return
resp.raise_for_status()
return
raise HTTPException(502, detail="Foxel 删除失败")
async def move(self, root: str, src_rel: str, dst_rel: str):
src_path = _join_fs_path(root, (src_rel or "").lstrip("/"))
dst_path = _join_fs_path(root, (dst_rel or "").lstrip("/"))
payload = await self._request_json("POST", "/api/fs/move", json={"src": src_path, "dst": dst_path})
_unwrap_success(payload, context="move")
return True
async def rename(self, root: str, src_rel: str, dst_rel: str):
src_path = _join_fs_path(root, (src_rel or "").lstrip("/"))
dst_path = _join_fs_path(root, (dst_rel or "").lstrip("/"))
payload = await self._request_json("POST", "/api/fs/rename", json={"src": src_path, "dst": dst_path})
_unwrap_success(payload, context="rename")
return True
async def copy(self, root: str, src_rel: str, dst_rel: str, overwrite: bool = False):
src_path = _join_fs_path(root, (src_rel or "").lstrip("/"))
dst_path = _join_fs_path(root, (dst_rel or "").lstrip("/"))
payload = await self._request_json(
"POST",
"/api/fs/copy",
json={"src": src_path, "dst": dst_path},
params={"overwrite": overwrite},
)
_unwrap_success(payload, context="copy")
return True
async def stream_file(self, root: str, rel: str, range_header: str | None):
rel = (rel or "").lstrip("/")
full_path = _join_fs_path(root, rel)
url = self.base_url + self._stream_path(full_path)
headers = {}
if range_header:
headers["Range"] = range_header
for attempt in range(2):
token = await self._ensure_token()
headers["Authorization"] = f"Bearer {token}"
client = httpx.AsyncClient(timeout=None, follow_redirects=True)
stream_cm = client.stream("GET", url, headers=headers)
try:
resp = await stream_cm.__aenter__()
except Exception:
await client.aclose()
raise
if resp.status_code == 401 and attempt == 0:
try:
await resp.aread()
finally:
await stream_cm.__aexit__(None, None, None)
await client.aclose()
self._token = None
continue
if resp.status_code == 404:
try:
await resp.aread()
finally:
await stream_cm.__aexit__(None, None, None)
await client.aclose()
raise FileNotFoundError(rel)
if resp.status_code >= 400:
try:
await resp.aread()
finally:
await stream_cm.__aexit__(None, None, None)
await client.aclose()
resp.raise_for_status()
content_type = resp.headers.get("Content-Type") or (
mimetypes.guess_type(rel)[0] or "application/octet-stream"
)
out_headers = {}
for key in ("Accept-Ranges", "Content-Range", "Content-Length"):
value = resp.headers.get(key)
if value:
out_headers[key] = value
async def iterator():
try:
async for chunk in resp.aiter_bytes():
if chunk:
yield chunk
finally:
await stream_cm.__aexit__(None, None, None)
await client.aclose()
return StreamingResponse(
iterator(),
status_code=resp.status_code,
headers=out_headers,
media_type=content_type,
)
raise HTTPException(502, detail="Foxel 流式读取失败")
ADAPTER_TYPE = "foxel"
CONFIG_SCHEMA = [
{"key": "base_url", "label": "节点地址", "type": "string", "required": True, "placeholder": "http://127.0.0.1:8000"},
{"key": "username", "label": "用户名", "type": "string", "required": True},
{"key": "password", "label": "密码", "type": "password", "required": True},
{"key": "root", "label": "远端根目录", "type": "string", "required": False, "default": "/", "placeholder": "/ 或 /drive"},
{"key": "timeout", "label": "超时(秒)", "type": "number", "required": False, "default": 60},
]
def ADAPTER_FACTORY(rec: StorageAdapter):
return FoxelAdapter(rec)
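A similar sketch for the Foxel adapter, using only the config keys its __init__ reads (base_url, username, password, root, timeout); the record class is again a stand-in:

# Illustrative only: construct the adapter against a local Foxel node.
class _FakeRecord:
    config = {
        "base_url": "http://127.0.0.1:8000",
        "username": "admin",
        "password": "secret",
        "root": "/drive",
        "timeout": 30,
    }

adapter = FoxelAdapter(_FakeRecord())
assert adapter.get_effective_root("photos") == "/drive/photos"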

View File

@@ -1,5 +1,3 @@
from __future__ import annotations
import asyncio
from dataclasses import dataclass
from typing import List, Dict, Tuple, AsyncIterator, Optional

View File

@@ -1,4 +1,3 @@
from __future__ import annotations
from datetime import datetime, timezone, timedelta
from typing import List, Dict, Tuple, AsyncIterator
import httpx

View File

@@ -1,4 +1,3 @@
from __future__ import annotations
import os
import shutil
import stat

View File

@@ -1,4 +1,3 @@
from __future__ import annotations
from datetime import datetime, timezone, timedelta
from typing import List, Dict, Tuple, AsyncIterator
import httpx

View File

@@ -1,4 +1,3 @@
from __future__ import annotations
import asyncio
import base64
import hashlib
@@ -291,6 +290,11 @@ class QuarkAdapter:
return None
return None
async def get_video_transcoding_url(self, fid: str) -> Optional[str]:
if not self.use_transcoding_address:
return None
return await self._get_transcoding_url(fid)
def _is_video_name(self, name: str) -> bool:
mime, _ = mimetypes.guess_type(name)
return bool(mime and mime.startswith("video/"))
@@ -317,6 +321,29 @@ class QuarkAdapter:
resp.raise_for_status()
return resp.content
async def read_file_range(self, root: str, rel: str, start: int, end: Optional[int] = None) -> bytes:
if not rel or rel.endswith("/"):
raise IsADirectoryError("Path is a directory")
parent = rel.rsplit("/", 1)[0] if "/" in rel else ""
name = rel.rsplit("/", 1)[-1]
base_fid = root or self.root_fid
parent_fid = await self._resolve_dir_fid_from(base_fid, parent)
it = await self._find_child(parent_fid, name)
if not it or it["is_dir"]:
raise FileNotFoundError(rel)
url = await self._get_download_url(it["fid"])
headers = dict(self._download_headers())
headers["Range"] = f"bytes={start}-" if end is None else f"bytes={start}-{end}"
async with httpx.AsyncClient(timeout=self._timeout, follow_redirects=True) as client:
resp = await client.get(url, headers=headers)
if resp.status_code == 404:
raise FileNotFoundError(rel)
if resp.status_code == 416:
raise HTTPException(416, detail="Requested Range Not Satisfiable")
resp.raise_for_status()
return resp.content
async def stream_file(self, root: str, rel: str, range_header: str | None):
if not rel or rel.endswith("/"):
raise IsADirectoryError("Path is a directory")

View File

@@ -1,4 +1,3 @@
from __future__ import annotations
import asyncio
import mimetypes
from datetime import datetime

View File

@@ -1,5 +1,3 @@
from __future__ import annotations
import asyncio
import mimetypes
import stat as statmod

View File

@@ -1,4 +1,3 @@
from __future__ import annotations
from typing import List, Dict, Tuple, AsyncIterator
import io
import os

View File

@@ -1,4 +1,3 @@
from __future__ import annotations
from typing import List, Dict, Optional, Tuple, AsyncIterator
import httpx
from urllib.parse import urljoin, quote

View File

@@ -33,6 +33,27 @@ def discover_adapters():
module = import_module(full_name)
except Exception:
continue
adapter_types = getattr(module, "ADAPTER_TYPES", None)
if isinstance(adapter_types, dict):
default_schema = getattr(module, "CONFIG_SCHEMA", None)
schema_map = getattr(module, "CONFIG_SCHEMA_MAP", None)
if not isinstance(schema_map, dict):
schema_map = None
for adapter_type, factory in adapter_types.items():
normalized_type = normalize_adapter_type(adapter_type)
if not normalized_type:
continue
if not callable(factory):
continue
TYPE_MAP[normalized_type] = factory
schema = schema_map.get(normalized_type) if schema_map else default_schema
if isinstance(schema, list):
CONFIG_SCHEMAS[normalized_type] = schema
continue
adapter_type = normalize_adapter_type(getattr(module, "ADAPTER_TYPE", None))
schema = getattr(module, "CONFIG_SCHEMA", None)
factory = getattr(module, "ADAPTER_FACTORY", None)

View File

@@ -1,5 +1,3 @@
from __future__ import annotations
import httpx
from typing import List, Sequence, Tuple

View File

@@ -1,5 +1,3 @@
from __future__ import annotations
import asyncio
import json
from collections.abc import Iterable
@@ -21,6 +19,8 @@ from .vector_providers import (
)
DEFAULT_VECTOR_DIMENSION = 4096
VECTOR_COLLECTION_NAME = "vector_collection"
FILE_COLLECTION_NAME = "file_collection"
OPENAI_EMBEDDING_DIMS = {
"text-embedding-3-large": 3072,
@@ -400,7 +400,7 @@ class AIProviderService:
class VectorDBService:
_instance: "VectorDBService" | None = None
_instance: Optional["VectorDBService"] = None
def __new__(cls, *args, **kwargs):
if cls._instance is None:

View File

@@ -1,5 +1,3 @@
from __future__ import annotations
from typing import Dict, List, Type
from .base import BaseVectorProvider

View File

@@ -1,5 +1,3 @@
from __future__ import annotations
from typing import Any, Dict, List

View File

@@ -1,5 +1,3 @@
from __future__ import annotations
from pathlib import Path
from typing import Any, Dict, List, Optional

View File

@@ -1,5 +1,3 @@
from __future__ import annotations
from typing import Any, Dict, List, Optional
from pymilvus import CollectionSchema, DataType, FieldSchema, MilvusClient

View File

@@ -1,5 +1,3 @@
from __future__ import annotations
from typing import Any, Dict, List, Optional, Sequence
from uuid import NAMESPACE_URL, uuid5

View File

@@ -62,7 +62,5 @@ async def clear_audit_logs(
):
start_dt = _parse_iso(start_time, "start_time")
end_dt = _parse_iso(end_time, "end_time")
if start_dt is None and end_dt is None:
raise HTTPException(status_code=400, detail="start_time 或 end_time 至少提供一个")
deleted_count = await AuditService.clear_logs(start_time=start_dt, end_time=end_dt)
return response.success({"deleted_count": deleted_count})

View File

@@ -95,6 +95,28 @@ def _build_request_params(request: Request | None) -> Dict[str, Any] | None:
return params or None
def _get_client_ip(request: Request | None) -> str | None:
if not request:
return None
cf_connecting_ip = request.headers.get("cf-connecting-ip") or request.headers.get("CF-Connecting-IP")
if cf_connecting_ip:
ip = cf_connecting_ip.strip()
if ip:
return ip
x_real_ip = request.headers.get("x-real-ip") or request.headers.get("X-Real-IP")
if x_real_ip:
ip = x_real_ip.strip()
if ip:
return ip
x_forwarded_for = request.headers.get("x-forwarded-for") or request.headers.get("X-Forwarded-For")
if x_forwarded_for:
for part in x_forwarded_for.split(","):
ip = part.strip()
if ip and ip.lower() != "unknown":
return ip
return request.client.host if request.client else None
def _status_code_from_response(response: Any) -> int:
if hasattr(response, "status_code"):
try:
@@ -142,7 +164,7 @@ def audit(
description=description,
user_id=user_id,
username=username,
client_ip=request.client.host if request and request.client else None,
client_ip=_get_client_ip(request),
method=request.method if request else "",
path=request.url.path if request else func.__name__,
status_code=status_code,
@@ -163,7 +185,7 @@ def audit(
description=description,
user_id=user_id,
username=username,
client_ip=request.client.host if request and request.client else None,
client_ip=_get_client_ip(request),
method=request.method if request else "",
path=request.url.path if request else func.__name__,
status_code=status_code,

View File

@@ -5,11 +5,11 @@ from dataclasses import dataclass
from datetime import datetime, timedelta, timezone
from typing import Annotated
import bcrypt
import jwt
from fastapi import Depends, HTTPException, status
from fastapi.security import OAuth2PasswordBearer, OAuth2PasswordRequestForm
from jwt.exceptions import InvalidTokenError
from passlib.context import CryptContext
from domain.auth.types import (
PasswordResetConfirm,
@@ -97,12 +97,15 @@ class PasswordResetStore:
class AuthService:
pwd_context = CryptContext(schemes=["bcrypt"], deprecated="auto")
oauth2_scheme = OAuth2PasswordBearer(tokenUrl="auth/login")
algorithm = ALGORITHM
access_token_expire_minutes = ACCESS_TOKEN_EXPIRE_MINUTES
password_reset_token_expire_minutes = PASSWORD_RESET_TOKEN_EXPIRE_MINUTES
@staticmethod
def _to_bytes(value: str) -> bytes:
return value.encode("utf-8")
@classmethod
async def get_secret_key(cls) -> str:
return await ConfigService.get_secret_key("SECRET_KEY", None)
@@ -113,11 +116,17 @@ class AuthService:
@classmethod
def verify_password(cls, plain_password: str, hashed_password: str) -> bool:
return cls.pwd_context.verify(plain_password, hashed_password)
try:
return bcrypt.checkpw(cls._to_bytes(plain_password), hashed_password.encode("utf-8"))
except (ValueError, TypeError):
return False
@classmethod
def get_password_hash(cls, password: str) -> str:
return cls.pwd_context.hash(password)
encoded = cls._to_bytes(password)
if len(encoded) > 72:
raise HTTPException(status_code=400, detail="密码过长")
return bcrypt.hashpw(encoded, bcrypt.gensalt()).decode("utf-8")
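Dropping CryptContext in favor of calling bcrypt directly keeps the stored hash format unchanged; only the guard for bcrypt's 72-byte input cap is new. A minimal round-trip sketch:

import bcrypt

# bcrypt only consumes the first 72 bytes of the password, which is why
# get_password_hash above rejects longer inputs outright.
hashed = bcrypt.hashpw(b"hunter2", bcrypt.gensalt()).decode("utf-8")
assert bcrypt.checkpw(b"hunter2", hashed.encode("utf-8"))
assert not bcrypt.checkpw(b"wrong", hashed.encode("utf-8"))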
@classmethod
async def get_user_db(cls, username_or_email: str) -> UserInDB | None:

View File

@@ -29,7 +29,7 @@ async def set_config(
request: Request,
current_user: Annotated[User, Depends(get_current_active_user)],
key: str = Form(...),
value: str = Form(...),
value: str = Form(""),
):
await ConfigService.set(key, value)
return success(ConfigItem(key=key, value=value).model_dump())

View File

@@ -10,7 +10,7 @@ from models.database import Configuration, UserAccount
load_dotenv(dotenv_path=".env")
VERSION = "v1.4.0"
VERSION = "v1.6.0"
class ConfigService:

View File

@@ -1 +1,17 @@
"""
Foxel plugin system
Provides installation, management, and runtime loading of .foxpkg plugin packages.
"""
from domain.plugins.loader import PluginLoader, PluginLoadError
from domain.plugins.service import PluginService
from domain.plugins.startup import init_plugins, load_installed_plugins
__all__ = [
"PluginLoader",
"PluginLoadError",
"PluginService",
"init_plugins",
"load_installed_plugins",
]

View File

@@ -1,66 +1,109 @@
"""
Plugin management API routes
"""
from typing import List
from fastapi import APIRouter, Body, Request
from fastapi import APIRouter, File, Request, UploadFile
from fastapi.responses import FileResponse
from domain.audit import AuditAction, audit
from domain.plugins.service import PluginService
from domain.plugins.types import PluginCreate, PluginManifestUpdate, PluginOut
from domain.plugins.types import (
PluginInstallResult,
PluginOut,
)
router = APIRouter(prefix="/api/plugins", tags=["plugins"])
@router.post("", response_model=PluginOut)
@audit(
action=AuditAction.CREATE,
description="创建插件",
body_fields=["url", "enabled"],
)
async def create_plugin(request: Request, payload: PluginCreate):
return await PluginService.create(payload)
# ========== Install ==========
@router.post("/install", response_model=PluginInstallResult)
@audit(action=AuditAction.CREATE, description="Install plugin package")
async def install_plugin(request: Request, file: UploadFile = File(...)):
"""
Install a .foxpkg plugin package.
Upload a .foxpkg file to perform the installation.
"""
content = await file.read()
return await PluginService.install_package(content, file.filename or "plugin.foxpkg")
# ========== Plugin list and detail ==========
@router.get("", response_model=List[PluginOut])
@audit(action=AuditAction.READ, description="List plugins")
async def list_plugins(request: Request):
"""Return the list of installed plugins"""
return await PluginService.list_plugins()
@router.delete("/{plugin_id}")
@audit(action=AuditAction.DELETE, description="删除插件")
async def delete_plugin(request: Request, plugin_id: int):
await PluginService.delete(plugin_id)
@router.get("/{key_or_id}", response_model=PluginOut)
@audit(action=AuditAction.READ, description="获取插件详情")
async def get_plugin(request: Request, key_or_id: str):
"""获取单个插件详情"""
return await PluginService.get_plugin(key_or_id)
# ========== Plugin management ==========
@router.delete("/{key_or_id}")
@audit(action=AuditAction.DELETE, description="Uninstall plugin")
async def delete_plugin(request: Request, key_or_id: str):
"""Uninstall a plugin"""
await PluginService.delete(key_or_id)
return {"code": 0, "msg": "ok"}
@router.put("/{plugin_id}", response_model=PluginOut)
@audit(
action=AuditAction.UPDATE,
description="更新插件",
body_fields=["url", "enabled"],
)
async def update_plugin(request: Request, plugin_id: int, payload: PluginCreate):
return await PluginService.update(plugin_id, payload)
# ========== Plugin assets ==========
@router.post("/{plugin_id}/metadata", response_model=PluginOut)
@audit(
action=AuditAction.UPDATE,
description="更新插件 manifest",
body_fields=[
"key",
"name",
"version",
"supported_exts",
"default_bounds",
"default_maximized",
"icon",
"description",
"author",
"website",
"github",
],
)
async def update_manifest(
request: Request, plugin_id: int, manifest: PluginManifestUpdate = Body(...)
):
return await PluginService.update_manifest(plugin_id, manifest)
@router.get("/{key_or_id}/bundle.js")
async def get_bundle(request: Request, key_or_id: str):
"""获取插件前端 bundle"""
path = await PluginService.get_bundle_path(key_or_id)
return FileResponse(
path,
media_type="application/javascript",
headers={"Cache-Control": "no-store"},
)
@router.get("/{key}/assets/{asset_path:path}")
async def get_asset(request: Request, key: str, asset_path: str):
"""获取插件静态资源"""
path = await PluginService.get_asset_path(key, asset_path)
# 根据扩展名确定 MIME 类型
ext = path.suffix.lower()
media_types = {
".js": "application/javascript",
".css": "text/css",
".json": "application/json",
".svg": "image/svg+xml",
".png": "image/png",
".jpg": "image/jpeg",
".jpeg": "image/jpeg",
".gif": "image/gif",
".webp": "image/webp",
".ico": "image/x-icon",
".woff": "font/woff",
".woff2": "font/woff2",
".ttf": "font/ttf",
".eot": "application/vnd.ms-fontobject",
".html": "text/html",
".txt": "text/plain",
".md": "text/markdown",
}
media_type = media_types.get(ext, "application/octet-stream")
return FileResponse(
path,
media_type=media_type,
headers={"Cache-Control": "public, max-age=3600"},
)
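With these routes in place, installing a package is a single multipart upload. A client-side sketch (base URL, file name, and token are placeholders; the exact auth requirements are not shown in this diff):

import httpx

with open("demo.foxpkg", "rb") as f:
    resp = httpx.post(
        "http://127.0.0.1:8000/api/plugins/install",
        files={"file": ("demo.foxpkg", f, "application/zip")},
        headers={"Authorization": "Bearer <token>"},  # placeholder credentials
    )
result = resp.json()  # shaped like PluginInstallResult
print(result.get("success"), result.get("message"), result.get("errors"))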

449
domain/plugins/loader.py Normal file
View File

@@ -0,0 +1,449 @@
"""
Plugin loader module
Responsible for:
1. Unpacking and validating .foxpkg packages
2. Deploying plugin files
3. Dynamically loading backend routes
4. Dynamically registering processors
"""
import io
import json
import shutil
import sys
import zipfile
from importlib.util import module_from_spec, spec_from_file_location
from pathlib import Path
from types import ModuleType
from typing import Any, Dict, List, Optional, Tuple
from fastapi import APIRouter
from domain.plugins.types import (
ManifestProcessorConfig,
ManifestRouteConfig,
PluginManifest,
)
class PluginLoadError(Exception):
"""插件加载错误"""
pass
class PluginLoader:
"""插件加载器"""
PLUGINS_ROOT = Path("data/plugins")
# 已加载的插件模块缓存
_loaded_modules: Dict[str, ModuleType] = {}
# 已挂载的路由追踪
_mounted_routers: Dict[str, List[APIRouter]] = {}
@classmethod
def get_plugin_dir(cls, plugin_key: str) -> Path:
"""获取插件目录"""
return cls.PLUGINS_ROOT / plugin_key
@classmethod
def get_manifest_path(cls, plugin_key: str) -> Path:
"""获取插件 manifest.json 路径"""
return cls.get_plugin_dir(plugin_key) / "manifest.json"
@classmethod
def get_frontend_bundle_path(cls, plugin_key: str, entry: Optional[str] = None) -> Path:
"""获取前端 bundle 路径"""
plugin_dir = cls.get_plugin_dir(plugin_key)
if entry:
return plugin_dir / entry
# Default location
return plugin_dir / "frontend" / "index.js"
@classmethod
def get_asset_path(cls, plugin_key: str, asset_path: str) -> Path:
"""获取静态资源路径"""
return cls.get_plugin_dir(plugin_key) / asset_path
# ========== Unpacking and validation ==========
@classmethod
def validate_manifest(cls, manifest_data: Dict[str, Any]) -> Tuple[bool, List[str]]:
"""验证 manifest 数据"""
errors: List[str] = []
# 必需字段检查
if not manifest_data.get("key"):
errors.append("manifest 缺少必需字段: key")
if not manifest_data.get("name"):
errors.append("manifest 缺少必需字段: name")
# key 格式检查Java 命名空间格式)
key = manifest_data.get("key", "")
if key:
import re
# 格式: com.example.plugin (至少两级,每级以小写字母开头,可包含小写字母和数字)
if not re.match(r"^[a-z][a-z0-9]*(\.[a-z][a-z0-9]*)+$", key):
errors.append(
"key 格式无效:必须使用命名空间格式(如 com.example.plugin"
"每个部分以小写字母开头,只能包含小写字母和数字,至少两级"
)
# 版本格式检查(简单检查)
version = manifest_data.get("version", "")
if version and not isinstance(version, str):
errors.append("version 必须是字符串")
# Validate frontend config
frontend = manifest_data.get("frontend")
if frontend and isinstance(frontend, dict):
if frontend.get("entry") and not isinstance(frontend["entry"], str):
errors.append("frontend.entry must be a string")
if frontend.get("styles") is not None:
if not isinstance(frontend["styles"], list) or not all(
isinstance(x, str) for x in frontend["styles"]
):
errors.append("frontend.styles must be an array of strings")
supported_exts = frontend.get("supportedExts") or frontend.get("supported_exts")
if supported_exts and not isinstance(supported_exts, list):
errors.append("frontend.supportedExts must be an array")
use_system_window = frontend.get("useSystemWindow")
if use_system_window is None:
use_system_window = frontend.get("use_system_window")
if use_system_window is not None and not isinstance(use_system_window, bool):
errors.append("frontend.useSystemWindow must be a boolean")
# Validate backend config
backend = manifest_data.get("backend")
if backend and isinstance(backend, dict):
routes = backend.get("routes", [])
if routes:
for i, route in enumerate(routes):
if not route.get("module"):
errors.append(f"backend.routes[{i}] is missing module")
if not route.get("prefix"):
errors.append(f"backend.routes[{i}] is missing prefix")
processors = backend.get("processors", [])
if processors:
for i, proc in enumerate(processors):
if not proc.get("module"):
errors.append(f"backend.processors[{i}] is missing module")
if not proc.get("type"):
errors.append(f"backend.processors[{i}] is missing type")
return len(errors) == 0, errors
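A minimal manifest that passes these checks needs only a namespaced key and a name; everything else is optional:

ok, errors = PluginLoader.validate_manifest({
    "key": "com.example.demo",  # matches ^[a-z][a-z0-9]*(\.[a-z][a-z0-9]*)+$
    "name": "Demo Plugin",
    "version": "1.0.0",
    "frontend": {"entry": "frontend/index.js", "supportedExts": ["txt"]},
})
assert ok and not errors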
@classmethod
def unpack_foxpkg(
cls, file_content: bytes, target_key: Optional[str] = None
) -> Tuple[PluginManifest, Path]:
"""
Unpack a .foxpkg file
Args:
file_content: raw bytes of the .foxpkg file
target_key: optional plugin key to install under (overrides the key in the manifest)
Returns:
(manifest, plugin_dir) tuple
Raises:
PluginLoadError: unpacking or validation failed
"""
try:
with zipfile.ZipFile(io.BytesIO(file_content)) as zf:
# Read manifest.json
try:
manifest_bytes = zf.read("manifest.json")
except KeyError:
raise PluginLoadError("插件包缺少 manifest.json")
try:
manifest_data = json.loads(manifest_bytes.decode("utf-8"))
except json.JSONDecodeError as e:
raise PluginLoadError(f"manifest.json 解析失败: {e}")
# 验证 manifest
valid, errors = cls.validate_manifest(manifest_data)
if not valid:
raise PluginLoadError(f"manifest 验证失败: {'; '.join(errors)}")
# 解析 manifest
try:
manifest = PluginManifest.model_validate(manifest_data)
except Exception as e:
raise PluginLoadError(f"manifest 解析失败: {e}")
# 确定插件 key
plugin_key = target_key or manifest.key
# Validate files inside the package
cls._validate_package_files(zf, manifest)
# Deploy files
target_dir = cls.PLUGINS_ROOT / plugin_key
if target_dir.exists():
# Back up the old version
backup_dir = cls.PLUGINS_ROOT / f"{plugin_key}.backup"
if backup_dir.exists():
shutil.rmtree(backup_dir)
shutil.move(str(target_dir), str(backup_dir))
target_dir.mkdir(parents=True, exist_ok=True)
try:
zf.extractall(target_dir)
except Exception as e:
# Restore the backup
if (cls.PLUGINS_ROOT / f"{plugin_key}.backup").exists():
shutil.rmtree(target_dir, ignore_errors=True)
shutil.move(str(cls.PLUGINS_ROOT / f"{plugin_key}.backup"), str(target_dir))
raise PluginLoadError(f"文件解压失败: {e}")
# 清理备份
backup_dir = cls.PLUGINS_ROOT / f"{plugin_key}.backup"
if backup_dir.exists():
shutil.rmtree(backup_dir, ignore_errors=True)
return manifest, target_dir
except zipfile.BadZipFile:
raise PluginLoadError("无效的插件包格式(非 ZIP 文件)")
@classmethod
def _validate_package_files(cls, zf: zipfile.ZipFile, manifest: PluginManifest) -> None:
"""验证包内文件是否完整"""
file_list = zf.namelist()
# Check the frontend entry
if manifest.frontend and manifest.frontend.entry:
if manifest.frontend.entry not in file_list:
raise PluginLoadError(f"前端入口文件不存在: {manifest.frontend.entry}")
# Check backend modules
if manifest.backend:
if manifest.backend.routes:
for route in manifest.backend.routes:
if route.module not in file_list:
raise PluginLoadError(f"路由模块不存在: {route.module}")
if manifest.backend.processors:
for proc in manifest.backend.processors:
if proc.module not in file_list:
raise PluginLoadError(f"处理器模块不存在: {proc.module}")
# ========== 路由动态加载 ==========
@classmethod
def load_route_module(cls, plugin_key: str, route_config: ManifestRouteConfig) -> APIRouter:
"""
Dynamically load a plugin route module
Args:
plugin_key: plugin key
route_config: route configuration
Returns:
the loaded APIRouter
"""
module_path = cls.get_plugin_dir(plugin_key) / route_config.module
if not module_path.exists():
raise PluginLoadError(f"路由模块不存在: {module_path}")
module_name = f"foxel_plugin_{plugin_key}_route_{module_path.stem}"
try:
spec = spec_from_file_location(module_name, module_path)
if spec is None or spec.loader is None:
raise PluginLoadError(f"无法加载路由模块: {module_path}")
module = module_from_spec(spec)
sys.modules[module_name] = module
spec.loader.exec_module(module)
# Cache the module
cls._loaded_modules[f"{plugin_key}:route:{route_config.module}"] = module
# Fetch the router
router = getattr(module, "router", None)
if router is None:
raise PluginLoadError(f"路由模块缺少 'router' 对象: {module_path}")
if not isinstance(router, APIRouter):
raise PluginLoadError(f"'router' 不是有效的 APIRouter 实例: {module_path}")
# 创建包装路由器添加前缀
wrapper = APIRouter(prefix=route_config.prefix, tags=route_config.tags or [])
wrapper.include_router(router)
return wrapper
except PluginLoadError:
raise
except Exception as e:
raise PluginLoadError(f"加载路由模块失败 [{module_path}]: {e}")
@classmethod
def load_all_routes(cls, plugin_key: str, manifest: PluginManifest) -> List[APIRouter]:
"""加载插件的所有路由"""
routers: List[APIRouter] = []
if not manifest.backend or not manifest.backend.routes:
return routers
for route_config in manifest.backend.routes:
router = cls.load_route_module(plugin_key, route_config)
routers.append(router)
cls._mounted_routers[plugin_key] = routers
return routers
# ========== Dynamic processor registration ==========
@classmethod
def load_processor_module(
cls, plugin_key: str, processor_config: ManifestProcessorConfig
) -> None:
"""
Dynamically load and register a processor module
Args:
plugin_key: plugin key
processor_config: processor configuration
"""
module_path = cls.get_plugin_dir(plugin_key) / processor_config.module
if not module_path.exists():
raise PluginLoadError(f"处理器模块不存在: {module_path}")
module_name = f"foxel_plugin_{plugin_key}_processor_{module_path.stem}"
try:
spec = spec_from_file_location(module_name, module_path)
if spec is None or spec.loader is None:
raise PluginLoadError(f"无法加载处理器模块: {module_path}")
module = module_from_spec(spec)
sys.modules[module_name] = module
spec.loader.exec_module(module)
# Cache the module
cls._loaded_modules[f"{plugin_key}:processor:{processor_config.module}"] = module
# Fetch the processor factory
factory = getattr(module, "PROCESSOR_FACTORY", None)
if factory is None:
raise PluginLoadError(f"处理器模块缺少 'PROCESSOR_FACTORY': {module_path}")
# 获取配置 schema
config_schema = getattr(module, "CONFIG_SCHEMA", [])
processor_name = getattr(module, "PROCESSOR_NAME", processor_config.name or processor_config.type)
supported_exts = getattr(module, "SUPPORTED_EXTS", [])
# Register into the processor registry
from domain.processors.registry import CONFIG_SCHEMAS, TYPE_MAP
processor_type = processor_config.type
TYPE_MAP[processor_type] = factory
# Instantiate once to read attributes
try:
sample = factory()
produces_file = getattr(sample, "produces_file", False)
supports_directory = getattr(sample, "supports_directory", False)
except Exception:
produces_file = False
supports_directory = False
CONFIG_SCHEMAS[processor_type] = {
"type": processor_type,
"name": processor_name,
"supported_exts": supported_exts,
"config_schema": config_schema,
"produces_file": produces_file,
"supports_directory": supports_directory,
"plugin": plugin_key, # 标记来源插件
"module_path": str(module_path),
}
except PluginLoadError:
raise
except Exception as e:
raise PluginLoadError(f"加载处理器模块失败 [{module_path}]: {e}")
@classmethod
def load_all_processors(cls, plugin_key: str, manifest: PluginManifest) -> List[str]:
"""加载插件的所有处理器,返回处理器类型列表"""
processor_types: List[str] = []
if not manifest.backend or not manifest.backend.processors:
return processor_types
for proc_config in manifest.backend.processors:
cls.load_processor_module(plugin_key, proc_config)
processor_types.append(proc_config.type)
return processor_types
# ========== Unloading ==========
@classmethod
def unload_plugin(cls, plugin_key: str, manifest: Optional[PluginManifest] = None) -> None:
"""
Unload a plugin's backend components
Args:
plugin_key: plugin key
manifest: optional manifest, used to determine which components to unload
"""
# Unregister processors
if manifest and manifest.backend and manifest.backend.processors:
from domain.processors.registry import CONFIG_SCHEMAS, TYPE_MAP
for proc_config in manifest.backend.processors:
proc_type = proc_config.type
if proc_type in TYPE_MAP:
del TYPE_MAP[proc_type]
if proc_type in CONFIG_SCHEMAS:
del CONFIG_SCHEMAS[proc_type]
# Clean up cached modules
keys_to_remove = [k for k in cls._loaded_modules if k.startswith(f"{plugin_key}:")]
for key in keys_to_remove:
module = cls._loaded_modules.pop(key, None)
if module and module.__name__ in sys.modules:
del sys.modules[module.__name__]
# Clean up route tracking (note: FastAPI does not support removing routes dynamically; an app restart is required)
cls._mounted_routers.pop(plugin_key, None)
@classmethod
def delete_plugin_files(cls, plugin_key: str) -> None:
"""删除插件文件"""
plugin_dir = cls.get_plugin_dir(plugin_key)
if plugin_dir.exists():
shutil.rmtree(plugin_dir)
# Also delete any backup
backup_dir = cls.PLUGINS_ROOT / f"{plugin_key}.backup"
if backup_dir.exists():
shutil.rmtree(backup_dir)
# ========== Reading manifests ==========
@classmethod
def read_manifest(cls, plugin_key: str) -> Optional[PluginManifest]:
"""从文件系统读取插件 manifest"""
manifest_path = cls.get_manifest_path(plugin_key)
if not manifest_path.exists():
return None
try:
with open(manifest_path, "r", encoding="utf-8") as f:
data = json.load(f)
return PluginManifest.model_validate(data)
except Exception:
return None
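load_route_module above only requires that the referenced file expose a module-level router: APIRouter, so a plugin's backend route file can be minimal. An illustrative backend/routes.py inside a hypothetical package:

# backend/routes.py inside a hypothetical .foxpkg package.
from fastapi import APIRouter

router = APIRouter()

@router.get("/ping")
async def ping():
    return {"code": 0, "msg": "pong"}

# With backend.routes = [{"module": "backend/routes.py", "prefix": "/api/ext/demo"}]
# in the manifest, the loader wraps this router and serves /api/ext/demo/ping.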

View File

@@ -1,48 +1,273 @@
"""
Plugin service module
Handles plugin management operations such as installation and uninstallation
"""
import contextlib
import logging
import shutil
from pathlib import Path
from typing import List, Optional, Union
from fastapi import HTTPException
from domain.plugins.types import PluginCreate, PluginManifestUpdate, PluginOut
from domain.plugins.loader import PluginLoadError, PluginLoader
from domain.plugins.types import (
PluginInstallResult,
PluginManifest,
PluginOut,
)
from models.database import Plugin
logger = logging.getLogger(__name__)
class PluginService:
@classmethod
async def create(cls, payload: PluginCreate) -> PluginOut:
rec = await Plugin.create(**payload.model_dump())
return PluginOut.model_validate(rec)
"""插件服务"""
_plugins_root = Path("data/plugins")
# ========== 工具方法 ==========
@classmethod
async def list_plugins(cls) -> list[PluginOut]:
rows = await Plugin.all().order_by("-id")
return [PluginOut.model_validate(r) for r in rows]
def _get_plugin_dir(cls, plugin_key: str) -> Path:
"""获取插件目录"""
return cls._plugins_root / plugin_key
@classmethod
async def _get_or_404(cls, plugin_id: int) -> Plugin:
rec = await Plugin.get_or_none(id=plugin_id)
def _get_bundle_path(cls, rec: Plugin) -> Path:
"""获取前端 bundle 路径"""
plugin_dir = cls._get_plugin_dir(rec.key)
# 从 manifest 读取
if rec.manifest:
frontend = rec.manifest.get("frontend", {})
entry = frontend.get("entry")
if entry:
return plugin_dir / entry
# Default location
return plugin_dir / "frontend" / "index.js"
@classmethod
async def _get_by_key_or_404(cls, key: str) -> Plugin:
"""通过 key 获取插件,不存在则返回 404"""
rec = await Plugin.get_or_none(key=key)
if not rec:
raise HTTPException(status_code=404, detail="Plugin not found")
return rec
@classmethod
async def delete(cls, plugin_id: int) -> None:
rec = await cls._get_or_404(plugin_id)
async def _get_by_key_or_id(cls, key_or_id: Union[str, int]) -> Plugin:
"""通过 key 或 ID 获取插件"""
# 尝试作为 ID
if isinstance(key_or_id, int) or (isinstance(key_or_id, str) and key_or_id.isdigit()):
plugin_id = int(key_or_id)
rec = await Plugin.get_or_none(id=plugin_id)
if rec:
return rec
# Try as a key
if isinstance(key_or_id, str):
rec = await Plugin.get_or_none(key=key_or_id)
if rec:
return rec
raise HTTPException(status_code=404, detail="Plugin not found")
# ========== Install ==========
@classmethod
async def install_package(cls, file_content: bytes, filename: str) -> PluginInstallResult:
"""
Install a .foxpkg plugin package
Args:
file_content: raw bytes of the plugin package
filename: original filename
Returns:
the installation result
"""
errors: List[str] = []
try:
# Unpack
manifest, plugin_dir = PluginLoader.unpack_foxpkg(file_content)
plugin_key = manifest.key
# Check whether the plugin already exists
existing = await Plugin.get_or_none(key=plugin_key)
if existing:
# Update the existing plugin
logger.info(f"Updating plugin: {plugin_key}")
rec = existing
else:
# Create a new plugin
logger.info(f"Installing new plugin: {plugin_key}")
rec = Plugin(key=plugin_key)
# Update fields
rec.name = manifest.name
rec.version = manifest.version
rec.description = manifest.description
rec.author = manifest.author
rec.website = manifest.website
rec.github = manifest.github
rec.license = manifest.license
rec.manifest = manifest.model_dump(mode="json")
# Extract frontend config from manifest.frontend
if manifest.frontend:
rec.open_app = manifest.frontend.open_app or False
rec.supported_exts = manifest.frontend.supported_exts
rec.default_bounds = manifest.frontend.default_bounds
rec.default_maximized = manifest.frontend.default_maximized
rec.icon = manifest.frontend.icon
await rec.save()
# Load backend components (if any)
loaded_routes: List[str] = []
loaded_processors: List[str] = []
if manifest.backend:
# Load routes
if manifest.backend.routes:
try:
from main import app
routers = PluginLoader.load_all_routes(plugin_key, manifest)
for router in routers:
app.include_router(router)
loaded_routes.append(router.prefix)
except PluginLoadError as e:
errors.append(f"route loading failed: {e}")
logger.error(f"Plugin {plugin_key} route loading failed: {e}")
except Exception as e:
errors.append(f"route loading failed: {e}")
logger.exception(f"Plugin {plugin_key} route loading raised an exception")
# Load processors
if manifest.backend.processors:
try:
processor_types = PluginLoader.load_all_processors(plugin_key, manifest)
loaded_processors = processor_types
except PluginLoadError as e:
errors.append(f"processor loading failed: {e}")
logger.error(f"Plugin {plugin_key} processor loading failed: {e}")
except Exception as e:
errors.append(f"processor loading failed: {e}")
logger.exception(f"Plugin {plugin_key} processor loading raised an exception")
# Update the load status
rec.loaded_routes = loaded_routes if loaded_routes else None
rec.loaded_processors = loaded_processors if loaded_processors else None
await rec.save()
return PluginInstallResult(
success=True,
plugin=PluginOut.model_validate(rec),
message="安装成功" if not errors else "安装完成,但有部分组件加载失败",
errors=errors if errors else None,
)
except PluginLoadError as e:
logger.error(f"插件安装失败: {e}")
return PluginInstallResult(
success=False,
message=str(e),
errors=[str(e)],
)
except Exception as e:
logger.exception("插件安装异常")
return PluginInstallResult(
success=False,
message=f"安装失败: {e}",
errors=[str(e)],
)
# ========== Queries ==========
@classmethod
async def list_plugins(cls) -> List[PluginOut]:
"""获取所有插件列表"""
rows = await Plugin.all().order_by("-id")
for rec in rows:
try:
manifest = PluginLoader.read_manifest(rec.key)
if manifest:
rec.manifest = manifest.model_dump(mode="json")
except Exception:
continue
return [PluginOut.model_validate(r) for r in rows]
@classmethod
async def get_plugin(cls, key_or_id: Union[str, int]) -> PluginOut:
"""获取单个插件详情"""
rec = await cls._get_by_key_or_id(key_or_id)
try:
manifest = PluginLoader.read_manifest(rec.key)
if manifest:
rec.manifest = manifest.model_dump(mode="json")
except Exception:
pass
return PluginOut.model_validate(rec)
@classmethod
async def get_bundle_path(cls, key_or_id: Union[str, int]) -> Path:
"""获取插件前端 bundle 路径"""
rec = await cls._get_by_key_or_id(key_or_id)
bundle_path = cls._get_bundle_path(rec)
if not bundle_path.exists():
raise HTTPException(status_code=404, detail="Plugin bundle not found")
return bundle_path
@classmethod
async def get_asset_path(cls, key: str, asset_path: str) -> Path:
"""获取插件静态资源路径"""
rec = await cls._get_by_key_or_404(key)
plugin_dir = cls._get_plugin_dir(rec.key)
# Security check: prevent path traversal
asset_path = asset_path.lstrip("/")
if ".." in asset_path:
raise HTTPException(status_code=400, detail="Invalid asset path")
full_path = plugin_dir / asset_path
if not full_path.exists():
raise HTTPException(status_code=404, detail="Asset not found")
# Ensure the path stays inside the plugin directory
try:
full_path.resolve().relative_to(plugin_dir.resolve())
except ValueError:
raise HTTPException(status_code=400, detail="Invalid asset path")
return full_path
# ========== Management operations ==========
@classmethod
async def delete(cls, key_or_id: Union[str, int]) -> None:
"""删除/卸载插件"""
rec = await cls._get_by_key_or_id(key_or_id)
# Fetch the manifest so backend components can be unloaded
manifest: Optional[PluginManifest] = None
if rec.manifest:
try:
manifest = PluginManifest.model_validate(rec.manifest)
except Exception:
pass
# Unload backend components
if manifest:
PluginLoader.unload_plugin(rec.key, manifest)
# Delete the database record
await rec.delete()
@classmethod
async def update(cls, plugin_id: int, payload: PluginCreate) -> PluginOut:
rec = await cls._get_or_404(plugin_id)
rec.url = payload.url
rec.enabled = payload.enabled
await rec.save()
return PluginOut.model_validate(rec)
# Delete files
with contextlib.suppress(Exception):
plugin_dir = cls._get_plugin_dir(rec.key)
if plugin_dir.exists():
shutil.rmtree(plugin_dir)
@classmethod
async def update_manifest(
cls, plugin_id: int, manifest: PluginManifestUpdate
) -> PluginOut:
rec = await cls._get_or_404(plugin_id)
updates = manifest.model_dump(exclude_none=True)
if updates:
for key, value in updates.items():
setattr(rec, key, value)
await rec.save()
return PluginOut.model_validate(rec)
logger.info(f"插件 {rec.key} 已卸载")

116
domain/plugins/startup.py Normal file
View File

@@ -0,0 +1,116 @@
"""
Plugin startup loading module
Loads all installed plugins when the application starts
"""
import logging
from typing import TYPE_CHECKING, List, Tuple
from domain.plugins.loader import PluginLoadError, PluginLoader
from domain.plugins.types import PluginManifest
if TYPE_CHECKING:
from fastapi import FastAPI
logger = logging.getLogger(__name__)
async def load_installed_plugins(app: "FastAPI") -> Tuple[int, List[str]]:
"""
Load all installed plugins
Args:
app: the FastAPI application instance
Returns:
(number loaded successfully, list of errors)
"""
from models.database import Plugin
errors: List[str] = []
loaded_count = 0
try:
plugins = await Plugin.all()
except Exception as e:
logger.error(f"查询插件列表失败: {e}")
return 0, [f"查询插件列表失败: {e}"]
for plugin in plugins:
if not plugin.key:
continue
try:
# Fetch the manifest
manifest = None
if plugin.manifest:
try:
manifest = PluginManifest.model_validate(plugin.manifest)
except Exception:
# Fall back to reading from the filesystem
manifest = PluginLoader.read_manifest(plugin.key)
else:
manifest = PluginLoader.read_manifest(plugin.key)
if not manifest:
logger.warning(f"插件 {plugin.key} 缺少 manifest跳过加载")
continue
# Load backend routes
loaded_routes: List[str] = []
if manifest.backend and manifest.backend.routes:
try:
routers = PluginLoader.load_all_routes(plugin.key, manifest)
for router in routers:
app.include_router(router)
loaded_routes.append(router.prefix)
logger.info(f"插件 {plugin.key} 加载了 {len(routers)} 个路由")
except PluginLoadError as e:
errors.append(f"插件 {plugin.key} 路由加载失败: {e}")
logger.error(f"插件 {plugin.key} 路由加载失败: {e}")
# 加载处理器
loaded_processors: List[str] = []
if manifest.backend and manifest.backend.processors:
try:
processor_types = PluginLoader.load_all_processors(plugin.key, manifest)
loaded_processors = processor_types
logger.info(f"插件 {plugin.key} 注册了 {len(processor_types)} 个处理器")
except PluginLoadError as e:
errors.append(f"插件 {plugin.key} 处理器加载失败: {e}")
logger.error(f"插件 {plugin.key} 处理器加载失败: {e}")
# 更新数据库记录
plugin.loaded_routes = loaded_routes if loaded_routes else None
plugin.loaded_processors = loaded_processors if loaded_processors else None
await plugin.save()
loaded_count += 1
logger.info(f"插件 {plugin.key} 加载完成")
except Exception as e:
error_msg = f"插件 {plugin.key} 加载异常: {e}"
errors.append(error_msg)
logger.exception(error_msg)
return loaded_count, errors
async def init_plugins(app: "FastAPI") -> None:
"""
Initialize the plugin system
Called when the application starts
"""
logger.info("开始加载已安装插件...")
loaded_count, errors = await load_installed_plugins(app)
if errors:
logger.warning(f"插件加载完成,共 {loaded_count} 个成功,{len(errors)} 个错误")
for error in errors:
logger.warning(f" - {error}")
else:
logger.info(f"插件加载完成,共 {loaded_count} 个插件")

View File

@@ -1,43 +1,119 @@
from typing import Any, Dict, List, Optional
from pydantic import AliasChoices, BaseModel, ConfigDict, Field
from pydantic import BaseModel, ConfigDict, Field
class PluginCreate(BaseModel):
url: str = Field(min_length=1)
enabled: bool = True
# ========== Manifest types ==========
class PluginManifestUpdate(BaseModel):
class ManifestFrontend(BaseModel):
"""manifest.json 中的 frontend 配置"""
model_config = ConfigDict(populate_by_name=True, extra="ignore")
key: Optional[str] = None
name: Optional[str] = None
version: Optional[str] = None
entry: Optional[str] = Field(default=None, description="前端入口文件路径")
styles: Optional[List[str]] = Field(default=None, description="前端样式文件路径列表(相对插件根目录)")
open_app: Optional[bool] = Field(
default=None,
alias="openApp",
description="是否支持独立打开",
)
supported_exts: Optional[List[str]] = Field(
default=None,
validation_alias=AliasChoices("supported_exts", "supportedExts"),
alias="supportedExts",
description="支持的文件扩展名列表",
)
default_bounds: Optional[Dict[str, Any]] = Field(
default=None,
validation_alias=AliasChoices("default_bounds", "defaultBounds"),
alias="defaultBounds",
description="默认窗口尺寸",
)
default_maximized: Optional[bool] = Field(
default=None,
validation_alias=AliasChoices("default_maximized", "defaultMaximized"),
alias="defaultMaximized",
description="是否默认最大化",
)
icon: Optional[str] = None
description: Optional[str] = None
author: Optional[str] = None
website: Optional[str] = None
github: Optional[str] = None
icon: Optional[str] = Field(default=None, description="图标路径")
use_system_window: Optional[bool] = Field(
default=None,
alias="useSystemWindow",
description="是否使用系统窗口",
)
class ManifestRouteConfig(BaseModel):
"""manifest.json 中的路由配置"""
model_config = ConfigDict(extra="ignore")
module: str = Field(..., description="路由模块路径")
prefix: str = Field(..., description="路由前缀")
tags: Optional[List[str]] = Field(default=None, description="API 标签")
class ManifestProcessorConfig(BaseModel):
"""manifest.json 中的处理器配置"""
model_config = ConfigDict(extra="ignore")
module: str = Field(..., description="处理器模块路径")
type: str = Field(..., description="处理器类型标识")
name: Optional[str] = Field(default=None, description="处理器显示名称")
class ManifestBackend(BaseModel):
"""manifest.json 中的 backend 配置"""
model_config = ConfigDict(extra="ignore")
routes: Optional[List[ManifestRouteConfig]] = Field(default=None, description="路由列表")
processors: Optional[List[ManifestProcessorConfig]] = Field(
default=None, description="处理器列表"
)
class ManifestDependencies(BaseModel):
"""manifest.json 中的依赖配置"""
model_config = ConfigDict(extra="ignore")
python: Optional[str] = Field(default=None, description="Python 版本要求")
packages: Optional[List[str]] = Field(default=None, description="Python 包依赖列表")
class PluginManifest(BaseModel):
"""完整的 manifest.json 结构"""
model_config = ConfigDict(populate_by_name=True, extra="ignore")
foxpkg: str = Field(default="1.0", description="foxpkg 格式版本")
key: str = Field(..., min_length=1, description="插件唯一标识")
name: str = Field(..., min_length=1, description="插件名称")
version: str = Field(default="1.0.0", description="插件版本")
description: Optional[str] = Field(default=None, description="插件描述")
i18n: Optional[Dict[str, Dict[str, str]]] = Field(
default=None,
description="多语言信息name/description例如{'en': {'name': '...', 'description': '...'}}",
)
author: Optional[str] = Field(default=None, description="作者")
website: Optional[str] = Field(default=None, description="网站")
github: Optional[str] = Field(default=None, description="GitHub 地址")
license: Optional[str] = Field(default=None, description="许可证")
frontend: Optional[ManifestFrontend] = Field(default=None, description="前端配置")
backend: Optional[ManifestBackend] = Field(default=None, description="后端配置")
dependencies: Optional[ManifestDependencies] = Field(default=None, description="依赖配置")
# ========== API request/response types ==========
class PluginOut(BaseModel):
"""插件输出模型"""
id: int
url: str
enabled: bool
key: Optional[str] = None
key: str
open_app: bool = False
name: Optional[str] = None
version: Optional[str] = None
supported_exts: Optional[List[str]] = None
@@ -48,5 +124,20 @@ class PluginOut(BaseModel):
author: Optional[str] = None
website: Optional[str] = None
github: Optional[str] = None
license: Optional[str] = None
# Newly added fields
manifest: Optional[Dict[str, Any]] = None
loaded_routes: Optional[List[str]] = None
loaded_processors: Optional[List[str]] = None
model_config = ConfigDict(from_attributes=True)
class PluginInstallResult(BaseModel):
"""安装结果"""
success: bool
plugin: Optional[PluginOut] = None
message: Optional[str] = None
errors: Optional[List[str]] = None
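Because the frontend model declares camelCase aliases (with populate_by_name=True), a raw manifest.json dict validates directly, and snake_case attribute access then works as usual:

manifest = PluginManifest.model_validate({
    "key": "com.example.demo",
    "name": "Demo Plugin",
    "frontend": {
        "entry": "frontend/index.js",
        "supportedExts": [".md", ".txt"],  # camelCase, as written in manifest.json
        "defaultMaximized": True,
    },
})
assert manifest.frontend.supported_exts == [".md", ".txt"]
assert manifest.frontend.default_maximized is True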

View File

@@ -6,9 +6,11 @@ class BaseProcessor(Protocol):
supported_exts: list
config_schema: list
produces_file: bool
supports_directory: bool
requires_input_bytes: bool
async def process(self, input_bytes: bytes, path: str, config: Dict[str, Any]) -> bytes:
"""处理文件内容并返回处理后的内容"""
async def process(self, input_bytes: bytes, path: str, config: Dict[str, Any]) -> Any:
"""处理文件内容/路径并返回结果。produces_file=True 时应返回 bytes/Response。"""
...
# Convention: every processor must define
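Taken together with load_processor_module above, the convention implies a plugin processor file shaped roughly like this (all names illustrative):

# Hypothetical processor module inside a plugin package.
from typing import Any, Dict

class UppercaseProcessor:
    supported_exts = ["txt"]
    config_schema: list = []
    produces_file = True
    supports_directory = False
    requires_input_bytes = True

    async def process(self, input_bytes: bytes, path: str, config: Dict[str, Any]) -> Any:
        # produces_file=True, so return bytes for the framework to write back.
        return input_bytes.upper()

PROCESSOR_FACTORY = UppercaseProcessor
PROCESSOR_NAME = "Uppercase text"
SUPPORTED_EXTS = ["txt"]
CONFIG_SCHEMA: list = []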

View File

@@ -9,7 +9,12 @@ from PIL import Image
from ..base import BaseProcessor
from domain.ai.inference import describe_image_base64, get_text_embedding, provider_service
from domain.ai.service import VectorDBService, DEFAULT_VECTOR_DIMENSION
from domain.ai.service import (
VectorDBService,
DEFAULT_VECTOR_DIMENSION,
VECTOR_COLLECTION_NAME,
FILE_COLLECTION_NAME,
)
CHUNK_SIZE = 800
@@ -112,18 +117,20 @@ class VectorIndexProcessor:
action = config.get("action", "create")
index_type = config.get("index_type", "vector")
vector_db = VectorDBService()
collection_name = "vector_collection"
vector_collection = VECTOR_COLLECTION_NAME
file_collection = FILE_COLLECTION_NAME
if action == "destroy":
await vector_db.delete_vector(collection_name, path)
target_collection = file_collection if index_type == "simple" else vector_collection
await vector_db.delete_vector(target_collection, path)
return Response(content=f"文件 {path}{index_type} 索引已销毁", media_type="text/plain")
mime_type = _guess_mime(path)
if index_type == "simple":
await vector_db.ensure_collection(collection_name, vector=False)
await vector_db.delete_vector(collection_name, path)
await vector_db.upsert_vector(collection_name, {
await vector_db.ensure_collection(file_collection, vector=False)
await vector_db.delete_vector(file_collection, path)
await vector_db.upsert_vector(file_collection, {
"path": path,
"source_path": path,
"chunk_id": "filename",
@@ -146,8 +153,8 @@ class VectorIndexProcessor:
if vector_dim <= 0:
vector_dim = DEFAULT_VECTOR_DIMENSION
await vector_db.ensure_collection(collection_name, vector=True, dim=vector_dim)
await vector_db.delete_vector(collection_name, path)
await vector_db.ensure_collection(vector_collection, vector=True, dim=vector_dim)
await vector_db.delete_vector(vector_collection, path)
if file_ext in ["jpg", "jpeg", "png", "bmp"]:
processed_bytes, compression = _compress_image_for_embedding(input_bytes)
@@ -155,7 +162,7 @@ class VectorIndexProcessor:
description = await describe_image_base64(base64_image)
embedding = await get_text_embedding(description)
image_mime = "image/jpeg" if compression else mime_type
await vector_db.upsert_vector(collection_name, {
await vector_db.upsert_vector(vector_collection, {
"path": _chunk_key(path, "image"),
"source_path": path,
"chunk_id": "image",
@@ -177,7 +184,7 @@ class VectorIndexProcessor:
chunks = _chunk_text(text)
if not chunks:
await vector_db.upsert_vector(collection_name, {
await vector_db.upsert_vector(vector_collection, {
"path": _chunk_key(path, "0"),
"source_path": path,
"chunk_id": "0",
@@ -194,7 +201,7 @@ class VectorIndexProcessor:
chunk_count = 0
for chunk_id, chunk_text, start, end in chunks:
embedding = await get_text_embedding(chunk_text)
await vector_db.upsert_vector(collection_name, {
await vector_db.upsert_vector(vector_collection, {
"path": _chunk_key(path, str(chunk_id)),
"source_path": path,
"chunk_id": str(chunk_id),
@@ -213,15 +220,15 @@ class VectorIndexProcessor:
return Response(content="文本文件已索引", media_type="text/plain")
# 其他类型暂未支持向量索引,回退为文件名索引
await vector_db.delete_vector(collection_name, path)
await vector_db.upsert_vector(collection_name, {
"path": _chunk_key(path, "fallback"),
await vector_db.ensure_collection(file_collection, vector=False)
await vector_db.delete_vector(file_collection, path)
await vector_db.upsert_vector(file_collection, {
"path": path,
"source_path": path,
"chunk_id": "filename",
"mime": mime_type,
"type": "filename",
"name": os.path.basename(path),
"embedding": [0.0] * vector_dim,
})
return Response(content="暂不支持该类型的向量索引,已创建文件名索引", media_type="text/plain")

View File

@@ -74,6 +74,10 @@ def discover_processors(force_reload: bool = False) -> list[str]:
if produces_file is None and hasattr(sample, "produces_file"):
produces_file = getattr(sample, "produces_file")
supports_directory = getattr(module, "supports_directory", None)
if supports_directory is None and hasattr(sample, "supports_directory"):
supports_directory = getattr(sample, "supports_directory")
module_file = getattr(module, "__file__", None)
module_path: Optional[str] = None
if module_file:
@@ -101,6 +105,7 @@ def discover_processors(force_reload: bool = False) -> list[str]:
"supported_exts": normalized_exts,
"config_schema": schema,
"produces_file": produces_file if produces_file is not None else False,
"supports_directory": supports_directory if supports_directory is not None else False,
"module_path": module_path,
}

View File

@@ -35,14 +35,20 @@ class ProcessorService:
"supported_exts": meta.get("supported_exts", []),
"config_schema": meta["config_schema"],
"produces_file": meta.get("produces_file", False),
"supports_directory": meta.get("supports_directory", False),
"module_path": meta.get("module_path"),
})
return out
@classmethod
async def process_file(cls, req: ProcessRequest):
processor = cls.get_processor(req.processor_type)
if not processor:
raise HTTPException(404, detail="Processor not found")
is_dir = await VirtualFSService.path_is_directory(req.path)
if is_dir and not req.overwrite:
supports_directory = bool(getattr(processor, "supports_directory", False))
if is_dir and not supports_directory and not req.overwrite:
raise HTTPException(400, detail="Directory processing requires overwrite")
save_to = None if is_dir else (req.path if req.overwrite else req.save_to)

View File

@@ -105,7 +105,10 @@ class TaskQueueService:
if not processor:
raise ValueError(f"Processor {processor_type} not found for task {auto_task.id}")
file_content = await VirtualFSService.read_file(path)
requires_input_bytes = bool(getattr(processor, "requires_input_bytes", True))
file_content = b""
if requires_input_bytes:
file_content = await VirtualFSService.read_file(path)
result = await processor.process(file_content, path, auto_task.processor_config)
save_to = auto_task.processor_config.get("save_to")

View File

@@ -1,5 +1,3 @@
from __future__ import annotations
from pathlib import Path
from typing import Any

View File

@@ -1,5 +1,3 @@
from __future__ import annotations
import mimetypes
from typing import Any, AsyncIterator, Union
@@ -7,7 +5,7 @@ from fastapi import HTTPException
from fastapi.responses import Response
from domain.tasks.service import TaskService
from domain.virtual_fs.thumbnail import is_raw_filename
from domain.virtual_fs.thumbnail import is_raw_filename, raw_bytes_to_jpeg
from .listing import VirtualFSListingMixin
@@ -84,32 +82,9 @@ class VirtualFSFileOpsMixin(VirtualFSListingMixin):
if not rel or rel.endswith("/"):
raise HTTPException(400, detail="Path is a directory")
if is_raw_filename(rel):
import io
import rawpy
from PIL import Image
try:
raw_data = await cls.read_file(path)
try:
with rawpy.imread(io.BytesIO(raw_data)) as raw:
try:
thumb = raw.extract_thumb()
except rawpy.LibRawNoThumbnailError:
thumb = None
if thumb is not None and thumb.format in [rawpy.ThumbFormat.JPEG, rawpy.ThumbFormat.BITMAP]:
im = Image.open(io.BytesIO(thumb.data))
else:
rgb = raw.postprocess(use_camera_wb=False, use_auto_wb=True, output_bps=8)
im = Image.fromarray(rgb)
except Exception as exc:
print(f"rawpy processing failed: {exc}")
raise exc
buf = io.BytesIO()
im.save(buf, "JPEG", quality=90)
content = buf.getvalue()
content = raw_bytes_to_jpeg(raw_data, filename=rel)
return Response(content=content, media_type="image/jpeg")
except Exception as exc:
raise HTTPException(500, detail=f"RAW file processing failed: {exc}")

View File

@@ -1,12 +1,10 @@
from __future__ import annotations
from typing import Any, Dict, List, Tuple
from fastapi import HTTPException
from api.response import page
from domain.adapters.registry import runtime_registry
from domain.ai.service import VectorDBService
from domain.ai.service import VectorDBService, VECTOR_COLLECTION_NAME, FILE_COLLECTION_NAME
from domain.virtual_fs.thumbnail import is_image_filename, is_video_filename
from models import StorageAdapter
@@ -163,13 +161,19 @@ class VirtualFSListingMixin(VirtualFSResolverMixin):
@classmethod
async def _gather_vector_index(cls, full_path: str, limit: int = 20):
vector_db = VectorDBService()
try:
raw_results = await vector_db.search_by_path("vector_collection", full_path, max(limit * 2, 20))
except Exception:
return None
matched = []
if raw_results:
had_success = False
fetch_limit = max(limit * 2, 20)
for collection_name in (VECTOR_COLLECTION_NAME, FILE_COLLECTION_NAME):
try:
raw_results = await vector_db.search_by_path(collection_name, full_path, fetch_limit)
except Exception:
continue
if not raw_results:
had_success = True
continue
had_success = True
buckets = raw_results if isinstance(raw_results, list) else [raw_results]
for bucket in buckets:
if not bucket:
@@ -195,6 +199,9 @@ class VirtualFSListingMixin(VirtualFSResolverMixin):
entry["preview_truncated"] = len(text) > preview_limit
matched.append(entry)
if not had_success:
return None
if not matched:
return {"total": 0, "entries": [], "by_type": {}, "has_more": False}

View File

@@ -1,15 +1,20 @@
from __future__ import annotations
import base64
import datetime as dt
import hashlib
import hmac
import json
import os
import re
import shutil
import uuid
from typing import Dict, Iterable, List, Optional, Tuple
import xml.etree.ElementTree as ET
from typing import Any, AsyncIterator, Dict, Iterable, List, Optional, Tuple
import aiofiles
from fastapi import APIRouter, Request, Response
from fastapi import HTTPException
from domain.audit import AuditAction, audit
from domain.config.service import ConfigService
from domain.virtual_fs.service import VirtualFSService
@@ -20,6 +25,12 @@ router = APIRouter(prefix="/s3", tags=["s3"])
FALSEY = {"0", "false", "off", "no"}
_XML_NS = "http://s3.amazonaws.com/doc/2006-03-01/"
_MPU_ROOT = "data/s3_multipart"
_MPU_META_NAME = "meta.json"
_MPU_PART_DATA_TMPL = "part-{part_number:06d}.bin"
_MPU_PART_META_TMPL = "part-{part_number:06d}.json"
_MPU_PART_META_RE = re.compile(r"^part-(\d{6})\.json$")
class S3Settings(Dict[str, str]):
bucket: str
@@ -71,7 +82,7 @@ async def _ensure_enabled() -> Optional[Response]:
async def _get_settings() -> Tuple[Optional[S3Settings], Optional[Response]]:
bucket = (await ConfigService.get("S3_MAPPING_BUCKET", "foxel")) or "foxel"
region = (await ConfigService.get("S3_MAPPING_REGION", "us-east-1")) or "us-east-1"
region = ((await ConfigService.get("S3_MAPPING_REGION", "")) or "").strip()
base_path = (await ConfigService.get("S3_MAPPING_BASE_PATH", "/")) or "/"
access_key = (await ConfigService.get("S3_MAPPING_ACCESS_KEY")) or ""
secret_key = (await ConfigService.get("S3_MAPPING_SECRET_KEY")) or ""
@@ -121,42 +132,136 @@ def _sign(key: bytes, msg: str) -> bytes:
async def _authorize_sigv4(request: Request, settings: S3Settings) -> Optional[Response]:
auth = request.headers.get("authorization")
if not auth:
return _s3_error("AccessDenied", "Missing Authorization header", status=403)
scheme = "AWS4-HMAC-SHA256"
if not auth.startswith(scheme + " "):
if auth:
if not auth.startswith(scheme + " "):
return _s3_error("InvalidRequest", "Signature Version 4 is required", status=400)
parts: Dict[str, str] = {}
for segment in auth[len(scheme) + 1 :].split(","):
k, _, v = segment.strip().partition("=")
parts[k] = v
credential = parts.get("Credential")
signed_headers = parts.get("SignedHeaders")
signature = parts.get("Signature")
if not credential or not signed_headers or not signature:
return _s3_error("InvalidRequest", "Authorization header is malformed", status=400)
cred_parts = credential.split("/")
if len(cred_parts) != 5 or cred_parts[-1] != "aws4_request":
return _s3_error("InvalidRequest", "Credential scope is invalid", status=400)
access_key, datestamp, region, service, _ = cred_parts
if access_key != settings["access_key"]:
return _s3_error(
"InvalidAccessKeyId",
"The AWS Access Key Id you provided does not exist in our records.",
status=403,
)
if service != "s3":
return _s3_error("InvalidRequest", "Only service 's3' is supported", status=400)
if settings.get("region") and region != settings["region"]:
return _s3_error("AuthorizationHeaderMalformed", f"Region '{region}' is invalid", status=400)
amz_date = request.headers.get("x-amz-date")
if not amz_date or not amz_date.startswith(datestamp):
return _s3_error("AuthorizationHeaderMalformed", "x-amz-date does not match credential scope", status=400)
payload_hash = request.headers.get("x-amz-content-sha256")
if not payload_hash:
return _s3_error("AuthorizationHeaderMalformed", "Missing x-amz-content-sha256", status=400)
if payload_hash.upper().startswith("STREAMING-AWS4-HMAC-SHA256"):
return _s3_error("NotImplemented", "Chunked uploads are not supported", status=400)
signed_header_names = [h.strip().lower() for h in signed_headers.split(";") if h.strip()]
headers = {k.lower(): v for k, v in request.headers.items()}
canonical_headers = []
for name in signed_header_names:
value = headers.get(name)
if value is None:
return _s3_error("AuthorizationHeaderMalformed", f"Signed header '{name}' missing", status=400)
canonical_headers.append(f"{name}:{_normalize_ws(value)}\n")
canonical_request = "\n".join(
[
request.method,
_canonical_uri(request.url.path),
_canonical_query(request.query_params.multi_items()),
"".join(canonical_headers),
";".join(signed_header_names),
payload_hash,
]
)
hashed_request = hashlib.sha256(canonical_request.encode("utf-8")).hexdigest()
scope = "/".join([datestamp, region, "s3", "aws4_request"])
string_to_sign = "\n".join([scheme, amz_date, scope, hashed_request])
k_date = _sign(("AWS4" + settings["secret_key"]).encode("utf-8"), datestamp)
k_region = hmac.new(k_date, region.encode("utf-8"), hashlib.sha256).digest()
k_service = hmac.new(k_region, b"s3", hashlib.sha256).digest()
k_signing = hmac.new(k_service, b"aws4_request", hashlib.sha256).digest()
expected = hmac.new(k_signing, string_to_sign.encode("utf-8"), hashlib.sha256).hexdigest()
if expected != signature:
return _s3_error(
"SignatureDoesNotMatch",
"The request signature we calculated does not match the signature you provided.",
status=403,
)
return None
params = request.query_params
q_multi = params.multi_items()
q_lower = {k.lower(): v for k, v in q_multi}
signature = q_lower.get("x-amz-signature")
if not signature:
return _s3_error("AccessDenied", "Missing Authorization header", status=403)
algorithm = q_lower.get("x-amz-algorithm")
if not algorithm or algorithm != scheme:
return _s3_error("InvalidRequest", "Signature Version 4 is required", status=400)
parts: Dict[str, str] = {}
for segment in auth[len(scheme) + 1 :].split(","):
k, _, v = segment.strip().partition("=")
parts[k] = v
credential = parts.get("Credential")
signed_headers = parts.get("SignedHeaders")
signature = parts.get("Signature")
if not credential or not signed_headers or not signature:
return _s3_error("InvalidRequest", "Authorization header is malformed", status=400)
credential = q_lower.get("x-amz-credential")
signed_headers = q_lower.get("x-amz-signedheaders")
amz_date = q_lower.get("x-amz-date")
expires_raw = q_lower.get("x-amz-expires")
if not credential or not signed_headers or not amz_date:
return _s3_error("AuthorizationQueryParametersError", "Query-string authentication is malformed", status=400)
cred_parts = credential.split("/")
if len(cred_parts) != 5 or cred_parts[-1] != "aws4_request":
return _s3_error("InvalidRequest", "Credential scope is invalid", status=400)
return _s3_error("AuthorizationQueryParametersError", "Credential scope is invalid", status=400)
access_key, datestamp, region, service, _ = cred_parts
if access_key != settings["access_key"]:
return _s3_error("InvalidAccessKeyId", "The AWS Access Key Id you provided does not exist in our records.", status=403)
return _s3_error(
"InvalidAccessKeyId",
"The AWS Access Key Id you provided does not exist in our records.",
status=403,
)
if service != "s3":
return _s3_error("InvalidRequest", "Only service 's3' is supported", status=400)
if region != settings["region"]:
if settings.get("region") and region != settings["region"]:
return _s3_error("AuthorizationHeaderMalformed", f"Region '{region}' is invalid", status=400)
amz_date = request.headers.get("x-amz-date")
if not amz_date or not amz_date.startswith(datestamp):
return _s3_error("AuthorizationHeaderMalformed", "x-amz-date does not match credential scope", status=400)
if not amz_date.startswith(datestamp):
return _s3_error("AuthorizationQueryParametersError", "X-Amz-Date does not match credential scope", status=400)
payload_hash = request.headers.get("x-amz-content-sha256")
if not payload_hash:
return _s3_error("AuthorizationHeaderMalformed", "Missing x-amz-content-sha256", status=400)
if expires_raw:
try:
expires = int(expires_raw)
except ValueError:
expires = 0
if expires > 0:
try:
signed_at = dt.datetime.strptime(amz_date, "%Y%m%dT%H%M%SZ")
if dt.datetime.utcnow() > signed_at + dt.timedelta(seconds=expires):
return _s3_error("AccessDenied", "Request has expired", status=403)
except Exception:
pass
payload_hash = request.headers.get("x-amz-content-sha256") or "UNSIGNED-PAYLOAD"
if payload_hash.upper().startswith("STREAMING-AWS4-HMAC-SHA256"):
return _s3_error("NotImplemented", "Chunked uploads are not supported", status=400)
@@ -166,14 +271,15 @@ async def _authorize_sigv4(request: Request, settings: S3Settings) -> Optional[R
for name in signed_header_names:
value = headers.get(name)
if value is None:
return _s3_error("AuthorizationHeaderMalformed", f"Signed header '{name}' missing", status=400)
return _s3_error("AuthorizationQueryParametersError", f"Signed header '{name}' missing", status=400)
canonical_headers.append(f"{name}:{_normalize_ws(value)}\n")
canonical_query_items = [(k, v) for k, v in q_multi if k.lower() != "x-amz-signature"]
canonical_request = "\n".join(
[
request.method,
_canonical_uri(request.url.path),
_canonical_query(request.query_params.multi_items()),
_canonical_query(canonical_query_items),
"".join(canonical_headers),
";".join(signed_header_names),
payload_hash,
@@ -190,7 +296,11 @@ async def _authorize_sigv4(request: Request, settings: S3Settings) -> Optional[R
k_signing = hmac.new(k_service, b"aws4_request", hashlib.sha256).digest()
expected = hmac.new(k_signing, string_to_sign.encode("utf-8"), hashlib.sha256).hexdigest()
if expected != signature:
return _s3_error("SignatureDoesNotMatch", "The request signature we calculated does not match the signature you provided.", status=403)
return _s3_error(
"SignatureDoesNotMatch",
"The request signature we calculated does not match the signature you provided.",
status=403,
)
return None
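The key chain used in both branches follows the standard AWS Signature Version 4 derivation; a standalone sketch with hypothetical inputs:

import hashlib
import hmac

def sigv4_signature(secret_key: str, datestamp: str, region: str, string_to_sign: str) -> str:
    # Key chain: kDate -> kRegion -> kService("s3") -> kSigning, then HMAC the string-to-sign.
    k_date = hmac.new(("AWS4" + secret_key).encode(), datestamp.encode(), hashlib.sha256).digest()
    k_region = hmac.new(k_date, region.encode(), hashlib.sha256).digest()
    k_service = hmac.new(k_region, b"s3", hashlib.sha256).digest()
    k_signing = hmac.new(k_service, b"aws4_request", hashlib.sha256).digest()
    return hmac.new(k_signing, string_to_sign.encode(), hashlib.sha256).hexdigest()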
@@ -315,7 +425,382 @@ def _resource_path(bucket: str, key: Optional[str] = None) -> str:
return f"/s3/{bucket}"
def _safe_upload_id(upload_id: Optional[str]) -> Optional[str]:
if not upload_id:
return None
value = upload_id.strip()
if not value:
return None
if "/" in value or "\\" in value:
return None
return value
def _mpu_dir(upload_id: str) -> str:
return os.path.join(_MPU_ROOT, upload_id)
def _mpu_meta_path(upload_id: str) -> str:
return os.path.join(_mpu_dir(upload_id), _MPU_META_NAME)
def _mpu_part_data_path(upload_id: str, part_number: int) -> str:
return os.path.join(_mpu_dir(upload_id), _MPU_PART_DATA_TMPL.format(part_number=part_number))
def _mpu_part_meta_path(upload_id: str, part_number: int) -> str:
return os.path.join(_mpu_dir(upload_id), _MPU_PART_META_TMPL.format(part_number=part_number))
async def _read_json(path: str) -> Optional[Dict[str, Any]]:
try:
async with aiofiles.open(path, "r", encoding="utf-8") as f:
raw = await f.read()
data = json.loads(raw or "{}")
return data if isinstance(data, dict) else None
except FileNotFoundError:
return None
except Exception:
return None
async def _write_json(path: str, data: Dict[str, Any]) -> None:
os.makedirs(os.path.dirname(path), exist_ok=True)
async with aiofiles.open(path, "w", encoding="utf-8") as f:
await f.write(json.dumps(data, ensure_ascii=False))
async def _load_mpu_meta(bucket: str, key: str, upload_id: Optional[str]) -> Tuple[Optional[Dict[str, Any]], Optional[Response]]:
safe_id = _safe_upload_id(upload_id)
if not safe_id:
return None, _s3_error(
"NoSuchUpload",
"The specified upload does not exist.",
_resource_path(bucket, key),
status=404,
)
meta = await _read_json(_mpu_meta_path(safe_id))
if not meta or meta.get("bucket") != bucket or meta.get("key") != key:
return None, _s3_error(
"NoSuchUpload",
"The specified upload does not exist.",
_resource_path(bucket, key),
status=404,
)
return meta, None
def _parse_int(value: Optional[str], default: int) -> int:
if value is None:
return default
try:
return int(value)
except ValueError:
return default
async def _create_multipart_upload(request: Request, settings: S3Settings, bucket: str, key: str) -> Response:
os.makedirs(_MPU_ROOT, exist_ok=True)
upload_id = uuid.uuid4().hex
dir_path = _mpu_dir(upload_id)
while True:
try:
os.makedirs(dir_path, exist_ok=False)
break
except FileExistsError:
upload_id = uuid.uuid4().hex
dir_path = _mpu_dir(upload_id)
meta = {
"bucket": bucket,
"key": key,
"virtual_path": _virtual_path(settings, key),
"initiated": _now_iso(),
}
await _write_json(_mpu_meta_path(upload_id), meta)
_, headers = _meta_headers()
xml = (
f"<?xml version=\"1.0\" encoding=\"UTF-8\"?>"
f"<CreateMultipartUploadResult xmlns=\"{_XML_NS}\">"
f"<Bucket>{bucket}</Bucket>"
f"<Key>{key}</Key>"
f"<UploadId>{upload_id}</UploadId>"
f"</CreateMultipartUploadResult>"
)
headers.update({"Content-Type": "application/xml"})
return Response(content=xml, media_type="application/xml", headers=headers)
async def _upload_part(request: Request, bucket: str, key: str, upload_id: Optional[str], part_number_raw: Optional[str]) -> Response:
part_number = _parse_int(part_number_raw, 0)
if part_number <= 0:
return _s3_error("InvalidArgument", "partNumber is invalid", _resource_path(bucket, key), status=400)
meta, err = await _load_mpu_meta(bucket, key, upload_id)
if err:
return err
assert meta
safe_id = _safe_upload_id(upload_id)
assert safe_id
part_path = _mpu_part_data_path(safe_id, part_number)
tmp_path = part_path + ".tmp"
md5 = hashlib.md5()
size = 0
async with aiofiles.open(tmp_path, "wb") as f:
async for chunk in request.stream():
if not chunk:
continue
await f.write(chunk)
md5.update(chunk)
size += len(chunk)
etag = '"' + md5.hexdigest() + '"'
os.replace(tmp_path, part_path)
await _write_json(
_mpu_part_meta_path(safe_id, part_number),
{"PartNumber": part_number, "ETag": etag, "Size": size, "LastModified": _now_iso()},
)
_, headers = _meta_headers()
headers.update({"ETag": etag, "Content-Length": "0"})
return Response(status_code=200, headers=headers)
async def _list_parts(request: Request, settings: S3Settings, bucket: str, key: str, upload_id: Optional[str]) -> Response:
meta, err = await _load_mpu_meta(bucket, key, upload_id)
if err:
return err
assert meta
safe_id = _safe_upload_id(upload_id)
assert safe_id
dir_path = _mpu_dir(safe_id)
part_metas: List[Dict[str, Any]] = []
try:
filenames = os.listdir(dir_path)
except FileNotFoundError:
filenames = []
for name in filenames:
m = _MPU_PART_META_RE.match(name)
if not m:
continue
pn = int(m.group(1))
info = await _read_json(os.path.join(dir_path, name))
if not info:
continue
info.setdefault("PartNumber", pn)
part_metas.append(info)
part_metas.sort(key=lambda item: int(item.get("PartNumber") or 0))
max_parts = max(1, min(1000, _parse_int(request.query_params.get("max-parts"), 1000)))
marker = max(0, _parse_int(request.query_params.get("part-number-marker"), 0))
filtered = [p for p in part_metas if int(p.get("PartNumber") or 0) > marker]
is_truncated = len(filtered) > max_parts
shown = filtered[:max_parts]
next_marker = int(shown[-1]["PartNumber"]) if is_truncated and shown else 0
_, headers = _meta_headers()
body = [f"<?xml version=\"1.0\" encoding=\"UTF-8\"?>", f"<ListPartsResult xmlns=\"{_XML_NS}\">"]
body.append(f"<Bucket>{bucket}</Bucket>")
body.append(f"<Key>{key}</Key>")
body.append(f"<UploadId>{safe_id}</UploadId>")
body.append(
f"<Initiator><ID>{settings['access_key']}</ID><DisplayName>Foxel</DisplayName></Initiator>"
)
body.append(
f"<Owner><ID>{settings['access_key']}</ID><DisplayName>Foxel</DisplayName></Owner>"
)
body.append("<StorageClass>STANDARD</StorageClass>")
body.append(f"<PartNumberMarker>{marker}</PartNumberMarker>")
body.append(f"<NextPartNumberMarker>{next_marker}</NextPartNumberMarker>")
body.append(f"<MaxParts>{max_parts}</MaxParts>")
body.append(f"<IsTruncated>{str(is_truncated).lower()}</IsTruncated>")
for part in shown:
pn = int(part.get("PartNumber") or 0)
etag = part.get("ETag") or ""
size = int(part.get("Size") or 0)
last_modified = part.get("LastModified") or _now_iso()
body.append(
f"<Part><PartNumber>{pn}</PartNumber><LastModified>{last_modified}</LastModified><ETag>{etag}</ETag><Size>{size}</Size></Part>"
)
body.append("</ListPartsResult>")
xml = "".join(body)
headers.update({"Content-Type": "application/xml"})
return Response(content=xml, media_type="application/xml", headers=headers)
async def _abort_multipart_upload(bucket: str, key: str, upload_id: Optional[str]) -> Response:
_, err = await _load_mpu_meta(bucket, key, upload_id)
if err:
return err
safe_id = _safe_upload_id(upload_id)
assert safe_id
shutil.rmtree(_mpu_dir(safe_id), ignore_errors=True)
_, headers = _meta_headers()
return Response(status_code=204, headers=headers)
def _parse_complete_parts(body_bytes: bytes) -> List[Tuple[int, str]]:
if not body_bytes:
return []
root = ET.fromstring(body_bytes)
parts: List[Tuple[int, str]] = []
for part_el in root.findall(".//{*}Part"):
pn_el = part_el.find("{*}PartNumber")
etag_el = part_el.find("{*}ETag")
if pn_el is None or pn_el.text is None:
continue
pn = _parse_int(pn_el.text.strip(), 0)
if pn <= 0:
continue
etag = (etag_el.text or "").strip() if etag_el is not None else ""
parts.append((pn, etag))
parts.sort(key=lambda item: item[0])
return parts
async def _complete_multipart_upload(request: Request, settings: S3Settings, bucket: str, key: str, upload_id: Optional[str]) -> Response:
meta, err = await _load_mpu_meta(bucket, key, upload_id)
if err:
return err
assert meta
safe_id = _safe_upload_id(upload_id)
assert safe_id
try:
body_bytes = await request.body()
except Exception:
body_bytes = b""
try:
parts_req = _parse_complete_parts(body_bytes)
except Exception:
return _s3_error("MalformedXML", "The XML you provided was not well-formed.", _resource_path(bucket, key), status=400)
if not parts_req:
return _s3_error("MalformedXML", "CompleteMultipartUpload parts missing.", _resource_path(bucket, key), status=400)
part_metas: List[Dict[str, Any]] = []
for pn, _etag in parts_req:
info = await _read_json(_mpu_part_meta_path(safe_id, pn))
if not info:
return _s3_error("InvalidPart", "One or more of the specified parts could not be found.", _resource_path(bucket, key), status=400)
info.setdefault("PartNumber", pn)
part_metas.append(info)
async def merged_iter() -> AsyncIterator[bytes]:
for info in part_metas:
pn = int(info.get("PartNumber") or 0)
part_path = _mpu_part_data_path(safe_id, pn)
async with aiofiles.open(part_path, "rb") as f:
while True:
chunk = await f.read(1024 * 1024)
if not chunk:
break
yield chunk
await VirtualFSService.write_file_stream(meta.get("virtual_path") or _virtual_path(settings, key), merged_iter(), overwrite=True)
etag = ""
if len(part_metas) == 1:
etag = str(part_metas[0].get("ETag") or "")
else:
md5_bytes = bytearray()
for info in part_metas:
raw = str(info.get("ETag") or "").strip().strip('"')
try:
md5_bytes.extend(bytes.fromhex(raw))
except ValueError:
pass
digest = hashlib.md5(bytes(md5_bytes)).hexdigest() if md5_bytes else hashlib.md5(b"").hexdigest()
etag = '"' + f"{digest}-{len(part_metas)}" + '"'
shutil.rmtree(_mpu_dir(safe_id), ignore_errors=True)
_, headers = _meta_headers()
headers.update({"Content-Type": "application/xml", "ETag": etag})
location = str(request.url.replace(query=""))
xml = (
f"<?xml version=\"1.0\" encoding=\"UTF-8\"?>"
f"<CompleteMultipartUploadResult xmlns=\"{_XML_NS}\">"
f"<Location>{location}</Location>"
f"<Bucket>{bucket}</Bucket>"
f"<Key>{key}</Key>"
f"<ETag>{etag}</ETag>"
f"</CompleteMultipartUploadResult>"
)
return Response(content=xml, media_type="application/xml", headers=headers)
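The multi-part ETag computed above is the MD5 of the concatenated raw per-part MD5 digests, suffixed with the part count (the S3 convention). A standalone sketch with hypothetical part data:

import hashlib

def multipart_etag(part_md5_hexdigests: list[str]) -> str:
    # Concatenate the raw 16-byte digests of every part, hash the result, append "-<part count>".
    combined = b"".join(bytes.fromhex(h) for h in part_md5_hexdigests)
    return '"' + hashlib.md5(combined).hexdigest() + f"-{len(part_md5_hexdigests)}" + '"'

parts = [hashlib.md5(b"part one").hexdigest(), hashlib.md5(b"part two").hexdigest()]
print(multipart_etag(parts))  # a quoted digest ending in "-2"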
async def _list_multipart_uploads(request: Request, settings: S3Settings, bucket: str) -> Response:
os.makedirs(_MPU_ROOT, exist_ok=True)
prefix = request.query_params.get("prefix") or ""
max_uploads = max(1, min(1000, _parse_int(request.query_params.get("max-uploads"), 1000)))
key_marker = request.query_params.get("key-marker") or ""
upload_id_marker = request.query_params.get("upload-id-marker") or ""
uploads: List[Tuple[str, str, str]] = []
try:
ids = os.listdir(_MPU_ROOT)
except FileNotFoundError:
ids = []
for uid in ids:
safe_id = _safe_upload_id(uid)
if not safe_id:
continue
meta = await _read_json(_mpu_meta_path(safe_id))
if not meta:
continue
if meta.get("bucket") != bucket:
continue
key = str(meta.get("key") or "")
if prefix and not key.startswith(prefix):
continue
initiated = str(meta.get("initiated") or _now_iso())
uploads.append((key, safe_id, initiated))
uploads.sort(key=lambda item: (item[0], item[1]))
if key_marker:
uploads = [
it
for it in uploads
if (it[0] > key_marker) or (it[0] == key_marker and it[1] > upload_id_marker)
]
is_truncated = len(uploads) > max_uploads
shown = uploads[:max_uploads]
next_key_marker = shown[-1][0] if is_truncated and shown else ""
next_upload_id_marker = shown[-1][1] if is_truncated and shown else ""
_, headers = _meta_headers()
body = [f"<?xml version=\"1.0\" encoding=\"UTF-8\"?>", f"<ListMultipartUploadsResult xmlns=\"{_XML_NS}\">"]
body.append(f"<Bucket>{bucket}</Bucket>")
body.append(f"<Prefix>{prefix}</Prefix>")
body.append(f"<KeyMarker>{key_marker}</KeyMarker>")
body.append(f"<UploadIdMarker>{upload_id_marker}</UploadIdMarker>")
body.append(f"<NextKeyMarker>{next_key_marker}</NextKeyMarker>")
body.append(f"<NextUploadIdMarker>{next_upload_id_marker}</NextUploadIdMarker>")
body.append(f"<MaxUploads>{max_uploads}</MaxUploads>")
body.append(f"<IsTruncated>{str(is_truncated).lower()}</IsTruncated>")
for key, uid, initiated in shown:
body.append(
f"<Upload><Key>{key}</Key><UploadId>{uid}</UploadId>"
f"<Initiator><ID>{settings['access_key']}</ID><DisplayName>Foxel</DisplayName></Initiator>"
f"<Owner><ID>{settings['access_key']}</ID><DisplayName>Foxel</DisplayName></Owner>"
f"<StorageClass>STANDARD</StorageClass><Initiated>{initiated}</Initiated></Upload>"
)
body.append("</ListMultipartUploadsResult>")
xml = "".join(body)
headers.update({"Content-Type": "application/xml"})
return Response(content=xml, media_type="application/xml", headers=headers)
@router.get("")
@audit(action=AuditAction.READ, description="S3: 列出桶")
async def list_buckets(request: Request):
if (resp := await _ensure_enabled()) is not None:
return resp
@@ -338,6 +823,7 @@ async def list_buckets(request: Request):
@router.get("/{bucket}")
@audit(action=AuditAction.READ, description="S3: 列出对象")
async def list_objects(request: Request, bucket: str):
if (resp := await _ensure_enabled()) is not None:
return resp
@@ -351,6 +837,8 @@ async def list_objects(request: Request, bucket: str):
return auth
params = request.query_params
if "uploads" in params:
return await _list_multipart_uploads(request, settings, bucket)
if params.get("list-type", "2") != "2":
return _s3_error("InvalidArgument", "Only ListObjectsV2 (list-type=2) is supported.", _resource_path(bucket), status=400)
@@ -478,12 +966,18 @@ async def _stat_object(settings: S3Settings, key: str) -> Tuple[Optional[Dict],
@router.api_route("/{bucket}/{object_path:path}", methods=["GET", "HEAD"])
@audit(action=AuditAction.DOWNLOAD, description="S3: get object")
async def object_get_head(request: Request, bucket: str, object_path: str):
settings, error = await _ensure_bucket_and_auth(request, bucket)
if error:
return error
assert settings
key = object_path.lstrip("/")
upload_id = request.query_params.get("uploadId") or request.query_params.get("uploadid")
if upload_id and request.method == "GET":
return await _list_parts(request, settings, bucket, key, upload_id)
if upload_id and request.method == "HEAD":
return _s3_error("MethodNotAllowed", "Method Not Allowed", _resource_path(bucket, key), status=405)
meta, err = await _stat_object(settings, key)
if err:
return err
@@ -502,12 +996,17 @@ async def object_get_head(request: Request, bucket: str, object_path: str):
@router.put("/{bucket}/{object_path:path}")
@audit(action=AuditAction.UPLOAD, description="S3: 上传对象")
async def put_object(request: Request, bucket: str, object_path: str):
settings, error = await _ensure_bucket_and_auth(request, bucket)
if error:
return error
assert settings
key = object_path.lstrip("/")
upload_id = request.query_params.get("uploadId") or request.query_params.get("uploadid")
part_number = request.query_params.get("partNumber") or request.query_params.get("partnumber")
if upload_id and part_number:
return await _upload_part(request, bucket, key, upload_id, part_number)
await VirtualFSService.write_file_stream(_virtual_path(settings, key), request.stream(), overwrite=True)
meta, err = await _stat_object(settings, key)
if err:
@@ -521,13 +1020,35 @@ async def put_object(request: Request, bucket: str, object_path: str):
return Response(status_code=200, headers=headers)
@router.post("/{bucket}/{object_path:path}")
@audit(action=AuditAction.UPLOAD, description="S3: Multipart 上传")
async def post_object(request: Request, bucket: str, object_path: str):
settings, error = await _ensure_bucket_and_auth(request, bucket)
if error:
return error
assert settings
key = object_path.lstrip("/")
params = request.query_params
upload_id = params.get("uploadId") or params.get("uploadid")
if "uploads" in params:
return await _create_multipart_upload(request, settings, bucket, key)
if upload_id:
return await _complete_multipart_upload(request, settings, bucket, key, upload_id)
return _s3_error("InvalidRequest", "Unsupported POST operation.", _resource_path(bucket, key), status=400)
@router.delete("/{bucket}/{object_path:path}")
@audit(action=AuditAction.DELETE, description="S3: delete object")
async def delete_object(request: Request, bucket: str, object_path: str):
settings, error = await _ensure_bucket_and_auth(request, bucket)
if error:
return error
assert settings
key = object_path.lstrip("/")
upload_id = request.query_params.get("uploadId") or request.query_params.get("uploadid")
if upload_id:
return await _abort_multipart_upload(bucket, key, upload_id)
try:
await VirtualFSService.delete_path(_virtual_path(settings, key))
except HTTPException as exc:

View File

@@ -1,4 +1,3 @@
from __future__ import annotations
import base64
import hashlib
import mimetypes
@@ -9,6 +8,7 @@ from typing import Optional
from fastapi import APIRouter, Request, Response, HTTPException, Depends
import xml.etree.ElementTree as ET
from domain.audit import AuditAction, audit
from domain.auth.service import AuthService
from domain.auth.types import User, UserInDB
from domain.virtual_fs.service import VirtualFSService
@@ -142,11 +142,13 @@ def _normalize_fs_path(path: str) -> str:
@router.options("/{path:path}")
async def options_root(path: str = "", _enabled: None = Depends(_ensure_webdav_enabled)):
@audit(action=AuditAction.READ, description="WebDAV: OPTIONS", user_kw="user")
async def options_root(_request: Request, path: str = "", _enabled: None = Depends(_ensure_webdav_enabled)):
return Response(status_code=200, headers=_dav_headers())
@router.api_route("/{path:path}", methods=["PROPFIND"])
@audit(action=AuditAction.READ, description="WebDAV: PROPFIND", user_kw="user")
async def propfind(
request: Request,
path: str,
@@ -194,6 +196,7 @@ async def propfind(
@router.get("/{path:path}")
@audit(action=AuditAction.DOWNLOAD, description="WebDAV: GET", user_kw="user")
async def dav_get(
path: str,
request: Request,
@@ -206,8 +209,10 @@ async def dav_get(
@router.head("/{path:path}")
@audit(action=AuditAction.READ, description="WebDAV: HEAD", user_kw="user")
async def dav_head(
path: str,
_request: Request,
_enabled: None = Depends(_ensure_webdav_enabled),
user: User = Depends(_get_basic_user),
):
@@ -232,6 +237,7 @@ async def dav_head(
@router.api_route("/{path:path}", methods=["PUT"])
@audit(action=AuditAction.UPLOAD, description="WebDAV: PUT", user_kw="user")
async def dav_put(
path: str,
request: Request,
@@ -248,8 +254,10 @@ async def dav_put(
@router.api_route("/{path:path}", methods=["DELETE"])
@audit(action=AuditAction.DELETE, description="WebDAV: DELETE", user_kw="user")
async def dav_delete(
path: str,
_request: Request,
_enabled: None = Depends(_ensure_webdav_enabled),
user: User = Depends(_get_basic_user),
):
@@ -259,8 +267,10 @@ async def dav_delete(
@router.api_route("/{path:path}", methods=["MKCOL"])
@audit(action=AuditAction.CREATE, description="WebDAV: MKCOL", user_kw="user")
async def dav_mkcol(
path: str,
_request: Request,
_enabled: None = Depends(_ensure_webdav_enabled),
user: User = Depends(_get_basic_user),
):
@@ -282,7 +292,13 @@ def _parse_destination(dest: str) -> str:
@router.api_route("/{path:path}", methods=["MOVE"])
async def dav_move(path: str, request: Request, user: User = Depends(_get_basic_user)):
@audit(action=AuditAction.UPDATE, description="WebDAV: MOVE", user_kw="user")
async def dav_move(
path: str,
request: Request,
_enabled: None = Depends(_ensure_webdav_enabled),
user: User = Depends(_get_basic_user),
):
full_src = _normalize_fs_path(path)
dest_header = request.headers.get("Destination")
dst = _parse_destination(dest_header or "")
@@ -292,7 +308,13 @@ async def dav_move(path: str, request: Request, user: User = Depends(_get_basic_
@router.api_route("/{path:path}", methods=["COPY"])
async def dav_copy(path: str, request: Request, user: User = Depends(_get_basic_user)):
@audit(action=AuditAction.CREATE, description="WebDAV: COPY", user_kw="user")
async def dav_copy(
path: str,
request: Request,
_enabled: None = Depends(_ensure_webdav_enabled),
user: User = Depends(_get_basic_user),
):
full_src = _normalize_fs_path(path)
dest_header = request.headers.get("Destination")
dst = _parse_destination(dest_header or "")

View File

@@ -1,5 +1,3 @@
from __future__ import annotations
from typing import Any
from fastapi import HTTPException
@@ -25,6 +23,11 @@ class VirtualFSProcessingMixin(VirtualFSTransferMixin):
raise HTTPException(400, detail=f"Processor {processor_type} not found")
actual_is_dir = await cls.path_is_directory(path)
requires_input_bytes = bool(getattr(processor, "requires_input_bytes", True))
if actual_is_dir and bool(getattr(processor, "supports_directory", False)):
if save_to:
raise HTTPException(400, detail="Directory processing does not support custom save_to path")
return await processor.process(b"", path, config)
supported_exts = getattr(processor, "supported_exts", None) or []
allowed_exts = {str(ext).lower().lstrip(".") for ext in supported_exts if isinstance(ext, str)}
@@ -78,7 +81,9 @@ class VirtualFSProcessingMixin(VirtualFSTransferMixin):
if not matches_extension(child_rel):
continue
absolute_path = cls._build_absolute_path(adapter_model.path, child_rel)
data = await cls.read_file(absolute_path)
data = b""
if requires_input_bytes:
data = await cls.read_file(absolute_path)
result = await processor.process(data, absolute_path, config)
if getattr(processor, "produces_file", False):
result_bytes = coerce_result_bytes(result)
@@ -91,7 +96,9 @@ class VirtualFSProcessingMixin(VirtualFSTransferMixin):
return {"processed_files": processed_count}
data = await cls.read_file(path)
data = b""
if requires_input_bytes:
data = await cls.read_file(path)
result = await processor.process(data, path, config)
target_path = save_to

View File

@@ -1,5 +1,3 @@
from __future__ import annotations
from typing import Tuple
from fastapi import HTTPException

View File

@@ -1,5 +1,3 @@
from __future__ import annotations
import mimetypes
import re
@@ -7,7 +5,13 @@ from fastapi import HTTPException, UploadFile
from fastapi.responses import Response
from domain.config.service import ConfigService
from domain.virtual_fs.thumbnail import get_or_create_thumb, is_image_filename, is_raw_filename, is_video_filename
from domain.virtual_fs.thumbnail import (
get_or_create_thumb,
is_image_filename,
is_raw_filename,
is_video_filename,
raw_bytes_to_jpeg,
)
from .temp_link import VirtualFSTempLinkMixin
@@ -18,19 +22,9 @@ class VirtualFSRouteMixin(VirtualFSTempLinkMixin):
full_path = cls._normalize_path(full_path)
if is_raw_filename(full_path):
import io
import rawpy
from PIL import Image
try:
raw_data = await cls.read_file(full_path)
with rawpy.imread(io.BytesIO(raw_data)) as raw:
rgb = raw.postprocess(use_camera_wb=True, output_bps=8)
im = Image.fromarray(rgb)
buf = io.BytesIO()
im.save(buf, "JPEG", quality=90)
content = buf.getvalue()
content = raw_bytes_to_jpeg(raw_data, filename=full_path)
return Response(content=content, media_type="image/jpeg")
except FileNotFoundError:
raise HTTPException(404, detail="File not found")

View File

@@ -1,5 +1,6 @@
from fastapi import APIRouter, Depends, Query
from api.response import success
from domain.auth.service import get_current_active_user
from domain.auth.types import User
from domain.virtual_fs.search.search_service import VirtualFSSearchService
@@ -17,10 +18,11 @@ async def search_files(
user: User = Depends(get_current_active_user),
):
if not q.strip():
return {"items": [], "query": q}
return success({"items": [], "query": q, "mode": mode})
top_k = max(top_k, 1)
page = max(page, 1)
page_size = max(min(page_size, 100), 1)
return await VirtualFSSearchService.search(q, top_k, mode, page, page_size)
data = await VirtualFSSearchService.search(q, top_k, mode, page, page_size)
return success(data)
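A hedged sketch of calling this endpoint from a client; the base URL and route prefix are assumptions, only the query parameters (q, mode, top_k, page, page_size) come from the handler above:

import asyncio
import httpx

async def search(q: str, mode: str = "vector", page: int = 1, page_size: int = 20):
    params = {"q": q, "mode": mode, "top_k": 10, "page": page, "page_size": page_size}
    # Adjust base_url and path to the deployment's actual mount point.
    async with httpx.AsyncClient(base_url="http://localhost:8000") as client:
        resp = await client.get("/api/fs/search", params=params)
        resp.raise_for_status()
        return resp.json()

# asyncio.run(search("quarterly report"))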

View File

@@ -2,7 +2,7 @@ from typing import Any, Dict, List, Tuple
from domain.virtual_fs.types import SearchResultItem
from domain.ai.inference import get_text_embedding
from domain.ai.service import VectorDBService
from domain.ai.service import VectorDBService, VECTOR_COLLECTION_NAME, FILE_COLLECTION_NAME
def _normalize_result(raw: Dict[str, Any], source: str, fallback_score: float = 0.0) -> SearchResultItem:
@@ -53,7 +53,7 @@ async def _vector_search(query: str, top_k: int) -> List[SearchResultItem]:
return []
try:
raw_results = await vector_db.search_vectors("vector_collection", embedding, max(top_k, 10))
raw_results = await vector_db.search_vectors(VECTOR_COLLECTION_NAME, embedding, max(top_k, 10))
except Exception:
return []
@@ -68,12 +68,15 @@ async def _filename_search(query: str, page: int, page_size: int) -> Tuple[List[
vector_db = VectorDBService()
limit = max(page * page_size + 1, page_size * (page + 2))
limit = min(limit, 2000)
try:
raw_results = await vector_db.search_by_path("vector_collection", query, limit)
except Exception:
return [], False
records: List[Dict[str, Any]] = []
for collection_name in (FILE_COLLECTION_NAME, VECTOR_COLLECTION_NAME):
try:
raw_results = await vector_db.search_by_path(collection_name, query, limit)
except Exception:
continue
if raw_results:
records.extend(raw_results[0] or [])
records = raw_results[0] if raw_results else []
deduped: List[SearchResultItem] = []
seen_paths: set[str] = set()
for record in records or []:

View File

@@ -1,5 +1,3 @@
from __future__ import annotations
from .common import VirtualFSCommonMixin
from .resolver import VirtualFSResolverMixin
from .listing import VirtualFSListingMixin

View File

@@ -1,5 +1,3 @@
from __future__ import annotations
import base64
import hashlib
import hmac

View File

@@ -1,12 +1,14 @@
from __future__ import annotations
import asyncio
import inspect
import io
import hashlib
import subprocess
import tempfile
from contextlib import suppress
from pathlib import Path
from typing import Tuple
from PIL import Image
from fastapi import HTTPException
ALLOWED_EXT = {"jpg", "jpeg", "png", "webp", "gif", "bmp",
@@ -14,8 +16,12 @@ ALLOWED_EXT = {"jpg", "jpeg", "png", "webp", "gif", "bmp",
RAW_EXT = {"arw", "cr2", "cr3", "nef", "rw2", "orf", "pef", "dng"}
VIDEO_EXT = {"mp4", "mov", "m4v", "avi", "mkv", "wmv", "flv", "webm", "mpg", "mpeg", "3gp"}
MAX_IMAGE_SOURCE_SIZE = 200 * 1024 * 1024
VIDEO_RANGE_LIMIT = 16 * 1024 * 1024 # 16MB
VIDEO_INITIAL_CHUNK = 4 * 1024 * 1024
VIDEO_TAIL_LIMIT = 2 * 1024 * 1024 # 2MB
VIDEO_TAIL_FALLBACK_LIMIT = 4 * 1024 * 1024 # 4MB
VIDEO_HEAD_LIMIT = 2 * 1024 * 1024 # 2MB
VIDEO_HEAD_FALLBACK_LIMIT = 4 * 1024 * 1024 # 4MB
VIDEO_THUMB_SEEK_SECONDS = (15, 10, 5, 3, 1, 0)
VIDEO_BLACK_FRAME_MEAN_THRESHOLD = 12.0
CACHE_ROOT = Path('data/.thumb_cache')
@@ -55,7 +61,6 @@ def _ensure_cache_dir(p: Path):
def _image_to_webp(im, w: int, h: int, fit: str) -> Tuple[bytes, str]:
from PIL import Image
if im.mode not in ("RGB", "RGBA"):
im = im.convert("RGBA" if im.mode in ("P", "LA") else "RGB")
if fit == 'cover':
@@ -78,30 +83,91 @@ def _image_to_webp(im, w: int, h: int, fit: str) -> Tuple[bytes, str]:
return buf.getvalue(), 'image/webp'
def generate_thumb(data: bytes, w: int, h: int, fit: str, is_raw: bool = False) -> Tuple[bytes, str]:
from PIL import Image
if is_raw:
def _load_image_with_pillow(data: bytes):
im = Image.open(io.BytesIO(data))
im.load()
return im
def _load_raw_with_ffmpeg(data: bytes, filename: str | None) -> "Image.Image":
src_path: str | None = None
dst_path: str | None = None
try:
with tempfile.NamedTemporaryFile(suffix=Path(filename or "").suffix or ".raw", delete=False) as src_tmp:
src_tmp.write(data)
src_path = src_tmp.name
dst_tmp = tempfile.NamedTemporaryFile(suffix=".png", delete=False)
dst_path = dst_tmp.name
dst_tmp.close()
cmd = [
"ffmpeg",
"-y",
"-hide_banner",
"-loglevel", "error",
"-i", src_path,
"-frames:v", "1",
dst_path,
]
try:
import rawpy
with rawpy.imread(io.BytesIO(data)) as raw:
try:
thumb = raw.extract_thumb()
except rawpy.LibRawNoThumbnailError:
thumb = None
result = subprocess.run(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
check=True,
)
except FileNotFoundError as e:
raise RuntimeError("未找到 ffmpeg可执行文件需要在 PATH 中") from e
except subprocess.CalledProcessError as e:
stderr = (e.stderr or b"").decode().strip()
stdout = (e.stdout or b"").decode().strip()
message = stderr or stdout or "ffmpeg failed to convert the RAW file"
raise RuntimeError(message) from e
if thumb is not None and thumb.format in [rawpy.ThumbFormat.JPEG, rawpy.ThumbFormat.BITMAP]:
im = Image.open(io.BytesIO(thumb.data))
else:
rgb = raw.postprocess(
use_camera_wb=False, use_auto_wb=True, output_bps=8)
im = Image.fromarray(rgb)
except Exception as e:
print(f"rawpy processing failed: {e}")
raise e
with open(dst_path, "rb") as f:
img_bytes = f.read()
im = Image.open(io.BytesIO(img_bytes))
im.load()
return im
finally:
if dst_path:
with suppress(FileNotFoundError):
Path(dst_path).unlink()
if src_path:
with suppress(FileNotFoundError):
Path(src_path).unlink()
else:
im = Image.open(io.BytesIO(data))
def load_image_from_bytes(data: bytes, *, filename: str | None = None, is_raw: bool = False):
if not is_raw:
return _load_image_with_pillow(data)
first_error: Exception | None = None
try:
return _load_image_with_pillow(data)
except Exception as exc:
first_error = exc
try:
return _load_raw_with_ffmpeg(data, filename)
except Exception as exc:
msg = f"RAW 解码失败: ffmpeg 处理异常 {exc}"
if first_error:
msg = f"RAW 解码失败: Pillow 异常 {first_error}; ffmpeg 异常 {exc}"
raise RuntimeError(msg) from exc
def raw_bytes_to_jpeg(data: bytes, filename: str | None = None) -> bytes:
im = load_image_from_bytes(data, filename=filename, is_raw=True)
if im.mode != "RGB":
im = im.convert("RGB")
buf = io.BytesIO()
im.save(buf, "JPEG", quality=90)
return buf.getvalue()
def generate_thumb(data: bytes, w: int, h: int, fit: str, is_raw: bool = False, filename: str | None = None) -> Tuple[bytes, str]:
im = load_image_from_bytes(data, filename=filename, is_raw=is_raw)
return _image_to_webp(im, w, h, fit)
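A small usage sketch of raw_bytes_to_jpeg as defined above; the file paths are hypothetical:

with open("photo.arw", "rb") as f:  # hypothetical RAW file
    jpeg_bytes = raw_bytes_to_jpeg(f.read(), filename="photo.arw")
with open("photo.jpg", "wb") as f:
    f.write(jpeg_bytes)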
@@ -177,42 +243,58 @@ async def _read_range_slice(adapter, root: str, rel: str, start: int, end: int)
return b""
async def _read_video_prefix(adapter, root: str, rel: str, size: int, limit: int = VIDEO_RANGE_LIMIT) -> bytes:
chunk_size = min(VIDEO_INITIAL_CHUNK, limit)
offset = 0
collected = bytearray()
while len(collected) < limit:
end = offset + chunk_size - 1
data = await _read_range_slice(adapter, root, rel, offset, end)
if not data:
break
collected.extend(data)
if len(data) < chunk_size:
break
offset += len(data)
remaining = limit - len(collected)
if remaining <= 0:
break
chunk_size = min(chunk_size * 2, remaining)
if not collected and size <= limit:
read_file = getattr(adapter, "read_file", None)
if callable(read_file):
blob = await read_file(root, rel)
if blob:
return bytes(blob[:limit])
return bytes(collected[:limit])
async def _read_video_head(adapter, root: str, rel: str, size: int, limit: int = VIDEO_HEAD_LIMIT) -> bytes:
end = limit - 1
if size > 0:
end = min(end, size - 1)
if end < 0:
return b""
return await _read_range_slice(adapter, root, rel, 0, end)
async def _run_ffmpeg_extract_frame(src_path: str, dst_path: str):
async def _read_video_tail(adapter, root: str, rel: str, size: int, limit: int) -> Tuple[bytes, int]:
if size <= 0:
return b"", 0
start = max(0, size - limit)
end = size - 1
data = await _read_range_slice(adapter, root, rel, start, end)
return data, start
def _write_video_sparse_file(rel: str, head_bytes: bytes, tail_bytes: bytes, tail_offset: int) -> str:
suffix = Path(rel).suffix or ".mp4"
src_tmp = tempfile.NamedTemporaryFile(suffix=suffix, delete=False)
src_path = src_tmp.name
try:
if head_bytes:
src_tmp.write(head_bytes)
src_tmp.flush()
finally:
src_tmp.close()
if tail_bytes:
with open(src_path, "r+b") as f:
f.seek(max(0, int(tail_offset)))
f.write(tail_bytes)
f.flush()
return src_path
async def _run_ffmpeg_extract_frame(src_path: str, dst_path: str, *, seek_seconds: float | None = None):
cmd = [
"ffmpeg",
"-y",
"-hide_banner",
"-loglevel", "error",
"-i", src_path,
]
is_http_input = src_path.startswith(("http://", "https://"))
if is_http_input and seek_seconds is not None:
cmd += ["-ss", str(seek_seconds), "-i", src_path]
else:
cmd += ["-i", src_path]
if seek_seconds is not None:
cmd += ["-ss", str(seek_seconds)]
cmd += [
"-frames:v", "1",
dst_path,
]
@@ -231,32 +313,72 @@ async def _run_ffmpeg_extract_frame(src_path: str, dst_path: str):
raise RuntimeError(message)
async def _generate_video_thumb(video_bytes: bytes, rel: str, w: int, h: int, fit: str) -> Tuple[bytes, str]:
from PIL import Image
def _frame_mean_luma(im) -> float:
from PIL import ImageStat
gray = im.convert("L").resize((64, 64))
return float(ImageStat.Stat(gray).mean[0])
suffix = Path(rel).suffix or ".mp4"
src_tmp = tempfile.NamedTemporaryFile(suffix=suffix, delete=False)
src_path = src_tmp.name
try:
src_tmp.write(video_bytes)
src_tmp.flush()
finally:
src_tmp.close()
def _is_black_image_bytes(image_bytes: bytes) -> bool:
from PIL import Image
with Image.open(io.BytesIO(image_bytes)) as im:
im.load()
return _frame_mean_luma(im) < VIDEO_BLACK_FRAME_MEAN_THRESHOLD
async def _generate_video_thumb_from_src_path(src_path: str, w: int, h: int, fit: str) -> Tuple[bytes, str]:
from PIL import Image
dst_tmp = tempfile.NamedTemporaryFile(suffix=".png", delete=False)
dst_path = dst_tmp.name
dst_tmp.close()
best: tuple[float, bytes, str] | None = None
last_error: Exception | None = None
try:
await _run_ffmpeg_extract_frame(src_path, dst_path)
with Image.open(dst_path) as im:
im.load()
return _image_to_webp(im, w, h, fit)
for seek_seconds in VIDEO_THUMB_SEEK_SECONDS:
try:
with suppress(FileNotFoundError):
Path(dst_path).unlink()
await _run_ffmpeg_extract_frame(src_path, dst_path, seek_seconds=seek_seconds)
with Image.open(dst_path) as im:
im.load()
mean = _frame_mean_luma(im)
webp_bytes, mime = _image_to_webp(im, w, h, fit)
if best is None or mean > best[0]:
best = (mean, webp_bytes, mime)
if mean >= VIDEO_BLACK_FRAME_MEAN_THRESHOLD:
return webp_bytes, mime
except Exception as e:
last_error = e
continue
if best is not None:
return best[1], best[2]
if last_error is not None:
raise last_error
raise RuntimeError("ffmpeg 截帧失败")
finally:
with suppress(FileNotFoundError):
Path(dst_path).unlink()
async def _generate_video_thumb_from_segments(
head_bytes: bytes,
tail_bytes: bytes,
tail_offset: int,
rel: str,
w: int,
h: int,
fit: str,
) -> Tuple[bytes, str]:
src_path = _write_video_sparse_file(rel, head_bytes, tail_bytes, tail_offset)
try:
return await _generate_video_thumb_from_src_path(src_path, w, h, fit)
finally:
with suppress(FileNotFoundError):
Path(src_path).unlink()
with suppress(FileNotFoundError):
Path(dst_path).unlink()
async def get_or_create_thumb(adapter, adapter_id: int, root: str, rel: str, w: int, h: int, fit: str = 'cover'):
@@ -295,28 +417,87 @@ async def get_or_create_thumb(adapter, adapter_id: int, root: str, rel: str, w:
if not thumb_bytes:
if is_video:
try:
video_bytes = await _read_video_prefix(adapter, root, rel, size)
except HTTPException:
raise
except Exception as e:
print(f"Video prefix read failed: {e}")
raise HTTPException(500, detail=f"Video read failed: {e}")
async def _maybe_transcoding_thumb() -> Tuple[bytes, str] | None:
fid = (stat or {}).get("fid") if isinstance(stat, dict) else None
get_url = getattr(adapter, "get_video_transcoding_url", None)
if not fid or not callable(get_url):
return None
try:
url = await get_url(str(fid))
except Exception as e:
print(f"Video transcoding url fetch failed: {e}")
return None
if not url:
return None
try:
return await _generate_video_thumb_from_src_path(url, w, h, fit)
except Exception as e:
print(f"Video transcoding thumbnail generation failed: {e}")
return None
if not video_bytes:
def _is_hevc_decoder_missing(exc: Exception) -> bool:
msg = str(exc).lower()
return ("no decoder found" in msg) and ("hevc" in msg or "h265" in msg)
async def _read_head(limit: int) -> bytes:
try:
return await _read_video_head(adapter, root, rel, size, limit=limit)
except HTTPException:
raise
except Exception as e:
print(f"Video head read failed: {e}")
raise HTTPException(500, detail=f"Video read failed: {e}")
async def _read_tail(limit: int) -> Tuple[bytes, int]:
try:
return await _read_video_tail(adapter, root, rel, size, limit=limit)
except HTTPException:
raise
except Exception as e:
print(f"Video tail read failed: {e}")
raise HTTPException(500, detail=f"Video read failed: {e}")
head_bytes = await _read_head(VIDEO_HEAD_LIMIT)
tail_bytes, tail_offset = await _read_tail(VIDEO_TAIL_LIMIT)
if not head_bytes and not tail_bytes:
raise HTTPException(500, detail="Unable to read video data for thumbnail")
try:
thumb_bytes, mime = await _generate_video_thumb(video_bytes, rel, w, h, fit)
except Exception as e:
print(f"Video thumbnail generation failed: {e}")
raise HTTPException(
500, detail=f"Video thumbnail generation failed: {e}")
thumb_bytes, mime = await _generate_video_thumb_from_segments(
head_bytes, tail_bytes, tail_offset, rel, w, h, fit
)
except Exception as e1:
if _is_hevc_decoder_missing(e1):
got = await _maybe_transcoding_thumb()
if got is not None:
thumb_bytes, mime = got
if not thumb_bytes:
try:
tail_bytes, tail_offset = await _read_tail(VIDEO_TAIL_FALLBACK_LIMIT)
thumb_bytes, mime = await _generate_video_thumb_from_segments(
head_bytes, tail_bytes, tail_offset, rel, w, h, fit
)
except HTTPException:
raise
except Exception as e2:
print(f"Video thumbnail generation failed: {e2}")
raise HTTPException(500, detail=f"Video thumbnail generation failed: {e2}")
if thumb_bytes and _is_black_image_bytes(thumb_bytes):
try:
head_bytes = await _read_head(VIDEO_HEAD_FALLBACK_LIMIT)
retry_thumb, retry_mime = await _generate_video_thumb_from_segments(
head_bytes, tail_bytes, tail_offset, rel, w, h, fit
)
if retry_thumb and not _is_black_image_bytes(retry_thumb):
thumb_bytes, mime = retry_thumb, retry_mime
except Exception:
pass
else:
read_data = await adapter.read_file(root, rel)
try:
thumb_bytes, mime = generate_thumb(
read_data, w, h, fit, is_raw=is_raw_filename(rel))
read_data, w, h, fit, is_raw=is_raw_filename(rel), filename=rel)
except Exception as e:
print(e)
raise HTTPException(

View File

@@ -1,5 +1,3 @@
from __future__ import annotations
import shutil
from pathlib import Path
from typing import Any, Dict, List, Tuple

View File

@@ -1,4 +1,5 @@
#!/bin/bash
set -e
python migrate/run.py
exec gunicorn -k uvicorn.workers.UvicornWorker -w 1 -b 0.0.0.0:80 main:app
port="${FOXEL_PORT:-80}"
exec gunicorn -k uvicorn.workers.UvicornWorker -w 1 -b "0.0.0.0:${port}" main:app

main.py
View File

@@ -52,10 +52,19 @@ async def spa_fallback_middleware(request: Request, call_next):
@asynccontextmanager
async def lifespan(app: FastAPI):
os.makedirs("data/db", exist_ok=True)
os.makedirs("data/plugins", exist_ok=True)
await init_db()
await runtime_registry.refresh()
await ConfigService.set("APP_VERSION", VERSION)
await task_queue_service.start_worker()
# 加载已安装的插件
from domain.plugins.startup import init_plugins
await init_plugins(app)
# 在所有路由加载完成后,挂载静态文件服务(放在最后以避免覆盖 API 路由)
app.mount("/", SPAStaticFiles(directory="web/dist", html=True, check_dir=False), name="static")
try:
yield
finally:
@@ -86,7 +95,6 @@ app.add_middleware(
allow_methods=["*"],
allow_headers=["*"],
)
app.mount("/", SPAStaticFiles(directory="web/dist", html=True, check_dir=False), name="static")
if __name__ == "__main__":
import uvicorn
uvicorn.run("main:app", host="0.0.0.0", port=8000, reload=True)

View File

@@ -168,22 +168,28 @@ class ShareLink(Model):
class Plugin(Model):
id = fields.IntField(pk=True)
url = fields.CharField(max_length=2048)
enabled = fields.BooleanField(default=True)
key = fields.CharField(max_length=100, null=True)
key = fields.CharField(max_length=100, unique=True) # 插件唯一标识
name = fields.CharField(max_length=255, null=True)
version = fields.CharField(max_length=50, null=True)
supported_exts = fields.JSONField(null=True)
default_bounds = fields.JSONField(null=True)
default_maximized = fields.BooleanField(null=True)
icon = fields.CharField(max_length=2048, null=True)
description = fields.TextField(null=True)
author = fields.CharField(max_length=255, null=True)
website = fields.CharField(max_length=2048, null=True)
github = fields.CharField(max_length=2048, null=True)
license = fields.CharField(max_length=100, null=True)
# full manifest storage
manifest = fields.JSONField(null=True)
# frontend config (extracted from manifest.frontend)
open_app = fields.BooleanField(default=False)
supported_exts = fields.JSONField(null=True)
default_bounds = fields.JSONField(null=True)
default_maximized = fields.BooleanField(null=True)
icon = fields.CharField(max_length=2048, null=True)
# tracking of loaded components
loaded_routes = fields.JSONField(null=True) # ["/api/plugins/xxx", ...]
loaded_processors = fields.JSONField(null=True) # ["processor_type", ...]
created_at = fields.DatetimeField(auto_now_add=True)
updated_at = fields.DatetimeField(auto_now=True)
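The model now persists the full manifest plus fields extracted from it. A hypothetical manifest.json showing how these columns would be populated (all values illustrative, not from a real plugin):

{
  "key": "image-tools",
  "name": "Image Tools",
  "version": "0.1.0",
  "author": "example",
  "license": "MIT",
  "frontend": {
    "open_app": true,
    "supported_exts": ["png", "jpg"],
    "default_bounds": { "width": 960, "height": 640 },
    "default_maximized": false,
    "icon": "assets/icon.svg"
  }
}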

View File

@@ -3,24 +3,21 @@ name = "foxel"
version = "1"
description = "foxel.cc"
readme = "README.md"
requires-python = ">=3.13"
requires-python = ">=3.14"
dependencies = [
"aioboto3>=15.2.0",
"aiofiles>=25.1.0",
"fastapi>=0.116.1",
"passlib[bcrypt]>=1.7.4",
"bcrypt>=3.2.2,<4.0",
"pillow>=11.3.0",
"pyjwt>=2.10.1",
"pysocks>=1.7.1",
"python-dotenv>=1.1.1",
"python-multipart>=0.0.20",
"qdrant-client>=1.15.1",
"rawpy>=0.25.1",
"telethon>=1.41.2",
"tortoise-orm>=0.25.1",
"uvicorn>=0.37.0",
"pymilvus[milvus-lite]>=2.6.2",
"aioboto3>=15.5.0",
"bcrypt>=5.0.0",
"fastapi>=0.127.0",
"paramiko>=4.0.0",
"pydantic[email]>=2.11.7",
"pillow>=12.0.0",
"pydantic[email]>=2.12.5",
"pyjwt>=2.10.1",
"pymilvus[milvus-lite]>=2.6.5",
"pysocks>=1.7.1",
"python-dotenv>=1.2.1",
"python-multipart>=0.0.21",
"qdrant-client>=1.16.2",
"telethon>=1.42.0",
"tortoise-orm>=0.25.3",
"uvicorn>=0.40.0",
]
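Since the project resolves dependencies through uv (see the regenerated uv.lock below), a bump like this is typically applied with the standard uv commands; whether the project runs them exactly this way is an assumption:

uv lock --upgrade   # re-resolve against the new minimum versions
uv sync             # install the resolved set into the project environment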

View File

@@ -1,31 +1,30 @@
#!/bin/bash
#================================================================================
# Foxel one-click deploy & update script
#
# Author: maxage
# Version: 1.7 (adds download mirrors to work around network issues)
# Description: automates installing, configuring and managing Foxel (via Docker Compose).
# - Detects an existing install and offers either a setup wizard or a management menu.
# - Detects and installs missing dependencies.
# - Offers mirror switching for users in mainland China.
#
# One-liner:
# Foxel one-click install & management script (Docker Compose)
# Run with:
# bash <(curl -sL "https://raw.githubusercontent.com/DrizzleTime/Foxel/main/setup/foxel.sh?_=$(date +%s)")
#================================================================================
#
# --- Message helpers ---
info() {
echo "[信息] $1"
}
# --- Colored output (disable with NO_COLOR=1) ---
if [[ -t 1 && -z "${NO_COLOR:-}" ]]; then
C_RESET='\033[0m'
C_RED='\033[31m'
C_GREEN='\033[32m'
C_YELLOW='\033[33m'
C_BLUE='\033[34m'
else
C_RESET=''
C_RED=''
C_GREEN=''
C_YELLOW=''
C_BLUE=''
fi
warn() {
echo "[警告] $1"
}
error() {
echo "[错误] $1"
}
info() { printf "%b[信息]%b %s\n" "$C_BLUE" "$C_RESET" "$*"; }
success() { printf "%b[成功]%b %s\n" "$C_GREEN" "$C_RESET" "$*"; }
warn() { printf "%b[警告]%b %s\n" "$C_YELLOW" "$C_RESET" "$*"; }
error() { printf "%b[错误]%b %s\n" "$C_RED" "$C_RESET" "$*"; }
# --- Basic helpers ---
command_exists() {
@@ -34,16 +33,33 @@ command_exists() {
confirm_action() {
local prompt_message="$1"
printf "%s" "${prompt_message} (y/n): "
read confirmation
if [[ "$confirmation" =~ ^[Yy]$ ]]; then
return 0 # Yes
local default="${2:-N}"
local hint='[y/N]'
local confirmation
if [[ "$default" =~ ^[Yy]$ ]]; then
default="Y"
hint='[Y/n]'
else
return 1 # No
default="N"
hint='[y/N]'
fi
while true; do
read -r -p "${prompt_message} ${hint}: " confirmation
if [[ -z "$confirmation" ]]; then
[[ "$default" == "Y" ]] && return 0 || return 1
fi
case "$confirmation" in
[Yy]|[Yy][Ee][Ss]) return 0 ;;
[Nn]|[Nn][Oo]) return 1 ;;
*) warn "请输入 y 或 n。" ;;
esac
done
}
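confirm_action now takes an optional second argument for the default used on a bare Enter. A usage sketch (prompt text illustrative):

# Empty input counts as "yes" here; an explicit n/no still returns 1.
if confirm_action "Continue?" "Y"; then
    info "continuing"
fi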
# --- IP detection helpers (print the IP only) ---
get_public_ipv4() {
curl -4 -s --max-time 2 https://api.ipify.org || \
curl -4 -s --max-time 2 https://ifconfig.me/ip || \
@@ -65,7 +81,7 @@ get_private_ip() {
# --- Dependency & environment checks ---
check_and_install_dependencies() {
info "正在检查所需依赖..."
info "检查依赖..."
declare -A deps=( [curl]="curl" [openssl]="openssl" [ss]="iproute2" )
local missing_deps=()
for cmd in "${!deps[@]}"; do
@@ -75,8 +91,8 @@ check_and_install_dependencies() {
done
if [ ${#missing_deps[@]} -gt 0 ]; then
warn "检测到以下依赖项缺失: ${missing_deps[*]}"
if confirm_action "是否尝试自动安装它们"; then
warn "缺少依赖: ${missing_deps[*]}"
if confirm_action "是否尝试自动安装?" "Y"; then
local pm_cmd=""
if command_exists apt-get; then pm_cmd="sudo apt-get update && sudo apt-get install -y";
elif command_exists yum; then pm_cmd="sudo yum install -y";
@@ -87,12 +103,12 @@ check_and_install_dependencies() {
for cmd in "${!deps[@]}"; do
if ! command_exists "$cmd"; then error "依赖 '${deps[$cmd]}' 自动安装失败。"; exit 1; fi
done
info "依赖已成功安装。"
success "依赖安装完成。"
else
error "用户取消了安装。请先手动安装依赖: ${missing_deps[*]}"; exit 1
fi
else
info "所有基础依赖已满足。"
success "依赖已满足。"
fi
}
@@ -101,64 +117,107 @@ initialize_environment() {
if ! command_exists docker; then
error "未找到 Docker。请参照官方文档安装: https://docs.docker.com/engine/install/"; exit 1;
fi
if ! docker info &> /dev/null; then error "Docker deamon 未在运行。请先启动 Docker。"; exit 1; fi
info "Docker 环境检测通过。"
if ! docker info &> /dev/null; then error "Docker daemon 未在运行。请先启动 Docker。"; exit 1; fi
success "Docker 环境正常。"
if command_exists docker-compose; then COMPOSE_CMD="docker-compose";
elif docker compose version &> /dev/null; then COMPOSE_CMD="docker compose";
else error "未找到 Docker Compose。请安装 Docker Compose v1 或 v2。"; exit 1; fi
info "检测到 Docker Compose 命令: $COMPOSE_CMD"
info "Docker Compose: $COMPOSE_CMD"
}
set_image_source_official() {
sed -i -E 's|^([[:space:]]*)#?image:[[:space:]]*ghcr\.io/drizzletime/foxel:latest|\1image: ghcr.io/drizzletime/foxel:latest|' compose.yaml
sed -i -E 's|^([[:space:]]*)#?image:[[:space:]]*ghcr\.nju\.edu\.cn/drizzletime/foxel:latest|\1#image: ghcr.nju.edu.cn/drizzletime/foxel:latest|' compose.yaml
}
set_image_source_mirror() {
sed -i -E 's|^([[:space:]]*)#?image:[[:space:]]*ghcr\.io/drizzletime/foxel:latest|\1#image: ghcr.io/drizzletime/foxel:latest|' compose.yaml
sed -i -E 's|^([[:space:]]*)#?image:[[:space:]]*ghcr\.nju\.edu\.cn/drizzletime/foxel:latest|\1image: ghcr.nju.edu.cn/drizzletime/foxel:latest|' compose.yaml
}
choose_image_source() {
echo
info "请选择镜像源:"
echo "1) ghcr.io (默认)"
echo "2) ghcr.nju.edu.cn (国内)"
local image_choice
read -r -p "请选择 [1-2] (默认 1): " image_choice
image_choice="${image_choice:-1}"
case "$image_choice" in
1)
set_image_source_official
info "已选择: ghcr.io"
;;
2)
set_image_source_mirror
info "已选择: ghcr.nju.edu.cn"
;;
*)
warn "无效选择,使用默认 ghcr.io"
set_image_source_official
;;
esac
}
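The two set_image_source_* helpers assume compose.yaml ships both image: lines, one commented out, roughly like this (abridged sketch):

services:
  foxel:
    image: ghcr.io/drizzletime/foxel:latest
    #image: ghcr.nju.edu.cn/drizzletime/foxel:latest

The sed expressions simply flip which of the two lines carries the leading #.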
# --- Fresh install flow ---
install_new_foxel() {
info "--- 开始 Foxel 全新安装 ---"
local install_path
info "开始全新安装..."
local foxel_dir
local default_dir="/opt/foxel"
while true; do
read -p "请输入您想在哪里创建 Foxel 的数据目录 (例如: /opt/docker): " install_path
if [[ -z "$install_path" ]]; then warn "输入不能为空,请重新输入。"; continue; fi
if [ ! -d "$install_path" ]; then
if confirm_action "目录 '$install_path' 不存在。您想现在创建它吗?"; then
mkdir -p "$install_path"
if [ $? -eq 0 ]; then info "目录 '$install_path' 创建成功。"; break;
else error "创建目录 '$install_path' 失败。"; fi
else info "操作已取消。"; fi
else info "将使用已存在的目录 '$install_path'。"; break; fi
read -r -p "请输入 Foxel 安装目录 (默认: ${default_dir}): " foxel_dir
foxel_dir="${foxel_dir:-$default_dir}"
if [[ -f "$foxel_dir/compose.yaml" ]]; then
warn "检测到已存在: $foxel_dir/compose.yaml"
if confirm_action "是否覆盖它?" "N"; then
mv "$foxel_dir/compose.yaml" "$foxel_dir/compose.yaml.bak.$(date +%s)"
info "已备份为: $foxel_dir/compose.yaml.bak.*"
else
continue
fi
fi
if [[ -d "$foxel_dir" ]]; then
break
fi
if confirm_action "目录不存在,是否创建?" "Y"; then
if mkdir -p "$foxel_dir"; then
break
fi
error "创建目录失败: $foxel_dir"
fi
done
echo
local foxel_dir="$install_path/Foxel"
info "将在 '$foxel_dir' 目录中创建所需文件..."
info "准备目录: $foxel_dir"
mkdir -p "$foxel_dir/data/"{db,mount} && chmod 777 "$foxel_dir/data/"{db,mount}
if [ $? -ne 0 ]; then error "创建或设置子目录权限失败。"; exit 1; fi
cd "$foxel_dir" || exit
info "正在下载 'compose.yaml'..."
info "下载 compose.yaml..."
local COMPOSE_MIRROR_URL="https://ghproxy.com/https://raw.githubusercontent.com/DrizzleTime/Foxel/main/compose.yaml"
local COMPOSE_OFFICIAL_URL="https://raw.githubusercontent.com/DrizzleTime/Foxel/main/compose.yaml"
if ! curl -L -o compose.yaml "$COMPOSE_MIRROR_URL"; then
warn "镜像下载失败,正在尝试官方源下载..."
if ! curl -L -o compose.yaml "$COMPOSE_OFFICIAL_URL"; then
if ! curl -fsSL -o compose.yaml "$COMPOSE_MIRROR_URL"; then
warn "镜像下载失败,尝试官方源..."
if ! curl -fsSL -o compose.yaml "$COMPOSE_OFFICIAL_URL"; then
error "下载 'compose.yaml' 失败。请检查您的网络连接。"; exit 1;
fi
fi
info "'compose.yaml' 下载成功。"
success "compose.yaml 下载成功。"
echo
if confirm_action "您的服务器是否位于中国大陆(以便为您选择更快的镜像源)?"; then
info "正在切换到国内镜像源..."
sed -i 's|^\( *\)image: ghcr.io/drizzletime/foxel:latest|\1#image: ghcr.io/drizzletime/foxel:latest|' compose.yaml
sed -i 's|^\( *\)#image: ghcr.nju.edu.cn/drizzletime/foxel:latest|\1image: ghcr.nju.edu.cn/drizzletime/foxel:latest|' compose.yaml
info "已成功切换到 ghcr.nju.edu.cn 镜像源。"
else
info "将使用默认的 ghcr.io 官方镜像源。"
fi
choose_image_source
echo
local new_port
while true; do
read -p "请输入新的对外端口 (或直接按回车使用默认 8088): " new_port
read -r -p "请输入对外端口 (默认 8088): " new_port
if [[ -z "$new_port" ]]; then
new_port="8088"
info "将使用默认端口 8088。"
@@ -173,30 +232,29 @@ install_new_foxel() {
if ss -tuln | grep -q ":${new_port}\b"; then
warn "端口 $new_port 已被占用,请换一个。"
else
sed -i "s/\"8088:80\"/\"$new_port:80\"/" compose.yaml
sed -i -E "s|(FOXEL_HOST_PORT:-)[0-9]{1,5}|\\1$new_port|" compose.yaml
info "端口已成功修改为 $new_port"
break
fi
done
echo
if ! confirm_action "是否需要生成新的随机密钥 (推荐)(选择 'n' 将使用默认值)"; then
if ! confirm_action "是否生成新的随机密钥(推荐)?" "Y"; then
info "将使用 'compose.yaml' 文件中的默认密钥。"
else
info "正在生成新的随机密钥..."
sed -i "s|SECRET_KEY=.*|SECRET_KEY=$(openssl rand -base64 32)|" compose.yaml
sed -i "s|TEMP_LINK_SECRET_KEY=.*|TEMP_LINK_SECRET_KEY=$(openssl rand -base64 32)|" compose.yaml
info "新的密钥已成功生成并替换。"
success "新的密钥已写入 compose.yaml。"
fi
echo
if confirm_action "所有配置已准备就绪!您想现在启动 Foxel 项目吗"; then
info "正在启动 Foxel 服务... 这可能需要一些时间来拉取镜像。"
if confirm_action "配置完成,是否现在启动 Foxel" "Y"; then
info "启动中(首次会拉取镜像,可能需要几分钟)..."
$COMPOSE_CMD pull && $COMPOSE_CMD up -d
if [ $? -eq 0 ]; then
info "Foxel 部署成功!"
info "-------------------------------------------------"
info "正在检测服务器IP地址请稍候..."
success "Foxel 已启动。"
info "正在检测访问地址..."
# Capture all IP addresses first
local public_ipv4=$(get_public_ipv4 2>/dev/null)
@@ -206,7 +264,7 @@ install_new_foxel() {
local ip_found=false
echo
info "部署完成!您可以通过以下地址访问 Foxel:"
info "访问地址:"
if [[ -n "$private_ip" ]]; then
echo " - 局域网地址: http://${private_ip}:${final_port}"
@@ -226,12 +284,16 @@ install_new_foxel() {
warn "未能自动检测到服务器IP地址。"
echo " 请手动使用 http://[您的服务器IP]:${final_port} 访问它。"
fi
echo "-------------------------------------------------"
echo
info "常用命令:"
echo " - 启动/更新: cd $foxel_dir && $COMPOSE_CMD up -d"
echo " - 停止: cd $foxel_dir && $COMPOSE_CMD stop"
echo " - 日志: cd $foxel_dir && $COMPOSE_CMD logs -f"
else
error "启动 Foxel 失败。请运行 'cd $foxel_dir && $COMPOSE_CMD logs' 查看日志。"
fi
else
info "操作已取消。您可以稍后进入 '$foxel_dir' 并手动运行 '$COMPOSE_CMD up -d'。"
info "已跳过启动。稍后可运行cd $foxel_dir && $COMPOSE_CMD up -d"
fi
}
@@ -291,7 +353,7 @@ manage_existing_installation() {
case $choice in
1) # update
warn "更新前,强烈建议您备份 '$foxel_dir/data' 目录!"
if confirm_action "您确定要继续更新吗?"; then
if confirm_action "确认继续更新?" "Y"; then
info "正在拉取最新镜像..."
$COMPOSE_CMD pull
info "正在使用新镜像重新部署..."
@@ -302,14 +364,14 @@ manage_existing_installation() {
2) # uninstall
warn "这将停止并删除 Foxel 容器及相关网络!"
warn "强烈建议您先备份 '$foxel_dir/data' 目录!"
if confirm_action "您确定要继续卸载吗?"; then
if confirm_action "确认继续卸载?" "N"; then
info "正在停止并移除容器..."
$COMPOSE_CMD down
if confirm_action "是否删除所有数据卷(这将删除数据库等所有数据)?"; then
if confirm_action "是否删除所有数据卷(删除数据库等数据)?" "N"; then
$COMPOSE_CMD down -v
info "数据卷已删除。"
fi
if confirm_action "是否删除整个 Foxel 安装目录 '$foxel_dir'"; then
if confirm_action "是否删除 Foxel 安装目录 '$foxel_dir'" "N"; then
rm -rf "$foxel_dir"
info "安装目录已删除。"
fi
@@ -320,7 +382,7 @@ manage_existing_installation() {
3) # reinstall
warn "重新安装将完全删除当前的 Foxel 实例(包括数据),然后进入全新安装流程。"
warn "在继续之前,请务必备份好您的重要数据!"
if confirm_action "您确定要重新安装吗?"; then
if confirm_action "确认继续重新安装?" "N"; then
info "正在执行卸载..."
$COMPOSE_CMD down -v && rm -rf "$foxel_dir"
info "旧实例已彻底移除。"
@@ -344,9 +406,8 @@ manage_existing_installation() {
# --- Main ---
main() {
clear
local SCRIPT_VERSION="1.7"
echo "================================================="
info "欢迎使用 Foxel 一键安装与管理脚本 (版本: ${SCRIPT_VERSION})"
info "欢迎使用 Foxel 一键安装与管理脚本"
echo "================================================="
echo

setup/foxel_cli.py (new file, 157 lines)
View File

@@ -0,0 +1,157 @@
#!/usr/bin/env python3
from __future__ import annotations

import argparse
import os
import secrets
import sqlite3
import string
import sys
from pathlib import Path

PROJECT_ROOT = Path(__file__).resolve().parents[1]
if str(PROJECT_ROOT) not in sys.path:
    sys.path.insert(0, str(PROJECT_ROOT))

from domain.auth.service import get_password_hash
from domain.config.service import VERSION


def _project_root() -> Path:
    return PROJECT_ROOT


def _supports_color() -> bool:
    return sys.stderr.isatty() and not os.getenv("NO_COLOR")


def _print_banner() -> None:
    if not sys.stderr.isatty():
        return
    banner = "\n".join(
        [
            "███████╗ ██████╗ ██╗ ██╗███████╗██╗",
            "██╔════╝██╔═══██╗╚██╗██╔╝██╔════╝██║",
            "█████╗ ██║ ██║ ╚███╔╝ █████╗ ██║",
            "██╔══╝ ██║ ██║ ██╔██╗ ██╔══╝ ██║",
            "██║ ╚██████╔╝██╔╝ ██╗███████╗███████╗",
            "╚═╝ ╚═════╝ ╚═╝ ╚═╝╚══════╝╚══════╝",
        ]
    )
    title = f"Foxel Admin CLI {VERSION}"
    if _supports_color():
        c_reset = "\033[0m"
        c_bold = "\033[1m"
        c_orange = "\033[38;5;208m"
        c_orange_light = "\033[38;5;214m"
        c_orange_lighter = "\033[38;5;220m"
        banner_lines = banner.splitlines()
        shades = [
            c_orange,
            c_orange_light,
            c_orange_lighter,
            c_orange_lighter,
            c_orange_light,
            c_orange,
        ]
        for line, color in zip(banner_lines, shades, strict=False):
            print(f"{c_bold}{color}{line}{c_reset}", file=sys.stderr)
        print(f"{c_bold}{title}{c_reset}\n", file=sys.stderr)
    else:
        print(banner, file=sys.stderr)
        print(f"{title}\n", file=sys.stderr)


def _default_db_path() -> Path:
    return _project_root() / "data/db/db.sqlite3"


def _gen_password(length: int) -> str:
    alphabet = string.ascii_letters + string.digits
    return "".join(secrets.choice(alphabet) for _ in range(length))


def _find_user(conn: sqlite3.Connection, username_or_email: str) -> tuple[int, str] | None:
    cursor = conn.cursor()
    cursor.execute("SELECT id, username FROM user WHERE username = ?", (username_or_email,))
    row = cursor.fetchone()
    if row:
        return int(row[0]), str(row[1])
    cursor.execute("SELECT id, username FROM user WHERE email = ?", (username_or_email,))
    row = cursor.fetchone()
    if row:
        return int(row[0]), str(row[1])
    normalized = username_or_email.strip().lower()
    if normalized and normalized != username_or_email:
        cursor.execute("SELECT id, username FROM user WHERE email = ?", (normalized,))
        row = cursor.fetchone()
        if row:
            return int(row[0]), str(row[1])
    return None


def _cmd_reset_password(args: argparse.Namespace) -> int:
    db_path = Path(args.db).expanduser() if args.db else _default_db_path()
    if args.random:
        password = _gen_password(args.length)
    else:
        password = args.password
    hashed_password = get_password_hash(password)
    conn = sqlite3.connect(str(db_path))
    try:
        user = _find_user(conn, args.username_or_email)
        if not user:
            print(f"用户不存在: {args.username_or_email}", file=sys.stderr)
            return 1
        user_id, username = user
        conn.execute(
            "UPDATE user SET hashed_password = ? WHERE id = ?",
            (hashed_password, user_id),
        )
        conn.commit()
    finally:
        conn.close()
    if args.random:
        print(password)
    print(f"已重置用户密码: {username} (id={user_id})", file=sys.stderr)
    return 0


def _build_parser() -> argparse.ArgumentParser:
    parser = argparse.ArgumentParser(prog="foxel")
    subparsers = parser.add_subparsers(dest="command", required=True)
    reset_password = subparsers.add_parser("reset-password", help="重置用户密码")
    reset_password.add_argument("username_or_email", help="用户名或邮箱")
    reset_password.add_argument("password", nargs="?", help="新密码(或用 --random)")
    reset_password.add_argument("--random", action="store_true", help="生成随机密码并输出到 stdout")
    reset_password.add_argument("--length", type=int, default=16, help="随机密码长度(默认 16)")
    reset_password.add_argument("--db", help="sqlite db 路径(默认 data/db/db.sqlite3)")
    reset_password.set_defaults(func=_cmd_reset_password)
    return parser


def main(argv: list[str] | None = None) -> int:
    _print_banner()
    parser = _build_parser()
    args = parser.parse_args(argv)
    if args.command == "reset-password" and not args.random and not args.password:
        parser.error("reset-password 需要提供 password 或使用 --random")
    return int(args.func(args))


if __name__ == "__main__":
    raise SystemExit(main())
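Typical invocations, assuming the script is run from the project root so the domain.* imports resolve (the username is illustrative):

python setup/foxel_cli.py reset-password admin newS3cretValue
python setup/foxel_cli.py reset-password admin --random --length 20
python setup/foxel_cli.py reset-password admin --random --db /opt/foxel/data/db/db.sqlite3

With --random the generated password is the only thing written to stdout (the banner and confirmation go to stderr), so it can be captured in a shell variable.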

uv.lock (generated, 1026 lines changed)

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -12,9 +12,23 @@ export default tseslint.config([
extends: [
js.configs.recommended,
tseslint.configs.recommended,
reactHooks.configs['recommended-latest'],
reactRefresh.configs.vite,
],
plugins: {
'react-hooks': reactHooks,
'react-refresh': reactRefresh,
},
rules: {
'@typescript-eslint/no-explicit-any': 'off',
'react-hooks/rules-of-hooks': 'error',
'react-hooks/exhaustive-deps': 'warn',
'react-refresh/only-export-components': [
'error',
{
allowConstantExport: true,
allowExportNames: ['routes', 'useAuth', 'useTheme', 'useAppWindows', 'useI18n'],
},
],
},
languageOptions: {
ecmaVersion: 2020,
globals: globals.browser,

View File

@@ -6,13 +6,13 @@
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>Foxel</title>
<link rel='stylesheet'
href='https://chinese-fonts-cdn.deno.dev/packages/maple-mono-cn/dist/MapleMono-CN-Regular/result.css' />
href='https://foxel.cc/fonts/result.css' />
</head>
<body>
<style>
* {
font-family: 'Maple Mono CN';
font-family: 'Maple Mono Normal NL NF CN';
}
</style>
<div id="root"></div>

View File

@@ -10,30 +10,27 @@
"preview": "vite preview"
},
"dependencies": {
"@ant-design/icons": "5.x",
"@ant-design/v5-patch-for-react-19": "^1.0.3",
"@ant-design/icons": "6",
"@monaco-editor/react": "^4.7.0",
"@uiw/react-md-editor": "^4.0.8",
"antd": "^5.27.0",
"artplayer": "^5.2.5",
"antd": "6",
"artplayer": "^5.3.0",
"date-fns": "^4.1.0",
"monaco-editor": "^0.53.0",
"react": "^19.1.1",
"react-dom": "^19.1.1",
"react": "^19.2.3",
"react-dom": "^19.2.3",
"react-markdown": "^10.1.0",
"react-router": "^7.8.0"
"react-router": "^7.11.0"
},
"devDependencies": {
"@eslint/js": "^9.33.0",
"@types/react": "^19.1.10",
"@types/react-dom": "^19.1.7",
"@vitejs/plugin-react": "^5.0.0",
"eslint": "^9.33.0",
"eslint-plugin-react-hooks": "^5.2.0",
"eslint-plugin-react-refresh": "^0.4.20",
"globals": "^16.3.0",
"typescript": "~5.8.3",
"typescript-eslint": "^8.39.1",
"vite": "^7.1.2"
"@eslint/js": "^9.39.2",
"@types/react": "^19.2.7",
"@types/react-dom": "^19.2.3",
"@vitejs/plugin-react": "^5.1.2",
"eslint": "^9.39.2",
"eslint-plugin-react-hooks": "^7.0.1",
"eslint-plugin-react-refresh": "^0.4.26",
"globals": "^16.5.0",
"typescript": "~5.9.3",
"typescript-eslint": "^8.51.0",
"vite": "^7.3.0"
}
}

web/plugin-frame.html (new file, 28 lines)
View File

@@ -0,0 +1,28 @@
<!doctype html>
<html lang="zh-CN">
<head>
<meta charset="UTF-8" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>Foxel Plugin Frame</title>
<link rel='stylesheet' href='https://foxel.cc/fonts/result.css' />
<style>
html,
body,
#root {
height: 100%;
margin: 0;
}
* {
font-family: 'Maple Mono Normal NL NF CN';
}
</style>
</head>
<body>
<div id="root"></div>
<script type="module" src="/src/plugin-frame.ts"></script>
</body>
</html>

View File

@@ -49,7 +49,7 @@ async function request<T = any>(url: string, options: RequestOptions = {}): Prom
} else {
errMsg = (typeof data?.detail === 'string') ? data.detail : (data.detail ? JSON.stringify(data.detail) : JSON.stringify(data));
}
} catch (_) { }
} catch { void 0; }
throw new Error(errMsg || `Request failed: ${resp.status}`);
}

View File

@@ -1,13 +1,13 @@
import request from './client';
export async function getConfig(key: string) {
return request<{ key: string; value: string }>('/config?key=' + encodeURIComponent(key));
return request<{ key: string; value: string }>('/config/?key=' + encodeURIComponent(key));
}
export async function setConfig(key: string, value: string) {
export async function setConfig(key: string, value?: string | null) {
const form = new FormData();
form.append('key', key);
form.append('value', value);
form.append('value', value ?? '');
return request('/config/', { method: 'POST', formData: form });
}
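setConfig now tolerates a missing value by sending an empty string. A usage sketch (key and value illustrative):

await setConfig('APP_TITLE');                 // stores ''
await setConfig('APP_TITLE', 'My Foxel');
const { value } = await getConfig('APP_TITLE');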

View File

@@ -28,7 +28,67 @@ export interface RepoQueryParams {
pageSize?: number;
}
// Data structures for the foxel-core app center
export interface FoxelCoreApp {
key: string;
version: string;
name: {
zh: string;
en: string;
};
description: {
zh: string;
en: string;
};
author: string;
website: string;
tags: {
zh: string[];
en: string[];
};
approvedAt: number;
detailUrl: string;
downloadUrl: string;
}
export interface FoxelCoreAppsResponse {
apps: FoxelCoreApp[];
}
export interface FoxelCoreAppVersion {
version: string;
name: {
zh: string;
en: string;
};
description: {
zh: string;
en: string;
};
author: string;
website: string;
tags: {
zh: string[];
en: string[];
};
approvedAt: number;
releaseNotesMd: string | null;
}
export interface FoxelCoreAppDetail {
key: string;
latest: FoxelCoreAppVersion & {
downloadUrl: string;
};
versions: FoxelCoreAppVersion[];
}
export interface FoxelCoreAppDetailResponse {
app: FoxelCoreAppDetail;
}
const CENTER_BASE = 'https://center.foxel.cc';
const FOXEL_CORE_BASE = 'https://foxel.cc';
export function buildCenterUrl(path: string) {
return new URL(path, CENTER_BASE).href;
@@ -50,3 +110,46 @@ export async function fetchRepoList(params: RepoQueryParams = {}): Promise<RepoL
return await resp.json();
}
/**
* Fetch the app list from the foxel-core app center
*/
export async function fetchFoxelCoreApps(query?: string): Promise<FoxelCoreApp[]> {
const url = new URL('/api/apps', FOXEL_CORE_BASE);
const q = query?.trim();
if (q) {
url.searchParams.set('q', q);
}
const resp = await fetch(url.href);
if (!resp.ok) {
throw new Error(`Failed to fetch apps: ${resp.status}`);
}
const data: FoxelCoreAppsResponse = await resp.json();
return data.apps;
}
/**
* Fetch app details (including version history) from the foxel-core app center
*/
export async function fetchFoxelCoreAppDetail(appKey: string): Promise<FoxelCoreAppDetail> {
const url = `${FOXEL_CORE_BASE}/api/apps/${encodeURIComponent(appKey)}`;
const resp = await fetch(url);
if (!resp.ok) {
throw new Error(`Failed to fetch app detail: ${resp.status}`);
}
const data: FoxelCoreAppDetailResponse = await resp.json();
return data.app;
}
/**
* Download an app package file from foxel-core
*/
export async function downloadFoxelCoreApp(app: Pick<FoxelCoreApp, 'key' | 'version' | 'downloadUrl'>): Promise<File> {
const url = `${FOXEL_CORE_BASE}${app.downloadUrl}`;
const resp = await fetch(url);
if (!resp.ok) {
throw new Error(`Failed to download app: ${resp.status}`);
}
const blob = await resp.blob();
const filename = `${app.key}-${app.version}.foxpkg`;
return new File([blob], filename, { type: 'application/octet-stream' });
}
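A sketch of the fetch-and-download flow built from these helpers (the search term is illustrative):

// Search the app center and download the first hit as a File ready for install.
const apps = await fetchFoxelCoreApps('markdown');
if (apps.length > 0) {
  const pkg = await downloadFoxelCoreApp(apps[0]);
  console.log(pkg.name); // "<key>-<version>.foxpkg"
}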

View File

@@ -2,45 +2,67 @@ import request from './client';
export interface PluginItem {
id: number;
url: string;
enabled: boolean;
key?: string | null;
key: string;
open_app?: boolean | null;
name?: string | null;
version?: string | null;
supported_exts?: string[] | null;
default_bounds?: Record<string, any> | null;
default_bounds?: Record<string, number> | null;
default_maximized?: boolean | null;
icon?: string | null;
description?: string | null;
author?: string | null;
website?: string | null;
github?: string | null;
license?: string | null;
manifest?: Record<string, unknown> | null;
loaded_routes?: string[] | null;
loaded_processors?: string[] | null;
}
export interface PluginCreate {
url: string;
enabled?: boolean;
}
export interface PluginManifestUpdate {
key?: string;
name?: string;
version?: string;
supported_exts?: string[];
default_bounds?: Record<string, any>;
default_maximized?: boolean;
icon?: string;
description?: string;
author?: string;
website?: string;
github?: string;
export interface PluginInstallResult {
success: boolean;
plugin?: PluginItem;
message?: string;
errors?: string[];
}
export const pluginsApi = {
/**
* List installed plugins
*/
list: () => request<PluginItem[]>(`/plugins`),
create: (payload: PluginCreate) => request<PluginItem>(`/plugins`, { method: 'POST', json: payload }),
remove: (id: number) => request(`/plugins/${id}`, { method: 'DELETE' }),
update: (id: number, payload: PluginCreate) => request<PluginItem>(`/plugins/${id}`, { method: 'PUT', json: payload }),
updateManifest: (id: number, payload: PluginManifestUpdate) => request<PluginItem>(`/plugins/${id}/metadata`, { method: 'POST', json: payload }),
};
/**
* Get a single plugin's details
*/
get: (key: string) => request<PluginItem>(`/plugins/${key}`),
/**
* Install a plugin (upload a .foxpkg)
*/
install: async (file: File): Promise<PluginInstallResult> => {
const formData = new FormData();
formData.append('file', file);
return request<PluginInstallResult>(`/plugins/install`, {
method: 'POST',
formData,
});
},
/**
* Remove/uninstall a plugin
*/
remove: (key: string) => request(`/plugins/${key}`, { method: 'DELETE' }),
/**
* Get the plugin bundle URL
*/
getBundleUrl: (key: string) => `/api/plugins/${key}/bundle.js`,
/**
* Get a plugin asset URL
*/
getAssetUrl: (key: string, assetPath: string) =>
`/api/plugins/${key}/assets/${assetPath}`,
};
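A sketch of the install flow, continuing from a File obtained via downloadFoxelCoreApp above:

const result = await pluginsApi.install(pkg);
if (result.success && result.plugin) {
  console.log(`installed ${result.plugin.key} v${result.plugin.version ?? '?'}`);
  const plugins = await pluginsApi.list(); // refresh the local view
  console.log(`${plugins.length} plugins installed`);
} else {
  console.error(result.errors ?? result.message ?? 'install failed');
}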

View File

@@ -16,6 +16,7 @@ export interface ProcessorTypeMeta {
supported_exts: string[];
config_schema: ProcessorTypeField[];
produces_file: boolean;
supports_directory?: boolean;
module_path?: string | null;
}

View File

@@ -107,7 +107,7 @@ export const vfsApi = {
const json = JSON.parse(xhr.responseText);
if (json.code === 0) return resolve(json.data);
return reject(new Error(json.msg || json.message || 'Upload failed'));
} catch (e) {
} catch {
return reject(new Error('Invalid response'));
}
} else {
@@ -115,7 +115,7 @@ export const vfsApi = {
try {
const json = JSON.parse(xhr.responseText);
err = json.detail || json.msg || json.message || err;
} catch (_) {}
} catch { void 0; }
reject(new Error(err));
}
}

View File

@@ -1,14 +1,15 @@
import React, { useRef, useEffect, useCallback } from 'react';
import { Space, Button } from 'antd';
import { FullscreenExitOutlined, FullscreenOutlined, CloseOutlined, MinusOutlined } from '@ant-design/icons';
import type { AppDescriptor, AppComponentProps } from './types';
import type { AppDescriptor, AppComponentProps, AppOpenComponentProps } from './types';
import type { VfsEntry } from '../api/client';
export interface AppWindowItem {
id: string;
app: AppDescriptor;
entry: VfsEntry;
filePath: string;
kind: 'file' | 'app';
entry?: VfsEntry;
filePath?: string;
maximized: boolean;
minimized: boolean;
x: number;
@@ -17,12 +18,14 @@ export interface AppWindowItem {
height: number;
}
type AppWindowPatch = Partial<Pick<AppWindowItem, 'maximized' | 'minimized' | 'x' | 'y' | 'width' | 'height'>>;
interface AppWindowsLayerProps {
windows: AppWindowItem[];
onClose: (id: string) => void;
onToggleMax: (id: string) => void;
onBringToFront: (id: string) => void;
onUpdateWindow: (id: string, patch: Partial<AppWindowItem>) => void;
onUpdateWindow: (id: string, patch: AppWindowPatch) => void;
}
export const AppWindowsLayer: React.FC<AppWindowsLayerProps> = ({ windows, onClose, onToggleMax, onBringToFront, onUpdateWindow }) => {
@@ -54,8 +57,8 @@ export const AppWindowsLayer: React.FC<AppWindowsLayerProps> = ({ windows, onClo
const { id, startX, startY, originX, originY } = dragRef.current;
const dx = e.clientX - startX;
const dy = e.clientY - startY;
let newX = Math.max(0, originX + dx);
let newY = Math.max(48, originY + dy);
const newX = Math.max(0, originX + dx);
const newY = Math.max(0, originY + dy);
dragRef.current.newX = newX;
dragRef.current.newY = newY;
const el = windowEls.current[id];
@@ -193,13 +196,23 @@ export const AppWindowsLayer: React.FC<AppWindowsLayerProps> = ({ windows, onClo
return (
<>
{visibleWindows.map((w, idx) => {
const AppComp = w.app.component as React.FC<AppComponentProps>;
const isFileWindow = w.kind !== 'app';
const FileComp = w.app.component as React.FC<AppComponentProps>;
const OpenComp = w.app.openAppComponent as React.FC<AppOpenComponentProps> | undefined;
const ContentComp = (isFileWindow ? FileComp : OpenComp) as React.FC<any> | undefined;
const useSystemWindow = w.app.useSystemWindow !== false; // defaults to true
const titleText = isFileWindow ? `${w.app.name} - ${w.entry?.name || ''}` : w.app.name;
if (!ContentComp) {
return null;
}
if (!useSystemWindow) {
return (
<div
key={w.id}
ref={el => { windowEls.current[w.id] = el; }}
onMouseDown={() => onBringToFront(w.id)}
style={{
position: 'fixed',
top: w.maximized ? 0 : w.y,
@@ -223,16 +236,20 @@ export const AppWindowsLayer: React.FC<AppWindowsLayerProps> = ({ windows, onClo
overflow: 'hidden',
background: 'transparent'
}}
>
<AppComp
filePath={w.filePath}
entry={w.entry}
onRequestClose={() => onClose(w.id)}
/>
</div>
</div>
);
}
>
{isFileWindow ? (
<ContentComp
filePath={w.filePath || ''}
entry={w.entry as VfsEntry}
onRequestClose={() => onClose(w.id)}
/>
) : (
<ContentComp onRequestClose={() => onClose(w.id)} />
)}
</div>
</div>
);
}
// Otherwise keep rendering in the system window (original logic unchanged)
const interacting = isInteracting(w.id);
return (
@@ -290,9 +307,9 @@ export const AppWindowsLayer: React.FC<AppWindowsLayerProps> = ({ windows, onClo
paddingRight: 8,
flex: 1
}}
>
{w.app.name} - {w.entry.name}
</span>
>
{titleText}
</span>
<Space size={4}>
<Button
type="text"
@@ -351,11 +368,15 @@ export const AppWindowsLayer: React.FC<AppWindowsLayerProps> = ({ windows, onClo
}}
>
{!w.maximized && resizeHandles(w)}
<AppComp
filePath={w.filePath}
entry={w.entry}
onRequestClose={() => onClose(w.id)}
/>
{isFileWindow ? (
<ContentComp
filePath={w.filePath || ''}
entry={w.entry as VfsEntry}
onRequestClose={() => onClose(w.id)}
/>
) : (
<ContentComp onRequestClose={() => onClose(w.id)} />
)}
</div>
</div>
);
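Windows are now either file windows or standalone app windows. Two hypothetical items showing the split (the app descriptors and entry are assumptions, not real values):

const fileWindow: AppWindowItem = {
  id: 'w1', app: imageViewerApp, kind: 'file',
  entry: someEntry, filePath: '/photos/a.jpg',
  maximized: false, minimized: false, x: 120, y: 80, width: 960, height: 640,
};
const appWindow: AppWindowItem = {
  id: 'w2', app: pluginApp, kind: 'app', // no entry/filePath for app windows
  maximized: false, minimized: false, x: 160, y: 100, width: 800, height: 600,
};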

View File

@@ -1,654 +0,0 @@
import React, { useCallback, useEffect, useMemo, useRef, useState } from 'react';
import {
FileOutlined,
DatabaseOutlined,
ExpandOutlined,
BgColorsOutlined,
ClockCircleOutlined,
FolderOutlined,
AimOutlined,
BulbOutlined,
ThunderboltOutlined,
AlertOutlined,
CameraOutlined,
ApiOutlined,
FieldTimeOutlined,
} from '@ant-design/icons';
import { API_BASE_URL, vfsApi, type VfsEntry } from '../../api/client';
import type { AppComponentProps } from '../types';
import { ImageCanvas } from './components/ImageCanvas';
import { ViewerControls } from './components/ViewerControls';
import { Filmstrip } from './components/Filmstrip';
import { InfoPanel } from './components/InfoPanel';
import type { HistogramData, RgbColor, InfoItem } from './components/types';
import { viewerStyles } from './styles';
interface ExplorerSnapshot {
path: string;
entries: VfsEntry[];
pagination?: { page: number; page_size: number; total: number };
sortBy?: string;
sortOrder?: string;
timestamp: number;
}
interface FileStat {
name?: string;
is_dir?: boolean;
size?: number;
mtime?: number;
mode?: number;
path?: string;
type?: string;
exif?: Record<string, unknown>;
}
declare global {
interface WindowEventMap {
'foxel:file-explorer-page': CustomEvent<ExplorerSnapshot>;
}
}
type ExplorerAwareWindow = Window & { __FOXEL_LAST_EXPLORER_PAGE__?: ExplorerSnapshot };
const DEFAULT_TONE: RgbColor = { r: 28, g: 32, b: 46 };
const isImageEntry = (ent: VfsEntry) => {
if (ent.is_dir) return false;
const maybe = ent as VfsEntry & { has_thumbnail?: boolean };
if (typeof maybe.has_thumbnail === 'boolean' && maybe.has_thumbnail) return true;
const ext = ent.name.split('.').pop()?.toLowerCase();
if (!ext) return false;
return ['png', 'jpg', 'jpeg', 'gif', 'webp', 'bmp', 'avif', 'ico', 'tif', 'tiff', 'svg', 'heic', 'heif', 'arw', 'cr2', 'cr3', 'nef', 'rw2', 'orf', 'pef', 'dng'].includes(ext);
};
const buildThumbUrl = (fullPath: string, w = 180, h = 120) => {
const base = API_BASE_URL.replace(/\/+$/, '');
const clean = fullPath.replace(/^\/+/, '');
return `${base}/fs/thumb/${encodeURI(clean)}?w=${w}&h=${h}&fit=cover`;
};
const getDirectory = (fullPath: string) => {
const path = fullPath.startsWith('/') ? fullPath : `/${fullPath}`;
const idx = path.lastIndexOf('/');
if (idx <= 0) return '/';
return path.slice(0, idx) || '/';
};
const joinPath = (dir: string, name: string) => {
if (dir === '/' || dir === '') return `/${name}`;
return `${dir.replace(/\/$/, '')}/${name}`;
};
const clamp = (value: number, min: number, max: number) => Math.max(min, Math.min(max, value));
const parseNumberish = (raw: unknown): number | null => {
if (typeof raw === 'number') return raw;
if (typeof raw !== 'string') return null;
if (raw.includes('/')) {
const [a, b] = raw.split('/').map(v => Number(v));
if (!Number.isNaN(a) && !Number.isNaN(b) && b !== 0) return a / b;
}
const val = Number(raw);
return Number.isNaN(val) ? null : val;
};
const humanFileSize = (size: number | undefined) => {
if (typeof size !== 'number') return '-';
const units = ['B', 'KB', 'MB', 'GB', 'TB'];
let value = size;
let index = 0;
while (value >= 1024 && index < units.length - 1) {
value /= 1024;
index += 1;
}
return `${value.toFixed(index === 0 ? 0 : 1)} ${units[index]}`;
};
const readExplorerSnapshot = (dir: string): ExplorerSnapshot | null => {
if (typeof window === 'undefined') return null;
const snap = (window as ExplorerAwareWindow).__FOXEL_LAST_EXPLORER_PAGE__;
if (!snap) return null;
const snapshotPath = snap.path === '' ? '/' : snap.path;
const normalizedSnap = snapshotPath.endsWith('/') && snapshotPath !== '/' ? snapshotPath.slice(0, -1) : snapshotPath;
const normalizedTarget = dir.endsWith('/') && dir !== '/' ? dir.slice(0, -1) : dir;
if (normalizedSnap !== normalizedTarget) return null;
return snap;
};
const formatDateTime = (ts?: number) => {
if (!ts) return '-';
try {
return new Date(ts * 1000).toLocaleString();
} catch {
return '-';
}
};
const clampChannel = (value: number) => Math.max(0, Math.min(255, value));
const mixColor = (base: RgbColor, target: RgbColor, ratio: number): RgbColor => ({
r: clampChannel(base.r * (1 - ratio) + target.r * ratio),
g: clampChannel(base.g * (1 - ratio) + target.g * ratio),
b: clampChannel(base.b * (1 - ratio) + target.b * ratio),
});
const rgbToRgba = (color: RgbColor, alpha: number) => `rgba(${Math.round(color.r)}, ${Math.round(color.g)}, ${Math.round(color.b)}, ${alpha})`;
const computeImageStats = (img: HTMLImageElement): { histogram: HistogramData | null; dominantColor: RgbColor | null } => {
try {
const maxSide = 720;
const naturalWidth = img.naturalWidth || 1;
const naturalHeight = img.naturalHeight || 1;
const ratio = Math.min(1, maxSide / Math.max(naturalWidth, naturalHeight));
const width = Math.max(1, Math.floor(naturalWidth * ratio));
const height = Math.max(1, Math.floor(naturalHeight * ratio));
const canvas = document.createElement('canvas');
canvas.width = width;
canvas.height = height;
const ctx = canvas.getContext('2d', { willReadFrequently: true });
if (!ctx) return { histogram: null, dominantColor: null };
ctx.drawImage(img, 0, 0, width, height);
const { data } = ctx.getImageData(0, 0, width, height);
const r = new Array(256).fill(0);
const g = new Array(256).fill(0);
const b = new Array(256).fill(0);
let rTotal = 0;
let gTotal = 0;
let bTotal = 0;
let count = 0;
for (let i = 0; i < data.length; i += 4) {
r[data[i]] += 1;
g[data[i + 1]] += 1;
b[data[i + 2]] += 1;
rTotal += data[i];
gTotal += data[i + 1];
bTotal += data[i + 2];
count += 1;
}
const histogram: HistogramData = { r, g, b };
if (count === 0) return { histogram, dominantColor: null };
const dominantColor: RgbColor = {
r: rTotal / count,
g: gTotal / count,
b: bTotal / count,
};
return { histogram, dominantColor };
} catch {
return { histogram: null, dominantColor: null };
}
};
export const ImageViewerApp: React.FC<AppComponentProps> = ({ filePath, entry, onRequestClose }) => {
const normalizedInitialPath = filePath.startsWith('/') ? filePath : `/${filePath}`;
const [activeEntry, setActiveEntry] = useState<VfsEntry>(entry);
const [activePath, setActivePath] = useState<string>(normalizedInitialPath);
const [imageUrl, setImageUrl] = useState<string>();
const [loading, setLoading] = useState(true);
const [error, setError] = useState<string>();
const [stat, setStat] = useState<FileStat | null>(null);
const [histogram, setHistogram] = useState<HistogramData | null>(null);
const [dominantColor, setDominantColor] = useState<RgbColor | null>(null);
const [scale, setScale] = useState(1);
const [offset, setOffset] = useState({ x: 0, y: 0 });
const [rotate, setRotate] = useState(0);
const [isDragging, setIsDragging] = useState(false);
const [filmstrip, setFilmstrip] = useState<VfsEntry[]>([]);
const [pageInfo, setPageInfo] = useState<{ page: number; total: number; pageSize: number } | null>(null);
const containerRef = useRef<HTMLDivElement | null>(null);
const imageRef = useRef<HTMLImageElement | null>(null);
const dragPointRef = useRef<{ x: number; y: number } | null>(null);
const pinchDistanceRef = useRef<number | null>(null);
const transitionRef = useRef(false);
const filmstripRefs = useRef<Record<string, HTMLDivElement | null>>({});
const directory = useMemo(() => getDirectory(activePath), [activePath]);
const baseTone = useMemo<RgbColor>(() => dominantColor ?? DEFAULT_TONE, [dominantColor]);
const containerStyle = useMemo(() => {
const light = mixColor(baseTone, { r: 255, g: 255, b: 255 }, 0.18);
const shadow = mixColor(baseTone, { r: 0, g: 0, b: 0 }, 0.62);
return {
...viewerStyles.container,
background: `linear-gradient(135deg, ${rgbToRgba(light, 0.78)} 0%, ${rgbToRgba(baseTone, 0.86)} 48%, ${rgbToRgba(shadow, 0.96)} 100%)`,
};
}, [baseTone]);
const mainBackdropStyle = useMemo(() => {
const glow = mixColor(baseTone, { r: 255, g: 255, b: 255 }, 0.32);
const shade = mixColor(baseTone, { r: 0, g: 0, b: 0 }, 0.7);
return {
...viewerStyles.mainBackdrop,
background: `radial-gradient(circle at 18% 22%, ${rgbToRgba(glow, 0.38)}, ${rgbToRgba(shade, 0.94)} 68%)`,
};
}, [baseTone]);
const viewerStyle = useMemo(() => {
const surface = mixColor(baseTone, { r: 0, g: 0, b: 0 }, 0.45);
const edge = mixColor(baseTone, { r: 0, g: 0, b: 0 }, 0.65);
return {
...viewerStyles.viewer,
background: `linear-gradient(145deg, ${rgbToRgba(surface, 0.7)} 0%, ${rgbToRgba(edge, 0.92)} 100%)`,
backdropFilter: 'blur(28px)',
};
}, [baseTone]);
const controlsStyle = useMemo(() => {
const tone = mixColor(baseTone, { r: 0, g: 0, b: 0 }, 0.52);
return {
...viewerStyles.controls,
background: rgbToRgba(tone, 0.74),
backdropFilter: 'blur(18px)',
};
}, [baseTone]);
const filmstripShellStyle = useMemo(() => {
const tone = mixColor(baseTone, { r: 0, g: 0, b: 0 }, 0.56);
return {
...viewerStyles.filmstripShell,
background: rgbToRgba(tone, 0.7),
backdropFilter: 'blur(22px)',
};
}, [baseTone]);
const getThumbUrl = useCallback((item: VfsEntry) => {
const full = joinPath(directory, item.name);
return buildThumbUrl(full, 160, 120);
}, [directory]);
const sidePanelStyle = useMemo(() => {
const panel = mixColor(baseTone, { r: 0, g: 0, b: 0 }, 0.6);
const border = rgbToRgba(mixColor(baseTone, { r: 255, g: 255, b: 255 }, 0.1), 0.28);
return {
...viewerStyles.sidePanel,
background: rgbToRgba(panel, 0.8),
backdropFilter: 'blur(28px)',
borderLeft: `1px solid ${border}`,
};
}, [baseTone]);
const histogramCardStyle = useMemo(() => {
const tone = mixColor(baseTone, { r: 0, g: 0, b: 0 }, 0.55);
const stroke = rgbToRgba(mixColor(baseTone, { r: 255, g: 255, b: 255 }, 0.12), 0.2);
return {
...viewerStyles.histogramCard,
background: rgbToRgba(tone, 0.58),
border: `1px solid ${stroke}`,
};
}, [baseTone]);
useEffect(() => {
const normalized = filePath.startsWith('/') ? filePath : `/${filePath}`;
setActiveEntry(entry);
setActivePath(normalized);
}, [entry, filePath]);
useEffect(() => {
let cancelled = false;
setLoading(true);
setError(undefined);
setHistogram(null);
setDominantColor(null);
const cleaned = activePath.replace(/^\/+/, '');
Promise.all([
vfsApi.getTempLinkToken(cleaned),
vfsApi.stat(activePath) as Promise<FileStat>,
])
.then(([token, metadata]) => {
if (cancelled) return;
setImageUrl(vfsApi.getTempPublicUrl(token.token));
setStat(metadata);
setScale(1);
setRotate(0);
setOffset({ x: 0, y: 0 });
})
.catch((err: unknown) => {
if (!cancelled) {
setError(err instanceof Error ? err.message : '加载失败');
}
})
.finally(() => {
if (!cancelled) setLoading(false);
});
return () => {
cancelled = true;
};
}, [activePath]);
const refreshFilmstrip = useCallback((dir: string) => {
const snap = readExplorerSnapshot(dir);
if (snap) {
const images = snap.entries.filter(isImageEntry);
const ensured = images.some(item => item.name === activeEntry.name) ? images : [...images, activeEntry];
setFilmstrip(ensured);
if (snap.pagination) {
setPageInfo({
page: snap.pagination.page,
pageSize: snap.pagination.page_size,
total: snap.pagination.total,
});
} else {
setPageInfo(null);
}
return;
}
setFilmstrip([activeEntry]);
setPageInfo(null);
}, [activeEntry]);
useEffect(() => {
refreshFilmstrip(directory);
}, [directory, refreshFilmstrip]);
useEffect(() => {
const handler = () => refreshFilmstrip(directory);
window.addEventListener('foxel:file-explorer-page', handler);
return () => window.removeEventListener('foxel:file-explorer-page', handler);
}, [directory, refreshFilmstrip]);
useEffect(() => {
const el = filmstripRefs.current[activeEntry.name];
if (el) {
el.scrollIntoView({ behavior: 'smooth', inline: 'center', block: 'nearest' });
}
}, [activeEntry, filmstrip]);
useEffect(() => {
const keyHandler = (e: KeyboardEvent) => {
if (e.key === 'ArrowRight') {
e.preventDefault();
switchRelative(1);
} else if (e.key === 'ArrowLeft') {
e.preventDefault();
switchRelative(-1);
} else if ((e.key === '+' || e.key === '=') && (e.ctrlKey || e.metaKey)) {
e.preventDefault();
zoom(1.15);
} else if ((e.key === '-' || e.key === '_') && (e.ctrlKey || e.metaKey)) {
e.preventDefault();
zoom(0.85);
}
};
window.addEventListener('keydown', keyHandler);
return () => window.removeEventListener('keydown', keyHandler);
});
const zoom = useCallback((factor: number) => {
setScale(prev => {
const next = clamp(prev * factor, 0.08, 10);
transitionRef.current = true;
window.setTimeout(() => { transitionRef.current = false; }, 120);
return next;
});
}, []);
const rotateImage = () => {
setRotate(prev => {
transitionRef.current = true;
window.setTimeout(() => { transitionRef.current = false; }, 180);
return (prev + 90) % 360;
});
};
const resetView = () => {
transitionRef.current = true;
window.setTimeout(() => { transitionRef.current = false; }, 160);
setScale(1);
setOffset({ x: 0, y: 0 });
setRotate(0);
};
const fitToScreen = () => {
resetView();
};
const onWheel = (e: React.WheelEvent) => {
e.preventDefault();
const container = containerRef.current;
if (!container) return;
const rect = container.getBoundingClientRect();
const cx = e.clientX - rect.left - rect.width / 2;
const cy = e.clientY - rect.top - rect.height / 2;
setScale(prev => {
const factor = e.deltaY < 0 ? 1.12 : 0.88;
const next = clamp(prev * factor, 0.08, 10);
const ratio = next / prev;
setOffset(off => ({ x: off.x - cx * (ratio - 1), y: off.y - cy * (ratio - 1) }));
transitionRef.current = true;
window.setTimeout(() => { transitionRef.current = false; }, 120);
return next;
});
};
const onMouseDown = (e: React.MouseEvent) => {
if (e.button !== 0) return;
e.preventDefault();
setIsDragging(true);
dragPointRef.current = { x: e.clientX, y: e.clientY };
};
const onMouseMove = (e: React.MouseEvent) => {
if (!isDragging || !dragPointRef.current) return;
e.preventDefault();
const dx = e.clientX - dragPointRef.current.x;
const dy = e.clientY - dragPointRef.current.y;
dragPointRef.current = { x: e.clientX, y: e.clientY };
setOffset(off => ({ x: off.x + dx, y: off.y + dy }));
};
const stopDragging = () => {
setIsDragging(false);
dragPointRef.current = null;
};
const dist = (t1: React.Touch, t2: React.Touch) => Math.hypot(t1.clientX - t2.clientX, t1.clientY - t2.clientY);
const onTouchStart = (e: React.TouchEvent) => {
if (e.touches.length === 1) {
const t = e.touches[0];
dragPointRef.current = { x: t.clientX, y: t.clientY };
} else if (e.touches.length === 2) {
pinchDistanceRef.current = dist(e.touches[0], e.touches[1]);
}
};
const onTouchMove = (e: React.TouchEvent) => {
if (e.touches.length === 1 && dragPointRef.current) {
const t = e.touches[0];
const dx = t.clientX - dragPointRef.current.x;
const dy = t.clientY - dragPointRef.current.y;
dragPointRef.current = { x: t.clientX, y: t.clientY };
setOffset(off => ({ x: off.x + dx, y: off.y + dy }));
} else if (e.touches.length === 2 && pinchDistanceRef.current) {
const dNow = dist(e.touches[0], e.touches[1]);
const ratio = dNow / pinchDistanceRef.current;
pinchDistanceRef.current = dNow;
setScale(prev => clamp(prev * ratio, 0.08, 10));
}
};
const onTouchEnd = () => {
pinchDistanceRef.current = null;
dragPointRef.current = null;
};
const onDoubleClick = (e: React.MouseEvent) => {
e.preventDefault();
const next = scale > 1.4 ? 1 : 2.2;
const container = containerRef.current;
if (!container) {
setScale(next);
return;
}
const rect = container.getBoundingClientRect();
const cx = e.clientX - rect.left - rect.width / 2;
const cy = e.clientY - rect.top - rect.height / 2;
const ratio = next / scale;
setScale(next);
setOffset(off => ({ x: off.x - cx * (ratio - 1), y: off.y - cy * (ratio - 1) }));
};
const handleImageLoaded = () => {
const img = imageRef.current;
if (!img) return;
const stats = computeImageStats(img);
setHistogram(stats.histogram);
setDominantColor(stats.dominantColor);
};
const switchEntry = (target: VfsEntry) => {
const nextPath = joinPath(directory, target.name);
setActiveEntry(target);
setActivePath(nextPath);
};
const switchRelative = (step: number) => {
if (filmstrip.length <= 1) return;
const currentIndex = filmstrip.findIndex(item => item.name === activeEntry.name);
if (currentIndex === -1) return;
const target = filmstrip[(currentIndex + step + filmstrip.length) % filmstrip.length];
if (target) switchEntry(target);
};
const scaleLabel = `${(scale * 100).toFixed(scale >= 1 ? 0 : 1)}%`;
const imageStyle: React.CSSProperties = {
maxWidth: '100%',
maxHeight: '100%',
transform: `translate(${offset.x}px, ${offset.y}px) scale(${scale}) rotate(${rotate}deg)`,
transition: transitionRef.current ? 'transform 0.18s cubic-bezier(.4,.8,.4,1)' : undefined,
cursor: isDragging ? 'grabbing' : scale > 1 ? 'grab' : 'zoom-in',
willChange: 'transform',
};
const controlsNode = (
<ViewerControls
style={controlsStyle}
onPrev={() => switchRelative(-1)}
onNext={() => switchRelative(1)}
onZoomIn={() => zoom(1.18)}
onZoomOut={() => zoom(0.82)}
onRotate={rotateImage}
onReset={resetView}
onFit={fitToScreen}
disableSwitch={filmstrip.length <= 1}
/>
);
const exif = (stat?.exif ?? {}) as Record<string, unknown>;
const infoIconStyle: React.CSSProperties = { fontSize: 15, color: 'rgba(255,255,255,0.62)' };
const exifValue = (key: string): string | number | null => {
const value = exif[key];
if (typeof value === 'string' || typeof value === 'number') return value;
return null;
};
const focalLength = (() => {
const v = parseNumberish(exifValue('37386') ?? exifValue('37377'));
return v ? `${v.toFixed(1)} mm` : null;
})();
const aperture = (() => {
const v = parseNumberish(exifValue('33437') ?? exifValue('37378'));
return v ? `f/${v.toFixed(1)}` : null;
})();
const exposure = (() => {
const v = parseNumberish(exifValue('33434'));
if (!v) return null;
if (v >= 1) return `${v.toFixed(1)} s`;
const denom = Math.max(1, Math.round(1 / v));
return `1/${denom}`;
})();
const isoValue = exifValue('34855') ?? exifValue('34864');
const width = parseNumberish(exifValue('40962'));
const height = parseNumberish(exifValue('40963'));
const colorSpace = exifValue('40961');
const cameraMake = exifValue('271');
const cameraModel = exifValue('272');
const lensModel = exifValue('42036');
const captureTime = exifValue('36867') ?? exifValue('36868') ?? exifValue('306');
const basicList: InfoItem[] = [
{ label: '文件名', value: activeEntry.name, icon: <FileOutlined style={infoIconStyle} /> },
{ label: '文件大小', value: humanFileSize(stat?.size), icon: <DatabaseOutlined style={infoIconStyle} /> },
{ label: '分辨率', value: width && height ? `${width} × ${height}` : null, icon: <ExpandOutlined style={infoIconStyle} /> },
{ label: '颜色空间', value: colorSpace ?? null, icon: <BgColorsOutlined style={infoIconStyle} /> },
{ label: '修改时间', value: stat?.mtime ? formatDateTime(stat.mtime) : null, icon: <ClockCircleOutlined style={infoIconStyle} /> },
{ label: '路径', value: typeof stat?.path === 'string' ? stat.path : activePath, icon: <FolderOutlined style={infoIconStyle} /> },
];
const shootingList: InfoItem[] = [
{ label: '焦距', value: focalLength, icon: <AimOutlined style={infoIconStyle} /> },
{ label: '光圈', value: aperture, icon: <BulbOutlined style={infoIconStyle} /> },
{ label: '快门', value: exposure, icon: <ThunderboltOutlined style={infoIconStyle} /> },
{ label: 'ISO', value: isoValue != null ? isoValue.toString() : null, icon: <AlertOutlined style={infoIconStyle} /> },
];
const deviceList: InfoItem[] = [
{
label: '相机',
value: cameraModel ? `${cameraMake ? `${cameraMake} ` : ''}${cameraModel}` : (cameraMake ?? null),
icon: <CameraOutlined style={infoIconStyle} />,
},
{ label: '镜头', value: lensModel ?? null, icon: <ApiOutlined style={infoIconStyle} /> },
];
const miscList: InfoItem[] = [
{ label: '拍摄时间', value: captureTime, icon: <FieldTimeOutlined style={infoIconStyle} /> },
];
return (
<div style={containerStyle}>
<section style={viewerStyles.main}>
<div style={mainBackdropStyle} />
<div style={viewerStyles.mainContent}>
<ImageCanvas
containerRef={containerRef}
imageRef={imageRef}
viewerStyle={viewerStyle}
controls={controlsNode}
scaleLabel={scaleLabel}
imageStyle={imageStyle}
loading={loading}
error={error}
imageUrl={imageUrl}
activeEntry={activeEntry}
onRequestClose={onRequestClose}
onImageLoad={handleImageLoaded}
onWheel={onWheel}
onMouseDown={onMouseDown}
onMouseMove={onMouseMove}
onMouseLeave={stopDragging}
onMouseUp={stopDragging}
onDoubleClick={onDoubleClick}
onTouchStart={onTouchStart}
onTouchMove={onTouchMove}
onTouchEnd={onTouchEnd}
/>
<Filmstrip
shellStyle={filmstripShellStyle}
listStyle={viewerStyles.filmstrip}
entries={filmstrip}
activeEntry={activeEntry}
onSelect={switchEntry}
filmstripRefs={filmstripRefs}
pageInfo={pageInfo}
getThumbUrl={getThumbUrl}
/>
</div>
</section>
<InfoPanel
style={sidePanelStyle}
histogramCardStyle={histogramCardStyle}
title={activeEntry.name}
captureTime={captureTime ?? null}
basicList={basicList}
shootingList={shootingList}
deviceList={deviceList}
miscList={miscList}
histogram={histogram}
/>
</div>
);
};

View File

@@ -1,94 +0,0 @@
import React from 'react';
import { Typography } from 'antd';
import type { VfsEntry } from '../../../api/client';
interface PageInfo {
page: number;
total: number;
pageSize: number;
}
interface FilmstripProps {
shellStyle: React.CSSProperties;
listStyle: React.CSSProperties;
entries: VfsEntry[];
activeEntry: VfsEntry;
onSelect: (entry: VfsEntry) => void;
filmstripRefs: React.MutableRefObject<Record<string, HTMLDivElement | null>>;
pageInfo: PageInfo | null;
getThumbUrl: (entry: VfsEntry) => string;
}
export const Filmstrip: React.FC<FilmstripProps> = ({
shellStyle,
listStyle,
entries,
activeEntry,
onSelect,
filmstripRefs,
pageInfo,
getThumbUrl,
}) => (
<div style={shellStyle}>
<div style={{ display: 'flex', justifyContent: 'space-between', alignItems: 'center', marginBottom: 10 }}>
<Typography.Text style={{ color: 'rgba(255,255,255,0.72)', fontWeight: 500 }}>
· {entries.length}
</Typography.Text>
{pageInfo && (
<Typography.Text style={{ color: 'rgba(255,255,255,0.45)', fontSize: 12 }}>
{pageInfo.page} / {Math.max(1, Math.ceil(pageInfo.total / pageInfo.pageSize))}
</Typography.Text>
)}
</div>
<div style={listStyle}>
{entries.map(item => {
const active = item.name === activeEntry.name;
return (
<div
key={`${item.name}-${item.mtime ?? ''}`}
ref={el => { filmstripRefs.current[item.name] = el; }}
onClick={() => onSelect(item)}
style={{
width: 84,
height: 64,
overflow: 'hidden',
border: active ? '2px solid #4e9bff' : '2px solid transparent',
boxShadow: active ? '0 0 0 4px rgba(78,155,255,0.28)' : '0 10px 28px rgba(0,0,0,0.45)',
cursor: 'pointer',
position: 'relative',
flex: '0 0 auto',
}}
>
<img
src={getThumbUrl(item)}
alt={item.name}
style={{ width: '100%', height: '100%', objectFit: 'cover', filter: active ? 'saturate(1)' : 'saturate(0.65)' }}
/>
{active && (
<div
style={{
position: 'absolute',
bottom: 4,
left: 6,
right: 6,
padding: '2px 4px',
background: 'rgba(0,0,0,0.55)',
color: '#fff',
fontSize: 10,
whiteSpace: 'nowrap',
overflow: 'hidden',
textOverflow: 'ellipsis',
}}
>
{item.name}
</div>
)}
</div>
);
})}
{entries.length === 0 && (
<div style={{ color: 'rgba(255,255,255,0.45)' }}></div>
)}
</div>
</div>
);

View File

@@ -1,99 +0,0 @@
import React from 'react';
import { Spin, Typography, Tooltip, Button } from 'antd';
import { CloseOutlined } from '@ant-design/icons';
import type { VfsEntry } from '../../../api/client';
import { viewerStyles } from '../styles';
interface ImageCanvasProps {
containerRef: React.RefObject<HTMLDivElement | null>;
imageRef: React.RefObject<HTMLImageElement | null>;
viewerStyle: React.CSSProperties;
controls: React.ReactNode;
scaleLabel: string;
imageStyle: React.CSSProperties;
loading: boolean;
error?: string;
imageUrl?: string;
activeEntry: VfsEntry;
onRequestClose: () => void;
onImageLoad: () => void;
onWheel: React.WheelEventHandler<HTMLDivElement>;
onMouseDown: React.MouseEventHandler<HTMLDivElement>;
onMouseMove: React.MouseEventHandler<HTMLDivElement>;
onMouseLeave: React.MouseEventHandler<HTMLDivElement>;
onMouseUp: React.MouseEventHandler<HTMLDivElement>;
onDoubleClick: React.MouseEventHandler<HTMLDivElement>;
onTouchStart: React.TouchEventHandler<HTMLDivElement>;
onTouchMove: React.TouchEventHandler<HTMLDivElement>;
onTouchEnd: React.TouchEventHandler<HTMLDivElement>;
}
export const ImageCanvas: React.FC<ImageCanvasProps> = ({
containerRef,
imageRef,
viewerStyle,
controls,
scaleLabel,
imageStyle,
loading,
error,
imageUrl,
activeEntry,
onRequestClose,
onImageLoad,
onWheel,
onMouseDown,
onMouseMove,
onMouseLeave,
onMouseUp,
onDoubleClick,
onTouchStart,
onTouchMove,
onTouchEnd,
}) => (
<div
ref={containerRef}
style={viewerStyle}
onWheel={onWheel}
onMouseDown={onMouseDown}
onMouseMove={onMouseMove}
onMouseLeave={onMouseLeave}
onMouseUp={onMouseUp}
onDoubleClick={onDoubleClick}
onTouchStart={onTouchStart}
onTouchMove={onTouchMove}
onTouchEnd={onTouchEnd}
>
<div style={viewerStyles.viewerCloseWrap}>
<Tooltip title="关闭">
<Button
type="text"
icon={<CloseOutlined />}
onClick={onRequestClose}
style={viewerStyles.viewerClose}
/>
</Tooltip>
</div>
{loading ? (
<Spin tip="加载中" />
) : error ? (
<Typography.Text type="danger">{error}</Typography.Text>
) : imageUrl ? (
<img
ref={imageRef}
src={imageUrl}
alt={activeEntry.name}
onLoad={onImageLoad}
draggable={false}
crossOrigin="anonymous"
style={imageStyle}
/>
) : (
<Typography.Text></Typography.Text>
)}
<div style={viewerStyles.scaleBadge}>{scaleLabel}</div>
{controls}
</div>
);

View File

@@ -1,116 +0,0 @@
import React from 'react';
import { Typography, Empty } from 'antd';
import type { HistogramData, InfoItem } from './types';
interface InfoPanelProps {
style: React.CSSProperties;
histogramCardStyle: React.CSSProperties;
title: string;
captureTime: string | number | null;
basicList: InfoItem[];
shootingList: InfoItem[];
deviceList: InfoItem[];
miscList: InfoItem[];
histogram: HistogramData | null;
}
const SectionTitle: React.FC<{ children: React.ReactNode }> = ({ children }) => (
<Typography.Title level={5} style={{ color: '#fff', fontSize: 15, marginTop: 24, marginBottom: 12 }}>
{children}
</Typography.Title>
);
const HistogramPlot: React.FC<{ data: HistogramData | null }> = ({ data }) => {
if (!data) {
return <Empty description="无法解析直方图" image={Empty.PRESENTED_IMAGE_SIMPLE} />;
}
const width = 260;
const height = 140;
const max = Math.max(...data.r, ...data.g, ...data.b, 1);
const toPath = (arr: number[]) => arr
.map((value, index) => {
const x = (index / 255) * width;
const y = height - (value / max) * height;
return `${index === 0 ? 'M' : 'L'}${x.toFixed(2)},${y.toFixed(2)}`;
})
.join(' ');
return (
<svg width={width} height={height} viewBox={`0 0 ${width} ${height}`} style={{ width: '100%' }}>
<rect x={0} y={0} width={width} height={height} fill="rgba(255,255,255,0.04)" />
<path d={toPath(data.r)} stroke="rgba(255,99,132,0.88)" fill="none" strokeWidth={1.3} />
<path d={toPath(data.g)} stroke="rgba(75,192,192,0.88)" fill="none" strokeWidth={1.3} />
<path d={toPath(data.b)} stroke="rgba(54,162,235,0.88)" fill="none" strokeWidth={1.3} />
</svg>
);
};
const InfoRows: React.FC<{ items: InfoItem[] }> = ({ items }) => (
<div style={{ display: 'grid', gridTemplateColumns: '100px 1fr', rowGap: 10, columnGap: 12 }}>
{items
.filter(item => item.value !== null && item.value !== undefined && item.value !== '')
.map(item => (
<React.Fragment key={item.label}>
<span style={{ display: 'inline-flex', alignItems: 'center', gap: 6, color: 'rgba(255,255,255,0.55)' }}>
{item.icon && <span style={{ display: 'inline-flex', alignItems: 'center' }}>{item.icon}</span>}
<span>{item.label}</span>
</span>
<span style={{ color: '#fff', wordBreak: 'break-all' }}>{item.value}</span>
</React.Fragment>
))}
</div>
);
export const InfoPanel: React.FC<InfoPanelProps> = ({
style,
histogramCardStyle,
title,
captureTime,
basicList,
shootingList,
deviceList,
miscList,
histogram,
}) => (
<aside style={style}>
<Typography.Title level={3} style={{ color: '#fff', marginTop: 6, wordBreak: 'break-all' }}>
{title}
</Typography.Title>
{captureTime && (
<Typography.Text style={{ color: 'rgba(255,255,255,0.6)' }}> {captureTime}</Typography.Text>
)}
<SectionTitle>Basic</SectionTitle>
<InfoRows items={basicList} />
{shootingList.some(i => i.value) && (
<>
<SectionTitle>Shooting</SectionTitle>
<InfoRows items={shootingList} />
</>
)}
{deviceList.some(i => i.value) && (
<>
<SectionTitle>Device</SectionTitle>
<InfoRows items={deviceList} />
</>
)}
{miscList.some(i => i.value) && (
<>
<SectionTitle>Other</SectionTitle>
<InfoRows items={miscList} />
</>
)}
<SectionTitle>Histogram</SectionTitle>
<div style={histogramCardStyle}>
<HistogramPlot data={histogram} />
<div style={{ marginTop: 12, display: 'flex', gap: 12, fontSize: 12 }}>
<span style={{ color: 'rgba(255,99,132,0.88)' }}>R</span>
<span style={{ color: 'rgba(75,192,192,0.88)' }}>G</span>
<span style={{ color: 'rgba(54,162,235,0.88)' }}>B</span>
</div>
</div>
</aside>
);
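
HistogramPlot only renders per-channel counts that are computed elsewhere; this diff does not show where HistogramData comes from. A minimal sketch of deriving it from the viewer's <img> with an offscreen canvas might look like the following (computeHistogram is a hypothetical helper, not part of this diff; reading pixels back requires a same-origin or CORS-enabled image, which is why ImageCanvas sets crossOrigin="anonymous"):

import type { HistogramData } from './types';

export function computeHistogram(img: HTMLImageElement): HistogramData | null {
  const canvas = document.createElement('canvas');
  canvas.width = img.naturalWidth;
  canvas.height = img.naturalHeight;
  const ctx = canvas.getContext('2d');
  if (!ctx || !canvas.width || !canvas.height) return null;
  ctx.drawImage(img, 0, 0);
  let pixels: Uint8ClampedArray;
  try {
    // Throws a SecurityError if the canvas was tainted by a cross-origin image.
    pixels = ctx.getImageData(0, 0, canvas.width, canvas.height).data;
  } catch {
    return null;
  }
  const r = new Array<number>(256).fill(0);
  const g = new Array<number>(256).fill(0);
  const b = new Array<number>(256).fill(0);
  // RGBA layout: 4 bytes per pixel; the alpha channel is ignored.
  for (let i = 0; i < pixels.length; i += 4) {
    r[pixels[i]] += 1;
    g[pixels[i + 1]] += 1;
    b[pixels[i + 2]] += 1;
  }
  return { r, g, b };
}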


@@ -1,73 +0,0 @@
import React from 'react';
import { Button, Tooltip } from 'antd';
import {
LeftOutlined,
RightOutlined,
ZoomInOutlined,
ZoomOutOutlined,
RotateRightOutlined,
ReloadOutlined,
CompressOutlined,
} from '@ant-design/icons';
interface ViewerControlsProps {
style: React.CSSProperties;
onPrev: () => void;
onNext: () => void;
onZoomIn: () => void;
onZoomOut: () => void;
onRotate: () => void;
onReset: () => void;
onFit: () => void;
disableSwitch: boolean;
}
export const ViewerControls: React.FC<ViewerControlsProps> = ({
style,
onPrev,
onNext,
onZoomIn,
onZoomOut,
onRotate,
onReset,
onFit,
disableSwitch,
}) => (
<div style={style}>
<Tooltip title="Previous">
<Button
shape="circle"
type="text"
icon={<LeftOutlined />}
onClick={onPrev}
disabled={disableSwitch}
style={{ color: '#fff' }}
/>
</Tooltip>
<Tooltip title="Zoom out">
<Button shape="circle" type="text" icon={<ZoomOutOutlined />} onClick={onZoomOut} style={{ color: '#fff' }} />
</Tooltip>
<Tooltip title="Zoom in">
<Button shape="circle" type="text" icon={<ZoomInOutlined />} onClick={onZoomIn} style={{ color: '#fff' }} />
</Tooltip>
<Tooltip title="Rotate 90°">
<Button shape="circle" type="text" icon={<RotateRightOutlined />} onClick={onRotate} style={{ color: '#fff' }} />
</Tooltip>
<Tooltip title="Reset">
<Button shape="circle" type="text" icon={<ReloadOutlined />} onClick={onReset} style={{ color: '#fff' }} />
</Tooltip>
<Tooltip title="Fit to window">
<Button shape="circle" type="text" icon={<CompressOutlined />} onClick={onFit} style={{ color: '#fff' }} />
</Tooltip>
<Tooltip title="Next">
<Button
shape="circle"
type="text"
icon={<RightOutlined />}
onClick={onNext}
disabled={disableSwitch}
style={{ color: '#fff' }}
/>
</Tooltip>
</div>
);


@@ -1,19 +0,0 @@
import type { ReactNode } from 'react';
export interface HistogramData {
r: number[];
g: number[];
b: number[];
}
export interface RgbColor {
r: number;
g: number;
b: number;
}
export interface InfoItem {
label: string;
value: string | number | null;
icon?: ReactNode;
}


@@ -1,18 +0,0 @@
import type { AppDescriptor } from '../types';
import { ImageViewerApp } from './ImageViewer.tsx';
export const descriptor: AppDescriptor = {
key: 'image-viewer',
name: 'Image Viewer',
iconUrl: 'https://api.iconify.design/mdi:image.svg',
supported: (entry) => {
if (entry.is_dir) return false;
const ext = entry.name.split('.').pop()?.toLowerCase() || '';
return ['png', 'jpg', 'jpeg', 'gif', 'webp', 'svg', 'bmp', 'ico', 'avif', 'arw', 'cr2', 'cr3', 'nef', 'rw2', 'orf', 'pef', 'dng'].includes(ext);
},
component: ImageViewerApp,
default: true,
defaultMaximized: true,
useSystemWindow: false,
defaultBounds: { width: 820, height: 620, x: 140, y: 96 }
};


@@ -1,106 +0,0 @@
export const viewerStyles = {
container: {
width: '100%',
height: '100%',
boxSizing: 'border-box' as const,
display: 'grid',
gridTemplateColumns: 'minmax(0, 1fr) 320px',
columnGap: 0,
color: '#fff',
overflow: 'hidden',
},
main: {
position: 'relative' as const,
overflow: 'hidden',
display: 'flex',
flexDirection: 'column' as const,
boxShadow: '0 28px 80px rgba(0,0,0,0.55)',
minHeight: 0,
},
mainBackdrop: {
position: 'absolute' as const,
inset: 0,
},
mainContent: {
position: 'relative' as const,
zIndex: 1,
display: 'flex',
flexDirection: 'column' as const,
flex: 1,
padding: 0,
minHeight: 0,
minWidth: 0,
},
viewer: {
flex: 1,
display: 'flex',
alignItems: 'center',
justifyContent: 'center',
position: 'relative' as const,
overflow: 'hidden',
boxShadow: '0 24px 60px rgba(0,0,0,0.5)',
touchAction: 'none' as const,
minHeight: 0,
},
controls: {
position: 'absolute' as const,
bottom: 16,
left: '50%',
transform: 'translateX(-50%)',
display: 'flex',
gap: 16,
padding: '8px 18px',
borderRadius: 24,
alignItems: 'center',
},
scaleBadge: {
position: 'absolute' as const,
bottom: 64,
left: 16,
color: 'rgba(255,255,255,0.7)',
fontSize: 12,
letterSpacing: 0.2,
},
filmstripShell: {
marginTop: 0,
padding: '3px 12px',
boxShadow: '0 16px 42px rgba(0,0,0,0.52)',
},
filmstrip: {
display: 'flex',
overflowX: 'auto' as const,
gap: 12,
paddingBottom: 4,
},
sidePanel: {
boxShadow: '0 28px 80px rgba(0,0,0,0.55)',
padding: '20px 24px',
display: 'flex',
flexDirection: 'column' as const,
overflowY: 'auto' as const,
minHeight: 0,
},
histogramCard: {
padding: '12px 12px 18px',
background: 'rgba(0,0,0,0.34)',
borderRadius: 0,
},
viewerCloseWrap: {
position: 'absolute' as const,
top: 16,
right: 16,
zIndex: 2,
},
viewerClose: {
color: '#fff',
background: 'rgba(0,0,0,0.4)',
border: '1px solid rgba(255,255,255,0.25)',
boxShadow: '0 8px 18px rgba(0,0,0,0.45)',
borderRadius: '100%',
width: 32,
height: 32,
display: 'flex',
alignItems: 'center',
justifyContent: 'center',
},
};


@@ -1,82 +0,0 @@
import React, { useEffect, useState } from 'react';
import { vfsApi } from '../../api/client';
import type { AppComponentProps } from '../types';
import { Spin, Result, Button } from 'antd';
import { useSystemStatus } from '../../contexts/SystemContext';
export const OfficeViewerApp: React.FC<AppComponentProps> = ({ filePath, onRequestClose }) => {
const systemStatus = useSystemStatus();
const [url, setUrl] = useState<string>();
const [loading, setLoading] = useState(true);
const [err, setErr] = useState<string>();
useEffect(() => {
let cancelled = false;
setLoading(true);
setErr(undefined);
setUrl(undefined);
vfsApi.getTempLinkToken(filePath.replace(/^\/+/, ''))
.then(res => {
if (cancelled) return;
const baseUrl = systemStatus?.file_domain || window.location.origin;
const fullUrl = new URL(res.url, baseUrl).href;
const officeUrl = `https://view.officeapps.live.com/op/embed.aspx?src=${encodeURIComponent(fullUrl)}`;
setUrl(officeUrl);
})
.catch(e => {
if (!cancelled) {
setErr(e.message || 'Failed to load the document link');
}
})
.finally(() => {
if (!cancelled) {
setLoading(false);
}
});
return () => {
cancelled = true;
};
}, [filePath, systemStatus?.file_domain]);
if (loading) {
return (
<div style={{ width: '100%', height: '100%', display: 'flex', alignItems: 'center', justifyContent: 'center' }}>
<Spin tip="Preparing document..." />
</div>
);
}
if (err) {
return (
<Result
status="error"
title="Unable to load document"
subTitle={err}
extra={<Button type="primary" onClick={onRequestClose}>Close</Button>}
/>
);
}
return (
<div style={{ width: '100%', height: '100%', background: 'var(--ant-color-bg-container, #fff)' }}>
{url ? (
<iframe
src={url}
width="100%"
height="100%"
frameBorder="0"
title="Office Document Viewer"
/>
) : (
<Result
status="warning"
title="Invalid document link"
subTitle="Failed to generate an online viewing link for this document."
extra={<Button type="primary" onClick={onRequestClose}>Close</Button>}
/>
)}
</div>
);
};
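
The essential step above is turning the relative temp link into an absolute URL before handing it to Office Online, which can only fetch publicly reachable addresses. For illustration, with made-up values:

// Hypothetical values, for illustration only.
const res = { url: '/api/fs/temp/abc123' };
const fileDomain = 'https://files.example.com';
const fullUrl = new URL(res.url, fileDomain || window.location.origin).href;
// fullUrl === 'https://files.example.com/api/fs/temp/abc123'
const officeUrl = `https://view.officeapps.live.com/op/embed.aspx?src=${encodeURIComponent(fullUrl)}`;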


@@ -1,16 +0,0 @@
import type { AppDescriptor } from '../types';
import { OfficeViewerApp } from './OfficeViewer.tsx';
export const descriptor: AppDescriptor = {
key: 'office-viewer',
name: 'Office Document Viewer',
iconUrl: 'https://api.iconify.design/mdi:file-word-box.svg',
supported: (entry) => {
if (entry.is_dir) return false;
const ext = entry.name.split('.').pop()?.toLowerCase() || '';
return ['docx', 'xlsx', 'pptx', 'doc', 'xls', 'ppt'].includes(ext);
},
component: OfficeViewerApp,
default: true,
defaultBounds: { width: 1024, height: 768, x: 150, y: 100 }
};


@@ -1,74 +0,0 @@
import React, { useEffect, useState } from 'react';
import { Spin, Result, Button } from 'antd';
import type { AppComponentProps } from '../types';
import { vfsApi } from '../../api/client';
export const PdfViewerApp: React.FC<AppComponentProps> = ({ filePath, onRequestClose }) => {
const [url, setUrl] = useState<string>();
const [loading, setLoading] = useState(true);
const [err, setErr] = useState<string>();
useEffect(() => {
let cancelled = false;
setLoading(true);
setErr(undefined);
setUrl(undefined);
vfsApi.getTempLinkToken(filePath.replace(/^\/+/, ''))
.then(res => {
if (cancelled) return;
const publicUrl = vfsApi.getTempPublicUrl(res.token);
setUrl(publicUrl + '#toolbar=1&navpanes=1');
})
.catch(e => {
if (!cancelled) setErr(e.message || 'Failed to get a temporary link');
})
.finally(() => {
if (!cancelled) setLoading(false);
});
return () => { cancelled = true; };
}, [filePath]);
if (loading) {
return (
<div style={{ width: '100%', height: '100%', display: 'flex', alignItems: 'center', justifyContent: 'center' }}>
<Spin tip="Loading PDF..." />
</div>
);
}
if (err) {
return (
<Result
status="error"
title="Unable to load PDF"
subTitle={err}
extra={<Button type="primary" onClick={onRequestClose}>Close</Button>}
/>
);
}
if (!url) {
return (
<Result
status="warning"
title="No link available"
subTitle="Failed to generate a temporary access link for the PDF"
extra={<Button type="primary" onClick={onRequestClose}>Close</Button>}
/>
);
}
return (
<div style={{ width: '100%', height: '100%', background: 'var(--ant-color-bg-container, #fff)' }}>
<iframe
src={url}
width="100%"
height="100%"
title="PDF Viewer"
style={{ border: 'none' }}
/>
</div>
);
};


@@ -1,16 +0,0 @@
import type { AppDescriptor } from '../types';
import { PdfViewerApp } from './PdfViewer';
export const descriptor: AppDescriptor = {
key: 'pdf-viewer',
name: 'PDF Viewer',
iconUrl: 'https://api.iconify.design/mdi:file-pdf-box.svg',
supported: (entry) => {
if (entry.is_dir) return false;
const ext = entry.name.split('.').pop()?.toLowerCase() || '';
return ext === 'pdf';
},
component: PdfViewerApp,
default: true,
defaultBounds: { width: 1024, height: 768, x: 160, y: 100 },
};


@@ -1,59 +1,109 @@
import React, { useRef, useState } from 'react';
import type { AppComponentProps } from '../types';
import { vfsApi } from '../../api/vfs';
import { loadPluginFromUrl, ensureManifest, type RegisteredPlugin } from '../../plugins/runtime';
import React, { useEffect, useMemo, useRef } from 'react';
import type { AppComponentProps, AppOpenComponentProps } from '../types';
import type { PluginItem } from '../../api/plugins';
import { useAsyncSafeEffect } from '../../hooks/useAsyncSafeEffect';
import { useI18n } from '../../i18n';
export interface PluginAppHostProps extends AppComponentProps {
plugin: PluginItem;
}
export const PluginAppHost: React.FC<PluginAppHostProps> = ({ plugin, filePath, entry, onRequestClose }) => {
const containerRef = useRef<HTMLDivElement>(null);
const [error, setError] = useState<string | null>(null);
function buildPluginFrameUrl(params: Record<string, string>): string {
const qs = new URLSearchParams(params);
return `/plugin-frame.html?${qs.toString()}`;
}
/**
* Plugin host component - file-open mode.
* Renders the plugin in an iframe to isolate rendering and styles, so the plugin cannot pollute the host DOM/CSS.
* Note: a same-origin iframe without the sandbox attribute is not a security sandbox (the plugin can still reach the host via window.parent).
*/
export const PluginAppHost: React.FC<PluginAppHostProps> = ({
plugin,
filePath,
onRequestClose,
}) => {
const iframeRef = useRef<HTMLIFrameElement>(null);
const onCloseRef = useRef(onRequestClose);
onCloseRef.current = onRequestClose;
const { t } = useI18n();
const pluginRef = useRef<RegisteredPlugin | null>(null);
useAsyncSafeEffect(
async ({ isDisposed }) => {
try {
const p = await loadPluginFromUrl(plugin.url);
if (isDisposed()) return;
pluginRef.current = p;
await ensureManifest(plugin.id, p);
if (isDisposed()) return;
const token = await vfsApi.getTempLinkToken(filePath);
if (isDisposed()) return;
const downloadUrl = vfsApi.getTempPublicUrl(token.token);
if (isDisposed() || !containerRef.current) return;
await p.mount(containerRef.current, {
filePath,
entry,
urls: { downloadUrl },
host: { close: () => onCloseRef.current() },
});
} catch (e: any) {
if (!isDisposed()) setError(e?.message || t('Plugin run failed'));
}
},
[plugin.id, plugin.url, filePath],
() => {
try {
if (pluginRef.current?.unmount && containerRef.current) {
pluginRef.current.unmount(containerRef.current);
}
} catch {}
},
const src = useMemo(
() =>
buildPluginFrameUrl({
pluginKey: plugin.key,
mode: 'file',
filePath,
}),
[plugin.key, filePath]
);
if (error) {
return <div style={{ padding: 12, color: 'red' }}>{t('Plugin Error')}: {error}</div>;
}
useEffect(() => {
const onMessage = (ev: MessageEvent) => {
if (ev.origin !== window.location.origin) return;
if (ev.source !== iframeRef.current?.contentWindow) return;
const data = ev.data as any;
if (!data || typeof data !== 'object') return;
if (data.type === 'foxel-plugin:close' && data.pluginKey === plugin.key) {
onCloseRef.current();
}
};
return <div ref={containerRef} style={{ width: '100%', height: '100%', overflow: 'auto' }} />;
window.addEventListener('message', onMessage);
return () => window.removeEventListener('message', onMessage);
}, [plugin.key]);
return (
<iframe
ref={iframeRef}
src={src}
title={`plugin:${plugin.key}`}
style={{ width: '100%', height: '100%', border: 0, display: 'block' }}
/>
);
};
export interface PluginAppOpenHostProps extends AppOpenComponentProps {
plugin: PluginItem;
}
/**
* Plugin host component - standalone app mode.
* Renders the plugin in an iframe to isolate rendering and styles, so the plugin cannot pollute the host DOM/CSS.
* Note: a same-origin iframe without the sandbox attribute is not a security sandbox (the plugin can still reach the host via window.parent).
*/
export const PluginAppOpenHost: React.FC<PluginAppOpenHostProps> = ({ plugin, onRequestClose }) => {
const iframeRef = useRef<HTMLIFrameElement>(null);
const onCloseRef = useRef(onRequestClose);
onCloseRef.current = onRequestClose;
const src = useMemo(
() =>
buildPluginFrameUrl({
pluginKey: plugin.key,
mode: 'app',
}),
[plugin.key]
);
useEffect(() => {
const onMessage = (ev: MessageEvent) => {
if (ev.origin !== window.location.origin) return;
if (ev.source !== iframeRef.current?.contentWindow) return;
const data = ev.data as any;
if (!data || typeof data !== 'object') return;
if (data.type === 'foxel-plugin:close' && data.pluginKey === plugin.key) {
onCloseRef.current();
}
};
window.addEventListener('message', onMessage);
return () => window.removeEventListener('message', onMessage);
}, [plugin.key]);
return (
<iframe
ref={iframeRef}
src={src}
title={`plugin:${plugin.key}:app`}
style={{ width: '100%', height: '100%', border: 0, display: 'block' }}
/>
);
};
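
Both hosts accept a 'foxel-plugin:close' message only after checking the event origin and source window. The sending side lives in plugin-frame.html, which is not included in this diff; under that assumption, a plugin running in the frame could request closing roughly like this (a sketch, with the message shape taken from the handlers above):

// Inside the iframe (plugin-frame.html). The plugin key is read back from the
// query string that buildPluginFrameUrl encodes.
const pluginKey = new URLSearchParams(window.location.search).get('pluginKey');

function requestClose(): void {
  // Same-origin iframe, so the host's own origin is a valid targetOrigin.
  window.parent.postMessage({ type: 'foxel-plugin:close', pluginKey }, window.location.origin);
}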


@@ -1,273 +0,0 @@
import React, { useState, useEffect, useCallback, useRef, useMemo, Suspense } from 'react';
import { Layout, Spin, Button, Space, message } from 'antd';
import type { AppComponentProps } from '../types';
import { vfsApi } from '../../api/vfs';
import request from '../../api/client';
const MonacoEditor = React.lazy(() => import('@monaco-editor/react'));
const MarkdownEditor = React.lazy(() => import('@uiw/react-md-editor'));
const { Header, Content } = Layout;
export const TextEditorApp: React.FC<AppComponentProps> = ({ filePath, entry, onRequestClose }) => {
const [loading, setLoading] = useState(true);
const [saving, setSaving] = useState(false);
const [content, setContent] = useState('');
const [initialContent, setInitialContent] = useState('');
const [truncated, setTruncated] = useState(false);
const MAX_PREVIEW_BYTES = 1024 * 1024; // 1MB
const isDirty = content !== initialContent;
const onRequestCloseRef = useRef(onRequestClose);
onRequestCloseRef.current = onRequestClose;
const ext = useMemo(() => entry.name.split('.').pop()?.toLowerCase() || '', [entry.name]);
const isMarkdown = ext === 'md' || ext === 'markdown';
const monacoLanguage = useMemo(() => {
switch (ext) {
// Web technologies
case 'js':
case 'jsx':
return 'javascript';
case 'ts':
case 'tsx':
return 'typescript';
case 'html':
case 'htm':
return 'html';
case 'css':
return 'css';
case 'scss':
case 'sass':
return 'scss';
case 'less':
return 'less';
case 'vue':
return 'html'; // Vue files are primarily HTML with some JS/TS
// Data formats
case 'json':
return 'json';
case 'yaml':
case 'yml':
return 'yaml';
case 'xml':
return 'xml';
case 'toml':
return 'ini'; // TOML is similar to INI
case 'ini':
case 'cfg':
case 'conf':
return 'ini';
// Programming languages
case 'py':
return 'python';
case 'java':
return 'java';
case 'c':
return 'c';
case 'cpp':
case 'cc':
case 'cxx':
return 'cpp';
case 'h':
case 'hpp':
case 'hxx':
return 'cpp'; // Header files use C++ highlighting
case 'php':
return 'php';
case 'rb':
return 'ruby';
case 'go':
return 'go';
case 'rs':
return 'rust';
case 'swift':
return 'swift';
case 'kt':
return 'kotlin';
case 'scala':
return 'scala';
case 'cs':
return 'csharp';
case 'fs':
return 'fsharp';
case 'vb':
return 'vb';
case 'pl':
case 'pm':
return 'perl';
case 'r':
return 'r';
case 'lua':
return 'lua';
case 'dart':
return 'dart';
// Database
case 'sql':
return 'sql';
// Shell and scripts
case 'sh':
case 'bash':
case 'zsh':
case 'fish':
return 'shell';
case 'ps1':
return 'powershell';
case 'bat':
case 'cmd':
return 'bat';
// Build and config files
case 'dockerfile':
return 'dockerfile';
case 'makefile':
return 'makefile';
case 'gradle':
return 'groovy';
case 'cmake':
return 'cmake';
// Markdown
case 'md':
case 'markdown':
return 'markdown';
// Plain text and logs
case 'txt':
case 'log':
case 'gitignore':
case 'gitattributes':
case 'editorconfig':
case 'prettierrc':
default:
return 'plaintext';
}
}, [ext]);
useEffect(() => {
const loadFile = async () => {
try {
setLoading(true);
setTruncated(false);
const shouldTruncate = (entry.size ?? 0) > MAX_PREVIEW_BYTES;
if (shouldTruncate) {
const enc = encodeURI(filePath.replace(/^\/+/, ''));
const resp = await request(`/fs/file/${enc}`, {
method: 'GET',
headers: { Range: `bytes=0-${MAX_PREVIEW_BYTES - 1}` },
rawResponse: true,
});
const buf = await (resp as Response).arrayBuffer();
const text = new TextDecoder().decode(buf);
setContent(text);
setInitialContent(text);
setTruncated(true);
} else {
const data = await vfsApi.readFile(filePath);
const text = typeof data === 'string' ? data : new TextDecoder().decode(data);
setContent(text);
setInitialContent(text);
}
} catch (error) {
message.error(`Failed to load file: ${error instanceof Error ? error.message : 'unknown error'}`);
onRequestCloseRef.current();
} finally {
setLoading(false);
}
};
loadFile();
}, [filePath, entry.size]);
const handleSave = useCallback(async () => {
if (truncated) {
message.warning('Large file: only the first 1MB is previewed; saving is disabled');
return;
}
if (!isDirty) return;
try {
setSaving(true);
const blob = new Blob([content], { type: 'text/plain' });
await vfsApi.uploadFile(filePath, blob);
setInitialContent(content);
message.success('Saved');
} catch (error) {
message.error(`Failed to save file: ${error instanceof Error ? error.message : 'unknown error'}`);
} finally {
setSaving(false);
}
}, [content, filePath, isDirty, truncated]);
useEffect(() => {
const handleKeyDown = (event: KeyboardEvent) => {
if ((event.ctrlKey || event.metaKey) && event.key === 's') {
event.preventDefault();
handleSave();
}
};
window.addEventListener('keydown', handleKeyDown);
return () => {
window.removeEventListener('keydown', handleKeyDown);
};
}, [handleSave]);
return (
<Layout style={{ height: '100%', background: 'var(--ant-color-bg-container, #ffffff)' }}>
<Header
style={{
background: 'var(--ant-color-bg-layout, #f0f2f5)',
padding: '0 16px',
height: 40,
display: 'flex',
alignItems: 'center',
justifyContent: 'space-between',
borderBottom: '1px solid var(--ant-color-border-secondary, #d9d9d9)'
}}
>
<span style={{ color: 'var(--ant-color-text, rgba(0,0,0,0.88))' }}>
{entry.name} {isDirty && '*'} {truncated && '(large file: only the first 1MB is previewed; editing and saving are disabled)'}
</span>
<Space>
<Button type="primary" size="small" onClick={handleSave} loading={saving} disabled={!isDirty || truncated}>
Save
</Button>
</Space>
</Header>
<Content style={{ position: 'relative', overflow: 'auto', height: 'calc(100% - 40px)' }}>
{loading ? (
<div style={{ display: 'flex', justifyContent: 'center', alignItems: 'center', height: '100%' }}>
<Spin />
</div>
) : (
isMarkdown ? (
<Suspense fallback={<Spin style={{ marginTop: 24 }} />}>
<MarkdownEditor
value={content}
onChange={(val) => setContent(val || '')}
height="100%"
preview={truncated ? 'preview' : 'live'}
/>
</Suspense>
) : (
<Suspense fallback={<Spin style={{ marginTop: 24 }} />}>
<MonacoEditor
value={content}
onChange={(val) => setContent(val || '')}
height="100%"
language={monacoLanguage}
options={{
readOnly: truncated,
minimap: { enabled: false },
scrollBeyondLastLine: false,
wordWrap: 'on',
fontSize: 13,
}}
/>
</Suspense>
)
)}
</Content>
</Layout>
);
};


@@ -1,36 +0,0 @@
import type { AppDescriptor } from '../types';
import { TextEditorApp } from './TextEditor.tsx';
export const descriptor: AppDescriptor = {
key: 'text-editor',
name: 'Text Editor',
iconUrl: 'https://api.iconify.design/mdi:file-document-outline.svg',
supported: (entry) => {
if (entry.is_dir) return false;
const ext = entry.name.split('.').pop()?.toLowerCase() || '';
// Supports common text and code formats
return [
// Text formats
'txt', 'md', 'markdown', 'log',
// Data formats
'json', 'yaml', 'yml', 'xml', 'toml', 'ini', 'cfg', 'conf',
// Web technologies
'html', 'htm', 'css', 'scss', 'sass', 'less', 'js', 'jsx', 'ts', 'tsx', 'vue',
// Programming languages
'py', 'java', 'c', 'cpp', 'cc', 'cxx', 'h', 'hpp', 'hxx',
'php', 'rb', 'go', 'rs', 'swift', 'kt', 'scala', 'clj', 'cljs',
'cs', 'vb', 'fs', 'pl', 'pm', 'r', 'lua', 'dart', 'elm',
// Database
'sql',
// Shell and scripts
'sh', 'bash', 'zsh', 'fish', 'ps1', 'bat', 'cmd',
// Build and config files
'dockerfile', 'makefile', 'gradle', 'cmake',
// Other common text files
'gitignore', 'gitattributes', 'editorconfig', 'prettierrc'
].includes(ext);
},
component: TextEditorApp,
default: true,
defaultBounds: { width: 1024, height: 768, x: 120, y: 80 }
};


@@ -1,46 +0,0 @@
import React, { useEffect, useRef } from 'react';
import Artplayer from 'artplayer';
import { vfsApi } from '../../api/client';
import type { AppComponentProps } from '../types';
export const VideoPlayerApp: React.FC<AppComponentProps> = ({ filePath }) => {
const artRef = useRef<HTMLDivElement | null>(null);
const artInstance = useRef<Artplayer | null>(null);
useEffect(() => {
// Percent-encode '#' characters in the path so the browser does not treat them as a URL fragment.
const safePath = filePath.replace(/^\/+/, '').split('#').map((seg, idx) => idx === 0 ? seg : encodeURIComponent('#') + seg).join('');
const videoUrl = vfsApi.streamUrl(safePath);
if (artRef.current) {
artInstance.current = new Artplayer({
container: artRef.current,
url: videoUrl,
autoplay: true,
fullscreen: true,
fullscreenWeb: true,
pip: true,
setting: true,
playbackRate: true,
});
}
return () => {
if (artInstance.current) {
artInstance.current.destroy();
}
};
}, [filePath]);
return (
<div
ref={artRef}
style={{
width: '100%',
height: '100%',
backgroundColor: '#000'
}}
/>
);
};
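
The path mangling at the top of the effect exists because a '#' in a file name would otherwise be parsed as a URL fragment and the server would never see the rest of the path. A quick illustration with a hypothetical path:

const filePath = '/videos/clip #1.mp4'; // hypothetical
const safePath = filePath
  .replace(/^\/+/, '')
  .split('#')
  .map((seg, idx) => (idx === 0 ? seg : encodeURIComponent('#') + seg))
  .join('');
// safePath === 'videos/clip %231.mp4'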


@@ -1,16 +0,0 @@
import type { AppDescriptor } from '../types';
import { VideoPlayerApp } from './VideoPlayer.tsx';
export const descriptor: AppDescriptor = {
key: 'video-player',
name: 'Video Player',
iconUrl: 'https://api.iconify.design/mdi:video.svg',
supported: (entry) => {
if (entry.is_dir) return false;
const ext = entry.name.split('.').pop()?.toLowerCase() || '';
return ['mp4','webm','ogg','m4v','mov','mkv','avi','wmv','flv','3gp'].includes(ext);
},
component: VideoPlayerApp,
default: true,
defaultBounds: { width: 960, height: 600, x: 180, y: 120 }
};


@@ -2,69 +2,113 @@ import type { VfsEntry } from '../api/client';
import type { AppDescriptor } from './types';
import React from 'react';
import { pluginsApi, type PluginItem } from '../api/plugins';
import { PluginAppHost } from './PluginHost';
import { PluginAppHost, PluginAppOpenHost } from './PluginHost';
import { getPluginAssetUrl } from '../plugins/runtime';
const apps: AppDescriptor[] = [];
// Dynamically import all built-in apps via import.meta.glob
const appModules = import.meta.glob('./*/index.ts');
/**
* Build the unique app key for a plugin.
*/
function getPluginAppKey(p: PluginItem): string {
return `plugin:${p.key}`;
}
async function loadApps() {
for (const path in appModules) {
const module = await appModules[path]();
if (module && typeof module === 'object' && 'descriptor' in module) {
const descriptor = (module as { descriptor: AppDescriptor }).descriptor;
if (!apps.find(a => a.key === descriptor.key)) {
apps.push(descriptor);
}
}
/**
* Resolve a plugin's icon URL.
* Supports absolute paths, relative paths (plugin assets), and external URLs.
*/
function resolvePluginIcon(p: PluginItem): string | undefined {
if (!p.icon) return undefined;
// External URL
if (p.icon.startsWith('http://') || p.icon.startsWith('https://')) {
return p.icon;
}
try {
const items = await pluginsApi.list();
items.filter(p => p.enabled !== false).forEach((p) => registerPluginAsApp(p));
} catch (e) {
// Absolute path
if (p.icon.startsWith('/')) {
return p.icon;
}
// Plugin asset path
return getPluginAssetUrl(p.key, p.icon);
}
function resolvePluginUseSystemWindow(p: PluginItem): boolean | undefined {
const frontend = (p.manifest as any)?.frontend as any;
const value = frontend?.use_system_window ?? frontend?.useSystemWindow;
return typeof value === 'boolean' ? value : undefined;
}
function registerPluginAsApp(p: PluginItem) {
const key = 'plugin:' + p.id;
if (apps.find(a => a.key === key)) return;
const key = getPluginAppKey(p);
if (apps.find((a) => a.key === key)) return;
const supported = (entry: VfsEntry) => {
if (entry.is_dir) return false;
const ext = entry.name.split('.').pop()?.toLowerCase() || '';
if (!p.supported_exts || p.supported_exts.length === 0) return true;
return p.supported_exts.includes(ext);
};
apps.push({
key,
name: p.name || `Plugin ${p.id}`,
name: p.name || `Plugin ${p.key}`,
supported,
component: (props: any) => React.createElement(PluginAppHost, { plugin: p, ...props }),
iconUrl: p.icon || undefined,
openAppComponent: p.open_app
? (props: any) => React.createElement(PluginAppOpenHost, { plugin: p, ...props })
: undefined,
iconUrl: resolvePluginIcon(p),
default: false,
defaultBounds: p.default_bounds || undefined,
defaultMaximized: p.default_maximized || undefined,
useSystemWindow: resolvePluginUseSystemWindow(p),
description: p.description || undefined,
author: p.author || undefined,
supportedExts: p.supported_exts || undefined,
website: p.website || undefined,
github: p.github || undefined,
});
}
loadApps();
async function loadApps() {
try {
const items = await pluginsApi.list();
items.forEach((p) => registerPluginAsApp(p));
} catch {
void 0;
}
}
const appsLoadedPromise = loadApps();
export async function ensureAppsLoaded() {
await appsLoadedPromise;
}
export function listPluginApps(): AppDescriptor[] {
return apps;
}
export function getAppsForEntry(entry: VfsEntry): AppDescriptor[] {
return apps.filter(a => a.supported(entry));
return apps.filter((a) => a.supported(entry));
}
export function getAppByKey(key: string): AppDescriptor | undefined {
return apps.find(a => a.key === key);
return apps.find((a) => a.key === key);
}
export function getDefaultAppForEntry(entry: VfsEntry): AppDescriptor | undefined {
if (entry.is_dir) return;
const ext = entry.name.split('.').pop()?.toLowerCase() || '';
if (!ext) return apps.find(a => a.supported(entry) && a.default);
if (!ext) return apps.find((a) => a.supported(entry) && a.default);
const saved = localStorage.getItem(`app.default.${ext}`);
if (saved) {
return apps.find(a => a.key === saved && a.supported(entry)) || undefined;
return apps.find((a) => a.key === saved && a.supported(entry)) || undefined;
}
return apps.find(a => a.supported(entry) && a.default);
return apps.find((a) => a.supported(entry) && a.default);
}
export type { AppDescriptor };
@@ -73,24 +117,40 @@ export type { AppComponentProps } from './types';
export async function reloadPluginApps() {
try {
const items = await pluginsApi.list();
const keepKeys = new Set(items.filter(p => p.enabled !== false).map(p => 'plugin:' + p.id));
// Build the set of plugin app keys to keep
const keepKeys = new Set(items.map((p) => getPluginAppKey(p)));
// Remove apps for plugins that no longer exist
for (let i = apps.length - 1; i >= 0; i--) {
const a = apps[i];
if (a.key.startsWith('plugin:') && !keepKeys.has(a.key)) {
if (!keepKeys.has(a.key)) {
apps.splice(i, 1);
}
}
items.filter(p => p.enabled !== false).forEach(p => {
const key = 'plugin:' + p.id;
const existing = apps.find(a => a.key === key);
// Update existing plugin apps or register new ones
items.forEach((p) => {
const key = getPluginAppKey(p);
const existing = apps.find((a) => a.key === key);
if (!existing) {
registerPluginAsApp(p);
} else {
existing.name = p.name || `Plugin ${p.id}`;
// Update the existing app's metadata
existing.name = p.name || `Plugin ${p.key}`;
existing.defaultBounds = p.default_bounds || undefined;
existing.defaultMaximized = p.default_maximized || undefined;
existing.iconUrl = p.icon || existing.iconUrl;
existing.useSystemWindow = resolvePluginUseSystemWindow(p);
existing.iconUrl = resolvePluginIcon(p);
existing.description = p.description || undefined;
existing.author = p.author || undefined;
existing.supportedExts = p.supported_exts || undefined;
existing.openAppComponent = p.open_app
? (props: any) => React.createElement(PluginAppOpenHost, { plugin: p, ...props })
: undefined;
}
});
} catch { }
} catch {
void 0;
}
}
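
getDefaultAppForEntry consults a per-extension preference stored under the localStorage key app.default.${ext}. Where that key is written is not shown in this diff; a sketch of the write side consistent with the read above (setDefaultAppForExt is a hypothetical helper):

// Remember the user's chosen default app for one file extension.
export function setDefaultAppForExt(ext: string, appKey: string): void {
  localStorage.setItem(`app.default.${ext.toLowerCase()}`, appKey);
}

// e.g. always open PDFs with the built-in viewer:
setDefaultAppForExt('pdf', 'pdf-viewer');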


@@ -6,14 +6,28 @@ export interface AppComponentProps {
onRequestClose: () => void;
}
export interface AppOpenComponentProps {
onRequestClose: () => void;
}
export interface AppDescriptor {
key: string;
name: string;
supported: (entry: VfsEntry) => boolean;
component: React.ComponentType<AppComponentProps>;
/**
* Open the app standalone (without a file).
* When omitted, the app can only be opened via a file.
*/
openAppComponent?: React.ComponentType<AppOpenComponentProps>;
iconUrl?: string;
default?: boolean;
defaultMaximized?: boolean;
description?: string;
author?: string;
supportedExts?: string[];
website?: string;
github?: string;
/**
* Default position and size of the app window (takes effect when not maximized).
* Any omitted field falls back to the system default / cascading offset.

Some files were not shown because too many files have changed in this diff.