Compare commits

..

25 Commits
1.0.9 ... 1.1.4

Author SHA1 Message Date
amtoaer
668c67da53 chore: bump version from 1.1.3 to 1.1.4 2024-01-20 15:50:12 +08:00
ᴀᴍᴛᴏᴀᴇʀ
9204bbb4ad fix: 修复新的配置项没有写入配置文件的问题,扩充单行字符限制 (#33) 2024-01-20 15:37:43 +08:00
ᴀᴍᴛᴏᴀᴇʀ
d467750d4f feat: 支持指定编码优先级 (#32) 2024-01-20 15:16:48 +08:00
amtoaer
641cc3f48b chore: 优化 dockerfile,缩小镜像体积 2024-01-06 02:13:00 +08:00
amtoaer
345c764463 fix: 修复 docker 退出时不会释放资源的问题 2024-01-06 00:41:28 +08:00
amtoaer
85b7d3dc9b chore: 恢复 dockerfile 写法,难以复用缓存但减小容器体积 2024-01-05 23:34:32 +08:00
amtoaer
f1ada17f30 chore: bump version from 1.1.2 to 1.1.3 2024-01-05 01:15:12 +08:00
amtoaer
cb0ac7eb67 chore: 开启自动提交和自动标签 2024-01-05 01:13:36 +08:00
amtoaer
31efedbde9 chore: 修复依赖异常,优化 dockerfile 流程 2024-01-05 01:11:10 +08:00
amtoaer
3defb07325 chore: 存版本号并添加入口,方便触发版本间的迁移逻辑 2024-01-04 22:13:03 +08:00
amtoaer
e36f829e70 chore: 引入 bump-version 并正确设置版本号 2024-01-04 22:04:10 +08:00
amtoaer
c20b579523 chore: 排序一下依赖 2024-01-04 21:54:27 +08:00
amtoaer
ceec222604 chore: 更新上游依赖,修复刷新 cookie 失败的错误 2024-01-04 21:50:28 +08:00
amtoaer
60ea7795ae chore: 修改基础镜像标签 2024-01-04 21:07:08 +08:00
DDSDerek
6cbacbd127 chore: Optimization docker (#17)
* feat: docker build adds cache

* fix: dockerfile optimization

* doc: dockerhub pictures are not displayed properly

---------

Co-authored-by: DDSRem <1448139087@qq.com>
2024-01-04 20:51:03 +08:00
DDSDerek
8ea2fbe0f9 fix: docker meta username error (#16)
Co-authored-by: DDSRem <1448139087@qq.com>
2023-12-30 14:31:48 +08:00
DDSDerek
e3fded16ac feat: support arm64 architecture (#15)
Co-authored-by: DDSRem <1448139087@qq.com>
2023-12-30 14:22:26 +08:00
amtoaer
961913c4fb doc: 加入字幕相关文档 2023-12-07 22:11:37 +08:00
amtoaer
fa20e5efee feat: 开放弹幕的各项设置 2023-12-07 21:45:18 +08:00
amtoaer
38fb0a4560 fix: 安全地移除配置项 2023-12-07 21:29:57 +08:00
amtoaer
9e94e3b73e chore: try except 按块分割,移除无用的设置项 2023-12-07 21:15:40 +08:00
amtoaer
b955a9fe45 chore: 替换掉被标记 deprecated 的方法 2023-12-06 18:17:17 +08:00
amtoaer
9d151b4731 feat: 命令默认不覆盖现有内容,更新文档 2023-12-06 01:19:08 +08:00
amtoaer
1686c1a8df feat: 支持弹幕下载 2023-12-06 00:39:46 +08:00
amtoaer
de6eaeb4a6 chore: 整理代码逻辑,留出下载字幕的入口 2023-12-06 00:00:42 +08:00
14 changed files with 1122 additions and 734 deletions

View File

@@ -12,6 +12,20 @@ jobs:
- -
name: Checkout name: Checkout
uses: actions/checkout@v3 uses: actions/checkout@v3
-
name: Docker meta
id: meta
uses: docker/metadata-action@v5
with:
images: ${{ secrets.DOCKERHUB_USERNAME }}/bili-sync
tags: |
type=raw,value=debug
-
name: Set Up QEMU
uses: docker/setup-qemu-action@v3
-
name: Set Up Buildx
uses: docker/setup-buildx-action@v3
- -
name: Login to DockerHub name: Login to DockerHub
uses: docker/login-action@v3 uses: docker/login-action@v3
@@ -23,7 +37,12 @@ jobs:
uses: docker/build-push-action@v5 uses: docker/build-push-action@v5
with: with:
context: . context: .
file: ./Dockerfile file: Dockerfile
platforms: |
linux/amd64
linux/arm64/v8
push: true push: true
tags: | tags: ${{ steps.meta.outputs.tags }}
${{ secrets.DOCKERHUB_USERNAME }}/bili-sync:debug labels: ${{ steps.meta.outputs.labels }}
cache-from: type=gha, scope=${{ github.workflow }}
cache-to: type=gha, scope=${{ github.workflow }}

View File

@@ -12,6 +12,21 @@ jobs:
- -
name: Checkout name: Checkout
uses: actions/checkout@v3 uses: actions/checkout@v3
-
name: Docker meta
id: meta
uses: docker/metadata-action@v5
with:
images: ${{ secrets.DOCKERHUB_USERNAME }}/bili-sync
tags: |
type=raw,value=${{ github.ref_name }}
type=raw,value=latest
-
name: Set Up QEMU
uses: docker/setup-qemu-action@v3
-
name: Set Up Buildx
uses: docker/setup-buildx-action@v3
- -
name: Login to DockerHub name: Login to DockerHub
uses: docker/login-action@v3 uses: docker/login-action@v3
@@ -23,11 +38,15 @@ jobs:
uses: docker/build-push-action@v5 uses: docker/build-push-action@v5
with: with:
context: . context: .
file: ./Dockerfile file: Dockerfile
platforms: |
linux/amd64
linux/arm64/v8
push: true push: true
tags: | tags: ${{ steps.meta.outputs.tags }}
${{ secrets.DOCKERHUB_USERNAME }}/bili-sync:${{ github.ref_name }} labels: ${{ steps.meta.outputs.labels }}
${{ secrets.DOCKERHUB_USERNAME }}/bili-sync:latest cache-from: type=gha, scope=${{ github.workflow }}
cache-to: type=gha, scope=${{ github.workflow }}
- -
name: Update DockerHub description name: Update DockerHub description
uses: peter-evans/dockerhub-description@v3 uses: peter-evans/dockerhub-description@v3

View File

@@ -1,22 +1,41 @@
FROM python:3.11.6-alpine3.18 AS base FROM python:3.11.7-alpine3.19 as base
WORKDIR /app WORKDIR /app
ENV BILI_IN_DOCKER=true ENV LANG=zh_CN.UTF-8 \
TZ=Asia/Shanghai \
BILI_IN_DOCKER=true
RUN apk add --no-cache ffmpeg tini \
&& apk add --no-cache --virtual .build-deps \
gcc \
musl-dev \
libffi-dev \
openssl-dev \
&& pip install poetry==1.7.1 pip3-autoremove==1.2.0
COPY poetry.lock pyproject.toml ./ COPY poetry.lock pyproject.toml ./
RUN apk add ffmpeg \ RUN poetry config virtualenvs.create false \
&& apk add --no-cache --virtual .build-deps \ && poetry install --only main --no-root \
gcc \ && pip3-autoremove -y poetry pip3-autoremove \
musl-dev \ && apk del .build-deps \
libffi-dev \ && rm -rf \
openssl-dev \ /root/.cache \
&& pip install poetry \ /tmp/*
&& poetry config virtualenvs.create false \
&& poetry install --no-dev --no-interaction --no-ansi \
&& apk del .build-deps
COPY . . COPY . .
ENTRYPOINT [ "python", "entry.py" ] FROM scratch
WORKDIR /app
ENV LANG=zh_CN.UTF-8 \
TZ=Asia/Shanghai \
BILI_IN_DOCKER=true
COPY --from=base / /
ENTRYPOINT [ "tini", "python", "entry.py" ]
VOLUME [ "/app/config", "/app/data", "/app/thumb", "/Videos/Bilibilis" ]

View File

@@ -1,4 +1,4 @@
.PHONY: install fmt start-daemon start-once .PHONY: install fmt start-daemon start-once db-init db-migrate db-upgrade sync-conf
install: install:
@echo "Installing dependencies..." @echo "Installing dependencies..."
@@ -23,3 +23,9 @@ db-migrate:
db-upgrade: db-upgrade:
@poetry run aerich upgrade @poetry run aerich upgrade
sync-conf:
@echo "Syncing config..."
@cp ${CONFIG_SRC} ./config/
@cp ${DB_SRC} ./data/
@echo "Done."

View File

@@ -13,15 +13,23 @@
## 工作截图 ## 工作截图
![下载视频](asset/run.png) ![下载视频](https://raw.githubusercontent.com/amtoaer/bili-sync/main/asset/run.png)
![EMBY 识别](asset/emby.png) ![EMBY 识别](https://raw.githubusercontent.com/amtoaer/bili-sync/main/asset/emby.png)
## 配置文件 ## 配置文件
对于配置文件的前五项,请参考[凭据获取流程](https://nemo2011.github.io/bilibili-api/#/get-credential)。 对于配置文件的前五项,请参考[凭据获取流程](https://nemo2011.github.io/bilibili-api/#/get-credential)。
```python ```python
@dataclass
class SubtitleConfig(DataClassJsonMixin):
font_name: str = "微软雅黑,黑体" # 字体
font_size: float = 40 # 字号
alpha: float = 0.8 # 透明度
fly_time: float = 5 # 滚动弹幕持续时间
static_time: float = 10 # 静态弹幕持续时间
class Config(DataClassJsonMixin): class Config(DataClassJsonMixin):
sessdata: str = "" sessdata: str = ""
bili_jct: str = "" bili_jct: str = ""
@@ -29,8 +37,8 @@ class Config(DataClassJsonMixin):
dedeuserid: str = "" dedeuserid: str = ""
ac_time_value: str = "" ac_time_value: str = ""
interval: int = 20 # 任务执行的间隔时间 interval: int = 20 # 任务执行的间隔时间
favorite_ids: list[int] = field(default_factory=list) # 收藏夹的 id
path_mapper: dict[int, str] = field(default_factory=dict) # 收藏夹的 id 到存储目录的映射 path_mapper: dict[int, str] = field(default_factory=dict) # 收藏夹的 id 到存储目录的映射
subtitle: SubtitleConfig = field(default_factory=SubtitleConfig) # 字幕相关设置
``` ```
程序默认会将配置文件存储于 `${程序路径}/config/config.json`,数据库文件存储于 `${程序路径}/data/data.db`,如果发现不存在则新建并写入初始配置。 程序默认会将配置文件存储于 `${程序路径}/config/config.json`,数据库文件存储于 `${程序路径}/data/data.db`,如果发现不存在则新建并写入初始配置。
@@ -75,11 +83,15 @@ services:
"dedeuserid": "xxxxxxxxxxxxxxxxxx", "dedeuserid": "xxxxxxxxxxxxxxxxxx",
"ac_time_value": "xxxxxxxxxxxxxxxxxx", "ac_time_value": "xxxxxxxxxxxxxxxxxx",
"interval": 20, "interval": 20,
"favorite_ids": [
711322958
],
"path_mapper": { "path_mapper": {
"711322958": "/Videos/Bilibilis/Bilibili-711322958/" "711322958": "/Videos/Bilibilis/Bilibili-711322958/"
},
"subtitle": {
"font_name": "微软雅黑,黑体",
"font_size": 40.0,
"alpha": 0.8,
"fly_time": 5.0,
"static_time": 10.0
} }
} }
``` ```
@@ -94,9 +106,23 @@ services:
2. `recheck` 2. `recheck`
将本地不存在的视频文件标记成未下载,下次定时任务触发时将一并下载。 将本地不存在的视频文件标记成未下载,下次定时任务触发时将一并下载。
3. `upper_thumb` 3. `refresh_poster`
手动触发全量下载 up 主头像,为使用老版本时下载的没有 up 头像的视频添加头像 更新本地视频的封面
3. `refresh_upper`
更新本地up的头像和元数据。
3. `refresh_nfo`
更新本地视频的元数据。(如标签、标题等信息)
3. `refresh_video`
更新本地的视频源文件。
3. `refresh_subtitle`
更新本地的弹幕文件。
**对于以 refresh 开头的命令,均支持 --force 参数,如果有 --force 参数,将全量覆盖对应内容,否则默认仅更新缺失的部分。**
## 路线图 ## 路线图

View File

@@ -1,11 +1,14 @@
import asyncio import asyncio
import functools
from pathlib import Path
from typing import Callable
from loguru import logger from loguru import logger
from constants import MediaStatus, MediaType from constants import MediaStatus, MediaType
from models import FavoriteItem, Upper from models import FavoriteItem
from processor import download_content, process_video from processor import process_favorite_item
from utils import aexists, amakedirs, aremove from utils import aexists, aremove
async def recheck(): async def recheck():
@@ -37,52 +40,62 @@ async def recheck():
logger.info("Database updated.") logger.info("Database updated.")
async def upper_thumb(): async def _refresh_favorite_item_info(
"""将up主的头像批量写入数据库从不支持up主头像的版本升级上来后需要手动调用一次""" path_getter: Callable[[FavoriteItem], list[Path]],
makedir_tasks = [] process_poster: bool = False,
other_tasks = [] process_video: bool = False,
for upper in await Upper.all(): process_nfo: bool = False,
if all( process_upper: bool = False,
await asyncio.gather( process_subtitle: bool = False,
aexists(upper.thumb_path), aexists(upper.meta_path) force: bool = False,
) ):
): items = await FavoriteItem.filter(downloaded=True).prefetch_related("upper")
logger.info( if force:
"Upper {} {} already exists, skipped.", upper.mid, upper.name # 如果强制刷新,那么就先把现存的所有内容删除
) await asyncio.gather(
makedir_tasks.append(amakedirs(upper.thumb_path.parent, exist_ok=True)) *[aremove(path) for item in items for path in path_getter(item)],
logger.info("Saving metadata for upper {} {}...", upper.mid, upper.name) return_exceptions=True,
other_tasks.extend(
[
upper.save_metadata(),
download_content(upper.thumb, upper.thumb_path),
]
) )
await asyncio.gather(*makedir_tasks)
await asyncio.gather(*other_tasks)
logger.info("All done.")
async def refresh_tags():
"""刷新已存在的视频的标签,从不支持标签的版本升级上来后需要手动调用一次"""
items = await FavoriteItem.filter(
downloaded=True,
tags=None,
).prefetch_related("upper")
await asyncio.gather(
*[aremove(item.nfo_path) for item in items],
return_exceptions=True,
)
await asyncio.gather( await asyncio.gather(
*[ *[
process_video( process_favorite_item(
item, item,
process_poster=False, process_poster=process_poster,
process_video=False, process_video=process_video,
process_nfo=True, process_nfo=process_nfo,
process_upper=False, process_upper=process_upper,
process_subtitle=process_subtitle,
) )
for item in items for item in items
], ],
return_exceptions=True, return_exceptions=True,
) )
refresh_nfo = functools.partial(
_refresh_favorite_item_info, lambda item: [item.nfo_path], process_nfo=True
)
refresh_poster = functools.partial(
_refresh_favorite_item_info,
lambda item: [item.poster_path],
process_poster=True,
)
refresh_video = functools.partial(
_refresh_favorite_item_info,
lambda item: [item.video_path],
process_video=True,
)
refresh_upper = functools.partial(
_refresh_favorite_item_info,
lambda item: item.upper_path,
process_upper=True,
)
refresh_subtitle = functools.partial(
_refresh_favorite_item_info,
lambda item: [item.subtitle_path],
process_subtitle=True,
)

View File

@@ -1,10 +1,19 @@
import asyncio import asyncio
import os
import signal
import sys import sys
import uvloop import uvloop
from loguru import logger from loguru import logger
from commands import recheck, refresh_tags, upper_thumb from commands import (
recheck,
refresh_nfo,
refresh_poster,
refresh_subtitle,
refresh_upper,
refresh_video,
)
from models import init_model from models import init_model
from processor import cleanup, process from processor import cleanup, process
from settings import settings from settings import settings
@@ -14,15 +23,22 @@ asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
async def entry() -> None: async def entry() -> None:
await init_model() await init_model()
for command, func in [ force = any("force" in _ for _ in sys.argv)
for command, func in (
("once", process), ("once", process),
("recheck", recheck), ("recheck", recheck),
("upper_thumb", upper_thumb), ("refresh_poster", refresh_poster),
("refresh_tags", refresh_tags), ("refresh_upper", refresh_upper),
]: ("refresh_nfo", refresh_nfo),
("refresh_video", refresh_video),
("refresh_subtitle", refresh_subtitle),
):
if any(command in _ for _ in sys.argv): if any(command in _ for _ in sys.argv):
logger.info("Running {}...", command) logger.info("Running {}...", command)
await func() if command.startswith("refresh"):
await func(force=force)
else:
await func()
return return
logger.info("Running daemon...") logger.info("Running daemon...")
while True: while True:
@@ -31,8 +47,16 @@ async def entry() -> None:
if __name__ == "__main__": if __name__ == "__main__":
# 确保 docker 退出时正确触发资源释放
signal.signal(signal.SIGTERM, lambda *_: os.kill(os.getpid(), signal.SIGINT))
with asyncio.Runner() as runner: with asyncio.Runner() as runner:
try: try:
runner.run(entry()) runner.run(entry())
except Exception:
logger.exception("Unexpected error occurred, exiting...")
except KeyboardInterrupt:
logger.error("Exit Signal Received, exiting...")
finally: finally:
logger.info("Cleaning up resources...")
runner.run(cleanup()) runner.run(cleanup())
logger.info("Done, exited.")

View File

@@ -0,0 +1,14 @@
from tortoise import BaseDBAsyncClient
async def upgrade(db: BaseDBAsyncClient) -> str:
return """
CREATE TABLE IF NOT EXISTS "program" (
"id" INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
"version" VARCHAR(20) NOT NULL
);"""
async def downgrade(db: BaseDBAsyncClient) -> str:
return """
DROP TABLE IF EXISTS "program";"""

View File

@@ -14,6 +14,7 @@ from constants import (
) )
from settings import settings from settings import settings
from utils import aopen from utils import aopen
from version import VERSION
class FavoriteList(Model): class FavoriteList(Model):
@@ -40,15 +41,11 @@ class Upper(Model):
@property @property
def thumb_path(self) -> Path: def thumb_path(self) -> Path:
return ( return DEFAULT_THUMB_PATH / str(self.mid)[0] / f"{self.mid}" / "folder.jpg"
DEFAULT_THUMB_PATH / str(self.mid)[0] / f"{self.mid}" / "folder.jpg"
)
@property @property
def meta_path(self) -> Path: def meta_path(self) -> Path:
return ( return DEFAULT_THUMB_PATH / str(self.mid)[0] / f"{self.mid}" / "person.nfo"
DEFAULT_THUMB_PATH / str(self.mid)[0] / f"{self.mid}" / "person.nfo"
)
async def save_metadata(self): async def save_metadata(self):
async with aopen(self.meta_path, "w") as f: async with aopen(self.meta_path, "w") as f:
@@ -73,16 +70,12 @@ class FavoriteItem(Model):
id = fields.IntField(pk=True) id = fields.IntField(pk=True)
name = fields.CharField(max_length=255) name = fields.CharField(max_length=255)
type = fields.IntEnumField(enum_type=MediaType) type = fields.IntEnumField(enum_type=MediaType)
status = fields.IntEnumField( status = fields.IntEnumField(enum_type=MediaStatus, default=MediaStatus.NORMAL)
enum_type=MediaStatus, default=MediaStatus.NORMAL
)
bvid = fields.CharField(max_length=255) bvid = fields.CharField(max_length=255)
desc = fields.TextField() desc = fields.TextField()
cover = fields.TextField() cover = fields.TextField()
tags = fields.JSONField(null=True) tags = fields.JSONField(null=True)
favorite_list = fields.ForeignKeyField( favorite_list = fields.ForeignKeyField("models.FavoriteList", related_name="items")
"models.FavoriteList", related_name="items"
)
upper = fields.ForeignKeyField("models.Upper", related_name="uploads") upper = fields.ForeignKeyField("models.Upper", related_name="uploads")
ctime = fields.DatetimeField() ctime = fields.DatetimeField()
pubtime = fields.DatetimeField() pubtime = fields.DatetimeField()
@@ -100,38 +93,39 @@ class FavoriteItem(Model):
@property @property
def tmp_video_path(self) -> Path: def tmp_video_path(self) -> Path:
return ( return Path(settings.path_mapper[self.favorite_list_id]) / f"tmp_{self.bvid}_video"
Path(settings.path_mapper[self.favorite_list_id])
/ f"tmp_{self.bvid}_video"
)
@property @property
def tmp_audio_path(self) -> Path: def tmp_audio_path(self) -> Path:
return ( return Path(settings.path_mapper[self.favorite_list_id]) / f"tmp_{self.bvid}_audio"
Path(settings.path_mapper[self.favorite_list_id])
/ f"tmp_{self.bvid}_audio"
)
@property @property
def video_path(self) -> Path: def video_path(self) -> Path:
return ( return Path(settings.path_mapper[self.favorite_list_id]) / f"{self.bvid}.mp4"
Path(settings.path_mapper[self.favorite_list_id])
/ f"{self.bvid}.mp4"
)
@property @property
def nfo_path(self) -> Path: def nfo_path(self) -> Path:
return ( return Path(settings.path_mapper[self.favorite_list_id]) / f"{self.bvid}.nfo"
Path(settings.path_mapper[self.favorite_list_id])
/ f"{self.bvid}.nfo"
)
@property @property
def poster_path(self) -> Path: def poster_path(self) -> Path:
return ( return Path(settings.path_mapper[self.favorite_list_id]) / f"{self.bvid}-poster.jpg"
Path(settings.path_mapper[self.favorite_list_id])
/ f"{self.bvid}-poster.jpg" @property
) def upper_path(self) -> list[Path]:
return [
self.upper.thumb_path,
self.upper.meta_path,
]
@property
def subtitle_path(self) -> Path:
return Path(settings.path_mapper[self.favorite_list_id]) / f"{self.bvid}.zh-CN.default.ass"
class Program(Model):
id = fields.IntField(pk=True)
version = fields.CharField(max_length=20)
async def init_model() -> None: async def init_model() -> None:
@@ -143,3 +137,13 @@ async def init_model() -> None:
) )
process = await create_subprocess_exec(*migrate_commands) process = await create_subprocess_exec(*migrate_commands)
await process.communicate() await process.communicate()
program, created = await Program.get_or_create(
defaults={
"version": VERSION,
}
)
if created or program.version != VERSION:
# 把新版本的迁移逻辑写在这里
pass
program.version = VERSION
await program.save()

1169
poetry.lock generated

File diff suppressed because it is too large Load Diff

View File

@@ -3,10 +3,10 @@ import datetime
from asyncio import Semaphore, create_subprocess_exec from asyncio import Semaphore, create_subprocess_exec
from asyncio.subprocess import DEVNULL from asyncio.subprocess import DEVNULL
from bilibili_api import favorite_list, video from bilibili_api import ass, favorite_list, video
from bilibili_api.exceptions import ResponseCodeException from bilibili_api.exceptions import ResponseCodeException
from loguru import logger from loguru import logger
from tortoise import Tortoise from tortoise.connection import connections
from constants import FFMPEG_COMMAND, MediaStatus, MediaType from constants import FFMPEG_COMMAND, MediaStatus, MediaType
from credential import credential from credential import credential
@@ -20,7 +20,7 @@ anchor = datetime.date.today()
async def cleanup() -> None: async def cleanup() -> None:
await client.aclose() await client.aclose()
await Tortoise.close_connections() await connections.close_all()
def concurrent_decorator(concurrency: int) -> callable: def concurrent_decorator(concurrency: int) -> callable:
@@ -45,9 +45,7 @@ async def manage_model(medias: list[dict], fav_list: FavoriteList) -> None:
) )
for media in medias for media in medias
] ]
await Upper.bulk_create( await Upper.bulk_create(uppers, on_conflict=["mid"], update_fields=["name", "thumb"])
uppers, on_conflict=["mid"], update_fields=["name", "thumb"]
)
items = [ items = [
FavoriteItem( FavoriteItem(
name=media["title"], name=media["title"],
@@ -91,12 +89,7 @@ async def process() -> None:
except Exception: except Exception:
logger.exception("Failed to refresh credential.") logger.exception("Failed to refresh credential.")
return return
for favorite_id in settings.favorite_ids: for favorite_id in settings.path_mapper:
if favorite_id not in settings.path_mapper:
logger.warning(
f"Favorite {favorite_id} not in path mapper, ignored."
)
continue
await process_favorite(favorite_id) await process_favorite(favorite_id)
@@ -119,10 +112,8 @@ async def process_favorite(favorite_id: int) -> None:
while True: while True:
page += 1 page += 1
if page > 1: if page > 1:
favorite_video_list = ( favorite_video_list = await favorite_list.get_video_favorite_list_content(
await favorite_list.get_video_favorite_list_content( favorite_id, page=page, credential=credential
favorite_id, page=page, credential=credential
)
) )
# 先看看对应 bvid 的记录是否存在 # 先看看对应 bvid 的记录是否存在
existed_items = await FavoriteItem.filter( existed_items = await FavoriteItem.filter(
@@ -130,14 +121,10 @@ async def process_favorite(favorite_id: int) -> None:
bvid__in=[media["bvid"] for media in favorite_video_list["medias"]], bvid__in=[media["bvid"] for media in favorite_video_list["medias"]],
) )
# 记录一下获得的列表中的 bvid 和 fav_time # 记录一下获得的列表中的 bvid 和 fav_time
media_info = { media_info = {(media["bvid"], media["fav_time"]) for media in favorite_video_list["medias"]}
(media["bvid"], media["fav_time"])
for media in favorite_video_list["medias"]
}
# 如果有 bvid 和 fav_time 都相同的记录,说明已经到达了上次处理到的位置 # 如果有 bvid 和 fav_time 都相同的记录,说明已经到达了上次处理到的位置
continue_flag = not media_info & { continue_flag = not media_info & {
(item.bvid, int(item.fav_time.timestamp())) (item.bvid, int(item.fav_time.timestamp())) for item in existed_items
for item in existed_items
} }
await manage_model(favorite_video_list["medias"], fav_list) await manage_model(favorite_video_list["medias"], fav_list)
if not (continue_flag and favorite_video_list["has_more"]): if not (continue_flag and favorite_video_list["has_more"]):
@@ -149,28 +136,39 @@ async def process_favorite(favorite_id: int) -> None:
downloaded=False, downloaded=False,
).prefetch_related("upper") ).prefetch_related("upper")
await asyncio.gather( await asyncio.gather(
*[process_video(item) for item in all_unprocessed_items], *[process_favorite_item(item) for item in all_unprocessed_items],
return_exceptions=True, return_exceptions=True,
) )
logger.info("Favorite {} {} processed successfully.", favorite_id, title) logger.info("Favorite {} {} processed successfully.", favorite_id, title)
@concurrent_decorator(4) @concurrent_decorator(4)
async def process_video( async def process_favorite_item(
fav_item: FavoriteItem, fav_item: FavoriteItem,
process_poster=True, process_poster=True,
process_video=True, process_video=True,
process_nfo=True, process_nfo=True,
process_upper=True, process_upper=True,
process_subtitle=True,
) -> None: ) -> None:
logger.info("Start to process video {} {}", fav_item.bvid, fav_item.name) logger.info("Start to process video {} {}", fav_item.bvid, fav_item.name)
if fav_item.type != MediaType.VIDEO: if fav_item.type != MediaType.VIDEO:
logger.warning("Media {} is not a video, skipped.", fav_item.name) logger.warning("Media {} is not a video, skipped.", fav_item.name)
return return
v = video.Video(fav_item.bvid, credential=credential) v = video.Video(fav_item.bvid, credential=credential)
# 如果没有获取过 tags那么尝试获取一下
try: try:
if process_upper: if fav_item.tags is None:
# 写入 up 主头像 fav_item.tags = [_["tag_name"] for _ in await v.get_tags()]
except Exception:
logger.exception(
"Failed to get tags of video {} {}",
fav_item.bvid,
fav_item.name,
)
if process_upper:
try:
if not all( if not all(
await asyncio.gather( await asyncio.gather(
aexists(fav_item.upper.thumb_path), aexists(fav_item.upper.thumb_path),
@@ -180,9 +178,7 @@ async def process_video(
await amakedirs(fav_item.upper.thumb_path.parent, exist_ok=True) await amakedirs(fav_item.upper.thumb_path.parent, exist_ok=True)
await asyncio.gather( await asyncio.gather(
fav_item.upper.save_metadata(), fav_item.upper.save_metadata(),
download_content( download_content(fav_item.upper.thumb, fav_item.upper.thumb_path),
fav_item.upper.thumb, fav_item.upper.thumb_path
),
return_exceptions=True, return_exceptions=True,
) )
else: else:
@@ -191,20 +187,16 @@ async def process_video(
fav_item.upper.mid, fav_item.upper.mid,
fav_item.upper.name, fav_item.upper.name,
) )
if process_nfo: except Exception:
logger.exception(
"Failed to process upper {} {}",
fav_item.upper.mid,
fav_item.upper.name,
)
if process_nfo:
try:
if not await aexists(fav_item.nfo_path): if not await aexists(fav_item.nfo_path):
if fav_item.tags is None:
try:
fav_item.tags = [
_["tag_name"] for _ in await v.get_tags()
]
except Exception:
logger.exception(
"Failed to get tags of video {} {}",
fav_item.bvid,
fav_item.name,
)
# 写入 nfo
await EpisodeInfo( await EpisodeInfo(
title=fav_item.name, title=fav_item.name,
plot=fav_item.desc, plot=fav_item.desc,
@@ -224,17 +216,65 @@ async def process_video(
fav_item.bvid, fav_item.bvid,
fav_item.name, fav_item.name,
) )
if process_poster: except Exception:
# 写入 poster logger.exception(
"Failed to process nfo of video {} {}",
fav_item.bvid,
fav_item.name,
)
if process_poster:
try:
if not await aexists(fav_item.poster_path): if not await aexists(fav_item.poster_path):
await download_content(fav_item.cover, fav_item.poster_path) try:
await download_content(fav_item.cover, fav_item.poster_path)
except Exception:
logger.exception(
"Failed to download poster of video {} {}",
fav_item.bvid,
fav_item.name,
)
else: else:
logger.info( logger.info(
"Poster of {} {} already exists, skipped.", "Poster of {} {} already exists, skipped.",
fav_item.bvid, fav_item.bvid,
fav_item.name, fav_item.name,
) )
if process_video: except Exception:
logger.exception(
"Failed to process poster of video {} {}",
fav_item.bvid,
fav_item.name,
)
if process_subtitle:
try:
if not await aexists(fav_item.subtitle_path):
await ass.make_ass_file_danmakus_protobuf(
v,
0,
str(fav_item.subtitle_path.resolve()),
credential=credential,
font_name=settings.subtitle.font_name,
font_size=settings.subtitle.font_size,
alpha=settings.subtitle.alpha,
fly_time=settings.subtitle.fly_time,
static_time=settings.subtitle.static_time,
)
else:
logger.info(
"Subtitle of {} {} already exists, skipped.",
fav_item.bvid,
fav_item.name,
)
except Exception:
logger.exception(
"Failed to process subtitle of video {} {}",
fav_item.bvid,
fav_item.name,
)
if process_video:
try:
if await aexists(fav_item.video_path): if await aexists(fav_item.video_path):
fav_item.downloaded = True fav_item.downloaded = True
logger.info( logger.info(
@@ -247,11 +287,9 @@ async def process_video(
detector = video.VideoDownloadURLDataDetecter( detector = video.VideoDownloadURLDataDetecter(
await v.get_download_url(page_index=0) await v.get_download_url(page_index=0)
) )
streams = detector.detect_best_streams() streams = detector.detect_best_streams(codecs=settings.codec)
if detector.check_flv_stream(): if detector.check_flv_stream():
await download_content( await download_content(streams[0].url, fav_item.tmp_video_path)
streams[0].url, fav_item.tmp_video_path
)
process = await create_subprocess_exec( process = await create_subprocess_exec(
FFMPEG_COMMAND, FFMPEG_COMMAND,
"-i", "-i",
@@ -264,12 +302,8 @@ async def process_video(
fav_item.tmp_video_path.unlink() fav_item.tmp_video_path.unlink()
else: else:
await asyncio.gather( await asyncio.gather(
download_content( download_content(streams[0].url, fav_item.tmp_video_path),
streams[0].url, fav_item.tmp_video_path download_content(streams[1].url, fav_item.tmp_audio_path),
),
download_content(
streams[1].url, fav_item.tmp_audio_path
),
) )
process = await create_subprocess_exec( process = await create_subprocess_exec(
FFMPEG_COMMAND, FFMPEG_COMMAND,
@@ -287,34 +321,31 @@ async def process_video(
fav_item.tmp_video_path.unlink() fav_item.tmp_video_path.unlink()
fav_item.tmp_audio_path.unlink() fav_item.tmp_audio_path.unlink()
fav_item.downloaded = True fav_item.downloaded = True
logger.info( except ResponseCodeException as e:
"{} {} processed successfully.", match e.code:
fav_item.bvid, case 62002:
fav_item.name, fav_item.status = MediaStatus.INVISIBLE
) case -404:
except ResponseCodeException as e: fav_item.status = MediaStatus.DELETED
match e.code: case _:
case 62002: logger.exception(
fav_item.status = MediaStatus.INVISIBLE "Failed to process video {} {}, error_code: {}",
case -404: fav_item.bvid,
fav_item.status = MediaStatus.DELETED fav_item.name,
case _: e.code,
logger.exception( )
"Failed to process video {} {}, error_code: {}", if fav_item.status != MediaStatus.NORMAL:
logger.error(
"Video {} {} is not available, marked as {}",
fav_item.bvid, fav_item.bvid,
fav_item.name, fav_item.name,
e.code, fav_item.status.text,
) )
return except Exception:
logger.error( logger.exception("Failed to process video {} {}", fav_item.bvid, fav_item.name)
"Video {} {} is not available, marked as {}", await fav_item.save()
fav_item.bvid, logger.info(
fav_item.name, "{} {} is processed successfully.",
fav_item.status.text, fav_item.bvid,
) fav_item.name,
except Exception: )
logger.exception(
"Failed to process video {} {}", fav_item.bvid, fav_item.name
)
finally:
await fav_item.save()

View File

@@ -1,6 +1,6 @@
[tool.poetry] [tool.poetry]
name = "bili-sync" name = "bili-sync"
version = "1.0.1" version = "1.1.4"
description = "" description = ""
authors = ["amtoaer <amtoaer@gmail.com>"] authors = ["amtoaer <amtoaer@gmail.com>"]
license = "GPL-3.0" license = "GPL-3.0"
@@ -8,24 +8,26 @@ readme = "README.md"
[tool.poetry.dependencies] [tool.poetry.dependencies]
python = "^3.11" python = "^3.11"
bilibili-api-python = { git = "https://github.com/amtoaer/bilibili-api.git", rev = "dev" }
dataclasses-json = "0.6.2"
tortoise-orm = "0.20.0"
loguru = "0.7.2"
uvloop = "0.19.0"
aiofiles = "23.2.1"
aerich = "0.7.2" aerich = "0.7.2"
aiofiles = "23.2.1"
bilibili-api-python = {git = "https://github.com/amtoaer/bilibili-api", rev = "dev"}
dataclasses-json = "0.6.2"
loguru = "0.7.2"
pydantic = "2.5.3"
tortoise-orm = "0.20.0"
uvloop = "0.19.0"
[tool.poetry.group.dev.dependencies] [tool.poetry.group.dev.dependencies]
black = "23.11.0" black = "23.11.0"
ruff = "0.1.6" bump-my-version = "0.15.4"
ipython = "8.17.2" ipython = "8.17.2"
ruff = "0.1.6"
[tool.black] [tool.black]
line-length = 80 line-length = 100
[tool.ruff] [tool.ruff]
line-length = 80 line-length = 100
select = [ select = [
"F", # https://beta.ruff.rs/docs/rules/#pyflakes-f "F", # https://beta.ruff.rs/docs/rules/#pyflakes-f
"E", "E",
@@ -60,6 +62,23 @@ tortoise_orm = "constants.TORTOISE_ORM"
location = "./migrations" location = "./migrations"
src_folder = "./." src_folder = "./."
[tool.bumpversion]
commit = true
message = "chore: bump version from {current_version} to {new_version}"
tag = true
tag_name = "{new_version}"
tag_message = ""
current_version = "1.1.4"
parse = "(?P<major>\\d+)\\.(?P<minor>\\d+)\\.(?P<patch>\\d+)"
[[tool.bumpversion.files]]
filename = "version.py"
[[tool.bumpversion.files]]
filename = "pyproject.toml"
[build-system] [build-system]
requires = ["poetry-core"] requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api" build-backend = "poetry.core.masonry.api"

View File

@@ -1,63 +1,73 @@
from dataclasses import dataclass, field, fields
from pathlib import Path from pathlib import Path
from typing import Self
from dataclasses_json import DataClassJsonMixin from bilibili_api.video import VideoCodecs
from pydantic import BaseModel, Field, field_validator
from pydantic_core import PydanticCustomError
from typing_extensions import Annotated
from constants import DEFAULT_CONFIG_PATH from constants import DEFAULT_CONFIG_PATH
@dataclass class SubtitleConfig(BaseModel):
class Config(DataClassJsonMixin): font_name: str = "微软雅黑,黑体" # 字体
sessdata: str = "" font_size: float = 40 # 字号
bili_jct: str = "" alpha: float = 0.8 # 透明度
buvid3: str = "" fly_time: float = 5 # 滚动弹幕持续时间
dedeuserid: str = "" static_time: float = 10 # 静态弹幕持续时间
ac_time_value: str = ""
interval: int = 20
favorite_ids: list[int] = field(default_factory=list)
path_mapper: dict[int, str] = field(default_factory=dict)
def validate(self) -> Self:
"""所有值必须被设置""" class Config(BaseModel):
if not all(getattr(self, f.name) for f in fields(self)): sessdata: Annotated[str, Field(min_length=1)] = ""
raise ValueError("Some config values are not set.") bili_jct: Annotated[str, Field(min_length=1)] = ""
return self buvid3: Annotated[str, Field(min_length=1)] = ""
dedeuserid: Annotated[str, Field(min_length=1)] = ""
ac_time_value: Annotated[str, Field(min_length=1)] = ""
interval: int = 20
path_mapper: dict[int, str] = Field(default_factory=dict)
subtitle: SubtitleConfig = Field(default_factory=SubtitleConfig)
codec: list[VideoCodecs] = Field(
default_factory=lambda: [
VideoCodecs.AV1,
VideoCodecs.AVC,
VideoCodecs.HEV,
],
min_length=1,
)
@field_validator("codec", mode="after")
def codec_validator(cls, codecs: list[VideoCodecs]) -> list[VideoCodecs]:
if len(codecs) != len(set(codecs)):
raise PydanticCustomError("unique_list", "List must be unique")
return codecs
@staticmethod @staticmethod
def load(path: Path | None = None) -> Self: def load(path: Path | None = None) -> "Config":
if not path: if not path:
path = DEFAULT_CONFIG_PATH path = DEFAULT_CONFIG_PATH
try: try:
with path.open("r") as f: with path.open("r") as f:
return Config.schema().loads(f.read()) return Config.model_validate_json(f.read())
except Exception as e: except Exception as e:
raise RuntimeError(f"Failed to load config file: {path}") from e raise RuntimeError(f"Failed to load config file: {path}") from e
def save(self, path: Path | None = None) -> Self: def save(self, path: Path | None = None) -> "Config":
if not path: if not path:
path = DEFAULT_CONFIG_PATH path = DEFAULT_CONFIG_PATH
try: try:
path.parent.mkdir(parents=True, exist_ok=True) path.parent.mkdir(parents=True, exist_ok=True)
with path.open("w") as f: with path.open("w") as f:
f.write( f.write(Config.model_dump_json(self, indent=4))
Config.schema().dumps(self, indent=4, ensure_ascii=False)
)
return self return self
except Exception as e: except Exception as e:
raise RuntimeError(f"Failed to save config file: {path}") from e raise RuntimeError(f"Failed to save config file: {path}") from e
def init_settings() -> Config: def init_settings() -> Config:
return ( if not DEFAULT_CONFIG_PATH.exists():
( # 配置文件不存在的情况下,写入空的默认值
Config.load(DEFAULT_CONFIG_PATH) Config().save(DEFAULT_CONFIG_PATH)
if DEFAULT_CONFIG_PATH.exists() # 读取配置文件,校验出错会抛出异常,校验通过则重新保存一下配置文件(写入新配置项的默认值)
else Config() return Config.load(DEFAULT_CONFIG_PATH).save()
)
.save(DEFAULT_CONFIG_PATH)
.validate()
)
settings = init_settings() settings = init_settings()

1
version.py Normal file
View File

@@ -0,0 +1 @@
VERSION = "1.1.4"