feat: adjust prompt; make GPT request read config from file; add recentMessageQuantityForLlm config

This commit is contained in:
geekgeekrun
2025-04-11 23:42:32 +08:00
parent 85e27dc6ef
commit 27bc63f63f
6 changed files with 111 additions and 74 deletions

View File

@@ -11,6 +11,7 @@
"throttleIntervalMinutes": 10,
"rechatLimitDay": 21,
"geminiApiKey": "",
"rechatContentSource": 1
"rechatContentSource": 1,
"recentMessageQuantityForLlm": 8
}
}

View File

@@ -1,6 +1,7 @@
import { Page } from 'puppeteer'
import { sleepWithRandomDelay, sleep } from '@geekgeekrun/utils/sleep.mjs';
import { sleepWithRandomDelay, sleep } from '@geekgeekrun/utils/sleep.mjs'
import { completes } from '@geekgeekrun/utils/gpt-request.mjs'
import { readConfigFile } from '@geekgeekrun/geek-auto-start-chat-with-boss/runtime-file-utils.mjs'
export const sendLookForwardReplyEmotion = async (page: Page) => {
const emotionEntryButtonProxy = await page.$('.chat-conversation .message-controls .btn-emotion')
@@ -17,20 +18,57 @@ export const sendLookForwardReplyEmotion = async (page: Page) => {
await lookForwardReplyEmojiProxy!.click()
}
const resumeContent = ``
// let _index = 0
export const sendGptContent = async (page: Page, chatRecords) => {
const chatList = [
{
role: 'system',
content:
'你是一个求职消息发送机器人正在帮助一位求职者在Boss直聘上寻找一份工作。求职者需要向招聘者聊天得到招聘者的回复后方能获得一次投递简历的机会。你需要“从求职者简历中提取到的信息来生成一个要发送给招聘者的消息”。同时你需要注意每次发送的内容一定要接续之前发送的内容最好不要和已发过的内容重复。求职者简历信息如下' +
``
content: `
**核心指令:**
你是一个智能求职助手需要根据用户简历生成30字左右的提醒消息满足以下要求
1. 每次生成需满足:
- √ 包含1个核心技能 + 1个成果量化
- √ 使用不同句式模板至少准备5种
- √ 谦虚一些,头衔、工作年限等在历史记录信息中出现一次就好
- ✗ 禁止与最近发送的几条相似或雷同
- ✗ 禁止使用专业术语堆砌
- ✗ 严禁出现简历之外的词语
**简历分析层:**
请从以下简历内容中提取关键要素:\n${resumeContent}\n
---
要求提取:
1. 硬技能:编程语言/技术栈/工具证书等至少提取5项
2. 项目经历与成果业绩、带量化数据的结果至少3条
3. 软技能:沟通/管理等至少2项
4. 特殊成就:奖项/专利等(可选)
**消息生成层:**
根据上述要素随机组合生成消息
**质量控制层:**
每次生成前执行:
1. 检查历史记录
2. 确保技能/成果组合未重复
3. 所生成的新消息严禁包含最近8条已经发过的内容包括但不限于职位名称
4. 字数严格控制在10-40字
5. 避免感叹号等激进符号
6. 减少头衔“资深”、“高级”出现的频率严禁出现“专家”、“老兵”减少工作年限“x年”出现的频率
**输出格式:**
请确保仅回复一句话以JSON响应不要包含其他解释或内容数据结构参考\`{"response": "这里是将会发送给招聘者的内容"}\``
}
]
chatList.push({
role: 'user',
content:
'请帮我写一句开场白。请确保仅响应一句话以JSON响应数据结构参考`{"response": "这里是将会发送给招聘者的内容"}`'
'请根据我的简历,帮我写一句谦逊有礼貌的开场白。开头包含“您好”等类似敬语、结尾包含“期待回复”等类似话术。不必包含简历中的具体内容,但需要表达出应聘意向。请确保仅响应一句话以JSON响应数据结构参考`{"response": "这里是将会发送给招聘者的内容"}`'
})
// chatRecords = chatRecords.slice(chatRecords.length - _index)
// debugger
for (const record of chatRecords) {
const assistantJsonContent = JSON.stringify({
response: record.text
@@ -42,20 +80,25 @@ export const sendGptContent = async (page: Page, chatRecords) => {
chatList.push({
role: 'user',
content:
'请根据接续之前你所回复的内容,根据我的简历,写一句自我介绍,注意尽量不要和之前的聊天内容重复。请确保仅响应一句话以JSON响应数据结构参考`{"response": "这里是将会发送给招聘者的内容"}`'
'围绕我简历中关于自我介绍、技术栈、工作经历、项目描述、项目业绩等内容,写一句自我介绍。开头不必包含“您好”、结尾不必包含“期待回复”;务必确保本次所回复的内容不能与之前所回复的内容雷同或相似。请确保仅回复一句话以JSON响应,不要包含其他解释或内容;数据结构参考:`{"response": "这里是将会发送给招聘者的内容"}`'
})
}
console.log(chatList)
debugger
const res = await completes(chatList)
const llmConfig = await readConfigFile('llm.json')
const res = await completes(
{
baseURL: llmConfig.providerCompleteApiUrl,
apiKey: llmConfig.providerApiSecret,
model: llmConfig.model
},
chatList
)
console.log(res)
// _index++
let textToSend
try {
const rawMarkdownText = res?.message?.content
textToSend = JSON.parse(
rawMarkdownText.replace(/^```json/m, '').replace(/```$/m, '')
)?.response
textToSend = JSON.parse(rawMarkdownText.replace(/^```json/m, '').replace(/```$/m, ''))?.response
if (!textToSend) {
throw new Error(`empty content. ${err?.message} ${res?.message?.content}`)
}
@@ -67,12 +110,9 @@ export const sendGptContent = async (page: Page, chatRecords) => {
await chatInputHandle.click()
await sleep(500)
await chatInputHandle.click()
await chatInputHandle.type(
textToSend,
{
delay: 50
}
)
await chatInputHandle.type(textToSend, {
delay: 50
})
await sleep(1000)
const sendButtonSelector = `.chat-conversation .message-controls .chat-op .btn-send:not(.disabled)`
await page.click(sendButtonSelector)

View File

@@ -21,6 +21,9 @@ import { messageForSaveFilter } from '../../../common/utils/chat-list'
const throttleIntervalMinutes =
readConfigFile('boss.json').autoReminder?.throttleIntervalMinutes ?? 10
const rechatLimitDay = readConfigFile('boss.json').autoReminder?.rechatLimitDay ?? 21
const recentMessageQuantityForLlm =
readConfigFile('boss.json').autoReminder?.recentMessageQuantityForLlm ?? 8
const dbInitPromise = initDb(getPublicDbFilePath())
export const pageMapByName: {
@@ -281,10 +284,10 @@ const mainLoop = async () => {
// eslint-disable-next-line no-constant-condition
if (1 + 1 === 2) {
try {
const messageListForGpt = historyMessageList.filter(it => it.bizType !== 101 && it.isSelf)
debugger
const messageListForGpt = historyMessageList.filter(it => it.bizType !== 101 && it.isSelf).slice(-recentMessageQuantityForLlm)
await sendGptContent(pageMapByName.boss!, messageListForGpt)
} catch (err) {
console.log(err)
await sendLookForwardReplyEmotion(pageMapByName.boss!)
}
} else {

View File

@@ -115,20 +115,21 @@ const llmPresetList: {
providerCompleteApiUrl: 'https://api.deepseek.com'
}
},
{
name: '通过 Ollama 部署的 DeepSeek-R114B模型',
config: {
model: 'deepseek-r1:14b',
providerApiSecret: 'ollama',
providerCompleteApiUrl: 'http://127.0.0.1:11434'
}
},
// TODO:
// {
// name: '通过 Ollama 部署的 DeepSeek-R114B模型',
// config: {
// model: 'deepseek-r1:14b',
// providerApiSecret: 'ollama',
// providerCompleteApiUrl: 'http://127.0.0.1:11434/v1'
// }
// },
{
name: '通过 Ollama 部署的 Qwen2.57B模型',
config: {
model: 'qwen2.5:7b',
providerApiSecret: 'ollama',
providerCompleteApiUrl: 'http://127.0.0.1:11434'
providerCompleteApiUrl: 'http://127.0.0.1:11434/v1'
}
}
]

View File

@@ -37,7 +37,7 @@
RECHAT_CONTENT_SOURCE.GEMINI_WITH_CHAT_CONTEXT
"
>
<el-form-item prop="geminiApiKey">
<el-form-item>
<div class="flex flex-items-center">
<el-button size="small" type="primary" @click="handleClickConfigLlm">
配置大语言模型
@@ -53,34 +53,21 @@
</div>
</div>
</el-form-item>
<el-form-item prop="recentMessageQuantityForLlm">
<div>
携带最近
<el-input-number
v-model="formContent.autoReminder.recentMessageQuantityForLlm"
class="w-120px"
:min="8"
:max="20"
:precision="0"
:step="1"
></el-input-number>
次聊天内容作为上下文生成新消息
</div>
</el-form-item>
</template>
<!-- <el-form-item
v-if="
formContent.autoReminder.rechatContentSource ===
RECHAT_CONTENT_SOURCE.GEMINI_WITH_CHAT_CONTEXT
"
prop="geminiApiKey"
>
<div class="flex">
Gemini API 密钥&nbsp;<el-button type="text" @click.prevent="goToGeminiNanoApiKeyPage">
没有密钥?点击此处申请一个
</el-button>
</div>
<el-input v-model="formContent.autoReminder.geminiApiKey" />
</el-form-item>
<el-form-item
v-if="
formContent.autoReminder.rechatContentSource ===
RECHAT_CONTENT_SOURCE.GEMINI_WITH_CHAT_CONTEXT
"
prop="resumeAbstract"
>
<div class="w-full">
<div class="flex">简历、求职期望摘要&nbsp;<el-button type="text">例子</el-button></div>
<el-input type="textarea" v-model="formContent.autoReminder.resumeAbstract" />
</div>
<el-button class="mt-8px">预览Prompt</el-button>
</el-form-item> -->
</div>
<el-form-item label="跟进间隔(分钟)" prop="throttleIntervalMinutes">
<el-input-number
@@ -127,9 +114,8 @@ const formContent = ref({
autoReminder: {
throttleIntervalMinutes: 10,
rechatLimitDay: 21,
geminiApiKey: '',
rechatContentSource: 1,
resumeAbstract: ''
recentMessageQuantityForLlm: 8
}
})
@@ -150,10 +136,15 @@ electron.ipcRenderer.invoke('fetch-config-file-content').then((res) => {
const conf = res.config['boss.json']?.autoReminder || {}
conf.throttleIntervalMinutes = conf.throttleIntervalMinutes ?? 10
conf.rechatLimitDay = conf.rechatLimitDay ?? 21
conf.geminiApiKey = conf.geminiApiKey ?? ''
conf.rechatContentSource = conf.rechatContentSource ?? 1
conf.resumeAbstract = conf.resumeAbstract ?? ''
conf.recentMessageQuantityForLlm =
typeof conf.recentMessageQuantityForLlm === 'number'
? conf.recentMessageQuantityForLlm > 20
? 20
: conf.recentMessageQuantityForLlm < 8
? 8
: parseInt(conf.recentMessageQuantityForLlm)
: 8
formContent.value.autoReminder = conf
})
@@ -175,10 +166,6 @@ const formRules = {
cb()
}
}
},
geminiApiKey: {
required: true,
message: '请输入 Gemini API Key'
}
}

View File

@@ -1,19 +1,24 @@
import OpenAI from "openai";
const GPT_API_KEY = `sk-40fdef46fee24402bc05311304fce7a1`
export async function completes(messages) {
export async function completes(
{
baseURL,
apiKey,
model
},
messages
) {
const openai = new OpenAI({
baseURL: 'https://api.deepseek.com',
apiKey: GPT_API_KEY
baseURL,
apiKey,
});
const completion = await openai.chat.completions.create({
messages,
model: "deepseek-chat",
model,
frequency_penalty: 0,
max_tokens: 100,
temperature: 0.2
temperature: 0.1
});
console.log(completion.choices[0].message.content);