feat(tools): 支持 OpenAI API 的 JSON 格式返回结果

- 在 call_openai_api 函数中添加对 JSON 格式返回结果的支持
- 增加 llm_request.format 参数处理,将用户 prompt 与格式要求合并
- 添加 response_format 参数到 OpenAI API 请求
- 更新示例,使用 JSON 格式返回结果
This commit is contained in:
carry 2025-04-19 21:10:22 +08:00
parent 1e829c9268
commit 5a21c8598a

View File

@@ -16,21 +16,30 @@ async def call_openai_api(llm_request: LLMRequest, rounds: int = 1, llm_paramete
total_duration = 0.0
total_tokens = TokensUsage()
prompt = llm_request.prompt
round_start = datetime.now(timezone.utc)
if llm_request.format:
prompt += "\n请以JSON格式返回结果" + llm_request.format
for i in range(rounds):
round_start = datetime.now(timezone.utc)
try:
round_start = datetime.now(timezone.utc) messages = [{"role": "user", "content": prompt}]
messages = [{"role": "user", "content": llm_request.prompt}] create_args = {
response = await client.chat.completions.create( "model": llm_request.api_provider.model_id,
model=llm_request.api_provider.model_id, "messages": messages,
messages=messages, "temperature": llm_parameters.temperature if llm_parameters else None,
temperature=llm_parameters.temperature if llm_parameters else None, "max_tokens": llm_parameters.max_tokens if llm_parameters else None,
max_tokens=llm_parameters.max_tokens if llm_parameters else None, "top_p": llm_parameters.top_p if llm_parameters else None,
top_p=llm_parameters.top_p if llm_parameters else None, "frequency_penalty": llm_parameters.frequency_penalty if llm_parameters else None,
frequency_penalty=llm_parameters.frequency_penalty if llm_parameters else None, "presence_penalty": llm_parameters.presence_penalty if llm_parameters else None,
presence_penalty=llm_parameters.presence_penalty if llm_parameters else None, "seed": llm_parameters.seed if llm_parameters else None
seed=llm_parameters.seed if llm_parameters else None } # 处理format参数
)
if llm_request.format:
create_args["response_format"] = {"type": "json_object"}
response = await client.chat.completions.create(**create_args)
round_end = datetime.now(timezone.utc)
duration = (round_end - round_start).total_seconds()
@@ -84,15 +93,19 @@ async def call_openai_api(llm_request: LLMRequest, rounds: int = 1, llm_paramete
return llm_request
if __name__ == "__main__":
from json_example import generate_example_json
from sqlmodel import Session, select
from global_var import get_sql_engine, init_global_var
from schema import dataset_item
init_global_var("workdir")
api_state = "1 deepseek-chat"
with Session(get_sql_engine()) as session:
api_provider = session.exec(select(APIProvider).where(APIProvider.id == int(api_state.split(" ")[0]))).first()
llm_request = LLMRequest(
prompt="你好,世界!", prompt="测试,随便说点什么",
api_provider=api_provider api_provider=api_provider,
format=generate_example_json(dataset_item)
)
# # 单次调用示例