From 5fc3b4950b94daafca21190c48023e2e572a24f0 Mon Sep 17 00:00:00 2001 From: carry Date: Sun, 20 Apr 2025 18:40:51 +0800 Subject: [PATCH] =?UTF-8?q?refactor(schema):=20=E4=BF=AE=E6=94=B9=20LLMRes?= =?UTF-8?q?ponse=20=E4=B8=AD=20API=20=E5=93=8D=E5=BA=94=E5=86=85=E5=AE=B9?= =?UTF-8?q?=E7=9A=84=E5=AD=97=E6=AE=B5=E5=90=8D=E7=A7=B0?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - 将 LLMResponse 类中的 response_content 字段重命名为 content - 更新字段类型从 dict 改为 str,以更准确地表示响应内容 - 在 reasoning.py 中相应地修改了调用 LLMResponse 时的参数 --- schema/dataset_generation.py | 2 +- tools/reasoning.py | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/schema/dataset_generation.py b/schema/dataset_generation.py index 2940f2d..f5d666a 100644 --- a/schema/dataset_generation.py +++ b/schema/dataset_generation.py @@ -33,7 +33,7 @@ class LLMResponse(SQLModel): ) response_id: str = Field(..., description="响应的唯一ID") tokens_usage: TokensUsage = Field(default_factory=TokensUsage, description="token使用信息") - response_content: dict = Field(default_factory=dict, description="API响应的内容") + content: str = Field(default="", description="API响应的内容") total_duration: float = Field(default=0.0, description="请求的总时长,单位为秒") llm_parameters: Optional[LLMParameters] = Field(default=None, description="LLM参数") diff --git a/tools/reasoning.py b/tools/reasoning.py index ee632be..95e254d 100644 --- a/tools/reasoning.py +++ b/tools/reasoning.py @@ -68,7 +68,7 @@ async def call_openai_api(llm_request: LLMRequest, rounds: int = 1, llm_paramete llm_request.response.append(LLMResponse( response_id=response.id, tokens_usage=tokens_usage, - response_content={"content": response.choices[0].message.content}, + content=response.choices[0].message.content, total_duration=duration, llm_parameters=llm_parameters )) @@ -79,7 +79,7 @@ async def call_openai_api(llm_request: LLMRequest, rounds: int = 1, llm_paramete llm_request.response.append(LLMResponse( 
response_id=f"error-round-{i+1}", - response_content={"error": str(e)}, + content=str(e), total_duration=duration )) if llm_request.error is None: @@ -120,4 +120,4 @@ if __name__ == "__main__": print(f"\n3次调用结果 - 总耗时: {result.total_duration:.2f}s") print(f"总token使用: prompt={result.total_tokens_usage.prompt_tokens}, completion={result.total_tokens_usage.completion_tokens}") for i, resp in enumerate(result.response, 1): - print(f"响应{i}: {resp.response_content}") \ No newline at end of file + print(f"响应{i}: {resp.content}") \ No newline at end of file