diff --git a/schema/dataset_generation.py b/schema/dataset_generation.py
index 2940f2d..f5d666a 100644
--- a/schema/dataset_generation.py
+++ b/schema/dataset_generation.py
@@ -33,7 +33,7 @@ class LLMResponse(SQLModel):
     )
     response_id: str = Field(..., description="响应的唯一ID")
     tokens_usage: TokensUsage = Field(default_factory=TokensUsage, description="token使用信息")
-    response_content: dict = Field(default_factory=dict, description="API响应的内容")
+    content: str = Field(default="", description="API响应的内容")
     total_duration: float = Field(default=0.0, description="请求的总时长,单位为秒")
     llm_parameters: Optional[LLMParameters] = Field(default=None, description="LLM参数")
 
diff --git a/tools/reasoning.py b/tools/reasoning.py
index ee632be..95e254d 100644
--- a/tools/reasoning.py
+++ b/tools/reasoning.py
@@ -68,7 +68,7 @@ async def call_openai_api(llm_request: LLMRequest, rounds: int = 1, llm_paramete
         llm_request.response.append(LLMResponse(
             response_id=response.id,
             tokens_usage=tokens_usage,
-            response_content={"content": response.choices[0].message.content},
+            content=response.choices[0].message.content,
             total_duration=duration,
             llm_parameters=llm_parameters
         ))
@@ -79,7 +79,7 @@ async def call_openai_api(llm_request: LLMRequest, rounds: int = 1, llm_paramete
 
         llm_request.response.append(LLMResponse(
             response_id=f"error-round-{i+1}",
-            response_content={"error": str(e)},
+            content=str(e),
             total_duration=duration
         ))
         if llm_request.error is None:
@@ -120,4 +120,4 @@ if __name__ == "__main__":
     print(f"\n3次调用结果 - 总耗时: {result.total_duration:.2f}s")
     print(f"总token使用: prompt={result.total_tokens_usage.prompt_tokens}, completion={result.total_tokens_usage.completion_tokens}")
     for i, resp in enumerate(result.response, 1):
-        print(f"响应{i}: {resp.response_content}")
\ No newline at end of file
+        print(f"响应{i}: {resp.content}")
\ No newline at end of file