Compare commits

8 Commits: 8edae2b862 ... 7c3aa12cd4

7c3aa12cd4
342f180639
f6beeb8788
ef8b1a5115
b4da95e81e
728c38dd2d
083542069c
7dad189b8c
@@ -1,13 +1,13 @@
 % 摘要
 \begin{center}
 {\zihao{3}\textbf{毕业论文系统设计}}\par
-{\zihao{-4}\songti 计算机科学与技术 \quad 专业 \quad 计科211(创) \quad 张三 \par
-指导教师:李四教授}
+{\zihao{-4}\songti \major \quad 专业 \quad \classname \quad \studentname \par
+指导教师:\adviser}
 \end{center}

 % 中文摘要
 \begin{onecolabstract}
-\noindent{}\makebox[5em][l]{{\zihao{4}\textbf{摘要}}}{\songti \zihao{-4}大型语言模型(LLMs)在通用代码生成任务中表现出色,但在处理包含专有知识的企业私有代码库时,其性能往往受限。针对此问题,本文提出并实现了一个基于文档驱动的自适应编码大模型微调框架。该框架的核心创新在于:首先,通过深度解析技术文档(以Markdown格式为例),自动抽取关键信息(如函数签名、类定义、用法示例等)并结合预设模板生成高质量的指令微调(SFT)训练语料;其次,利用参数高效微调技术(如QLoRA)对预训练的编码大模型(以Qwen为例)进行针对性优化,使其精准适配私有库的特定语法、结构和编程范式;最后,整合了包括数据持久化(SQLite+TinyDB)、训练监控(TensorBoard)和交互式前端(Gradio)在内的完整工作流。实验结果表明,该框架能够有效提升大模型在私有库代码生成任务上的准确性和实用性,显著减少对人工标注的依赖,为实现企业级软件开发的智能化和高效化提供了一套自动化、可扩展的解决方案。
+\noindent{}\makebox[5em][l]{{\zihao{4}\textbf{摘要}}}{\songti \zihao{-4}大型语言模型(LLMs)在通用代码生成任务中表现出色,但在处理包含专有知识的企业私有代码库时,其性能往往受限。针对此问题,本文提出并实现了一个基于文档驱动的自适应编码大模型微调框架。该框架的核心创新在于:首先,通过深度解析技术文档(Markdown格式),自动抽取信息并结合预设模板生成高质量的指令微调(SFT)训练语料;其次,利用参数高效微调技术(如QLoRA)对预训练的编码大模型(以qwen2.5为例)进行针对性优化,使其精准适配私有库的特定语法、结构和编程范式;最后,整合了包括数据持久化(SQLite+TinyDB)、训练监控(TensorBoard)和交互式前端(Gradio)在内的完整工作流。实验结果表明,该框架能够有效提升大模型在私有库代码生成任务上的准确性和实用性,为实现企业级软件开发的智能化和高效化提供了一套自动化、可扩展的解决方案。
 }\par
 \noindent{}\makebox[5em][l]{{\zihao{4}\textbf{关键词}}}{\zihao{-4}\songti 大型语言模型; 代码生成; 模型微调; 参数高效微调; QLoRA; 文档驱动; 自动化; 私有库; 自然语言处理; Gradio
 }\par
@@ -15,7 +15,7 @@

 % 英文摘要
 \begin{onecolabstract}
-\noindent{}\makebox[10em][l]{{\zihao{4} \textbf{ABSTRACT}}}{\zihao{-4}Large Language Models (LLMs) excel in general code generation tasks, but their performance is often limited when handling enterprise private code repositories containing proprietary knowledge. To address this issue, this paper proposes and implements a document-driven adaptive fine-tuning framework for large code models. The core innovations of this framework are: first, by deeply parsing technical documentation (using Markdown format as an example), it automatically extracts key information (such as function signatures, class definitions, usage examples, etc.) and combines them with preset templates to generate high-quality instruction fine-tuning (SFT) training data; second, it utilizes parameter-efficient fine-tuning techniques (such as QLoRA) to specifically optimize a pre-trained large code model (taking Qwen as an example), enabling it to accurately adapt to the specific syntax, structure, and programming paradigms of the private library; finally, it integrates a complete workflow including data persistence (SQLite+TinyDB), training monitoring (TensorBoard), and an interactive frontend (Gradio). Experimental results demonstrate that this framework can effectively improve the accuracy and practicality of large models in private library code generation tasks, significantly reduce reliance on manual annotation, and provide an automated, scalable solution for intelligent and efficient enterprise software development.
+\noindent{}\makebox[10em][l]{{\zihao{4} \textbf{ABSTRACT}}}{\zihao{-4}Large Language Models (LLMs) excel in general code generation tasks, but their performance is often limited when handling enterprise private code repositories containing proprietary knowledge. To address this issue, this paper proposes and implements a document-driven adaptive fine-tuning framework for large code models. The core innovations of this framework are: first, by deeply parsing technical documentation (Markdown format), it automatically extracts information and combines it with preset templates to generate high-quality instruction fine-tuning (SFT) training data; second, it utilizes parameter-efficient fine-tuning techniques (such as QLoRA) to specifically optimize a pre-trained large code model (taking qwen2.5 as an example), enabling it to accurately adapt to the specific syntax, structure, and programming paradigms of the private library; finally, it integrates a complete workflow including data persistence (SQLite+TinyDB), training monitoring (TensorBoard), and an interactive frontend (Gradio). Experimental results demonstrate that this framework can effectively improve the accuracy and practicality of large models in private library code generation tasks, and provide an automated, scalable solution for intelligent and efficient enterprise software development.
 }\par
 \noindent{}\makebox[10em][l]{{\zihao{4}\textbf{KEYWORDS}}}{\zihao{-4}Large Language Models; Code Generation; Model Fine-tuning; Parameter-Efficient Fine-tuning; QLoRA; Document-Driven; Automation; Private Library; Natural Language Processing; Gradio
 }\par
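The corpus-generation step described in both abstracts (parse Markdown docs, extract key information, fill preset templates to produce SFT records) can be illustrated with a minimal sketch. Everything below is an assumption for illustration — the file names, JSON schema, and instruction template are hypothetical, not code from this repository:

```python
# Minimal sketch of a document-driven SFT corpus builder, assuming a hypothetical
# Markdown layout where each heading introduces an API and a fenced code example.
import json
import re

TEMPLATE = "请使用私有库完成以下任务:{task}"  # assumed instruction template

def extract_triples(markdown_text):
    """Yield (title, description, code) triples: the text under each heading
    plus the first fenced code block that follows it."""
    sections = re.split(r"^#{1,6}\s+", markdown_text, flags=re.M)[1:]
    for section in sections:
        title, _, body = section.partition("\n")
        match = re.search(r"```(?:\w+)?\n(.*?)```", body, flags=re.S)
        if match:
            # Description is the body text with code fences stripped out.
            description = re.sub(r"```.*?```", "", body, flags=re.S).strip()
            yield title.strip(), description, match.group(1).strip()

def build_sft_records(markdown_text):
    return [
        {
            "instruction": TEMPLATE.format(task=title),
            "input": description,
            "output": code,
        }
        for title, description, code in extract_triples(markdown_text)
    ]

if __name__ == "__main__":
    with open("private_lib_docs.md", encoding="utf-8") as f:  # hypothetical input
        corpus = build_sft_records(f.read())
    with open("sft_corpus.json", "w", encoding="utf-8") as f:
        json.dump(corpus, f, ensure_ascii=False, indent=2)
```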
@@ -2,7 +2,7 @@
 \begin{titlepage}
 \begin{figure}[H]
 \centering
-\includegraphics[scale=0.5]{pic//logo//logo.jpg}
+\includegraphics[scale=0.5]{pic//logo.jpg}
 \end{figure}
 \vspace{0.2cm}
 \centering
@@ -14,17 +14,17 @@
 \begin{flushleft}
 {{\songti \zihao{-3} \qquad\qquad\qquad 课题名称}\quad{\zihao{-4}\dlmu[7.5cm]{基于文档驱动的自适应编码大模型微调框架}}\par}
 \vspace{0.5cm}
-{{\songti\zihao{-3} \qquad\qquad\qquad 学\qquad 院}\quad\dlmu[7.5cm]{计算机科学与网络工程学院}\par}
+{{\songti\zihao{-3} \qquad\qquad\qquad 学\qquad 院}\quad\dlmu[7.5cm]{\department}\par}
 \vspace{0.5cm}
-{{\songti\zihao{-3} \qquad\qquad\qquad 专\qquad 业}\quad\dlmu[7.5cm]{计算机科学与技术}\par}
+{{\songti\zihao{-3} \qquad\qquad\qquad 专\qquad 业}\quad\dlmu[7.5cm]{\major}\par}
 \vspace{0.5cm}
-{{\songti\zihao{-3} \qquad\qquad\qquad 班级名称}\quad\dlmu[7.5cm]{计科211(创)}\par}
+{{\songti\zihao{-3} \qquad\qquad\qquad 班级名称}\quad\dlmu[7.5cm]{\classname}\par}
 \vspace{0.5cm}
-{{\songti\zihao{-3} \qquad\qquad\qquad 学生姓名}\quad\dlmu[7.5cm]{张三}\par}
+{{\songti\zihao{-3} \qquad\qquad\qquad 学生姓名}\quad\dlmu[7.5cm]{\studentname}\par}
 \vspace{0.5cm}
-{{\songti\zihao{-3} \qquad\qquad\qquad 学\qquad 号}\quad\dlmu[7.5cm]{20210001}\par}
+{{\songti\zihao{-3} \qquad\qquad\qquad 学\qquad 号}\quad\dlmu[7.5cm]{\studentid}\par}
 \vspace{0.5cm}
-{{\songti\zihao{-3} \qquad\qquad\qquad 指导老师}\quad\dlmu[7.5cm]{李四教授}\par}
+{{\songti\zihao{-3} \qquad\qquad\qquad 指导老师}\quad\dlmu[7.5cm]{\adviser}\par}
 \vspace{0.5cm}
 {{\songti\zihao{-3} \qquad\qquad\qquad 完成日期}\quad\dlmu[7.5cm]{\number\year 年\number\month 月\number\day 日}\par}
 \end{flushleft}
@@ -43,7 +43,7 @@

 \subsubsection{基础模型选择}

-本实验选择Qwen2.5-3B作为基础模型进行微调。该模型是阿里云开源的新一代大语言模型,具有以下特点:
+本实验选择qwen2.5-3B作为基础模型进行微调。该模型是阿里云开源的新一代大语言模型,具有以下特点:
 \begin{itemize}
 \item 性能表现:在 MMLU、GSM8K、BBH 等权威测试中优于多数同参数级开源模型。
 \item 参数规模:3.09B参数量(非嵌入参数2.77B),在保持较高性能的同时,对计算资源要求相对较低。
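For context on how a 3B-parameter base model like this is typically prepared for QLoRA fine-tuning, here is a minimal sketch using transformers, peft, and bitsandbytes. The Hub ID `Qwen/Qwen2.5-3B` and all hyperparameters (rank, alpha, dropout, target modules) are illustrative assumptions, not settings taken from this experiment:

```python
# Hedged sketch: load a 4-bit-quantized base model and attach LoRA adapters (QLoRA).
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
from peft import LoraConfig, get_peft_model, prepare_model_for_kbit_training

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,                       # QLoRA keeps base weights in 4-bit NF4
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
    bnb_4bit_use_double_quant=True,
)
model = AutoModelForCausalLM.from_pretrained(
    "Qwen/Qwen2.5-3B",                       # assumed model ID
    quantization_config=bnb_config,
    device_map="auto",
)
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-3B")

model = prepare_model_for_kbit_training(model)
lora_config = LoraConfig(
    r=16, lora_alpha=32, lora_dropout=0.05,  # assumed hyperparameters
    target_modules=["q_proj", "k_proj", "v_proj", "o_proj"],  # attention projections
    task_type="CAUSAL_LM",
)
model = get_peft_model(model, lora_config)
model.print_trainable_parameters()           # only the LoRA adapters are trainable
```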
@@ -131,7 +131,7 @@

 \subsection{微调效果验证}

-经过微调后的模型能够对相关内容做出准确回答,图\ref{fig:after_train}展示了训练后的效果。
+经过微调后的模型能够对相关内容做出准确回答,图\ref{fig:after_train}展示了训练后的效果,本框架将通用大语言模型定向优化为具备企业特定代码生成能力的专用模型,在保持模型通用能力的同时,显著提升了其在特定领域的表现,为企业级软件开发的智能化与高效化提供了有力支持。

 \begin{figure}[htbp]
 \centering
@@ -139,5 +139,3 @@
 \caption{模型微调后的效果}
 \label{fig:after_train}
 \end{figure}
-
-综上所述,实验结果验证了本框架的有效性。通过文档驱动的自适应微调,成功将通用大语言模型定向优化为具备企业特定代码生成能力的专用模型,在保持模型通用能力的同时,显著提升了其在特定领域的表现,为企业级软件开发的智能化与高效化提供了有力支持。
paper/latex/config.tex (new file, +8)
@@ -0,0 +1,8 @@
+% 个人信息配置
+\newcommand{\studentname}{张三}
+\newcommand{\studentid}{20210001}
+\newcommand{\classname}{计科211(创)}
+\newcommand{\department}{计算机科学与网络工程学院}
+\newcommand{\major}{计算机科学与技术}
+\newcommand{\adviser}{李四教授}
+\newcommand{\thesistitle}{基于文档驱动的自适应编码大模型微调框架}
@@ -1,4 +1,5 @@
 \documentclass[12pt,a4paper]{article}
+\input{config}
 \usepackage{graphicx}
 \usepackage{ctex}
 \usepackage{indentfirst}
@@ -128,26 +129,36 @@
 {
 \setlength{\baselineskip}{23pt}

 % 引入各章节文件
 \newpage
 \input{chapters/introduction}
 \clearpage

 \newpage
 \input{chapters/technology}
 \clearpage

 \newpage
 \input{chapters/requirement}
 \clearpage

 \newpage
 \input{chapters/implementation}
 \clearpage

 \newpage
 \input{chapters/verification}
 \clearpage

 \newpage
 \input{chapters/conclusion}

 \clearpage

 % 参考文献
 \newpage
 \input{chapters/references}
 %致谢
 \clearpage

 \newpage
 \input{chapters/acknowledgement}
 \clearpage
 }
 \end{document}
(image) Before: 31 KiB · After: 31 KiB
Binary file not shown.
(image) Before: 125 KiB · After: 151 KiB
@@ -1,11 +1,21 @@
 import pandas as pd
 import matplotlib.pyplot as plt

+# 设置全局字体大小
+plt.rcParams.update({
+    'font.size': 16,         # 全局字体大小
+    'axes.titlesize': 20,    # 标题字体大小
+    'axes.labelsize': 16,    # 坐标轴标签字体大小
+    'xtick.labelsize': 14,   # x轴刻度标签字体大小
+    'ytick.labelsize': 14,   # y轴刻度标签字体大小
+    'legend.fontsize': 14,   # 图例字体大小
+})
+
 # 读取CSV文件
 data = pd.read_csv('training_data.csv')

 # 创建图表
-plt.figure(figsize=(12, 9))
+plt.figure(figsize=(12, 12))

 # 绘制梯度范数变化曲线
 plt.subplot(3, 1, 1)  # 修改为 3行1列的第1个
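The `training_data.csv` consumed above plausibly originates from the TensorBoard monitoring mentioned in the abstract. A hedged sketch of exporting TensorBoard scalars to such a CSV — the log directory and tag names are assumptions, since the actual tags depend on the training run:

```python
# Sketch: dump selected TensorBoard scalar series into a step-indexed CSV.
import pandas as pd
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

acc = EventAccumulator("runs/qlora_finetune")  # hypothetical log directory
acc.Reload()                                   # parse event files from disk

frames = {}
for tag in ("train/loss", "train/grad_norm", "train/learning_rate"):  # assumed tags
    if tag in acc.Tags()["scalars"]:
        events = acc.Scalars(tag)
        frames[tag.split("/")[-1]] = pd.Series(
            [e.value for e in events], index=[e.step for e in events]
        )

# Series align on their step index, so columns share one 'step' axis.
pd.DataFrame(frames).rename_axis("step").to_csv("training_data.csv")
```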