@techreport{943ebb4c30da47f9a03da05284f4e6c9,
title = "GenFT: A Generative Parameter-Efficient Fine-Tuning Method for Pretrained Foundation Models",
abstract = "Pretrained Foundation Models (PFMs) have transformed numerous applications by enabling efficient adaptation to customized tasks. Parameter-Efficient Fine-Tuning (PEFT) has emerged as a resource-efficient alternative to full fine-tuning, especially leveraging reparameterized weights ΔW to adapt models for downstream tasks. However, a critical yet underexplored question remains: can we utilize well-pretrained weights W0 to guide the update of task-specific ΔW, avoiding inefficient training it from scratch? To end this, we propose Generative Parameter-Efficient Fine-Tuning (GenFT), a novel method that extracts structured, transferable information from W0 for efficient ΔW training. To extract row and column structure information, GenFT applies row and column transformations to distill essential patterns from W0. A tailored policy further decomposes ΔW into layer-shared and layer-specific components, balancing information reuse and individualized flexibility. GenFT is simple yet effective, achieving superior performance across CV and NLP tasks. Extensive experiments on VTAB-1K, FGVC, and GLUE benchmarks demonstrate that GenFT outperforms state-of-the-art PEFT methods, offering a new perspective for efficient model adaptation. ",
keywords = "cs.LG",
author = "Baoquan Zhang and Guangning Xu and Ng, {Michael K.}",
year = "2025",
month = may,
day = "21",
doi = "10.48550/arXiv.2506.11042",
language = "English",
series = "arXiv",
publisher = "Cornell University",
address = "United States",
type = "WorkingPaper",
institution = "Cornell University",
}
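
The abstract only outlines the mechanism, so the following is a minimal sketch of one plausible reading, not the authors' implementation: ΔW is generated from the frozen W0 by learned row and column transformations, with each transformation split into a layer-shared part and a layer-specific part. All names, shapes, and the additive shared/specific composition below are assumptions.

    import numpy as np

    rng = np.random.default_rng(0)
    d, r, n_layers = 64, 4, 3
    W0 = rng.standard_normal((d, d))        # frozen pretrained weight

    def low_rank(scale=0.01):
        # Low-rank factor pair, the usual PEFT trick to keep trainable
        # parameters small (hypothetical helper, not from the paper).
        return scale * rng.standard_normal((d, r)) @ rng.standard_normal((r, d))

    # Layer-shared row/column transforms (information reuse across layers).
    R_shared, C_shared = low_rank(), low_rank()

    for l in range(n_layers):
        # Layer-specific corrections (individualized flexibility).
        R_l, C_l = low_rank(), low_rank()
        # Generate the update from W0 instead of training ΔW from scratch.
        dW = (R_shared + R_l) @ W0 @ (C_shared + C_l)
        W_adapted = W0 + dW
        print(f"layer {l}: ||dW|| = {np.linalg.norm(dW):.3f}")

In contrast to LoRA, which trains ΔW's factors from scratch, this sketch reuses the structure already present in W0; how GenFT actually composes the shared and specific components is not specified in the abstract.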