@inproceedings{5c9c870b866947609f005beb421010da,
title = "Learning on Transformers is Provable Low-Rank and Sparse: A One-layer Analysis",
abstract = "Efficient training and inference algorithms, such as low-rank adaption and model pruning, have shown impressive performance for learning Transformer-based large foundation models. However, due to the technical challenges of the non-convex optimization caused by the complicated architecture of Transformers, the theoretical study of why these methods can be applied to learn Transformers is mostly elusive. To the best of our knowledge, this paper shows the first theoretical analysis of the property of low-rank and sparsity of one-layer Transformers by characterizing the trained model after convergence using stochastic gradient descent. By focusing on a data model based on label-relevant and label-irrelevant patterns, we quantify that the gradient updates of trainable parameters are low-rank, which de-pends on the number of label-relevant patterns. We also analyze how model pruning affects the generalization while improving computation efficiency and conclude that proper magnitude-based pruning has a slight effect on the testing performance. We implement numerical experiments to support our findings.",
keywords = "low-rank adaption, mechanism, model pruning, Transformer",
author = "Hongkang Li and Meng Wang and Shuai Zhang and Sijia Liu and Chen, {Pin Yu}",
note = "Publisher Copyright: {\textcopyright} 2024 IEEE.; 13rd IEEE Sensor Array and Multichannel Signal Processing Workshop, SAM 2024 ; Conference date: 08-07-2024 Through 11-07-2024",
year = "2024",
doi = "10.1109/SAM60225.2024.10636559",
language = "English (US)",
series = "Proceedings of the IEEE Sensor Array and Multichannel Signal Processing Workshop",
publisher = "IEEE Computer Society",
booktitle = "2024 IEEE 13rd Sensor Array and Multichannel Signal Processing Workshop, SAM 2024",
address = "United States",
}