@inproceedings{b1af7e6e9ab247b18b0d4437fa7086f0,
  title     = {Understanding and Estimating Error Propagation in Neural Networks for Scientific Data Analysis},
  author    = {He, Weiming and Chen, Qi and Gong, Qian and Li, Jing and Liu, Qing and Podhorszki, Norbert and Klasky, Scott and Jung, Kisung and Lacey, Cristian and Chen, Jackie and Zhu, Hongjian},
  booktitle = {Proceedings - 2025 IEEE 41st International Conference on Data Engineering, ICDE 2025},
  series    = {Proceedings - International Conference on Data Engineering},
  publisher = {IEEE Computer Society},
  address   = {United States},
  pages     = {1869--1881},
  year      = {2025},
  doi       = {10.1109/ICDE65448.2025.00143},
  language  = {English (US)},
  abstract  = {Neural networks are increasingly integrated into scientific discovery, where input data reduction and model quantization play a key role in accelerating inference. However, understanding and mitigating the impact of these techniques on output error is critical for ensuring reliable results, particularly in tasks demanding high numerical precision. This paper introduces a comprehensive framework for optimizing neural network inference in scientific computing by combining data reduction and model quantization while maintaining error-controlled outcomes. We develop theoretical analyses to bound error propagation under these techniques and propose a framework that balances computational performance with error constraints. Evaluation on real-world learning-based combustion simulations and satellite image classification shows that our derived error bounds accurately predict observed errors while enabling significant computational speedup under our framework. This work highlights the potential for further leveraging advancements in modern lossy compression algorithms and hardware accelerators that support lower-precision formats.},
  note      = {Publisher Copyright: {\textcopyright} 2025 IEEE.; 41st IEEE International Conference on Data Engineering, ICDE 2025 ; Conference date: 19-05-2025 Through 23-05-2025},
  internal-note = {reviewed: converted quoted values to braces; normalized author names to "Last, First" form; address holds publisher country (auto-export artifact) -- publisher city preferred if known},
}