@inproceedings{290b20a85f6041f6a11c325ae84f718f,
title = "IMA-GNN: In-Memory Acceleration of Centralized and Decentralized Graph Neural Networks at the Edge",
abstract = "In this paper, we propose IMA-GNN as an In-Memory Accelerator for centralized and decentralized Graph Neural Network inference, explore its potential in both settings, and provide a guideline for the community targeting flexible and efficient edge computation. Leveraging IMA-GNN, we first model the computation and communication latencies of edge devices. We then present practical case studies on GNN-based taxi demand and supply prediction and also adopt four large graph datasets to quantitatively compare and analyze the centralized and decentralized settings. Our cross-layer simulation results demonstrate that, on average, IMA-GNN in the centralized setting can obtain {\textasciitilde}790x communication speed-up compared to the decentralized GNN setting. However, the decentralized setting performs computation {\textasciitilde}1400x faster while reducing the power consumption per device. This further underlines the need for a hybrid semi-decentralized GNN approach.",
keywords = "edge computing, graph neural network, in-memory computing",
author = "Mehrdad Morsali and Mahmoud Nazzal and Abdallah Khreishah and Shaahin Angizi",
note = "Publisher Copyright: {\textcopyright} 2023 ACM; 33rd Great Lakes Symposium on VLSI, GLSVLSI 2023; Conference date: 05-06-2023 through 07-06-2023",
year = "2023",
month = jun,
day = "5",
doi = "10.1145/3583781.3590248",
language = "English (US)",
series = "Proceedings of the ACM Great Lakes Symposium on VLSI, GLSVLSI",
publisher = "Association for Computing Machinery",
pages = "3--8",
booktitle = "GLSVLSI 2023 - Proceedings of the Great Lakes Symposium on VLSI 2023",
}