@inproceedings{4fb831602ba74aed8d61c3d02aac9fc3,
  title     = {{IMCE}: Energy-Efficient Bit-Wise In-Memory Convolution Engine for Deep Neural Network},
  abstract  = {In this paper, we pave a novel way towards the concept of bit-wise In-Memory Convolution Engine (IMCE) that could implement the dominant convolution computation of Deep Convolutional Neural Networks (CNN) within memory. IMCE employs parallel computational memory sub-array as a fundamental unit based on our proposed Spin Orbit Torque Magnetic Random Access Memory (SOT-MRAM) design. Then, we propose an accelerator system architecture based on IMCE to efficiently process low bit-width CNNs. This architecture can be leveraged to greatly reduce energy consumption dealing with convolutional layers and also accelerate CNN inference. The device to architecture co-simulation results show that the proposed system architecture can process low bit-width AlexNet on ImageNet data-set favorably with 785.25{$\mu$}J/img, which consumes {$\sim$3$\times$} less energy than that of recent RRAM based counterpart. Besides, the chip area is {$\sim$4$\times$} smaller.},
  author    = {Angizi, Shaahin and He, Zhezhi and Parveen, Farhana and Fan, Deliang},
  note      = {Publisher Copyright: {\textcopyright} 2018 IEEE.; 23rd Asia and South Pacific Design Automation Conference, ASP-DAC 2018 ; Conference date: 22-01-2018 Through 25-01-2018},
  year      = {2018},
  month     = feb,
  day       = {20},
  doi       = {10.1109/ASPDAC.2018.8297291},
  language  = {English (US)},
  series    = {Proceedings of the Asia and South Pacific Design Automation Conference, ASP-DAC},
  publisher = {Institute of Electrical and Electronics Engineers Inc.},
  pages     = {111--116},
  booktitle = {ASP-DAC 2018 - 23rd Asia and South Pacific Design Automation Conference, Proceedings},
  address   = {United States},
}