@inproceedings{7e8a2d3c934c42cbb0403871b07ad233,
title = "Watch It, Don't Imagine It: Creating a Better Caption-Occlusion Metric by Collecting More Ecologically Valid Judgments from {DHH} Viewers",
abstract = "Television captions blocking visual information causes dissatisfaction among Deaf and Hard of Hearing (DHH) viewers, yet existing caption evaluation metrics do not consider occlusion. To create such a metric, DHH participants in a recent study imagined how bad it would be if captions blocked various on-screen text or visual content. To gather more ecologically valid data for creating an improved metric, we asked 24 DHH participants to give subjective judgments of caption quality after actually watching videos, and a regression analysis revealed which on-screen contents' occlusion related to users' judgments. For several video genres, a metric based on our new dataset out-performed the prior state-of-the-art metric for predicting the severity of captions occluding content during videos, which had been based on that prior study. We contribute empirical findings for improving DHH viewers' experience, guiding the placement of captions to minimize occlusions, and automated evaluation of captioning quality in television broadcasts.",
keywords = "Accessibility, Caption, Metric, Regression",
author = "Amin, {Akhter Al} and Hassan, Saad and Lee, Sooyeon and Huenerfauth, Matt",
note = "Publisher Copyright: {\textcopyright} 2022 ACM.; 2022 CHI Conference on Human Factors in Computing Systems, CHI 2022 ; Conference date: 30-04-2022 Through 05-05-2022",
year = "2022",
month = apr,
day = "29",
doi = "10.1145/3491102.3517681",
language = "English (US)",
series = "Conference on Human Factors in Computing Systems - Proceedings",
publisher = "Association for Computing Machinery",
booktitle = "{CHI} 2022 - Proceedings of the 2022 {CHI} Conference on Human Factors in Computing Systems",
}