@inproceedings{10.1145/3613904.3642095,
  title     = {{ReHEarSSE}: Recognizing {Hidden-in-the-Ear} Silently Spelled Expressions},
  author    = {Xuefu Dong and Yifei Chen and Yuuki Nishiyama and Kaoru Sezaki and Yuntao Wang and Ken Christofferson and Alex Mariakakis},
  booktitle = {Proceedings of the 2024 CHI Conference on Human Factors in Computing Systems},
  series    = {CHI '24},
  publisher = {Association for Computing Machinery},
  location  = {Honolulu, HI, USA},
  year      = {2024},
  doi       = {10.1145/3613904.3642095},
  isbn      = {9798400703300},
  abstract  = {Silent speech interaction (SSI) allows users to discreetly input text without using their hands. Existing wearable SSI systems typically require custom devices and are limited to a small lexicon, limiting their utility to a small set of command words. This work proposes ReHEarSSE, an earbud-based ultrasonic SSI system capable of generalizing to words that do not appear in its training dataset, providing support for nearly an entire dictionary's worth of words. As a user silently spells words, ReHEarSSE uses autoregressive features to identify subtle changes in ear canal shape. ReHEarSSE infers words using a deep learning model trained to optimize connectionist temporal classification (CTC) loss with an intermediate embedding that accounts for different letters and transitions between them. We find that ReHEarSSE recognizes 100 unseen words with an accuracy of 89.3%.},
  keywords  = {Acoustic sensing, autoregressive model, earable computing, silent speech interface, text entry},
  pubstate  = {published},
  tppubtype = {inproceedings}
}