2023
Auda, Jonas; Gruenefeld, Uwe; Mayer, Sven; Schneegass, Stefan
The Actuality-Time Continuum: Visualizing Interactions and Transitions Taking Place in Cross-Reality Systems Proceedings Article
In: Proceedings of the 1st Joint Workshop on Cross Reality, 2023.
@inproceedings{auda2023actuality,
title = {The Actuality-Time Continuum: Visualizing Interactions and Transitions Taking Place in Cross-Reality Systems},
author = {Jonas Auda and Uwe Gruenefeld and Sven Mayer and Stefan Schneegass},
year = {2023},
date = {2023-10-15},
booktitle = {Proceedings of the 1st Joint Workshop on Cross Reality},
abstract = {In the last decade, researchers contributed an increasing number of cross-reality systems and their evaluations. Going beyond individual technologies such as Virtual or Augmented Reality, these systems introduce novel approaches that help to solve relevant problems such as the integration of bystanders or physical objects. However, cross-reality systems are complex by nature, and describing the interactions and transitions taking place is a challenging task. Thus, in this paper, we propose the idea of the Actuality-Time Continuum that aims to enable researchers and designers alike to visualize complex cross-reality experiences. Moreover, we present four visualization examples that illustrate the potential of our proposal and conclude with an outlook on future perspectives.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Auda, Jonas; Gruenefeld, Uwe; Faltaous, Sarah; Mayer, Sven; Schneegass, Stefan
A Scoping Survey on Cross-Reality Systems Journal Article
In: ACM Comput. Surv., 2023, ISSN: 0360-0300, (Just Accepted).
@article{auda2023scoping,
title = {A Scoping Survey on Cross-Reality Systems},
author = { Jonas Auda and Uwe Gruenefeld and Sarah Faltaous and Sven Mayer and Stefan Schneegass},
doi = {10.1145/3616536},
issn = {0360-0300},
year = {2023},
date = {2023-09-01},
urldate = {2023-09-01},
journal = {ACM Comput. Surv.},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
abstract = {Immersive technologies such as Virtual Reality (VR) and Augmented Reality (AR) empower users to experience digital realities. While VR and AR are known as distinct technology classes, the lines between them are becoming increasingly blurry with recent technological advancements. New systems enable users to interact across technology classes or transition between them \textendash referred to as cross-reality systems. Nevertheless, these systems are not well-understood. Hence, in this paper, we conducted a scoping literature review to classify and analyze cross-reality systems proposed in previous work. First, we define these systems by distinguishing three different types. Thereafter, we compile a literature corpus of 306 relevant publications, analyze the proposed systems, and present a comprehensive classification, including research topics, involved environments, and transition types. Based on the gathered literature, we extract nine guiding principles that can inform the development of cross-reality systems. We conclude with research challenges and opportunities.},
note = {Just Accepted},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Bemmann, Florian; Mayer, Carmen; Mayer, Sven
Leveraging Mobile Sensing Technology for Societal Change Towards more Sustainable Behavior Proceedings Article
In: Workshop Proceedings of the HCI for Climate Change: Imagining Sustainable Futures, 2023.
@inproceedings{bemmann2023leveraging,
title = {Leveraging Mobile Sensing Technology for Societal Change Towards more Sustainable Behavior},
author = {Florian Bemmann and Carmen Mayer and Sven Mayer},
url = {https://sven-mayer.com/wp-content/uploads/2023/04/bemmann2023leveraging.pdf
https://sites.google.com/fbk.eu/hci-climate-change/},
doi = {10.48550/arXiv.2303.12426},
year = {2023},
date = {2023-04-23},
urldate = {2023-04-23},
booktitle = {Workshop Proceedings of the HCI for Climate Change: Imagining Sustainable Futures},
series = {HCI4CC\'23},
abstract = {A pro-environmental attitude in the general population is essential to combat climate change. Society as a whole has the power to change economic processes through market demands and to exert pressure on policymakers - both are key social factors that currently undermine the goals of decarbonization. Creating long-lasting, sustainable attitudes is challenging, and behavior change technologies struggle to overcome their limitations. Environmental psychology proposes social factors as relevant, among them creating a feeling of global identity and widening one’s view beyond one’s own bubble. From our experience in the field of mobile sensing and psychometric data inferences, we see strong potential in mobile sensing technologies to implement the aforementioned goals. We present concrete ideas in this paper, aiming to refine and extend them with the workshop and evaluate them afterward.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Bemmann, Florian; Mayer, Sven
User-Centered Sustainable Technology Design: A Reflection on Human-Computer Interaction Research for a Sustainable Society Proceedings Article
In: Joint Proceedings of ICT4S 2023 Doctoral Symposium, Demonstrations & Posters Track and Workshops, 2023.
@inproceedings{bemmann2023user,
title = {User-Centered Sustainable Technology Design: A Reflection on Human-Computer Interaction Research for a Sustainable Society},
author = {Florian Bemmann and Sven Mayer},
url = {https://sven-mayer.com/wp-content/uploads/2023/11/bemmann2023user.pdf},
year = {2023},
date = {2023-07-05},
urldate = {2023-07-05},
booktitle = {Joint Proceedings of ICT4S 2023 Doctoral Symposium, Demonstrations \& Posters Track and Workshops},
abstract = {As consumers and deciders, people play the core role in tackling climate change, as their choices steer company behavior and political decisions. Human-Computer Interaction (HCI) in the sustainability domain has so far focused on changing individual behavior; however, this has reached a dead end. Research has shown that extrinsic factors (such as cost and everyday convenience) mostly oppose sustainable behavior and are rarely overcome by intrinsic motivations. In this paper, we reflect on the past approaches HCI has studied to foster environmental sustainability. Bringing together recent work from environmental psychology, behavioral psychology, and HCI, we point to insights that future work could benefit from incorporating: the focus should shift from what people think to what people do individually. A global identity perception can overcome moral shortsightedness, and considering people’s cultural context and worldview is essential for technology to make an impact. We map such psychological aspects to user-centered technology concepts. We discuss practical implications with an emphasis on real-world applicability and critically discuss the ethics of technology. Our work sparks ideas and discussions to inspire future sustainable human-computer interaction research.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Chiossi, Francesco; Turgut, Yagiz; Welsch, Robin; Mayer, Sven
Adapting Visual Complexity Based on Electrodermal Activity Improves Working Memory Performance in Virtual Reality Journal Article
In: Proc. ACM Hum.-Comput. Interact., vol. 7, no. MobileHCI, 2023.
@article{chiossi2023adapting,
title = {Adapting Visual Complexity Based on Electrodermal Activity Improves Working Memory Performance in Virtual Reality},
author = {Francesco Chiossi and Yagiz Turgut and Robin Welsch and Sven Mayer},
doi = {10.1145/3604243},
year = {2023},
date = {2023-01-01},
urldate = {2023-01-01},
journal = {Proc. ACM Hum.-Comput. Interact.},
volume = {7},
number = {MobileHCI},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
abstract = {Biocybernetic loops encompass users\' state detection and system adaptation based on physiological signals. Current adaptive systems limit the adaptation to task features such as task difficulty or multitasking demands. However, virtual reality allows the manipulation of task-irrelevant elements in the environment. We present a physiologically adaptive system that adjusts the virtual environment based on physiological arousal, i.e., electrodermal activity. We conducted a user study with our adaptive system in social virtual reality to verify improved performance. Here, participants completed an n-back task, and we adapted the visual complexity of the environment by changing the number of non-player characters. Our results show that an adaptive virtual reality can control users\' comfort, performance, and workload by adapting the visual complexity based on physiological arousal. Thus, our physiologically adaptive system improves task performance and perceived workload. Finally, we embed our findings in physiological computing and discuss applications in various scenarios.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
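
The biocybernetic loop this abstract describes (sense electrodermal activity, then adjust the number of non-player characters) can be illustrated with a minimal Python sketch. This is not the authors' code: the thresholds, the 4 Hz sampling assumption, and both stub functions below are invented for illustration.

# Minimal sketch of an EDA-driven adaptation loop (illustrative only).
import random
import time
from collections import deque

def read_eda_sample() -> float:
    return random.uniform(1.0, 8.0)   # stand-in for a real EDA sensor driver (microsiemens)

def set_npc_count(n: int) -> None:
    print(f"NPC count -> {n}")        # stand-in for a VR engine binding

LOW, HIGH = 3.0, 6.0                  # illustrative arousal thresholds
window = deque(maxlen=40)             # ~10 s window at an assumed 4 Hz
npc_count = 5

for _ in range(20):                   # bounded loop instead of while True
    window.append(read_eda_sample())
    arousal = sum(window) / len(window)
    if arousal > HIGH and npc_count > 0:
        npc_count -= 1                # high arousal: reduce visual complexity
    elif arousal < LOW and npc_count < 10:
        npc_count += 1                # low arousal: add complexity
    set_npc_count(npc_count)
    time.sleep(0.25)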

Chiossi, Francesco; Ou, Changkun; Mayer, Sven
Exploring Physiological Correlates of Visual Complexity Adaptation: Insights from EDA, ECG, and EEG Data for Adaptation Evaluation in VR Adaptive Systems Proceedings Article
In: Extended Abstracts of the 2023 CHI Conference on Human Factors in Computing Systems, Association for Computing Machinery, New York, NY, USA, 2023.
@inproceedings{chiossi2023exploring,
title = {Exploring Physiological Correlates of Visual Complexity Adaptation: Insights from EDA, ECG, and EEG Data for Adaptation Evaluation in VR Adaptive Systems},
author = {Francesco Chiossi and Changkun Ou and Sven Mayer},
url = {https://sven-mayer.com/wp-content/uploads/2023/03/chiossi2023exploring.pdf
https://github.com/mimuc/vr-adaptation-eeg-evaluation},
doi = {10.1145/3544549.3585624},
year = {2023},
date = {2023-04-23},
urldate = {2023-04-23},
booktitle = {Extended Abstracts of the 2023 CHI Conference on Human Factors in Computing Systems},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
series = {CHI EA \'23},
abstract = {Physiologically-adaptive Virtual Reality can drive interactions and adjust virtual content to better fit users\' needs and support specific goals. However, the complexity of psychophysiological inference hinders efficient adaptation as the relationship between cognitive and physiological features rarely show one-to-one correspondence. Therefore, it is necessary to employ multimodal approaches to evaluate the effect of adaptations. In this work, we analyzed a multimodal dataset (EEG, ECG, and EDA) acquired during interaction with a VR-adaptive system that employed EDA as input for adaptation of secondary task difficulty. We evaluated the effect of dynamic adjustments on different physiological features and their correlation. Our results show that when the adaptive system increased the secondary task difficulty, theta, beta, and phasic EDA features increased. Moreover, we found a high correlation between theta, alpha, and beta oscillations during difficulty adjustments. Our results show how specific EEG and EDA features can be employed for evaluating VR adaptive systems.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
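
As a rough illustration of the multimodal analysis this abstract mentions, the sketch below computes per-second EEG theta and beta band power with Welch's method and correlates them with a phasic EDA series. The synthetic random signals and the 250 Hz sampling rate are assumptions for illustration, not details from the paper.

# Sketch: EEG band power (theta/beta) correlated with a phasic EDA feature.
import numpy as np
from scipy.signal import welch

fs = 250                                    # assumed EEG sampling rate (Hz)
rng = np.random.default_rng(0)
eeg = rng.standard_normal(60 * fs)          # 60 s of synthetic one-channel EEG
eda_phasic = rng.random(60)                 # synthetic 1 Hz phasic EDA feature

def band_power(segment, lo, hi):
    f, pxx = welch(segment, fs=fs, nperseg=len(segment))
    return pxx[(f >= lo) & (f < hi)].mean()

epochs = eeg.reshape(60, fs)                # one-second epochs
theta = np.array([band_power(e, 4, 8) for e in epochs])
beta = np.array([band_power(e, 13, 30) for e in epochs])
print(np.corrcoef(theta, eda_phasic)[0, 1])  # theta-EDA coupling
print(np.corrcoef(theta, beta)[0, 1])        # theta-beta coupling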

Chiossi, Francesco; Mayer, Sven
How Can Mixed Reality Benefit From Physiologically-Adaptive Systems? Challenges and Opportunities for Human Factors Proceedings Article
In: Workshop on the Future of Computational Approaches for Understanding and Adapting User Interfaces, 2023.
@inproceedings{chiossi2023how,
title = {How Can Mixed Reality Benefit From Physiologically-Adaptive Systems? Challenges and Opportunities for Human Factors},
author = {Francesco Chiossi and Sven Mayer},
url = {https://sven-mayer.com/wp-content/uploads/2023/04/chiossi2023how.pdf
https://arxiv.org/abs/2303.17978},
doi = {10.48550/arXiv.2303.17978},
year = {2023},
date = {2023-04-23},
urldate = {2023-04-23},
booktitle = {Workshop on the Future of Computational Approaches for Understanding and Adapting User Interfaces},
abstract = {Mixed Reality (MR) allows users to interact with digital objects in a physical environment, but several limitations have hampered widespread adoption. Physiologically adaptive systems detecting users’ states can drive interaction and address these limitations. Here, we highlight potential usability and interaction limitations in MR and how physiologically adaptive systems can benefit MR experiences and applications. We specifically address potential applications for human factors and operational settings such as healthcare, education, and entertainment. We further discuss benefits and applications in light of ethical and privacy concerns. The use of physiologically adaptive systems in MR has the potential to revolutionize human-computer interactions and provide users with a more personalized and engaging experience.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Chiossi, Francesco; Kosch, Thomas; Menghini, Luca; Villa, Steeven; Mayer, Sven
SensCon: Embedding Physiological Sensing into Virtual Reality Controllers Journal Article
In: Proc. ACM Hum.-Comput. Interact., vol. 7, no. MobileHCI, 2023.
@article{chiossi2023senscon,
title = {SensCon: Embedding Physiological Sensing into Virtual Reality Controllers},
author = {Francesco Chiossi and Thomas Kosch and Luca Menghini and Steeven Villa and Sven Mayer},
url = {https://sven-mayer.com/wp-content/uploads/2023/07/chiossi2023senscon.pdf},
doi = {10.1145/3604270},
year = {2023},
date = {2023-01-01},
urldate = {2023-01-01},
journal = {Proc. ACM Hum.-Comput. Interact.},
volume = {7},
number = {MobileHCI},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
abstract = {Virtual reality experiences increasingly use physiological data for virtual environment adaptations to evaluate user experience and immersion. Previous research required complex medical-grade equipment to collect physiological data, limiting real-world applicability. To overcome this, we present SensCon for skin conductance and heart rate data acquisition. To identify the optimal sensor location in the controller, we conducted a first study investigating users\' controller grasp behavior. In a second study, we evaluated the performance of SensCon against medical-grade devices in six scenarios regarding user experience and signal quality. Users subjectively preferred SensCon in terms of usability and user experience. Moreover, the signal quality evaluation showed satisfactory accuracy across static, dynamic, and cognitive scenarios. Therefore, SensCon reduces the complexity of capturing and adapting the environment via real-time physiological data. By open-sourcing SensCon, we enable researchers and practitioners to adapt their virtual reality environment effortlessly. Finally, we discuss possible use cases for virtual reality-embedded physiological sensing.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
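
To give a flavor of the heart-rate side of such controller-embedded sensing, here is a small sketch estimating heart rate from a pulse-like waveform via peak detection. The synthetic waveform, sampling rate, and peak-distance parameter are all assumptions for illustration; the actual SensCon pipeline is the authors' open-sourced release.

# Sketch: heart rate from a pulse-like signal via peak detection.
import numpy as np
from scipy.signal import find_peaks

fs = 100                                  # assumed sampling rate (Hz)
t = np.arange(0, 10, 1 / fs)              # 10 s of data
ppg = np.sin(2 * np.pi * 1.2 * t)         # synthetic ~72 bpm pulse wave
ppg += 0.1 * np.random.randn(t.size)      # simulated sensor noise

peaks, _ = find_peaks(ppg, distance=int(0.4 * fs))  # min 0.4 s between beats
ibi = np.diff(peaks) / fs                           # inter-beat intervals (s)
print(f"estimated heart rate: {60 / ibi.mean():.1f} bpm")
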
Dix, Alan; Mayer, Sven; Palanque, Philippe; Panizzi, Emanuele; Spano, Lucio Davide
Engineering Interactive Systems Embedding AI Technologies Proceedings Article
In: Companion Proceedings of the 2023 ACM SIGCHI Symposium on Engineering Interactive Computing Systems, pp. 90–92, Association for Computing Machinery, Swansea, United Kingdom, 2023, ISBN: 9798400702068.
@inproceedings{dix2023engineering,
title = {Engineering Interactive Systems Embedding AI Technologies},
author = { Alan Dix and Sven Mayer and Philippe Palanque and Emanuele Panizzi and Lucio Davide Spano},
url = {https://sven-mayer.com/wp-content/uploads/2023/11/dix2023engineering.pdf},
doi = {10.1145/3596454.3597195},
isbn = {9798400702068},
year = {2023},
date = {2023-01-01},
urldate = {2023-01-01},
booktitle = {Companion Proceedings of the 2023 ACM SIGCHI Symposium on Engineering Interactive Computing Systems},
pages = {90\textendash92},
publisher = {Association for Computing Machinery},
address = {Swansea, United Kingdom},
series = {EICS \'23 Companion},
abstract = {This workshop aims at bringing together researchers and practitioners interested in the engineering of interactive systems which embed AI technologies (such as recommender system engines). The objective is to identify (from experience reported by participants) methods, techniques, and tools to support the inclusion of such AI technologies in interactive systems. A specific focus will be on the guarantee that user-relevant properties such as usability and user experience are accounted for. Another focus will be on the identification and definition of architectures supporting those integrations.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}

Grootjen, Jesse W.; Weingärtner, Henrike; Mayer, Sven
Highlighting the Challenges of Blinks in Eye Tracking for Interactive Systems Proceedings Article
In: Proceedings of the 2023 Symposium on Eye Tracking Research and Applications, Association for Computing Machinery, Tübingen, Germany, 2023, ISBN: 9798400701504.
@inproceedings{grootjen2023highlighting,
title = {Highlighting the Challenges of Blinks in Eye Tracking for Interactive Systems},
author = { Jesse W. Grootjen and Henrike Weing\"{a}rtner and Sven Mayer},
url = {https://sven-mayer.com/wp-content/uploads/2023/06/grootjen2023highlighting.pdf},
doi = {10.1145/3588015.3589202},
isbn = {9798400701504},
year = {2023},
date = {2023-01-01},
urldate = {2023-01-01},
booktitle = {Proceedings of the 2023 Symposium on Eye Tracking Research and Applications},
publisher = {Association for Computing Machinery},
address = {T\"{u}bingen, Germany},
series = {ETRA \'23},
abstract = {Eye tracking is the basis for many intelligent systems to predict user actions. A core challenge with eye-tracking data is that it inherently suffers from missing data due to blinks. Approaches such as intent prediction and user state recognition process gaze data using neural networks; however, they often have difficulty handling missing information. In an effort to understand how prior work dealt with missing data, we found that researchers often simply ignore missing data or adopt use-case-specific approaches, such as artificially filling in missing data. This inconsistency in handling missing data in eye tracking hinders the development of effective intelligent systems for predicting user actions and limits reproducibility. Furthermore, this can even lead to incorrect results. Thus, this lack of standardization calls for investigating possible solutions to improve the consistency and effectiveness of processing eye-tracking data for user action prediction.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
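
One of the use-case-specific strategies the abstract mentions is artificially filling in missing data. A minimal, hedged example of that strategy (an illustration, not a recommendation from the paper) is linear interpolation across blink gaps marked as NaN:

# Sketch: filling blink gaps in a gaze signal by linear interpolation.
import numpy as np
import pandas as pd

gaze_x = pd.Series([0.10, 0.12, np.nan, np.nan, 0.21, 0.22])  # x-coordinates
filled = gaze_x.interpolate(method="linear", limit=10)  # cap filled gap length
print(filled.tolist())

Whether such filling is appropriate depends on the downstream model, which is exactly the standardization gap the authors highlight.
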
Haliburton, Luke; Kheirinejad, Saba; Schmidt, Albrecht; Mayer, Sven
Exploring Smart Standing Desks to Foster a Healthier Workplace Journal Article
In: Proc. ACM Interact. Mob. Wearable Ubiquitous Technol., vol. 5, no. 3, 2023.
@article{haliburton2023exploring,
title = {Exploring Smart Standing Desks to Foster a Healthier Workplace},
author = { Luke Haliburton and Saba Kheirinejad and Albrecht Schmidt and Sven Mayer},
url = {https://sven-mayer.com/wp-content/uploads/2023/05/haliburton2023exploring.pdf},
doi = {10.1145/3478121},
year = {2023},
date = {2023-01-01},
urldate = {2023-01-01},
journal = {Proc. ACM Interact. Mob. Wearable Ubiquitous Technol.},
volume = {5},
number = {3},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
abstract = {Sedentary behavior is endemic in modern workplaces, contributing to negative physical and mental health outcomes. Although adjustable standing desks are increasing in popularity, people still avoid standing. We developed an open-source plug-and-play system to remotely control standing desks and investigated three system modes with a three-week in-the-wild user study (N=15). Interval mode forces users to stand once per hour, causing frustration. Adaptive mode nudges users to stand every hour unless the user has stood already. Smart mode, which raises the desk during breaks, was the best rated, contributing to increased standing time with the most positive qualitative feedback. However, non-computer activities need to be accounted for in the future. Therefore, our results indicate that a smart standing desk that shifts modes at opportune times has the most potential to reduce sedentary behavior in the workplace. We contribute our open-source system and insights for future intelligent workplace well-being systems.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
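
The three modes can be paraphrased as simple scheduling rules. The sketch below reconstructs them from the abstract alone; the function names and parameters are hypothetical and do not correspond to the authors' open-source API.

# Sketch: the three desk-control modes, paraphrased from the abstract.
def raise_desk():
    print("desk raised")  # placeholder for the actual desk controller

def should_raise(mode, minutes_into_hour, minutes_since_stand, user_on_break):
    if mode == "interval":   # fixed hourly schedule, regardless of standing
        return minutes_into_hour == 0
    if mode == "adaptive":   # hourly nudge, skipped if the user already stood
        return minutes_since_stand >= 60
    if mode == "smart":      # raise the desk during detected breaks
        return user_on_break
    return False

if should_raise("smart", minutes_into_hour=45, minutes_since_stand=45,
                user_on_break=True):
    raise_desk()
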
Haliburton, Luke; Ghebremedhin, Sinksar; Welsch, Robin; Schmidt, Albrecht; Mayer, Sven
Investigating Labeler Bias in Face Annotation for Machine Learning Miscellaneous
2023.
@misc{haliburton2023investigating,
title = {Investigating Labeler Bias in Face Annotation for Machine Learning},
author = {Luke Haliburton and Sinksar Ghebremedhin and Robin Welsch and Albrecht Schmidt and Sven Mayer},
url = {https://sven-mayer.com/wp-content/uploads/2023/11/haliburton2023investigating.pdf},
doi = {10.48550/arXiv.2301.09902},
year = {2023},
date = {2023-01-01},
urldate = {2023-01-01},
abstract = {In a world increasingly reliant on artificial intelligence, it is more important than ever to consider the ethical implications of artificial intelligence on humanity. One key under-explored challenge is labeler bias, which can create inherently biased datasets for training and subsequently lead to inaccurate or unfair decisions in healthcare, employment, education, and law enforcement. Hence, we conducted a study to investigate and measure the existence of labeler bias using images of people from different ethnicities and sexes in a labeling task. Our results show that participants hold stereotypes that influence their decision-making process and that labeler demographics impact assigned labels. We also discuss how labeler bias influences datasets and, subsequently, the models trained on them. Overall, a high degree of transparency must be maintained throughout the entire artificial intelligence training process to identify and correct biases in the data as early as possible.},
keywords = {},
pubstate = {published},
tppubtype = {misc}
}

Leusmann, Jan; Oechsner, Carl; Prinz, Johanna; Welsch, Robin; Mayer, Sven
A Database for Kitchen Objects: Investigating Danger Perception in the Context of Human-Robot Interaction Proceedings Article
In: Extended Abstracts of the 2023 CHI Conference on Human Factors in Computing Systems, Association for Computing Machinery, New York, NY, USA, 2023.
@inproceedings{leusmann2023database,
title = {A Database for Kitchen Objects: Investigating Danger Perception in the Context of Human-Robot Interaction },
author = {Jan Leusmann and Carl Oechsner and Johanna Prinz and Robin Welsch and Sven Mayer},
url = {https://sven-mayer.com/wp-content/uploads/2023/03/leusmann2023database.pdf
https://hri-objects.leusmann.io/},
doi = {10.1145/3544549.3585884},
year = {2023},
date = {2023-04-23},
urldate = {2023-04-23},
booktitle = {Extended Abstracts of the 2023 CHI Conference on Human Factors in Computing Systems},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
series = {CHI EA\'23},
abstract = {In the future, humans collaborating closely with cobots in everyday tasks will require handing each other objects. So far, researchers have optimized human-robot collaboration concerning measures such as trust, safety, and enjoyment. However, as the objects themselves influence these measures, we need to investigate how humans perceive the danger level of objects. Thus, we created a database of 153 kitchen objects and conducted an online survey (N=300) investigating their perceived danger level. We found that (1) humans perceive kitchen objects vastly differently, (2) the object-holder has a strong effect on the danger perception, and (3) prior user knowledge increases the perceived danger of robots handling those objects. This shows that future human-robot collaboration studies must investigate different objects for a holistic image. We contribute a wiki-like open-source database to allow others to study predefined danger scenarios and eventually build object-aware systems: https://hri-objects.leusmann.io/.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}

Leusmann, Jan; Wiese, Jannik; Ziarko, Moritz; Mayer, Sven
Investigating Opportunities for Active Smart Assistants to Initiate Interactions With Users Proceedings Article Forthcoming
In: Proceedings of the International Conference on Mobile and Ubiquitous Multimedia, Forthcoming.
@inproceedings{leusmann2023investigating,
title = {Investigating Opportunities for Active Smart Assistants to Initiate Interactions With Users},
author = {Jan Leusmann and Jannik Wiese and Moritz Ziarko and Sven Mayer},
url = {https://osf.io/9qv54/?view_only=cc677d8072d14899b5af0153fe865bc4},
doi = {10.1145/3626705.3631787},
year = {2023},
date = {2023-12-03},
urldate = {2023-12-03},
booktitle = {Proceedings of the International Conference on Mobile and Ubiquitous Multimedia},
series = {MUM EA\'23},
abstract = {Passive voice assistants such as Alexa are widespread, responding to user requests. However, due to the rise of domestic robots, we envision active smart assistants initiating interactions seamlessly, weaving themselves into the user\'s context, and enabling more suitable interaction. While robots already deliver the hardware, only recently have advancements in artificial intelligence enabled assistants to understand humans and their environments well enough to support such visions. We combined hardware with artificial intelligence to build an attentive robot. Here, we present a robotic head prototype that discovers and follows users in a room, supported by video and sound. We contribute (1) the design and implementation of a prototype system for an active smart assistant and (2) a discussion on design principles for systems engaging in human conversations. This work aims to provide foundations for future research on active smart assistants.},
keywords = {},
pubstate = {forthcoming},
tppubtype = {inproceedings}
}
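
A head that discovers and follows users could, in the simplest case, map a detected face position to a pan angle. The sketch below uses OpenCV's bundled Haar cascade for face detection; the field-of-view constant and the overall mapping are assumptions, not the prototype's actual implementation.

# Sketch: mapping a detected face to a pan angle for a robotic head.
import cv2

FOV_DEG = 60  # assumed horizontal camera field of view

cascade = cv2.CascadeClassifier(
    cv2.data.haarcascades + "haarcascade_frontalface_default.xml")

def pan_angle(frame):
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = cascade.detectMultiScale(gray, 1.1, 5)
    if len(faces) == 0:
        return None                               # no user in view
    x, y, w, h = faces[0]
    offset = (x + w / 2) / frame.shape[1] - 0.5   # -0.5 .. 0.5 of the image
    return offset * FOV_DEG                       # degrees to turn the head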

Leusmann, Jan; Wang, Chao; Gienger, Michael; Schmidt, Albrecht; Mayer, Sven
Understanding the Uncertainty Loop of Human-Robot Interaction Proceedings Article
In: Proceedings of the Socially Assistive Robots as Decision Makers: Transparency, Motivations, and Intentions Workshop, 2023.
@inproceedings{leusmann2023understanding,
title = {Understanding the Uncertainty Loop of Human-Robot Interaction},
author = {Jan Leusmann and Chao Wang and Michael Gienger and Albrecht Schmidt and Sven Mayer},
url = {https://sven-mayer.com/wp-content/uploads/2023/03/gruenefeld2022workshop.pdf
https://arxiv.org/abs/2303.07889},
doi = {10.48550/arXiv.2303.07889},
year = {2023},
date = {2023-04-23},
urldate = {2023-04-23},
booktitle = {Proceedings of the Socially Assistive Robots as Decision Makers: Transparency, Motivations, and Intentions Workshop},
series = {SARs: TMI\'23},
abstract = {Recently, the field of Human-Robot Interaction gained popularity due to the wide range of possibilities of how robots can support humans during daily tasks. One such form is supportive robots: socially assistive robots built explicitly for communicating with humans, e.g., as service robots or personal companions. As they understand humans through artificial intelligence, these robots can sometimes make wrong assumptions about the humans\' current state and give an unexpected response. In human-human conversations, unexpected responses happen frequently. However, it is currently unclear how such robots should act if they understand that the human did not expect their response, or even how they should show the uncertainty of their response in the first place. For this, we explore the different forms of potential uncertainties during human-robot conversations and how humanoids can communicate these uncertainties through verbal and non-verbal cues.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Li, Jingyi; Park, Hyerim; Welsch, Robin; Mayer, Sven; Butz, Andreas
SeatmateVR: Proxemic Cues for Close Bystander-Awareness in Virtual Reality Journal Article
In: Proc. ACM Hum.-Comput. Interact., vol. 7, no. ISS, 2023.
@article{li2023seatmatevr,
title = {SeatmateVR: Proxemic Cues for Close Bystander-Awareness in Virtual Reality},
author = { Jingyi Li and Hyerim Park and Robin Welsch and Sven Mayer and Andreas Butz},
url = {https://sven-mayer.com/wp-content/uploads/2023/11/li2023seatmatevr.pdf},
doi = {10.1145/3626474},
year = {2023},
date = {2023-11-01},
urldate = {2023-11-01},
journal = {Proc. ACM Hum.-Comput. Interact.},
volume = {7},
number = {ISS},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
abstract = {Prior research explored ways to alert virtual reality users of bystanders entering the play area from afar. However, in confined social settings like sharing a couch with seatmates, bystanders\' proxemic cues, such as distance, are limited during interruptions, posing challenges for proxemic-aware systems. To address this, we investigated three visualizations, using a 2D animoji, a fully-rendered avatar, and their combination, to gradually share bystanders\' orientation and location during interruptions. In a user study (N=22), participants played virtual reality games while responding to questions from their seatmates. We found that the avatar preserved game experiences yet did not support the fast identification of seatmates as the animoji did. Instead, users preferred the mixed visualization, where they found the seatmate\'s orientation cues instantly in their view and were gradually guided to the person\'s actual location. We discuss implications for fine-grained proxemic-aware virtual reality systems to support interaction in constrained social spaces.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}

Ou, Changkun; Mayer, Sven; Butz, Andreas
The Impact of Expertise in the Loop for Exploring Machine Rationality Proceedings Article
In: Proceedings of the 28th International Conference on Intelligent User Interfaces, Association for Computing Machinery, Sydney, Australia, 2023.
@inproceedings{ou2023expertise,
title = {The Impact of Expertise in the Loop for Exploring Machine Rationality},
author = { Changkun Ou and Sven Mayer and Andreas Butz},
url = {https://sven-mayer.com/wp-content/uploads/2023/02/ou2023expertise.pdf
https://github.com/changkun/expertise-loop},
doi = {10.1145/3581641.3584040},
year = {2023},
date = {2023-01-01},
urldate = {2023-01-01},
booktitle = {Proceedings of the 28th International Conference on Intelligent User Interfaces},
publisher = {Association for Computing Machinery},
address = {Sydney, Australia},
series = {IUI \'23},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}

Rusu, Marius; Mayer, Sven
Deep Learning Super-Resolution Network Facilitating Fiducial Tangibles on Capacitive Touchscreens Proceedings Article
In: Proceedings of the 42nd ACM Conference on Human Factors in Computing Systems, Association for Computing Machinery, Hamburg, Germany, 2023.
@inproceedings{rusu2023deep,
title = {Deep Learning Super-Resolution Network Facilitating Fiducial Tangibles on Capacitive Touchscreens},
author = {Marius Rusu and Sven Mayer },
url = {https://sven-mayer.com/wp-content/uploads/2023/02/rusu2023deep.pdf
https://github.com/mimuc/super-resolution-for-fiducial-tangibles},
doi = {10.1145/3544548.3580987},
year = {2023},
date = {2023-04-23},
urldate = {2023-04-23},
booktitle = {Proceedings of the 42nd ACM Conference on Human Factors in Computing Systems},
publisher = {Association for Computing Machinery},
address = {Hamburg, Germany},
series = {CHI \'23},
abstract = {Over the last years, we have seen many approaches using tangibles to address the limited expressiveness of touchscreens. Mainstream tangible detection uses fiducial markers embedded in the tangibles. However, the coarse sensor size of capacitive touchscreens makes tangibles bulky, limiting their usefulness. We propose a novel deep-learning super-resolution network to facilitate fiducial tangibles on capacitive touchscreens better. In detail, our network super-resolves the markers enabling off-the-shelf detection algorithms to track tangibles reliably. Our network generalizes to unseen marker sets, such as AprilTag, ArUco, and ARToolKit. Therefore, we are not limited to a fixed number of distinguishable objects and do not require data collection and network training for new fiducial markers. With extensive evaluation including real-world users and five showcases, we demonstrate the applicability of our open-source approach on commodity mobile devices and further highlight the potential of tangibles on capacitive touchscreens.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
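
The super-resolution idea can be sketched with a toy PixelShuffle-based network. This is not the authors' architecture (their network is in the linked repository); the layer sizes and the 15x27 sensor grid below are illustrative assumptions.

# Toy sketch of super-resolving a low-resolution capacitive image.
import torch
import torch.nn as nn

class ToySR(nn.Module):
    def __init__(self, scale=4):
        super().__init__()
        self.net = nn.Sequential(
            nn.Conv2d(1, 32, 3, padding=1), nn.ReLU(),
            nn.Conv2d(32, scale * scale, 3, padding=1),
            nn.PixelShuffle(scale),        # rearranges channels into space
        )

    def forward(self, x):
        return self.net(x)

lowres = torch.rand(1, 1, 15, 27)   # e.g., a 15x27 capacitive sensor grid
print(ToySR()(lowres).shape)        # -> torch.Size([1, 1, 60, 108])

The upsampled image could then be fed to an off-the-shelf fiducial detector, which is the division of labor the abstract describes.
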
Terzimehić, Nađa; Bemmann, Florian; Halsner, Miriam; Mayer, Sven
A Mixed-Method Exploration into the Mobile Phone Rabbit Hole Journal Article
In: Proc. ACM Hum.-Comput. Interact., vol. 7, no. MobileHCI, 2023.
@article{terzimehic2023mixed,
title = {A Mixed-Method Exploration into the Mobile Phone Rabbit Hole},
author = {Na{\dj}a Terzimehi\'{c} and Florian Bemmann and Miriam Halsner and Sven Mayer},
url = {https://sven-mayer.com/wp-content/uploads/2023/07/terzimehic2023mixed.pdf},
doi = {10.1145/3604241},
year = {2023},
date = {2023-01-01},
urldate = {2023-01-01},
journal = {Proc. ACM Hum.-Comput. Interact.},
volume = {7},
number = {MobileHCI},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
abstract = {Smartphones provide various functions supporting users in their daily lives. However, the temptation of getting distracted and tuning out is high, leading to so-called rabbit holes. To quantify rabbit hole behavior, we developed an Android tracking application that collects smartphone usage enriched with experience sampling questionnaires. We analyzed 14,395 smartphone use sessions from 21 participants, collected over two weeks, showing that rabbit hole sessions are significantly longer and contain more user interaction, revealing a certain level of restlessness in use. The context of rabbit hole sessions and subjective results revealed different triggers for spending more time on the phone. Next, we conducted an expert focus group (N=6) to put the gained insights into perspective and formulate a definition of the mobile phone rabbit hole. Our results form the foundation for predicting and communicating the mobile phone rabbit hole, especially when prolonged smartphone use results in regret.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Weber, Thomas; Thiel, Rafael Vinicius Mourão; Mayer, Sven
Supporting Software Developers Through a Gaze-Based Adaptive IDE Proceedings Article
In: Proceedings of Mensch Und Computer 2023, Association for Computing Machinery, Rapperswil, Switzerland, 2023.
@inproceedings{weber2023supporting,
title = {Supporting Software Developers Through a Gaze-Based Adaptive IDE},
author = {Thomas Weber and Rafael Vinicius Mour\~{a}o Thiel and Sven Mayer},
url = {https://sven-mayer.com/wp-content/uploads/2023/11/weber2023supporting.pdf},
doi = {10.1145/3603555.3603571},
year = {2023},
date = {2023-09-03},
urldate = {2023-09-03},
booktitle = {Proceedings of Mensch Und Computer 2023},
publisher = {Association for Computing Machinery},
address = {Rapperswil, Switzerland},
series = {MuC\'23},
abstract = {Highly complex systems, such as software development tools, constantly gain features and, consequently, complexity and, thus, risk overwhelming or distracting the user. We argue that automation and adaptation could help users to focus on their work. However, the challenge is to correctly and promptly determine when to adapt what, as often the users\' intent is unclear. To assist software developers, we build a gaze-adaptive integrated development environment using the developers\' gaze as the source for learning appropriate adaptation. Beyond our experience of using gaze for an adaptive user interface, we also report first feedback from developers regarding the desirability of such a user interface, which indicated that adaptations for development tools need to strike a careful balance between automation and user control. Nonetheless, the developers see the value in a gaze-based adaptive user interface and how it could improve software development tools going forward.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Weber, Thomas; Mayer, Sven
Usability and Adoption of Graphical Data-Driven Development Tools Miscellaneous
2023.
@misc{weber2023usability,
title = {Usability and Adoption of Graphical Data-Driven Development Tools},
author = {Thomas Weber and Sven Mayer},
doi = {10.48550/arXiv.2311.05540},
year = {2023},
date = {2023-01-01},
keywords = {},
pubstate = {published},
tppubtype = {misc}
}

Weiss, Yannick; Villa, Steeven; Schmidt, Albrecht; Mayer, Sven; Müller, Florian
Using Pseudo-Stiffness to Enrich the Haptic Experience in Virtual Reality Proceedings Article
In: Proceedings of the 42nd ACM Conference on Human Factors in Computing Systems, Association for Computing Machinery, New York, NY, USA, 2023.
@inproceedings{weiss2023using,
title = {Using Pseudo-Stiffness to Enrich the Haptic Experience in Virtual Reality},
author = {Yannick Weiss and Steeven Villa and Albrecht Schmidt and Sven Mayer and Florian M\"{u}ller},
url = {https://sven-mayer.com/wp-content/uploads/2023/03/weiss2023using.pdf},
doi = {10.1145/3544548.3581223},
year = {2023},
date = {2023-04-23},
urldate = {2023-04-23},
booktitle = {Proceedings of the 42nd ACM Conference on Human Factors in Computing Systems},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
series = {CHI \'23},
abstract = {Providing users with a haptic sensation of the hardness and softness of objects in virtual reality is an open challenge. While physical props and haptic devices help, their haptic properties do not allow for dynamic adjustments. To overcome this limitation, we present a novel technique for changing the perceived stiffness of objects based on a visuo-haptic illusion. We achieved this by manipulating the hands\' Control-to-Display (C/D) ratio in virtual reality while pressing down on an object with fixed stiffness. In the first study (N=12), we determine the detection thresholds of the illusion. Our results show that we can exploit a C/D ratio from 0.7 to 3.5 without user detection. In the second study (N=12), we analyze the illusion\'s impact on the perceived stiffness. Our results show that participants perceive the objects to be up to 28.1% softer and 8.9% stiffer, allowing for various haptic applications in virtual reality. },
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
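
The underlying manipulation can be sketched in a few lines: the displayed press depth is the physical press depth scaled by a gain, clamped to the 0.7-3.5 range the study found to go undetected. Note that the sketch assumes the convention displayed = physical x gain; the paper frames the manipulation via the hands' Control-to-Display (C/D) ratio, so this is an illustrative reconstruction, not the study code.

# Sketch: pseudo-stiffness via a scaled hand-displacement mapping.
def displayed_depth(physical_depth_mm: float, gain: float) -> float:
    # Clamping to the reported 0.7-3.5 range keeps the illusion below
    # the detection threshold found in the first study.
    gain = max(0.7, min(gain, 3.5))
    return physical_depth_mm * gain

print(displayed_depth(10.0, 2.0))  # a 10 mm press rendered as 20 mm of travel
# More visual travel for the same physical press makes the object feel
# softer; less travel makes it feel stiffer.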

Windl, Maximiliane; Scheidle, Anna; George, Ceenu; Mayer, Sven
Investigating Security Indicators for Hyperlinking Within the Metaverse Proceedings Article
In: Ninth Symposium on Usable Privacy and Security (SOUPS 2023), USENIX Association, Anaheim, CA, 2023.
@inproceedings{windl2023investigating,
title = {Investigating Security Indicators for Hyperlinking Within the Metaverse},
author = {Maximiliane Windl and Anna Scheidle and Ceenu George and Sven Mayer},
url = {https://sven-mayer.com/wp-content/uploads/2023/08/windl2023investigating.pdf},
year = {2023},
date = {2023-08-06},
urldate = {2023-08-06},
booktitle = {Ninth Symposium on Usable Privacy and Security (SOUPS 2023)},
publisher = {USENIX Association},
address = {Anaheim, CA},
abstract = {Security indicators, such as the padlock icon indicating SSL encryption in browsers, are established mechanisms to convey secure connections. Currently, such indicators mainly exist for browsers and mobile environments. With the rise of the metaverse, we investigate how to mark secure transitions between applications in virtual reality to so-called sub-metaverses. For this, we first conducted in-depth interviews with domain experts (N=8) to understand the general design dimensions for security indicators in virtual reality (VR). Using these insights and considering additional design constraints, we implemented the five most promising indicators and evaluated them in a user study (N=25). While the visual blinking indicator placed in the periphery performed best regarding accuracy and task completion time, participants subjectively preferred the static visual indicator above the portal. Moreover, the latter received high scores regarding understandability while still being rated low regarding intrusiveness and disturbance. Our findings contribute to a more secure and enjoyable metaverse experience.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}

Windl, Maximiliane; Winterhalter, Verena; Schmidt, Albrecht; Mayer, Sven
Understanding and Mitigating Technology-Facilitated Privacy Violations in the Physical World Proceedings Article
In: Proceedings of the 42nd ACM Conference on Human Factors in Computing Systems, Association for Computing Machinery, New York, NY, USA, 2023.
@inproceedings{windl2023understanding,
title = {Understanding and Mitigating Technology-Facilitated Privacy Violations in the Physical World},
author = {Maximiliane Windl and Verena Winterhalter and Albrecht Schmidt and Sven Mayer},
url = {https://sven-mayer.com/wp-content/uploads/2023/03/windl2023understanding.pdf},
doi = {10.1145/3544548.3580909},
year = {2023},
date = {2023-04-23},
urldate = {2023-04-23},
booktitle = {Proceedings of the 42nd ACM Conference on Human Factors in Computing Systems},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
series = {CHI \'23},
abstract = {We are constantly surrounded by technology that collects and processes sensitive data, paving the way for privacy violations. Yet, current research investigating technology-facilitated privacy violations in the physical world is scattered and focused on specific scenarios or investigates such violations purely from an expert\'s perspective. Informed through a large-scale online survey, we first construct a scenario taxonomy based on user-experienced privacy violations in the physical world through technology. We then validate our taxonomy and establish mitigation strategies using interviews and co-design sessions with privacy and security experts. In summary, this work contributes (1) a refined scenario taxonomy for technology-facilitated privacy violations in the physical world, (2) an understanding of how privacy violations manifest in the physical world, (3) a decision tree on how to inform users, and (4) a design space to create notices whenever adequate. With this, we contribute a conceptual framework to enable a privacy-preserving technology-connected world.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
2022
Bemmann, Florian; Windl, Maximiliane; Erbe, Jonas; Mayer, Sven; Hussmann, Heinrich
The Influence of Transparency and Control on the Willingness of Data Sharing in Adaptive Mobile Apps Journal Article
In: Proc. ACM Hum.-Comput. Interact., vol. 6, no. MobileHCI, 2022.
@article{bemmann2022influence,
title = {The Influence of Transparency and Control on the Willingness of Data Sharing in Adaptive Mobile Apps},
author = { Florian Bemmann and Maximiliane Windl and Jonas Erbe and Sven Mayer and Heinrich Hussmann},
url = {https://sven-mayer.com/wp-content/uploads/2022/07/bemmann2022influence.pdf},
doi = {10.1145/3546724},
year = {2022},
date = {2022-09-28},
urldate = {2022-09-28},
journal = {Proc. ACM Hum.-Comput. Interact.},
volume = {6},
number = {MobileHCI},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
abstract = {Today, adaptive mobile applications use mobile sensing and user tracking, allowing for adaptation to the users\' context and needs. This raises several privacy concerns. Privacy dashboards provide transparency and sharing control; however, their impact on the users\' behavior is unclear. To shed light on the effects of (a) transparency and (b) control features, we developed a mobile sensing privacy dashboard and evaluated it in the wild (N=227). We found that the pure presentation of raw logging data is rather off-putting, and users tend to use the app less, but offering users control over the data collection can compensate for this. Users rarely used the control features, so these features did not affect the data collection. Our work informs the design of future privacy-enhancing interfaces in applications relying on passively collected mobile sensing data. Moreover, our results encourage the adoption of privacy dashboards in applications and relieve developers from concerns about negative influences of transparency information on the quality of collected data.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Chiossi, Francesco; Zagermann, Johannes; Karolus, Jakob; Rodrigues, Nils; Balestrucci, Priscilla; Weiskopf, Daniel; Ehinger, Benedikt; Feuchtner, Tiare; Reiterer, Harald; Chuang, Lewis L.; Ernst, Marc; Bulling, Andreas; Mayer, Sven; Schmidt, Albrecht
Adapting Visualizations and Interfaces to the User Journal Article
In: it - Information Technology, 2022.
@article{chiossi2022adapting,
title = {Adapting Visualizations and Interfaces to the User},
author = {Francesco Chiossi and Johannes Zagermann and Jakob Karolus and Nils Rodrigues and Priscilla Balestrucci and Daniel Weiskopf and Benedikt Ehinger and Tiare Feuchtner and Harald Reiterer and Lewis L. Chuang and Marc Ernst and Andreas Bulling and Sven Mayer and Albrecht Schmidt},
url = {https://sven-mayer.com/wp-content/uploads/2022/08/chiossi2022adapting.pdf},
doi = {10.1515/itit-2022-0035},
year = {2022},
date = {2022-08-30},
urldate = {2022-08-30},
journal = {it - Information Technology},
publisher = {De Gruyter Oldenbourg},
keywords = {},
pubstate = {published},
tppubtype = {article}
}

Chiossi, Francesco; Welsch, Robin; Villa, Steeven; Chuang, Lewis L.; Mayer, Sven
Designing a Physiological Loop for the Adaptation of Virtual Human Characters in a Social VR Scenario Proceedings Article
In: 2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops, 2022.
@inproceedings{chiossi2022designing,
title = {Designing a Physiological Loop for the Adaptation of Virtual Human Characters in a Social VR Scenario},
author = {Francesco Chiossi and Robin Welsch and Steeven Villa and Lewis L. Chuang and Sven Mayer},
url = {https://sven-mayer.com/wp-content/uploads/2022/02/chiossi2022designing.pdf
https://www.youtube.com/watch?v=OdQeaU5NvTM},
doi = {10.1109/VRW55335.2022.00140},
year = {2022},
date = {2022-03-12},
urldate = {2022-03-12},
booktitle = {2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops},
series = {VRW\'22},
abstract = {Social virtual reality is becoming mainstream, not only for entertainment purposes but also for productivity and education. This makes it important to design social VR scenarios that support the operator\'s performance. We present a physiologically-adaptive system that optimizes for visual complexity in a dual-task scenario based on electrodermal activity. Specifically, we propose a system that adapts the number of non-player characters while users jointly perform an N-Back task (primary) and a visual detection task (secondary). Our preliminary results show that when optimizing the complexity of the secondary task, users report an improved user experience.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Chiossi, Francesco; Welsch, Robin; Villa, Steeven; Chuang, Lewis L.; Mayer, Sven
Virtual Reality Adaptation using Electrodermal Activity to Support User Experience Journal Article
In: Big Data and Cognitive Computing, vol. 6, iss. 2, pp. 19, 2022.
@article{chiossi2022virtual,
title = {Virtual Reality Adaptation using Electrodermal Activity to Support User Experience},
author = {Francesco Chiossi and Robin Welsch and Steeven Villa and Lewis L. Chuang and Sven Mayer},
url = {https://sven-mayer.com/wp-content/uploads/2022/09/chiossi2022virtual.pdf},
doi = {10.3390/bdcc6020055},
year = {2022},
date = {2022-05-29},
urldate = {2022-05-29},
journal = {Big Data and Cognitive Computing},
volume = {6},
issue = {2},
pages = {19},
abstract = {Virtual reality is increasingly used for tasks such as work and education. Thus, rendering scenarios that neither interfere with such goals nor deplete the user experience is becoming progressively more relevant. We present a physiologically adaptive system that optimizes the virtual environment based on physiological arousal, i.e., electrodermal activity. We investigated the usability of the adaptive system in a simulated social virtual reality scenario. Participants completed an n-back task (primary) and a visual detection (secondary) task. Here, we adapted the visual complexity of the secondary task, i.e., the number of non-player characters, to support accomplishing the primary task. We show that an adaptive virtual reality can improve users’ comfort by adapting to physiological arousal regarding the task complexity. Our findings suggest that physiologically adaptive virtual reality systems can improve users’ experience in a wide range of scenarios.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}

Deja, Jordan Aiko; Mayer, Sven; Pucihar, Klen Čopič; Kljun, Matjaž
A Survey of Augmented Piano Prototypes: Has Augmentation Improved Learning Experiences? Honorable Mention Journal Article
In: Proc. ACM Hum.-Comput. Interact., 2022.
@article{deja2022survey,
title = {A Survey of Augmented Piano Prototypes: Has Augmentation Improved Learning Experiences?},
author = {Jordan Aiko Deja and Sven Mayer and Klen {\v{C}}opi{\v{c}} Pucihar and Matja{\v{z}} Kljun},
url = {https://sven-mayer.com/wp-content/uploads/2022/08/deja2022survey.pdf
https://arxiv.org/abs/2208.09929},
doi = {10.1145/3567719},
year = {2022},
date = {2022-11-20},
urldate = {2022-11-20},
journal = {Proc. ACM Hum.-Comput. Interact.},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
keywords = {},
pubstate = {published},
tppubtype = {article}
}

Deja, Jordan Aiko; Mayer, Sven; Pucihar, Klen Čopič; Kljun, Matjaž
The Vision of a Human-Centered Piano Proceedings Article
In: Proceedings of the 2022 Workshop on Intelligent Music Interfaces: When Interactive Assistance and Augmentation Meet Musical Instruments , 2022.
@inproceedings{deja2022vision,
title = {The Vision of a Human-Centered Piano},
author = { Jordan Aiko Deja and Sven Mayer and Klen {\v{C}}opi{\v{c}} Pucihar and Matja{\v{z}} Kljun},
url = {https://sven-mayer.com/wp-content/uploads/2022/04/deja2022vision.pdf
https://arxiv.org/abs/2204.06945},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
booktitle = {Proceedings of the 2022 Workshop on Intelligent Music Interfaces: When Interactive Assistance and Augmentation Meet Musical Instruments},
abstract = {For around 300 years, humans have been learning to play the modern piano either with a teacher or on their own. In recent years teaching and learning have been enhanced using augmented technologies that support novices. Other technologies have also tried to improve different use cases with the piano, such as composing and performing. Researchers and practitioners have showcased several forms of augmentation, from hardware improvements, sound quality, and projected visualizations to gesture-based and immersive technologies. Today, the landscape of piano augmentations is very diverse, and it is unclear how to describe the ideal piano and its features. In this work, we discuss how the human-centered piano \textendash the piano that has been designed with humans in the center of the design process and that effectively supports tasks performed on it \textendash can support pianists. In detail, we present the three tasks of learning, composing, and improvising in which a human-centered piano would be beneficial for the pianist.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}

Dietz, Dennis; Oechsner, Carl; Mayer, Sven; Butz, Andreas
Highland VR: Exploring Virtual Reality for Collaborative Balance Training Proceedings Article
In: Proceedings of the 2022 Workshop on VR [we are] training, 2022.
@inproceedings{dietz2022highland,
title = {Highland VR: Exploring Virtual Reality for Collaborative Balance Training},
author = {Dennis Dietz and Carl Oechsner and Sven Mayer and Andreas Butz},
url = {https://sven-mayer.com/wp-content/uploads/2022/05/dietz2022highland.pdf},
year = {2022},
date = {2022-04-30},
urldate = {2022-04-30},
booktitle = {Proceedings of the 2022 Workshop on VR [we are] training},
abstract = {Today, virtual reality applications mainly allow consumers to engage in immersive alternative realities for fun and entertainment. However, researchers and therapists investigate their use for skill improvement and even fear prevention. In this work, we focus on balance training in virtual reality, which is directly linked to fear of heights. We first present a high-definition virtual world supporting the training. Next, we highlight how different training attempts can support the learning process. Finally, we propose including the collaborative aspect in balance training, allowing for collaborative training and helping instructors to integrate and adapt to prior training sessions performed at home. Beyond balance training, the collaborative aspect will be helpful whenever feedback and performance review is required.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}

Dietz, Dennis; Oechsner, Carl; Ou, Changkun; Chiossi, Francesco; Sarto, Fabio; Mayer, Sven; Butz, Andreas
Walk This Beam: Impact of Different Balance Assistance Strategies and Height Exposure on Performance and Physiological Arousal in VR Best Paper Proceedings Article
In: ACM Symposium on Virtual Reality Software and Technology, ACM, 2022.
@inproceedings{dietz2022walk,
title = {Walk This Beam: Impact of Different Balance Assistance Strategies and Height Exposure on Performance and Physiological Arousal in VR},
author = {Dennis Dietz and Carl Oechsner and Changkun Ou and Francesco Chiossi and Fabio Sarto and Sven Mayer and Andreas Butz},
url = {https://sven-mayer.com/wp-content/uploads/2022/10/dietz2022walk.pdf},
doi = {10.1145/3562939.3567818},
year = {2022},
date = {2022-11-29},
urldate = {2022-11-29},
booktitle = {ACM Symposium on Virtual Reality Software and Technology},
publisher = {ACM},
series = {VRST\'22},
abstract = {Dynamic balance is an essential skill for the human upright gait; therefore, regular balance training can improve postural control and reduce the risk of injury. Even slight variations in walking conditions like height or ground conditions can significantly impact walking performance. Virtual reality is used as a helpful tool to simulate such challenging situations. However, there is no agreement on design strategies for balance training in virtual reality under stressful environmental conditions such as height exposure. We investigate how two different training strategies, imitation learning and gamified learning, can help dynamic balance control performance across different stress conditions. Moreover, we evaluate the stress response as indexed by peripheral physiological measures of stress, perceived workload, and user experience. Both approaches were tested against a baseline of no instructions and against each other. Thereby, we show that a learning-by-imitation approach immediately helps dynamic balance control, decreases stress, improves attention focus, and diminishes perceived workload. A gamified approach can lead to users being overwhelmed by the additional task. Finally, we discuss how our approaches could be adapted for balance training and applied to injury rehabilitation and prevention.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}

Eska, Bettina; Villa, Steeven; Mayer, Sven; Niess, Jasmin
Designing a Wearable Sensor-Fusion Toolkit for Motor Skill Learning Proceedings Article
In: Proceedings of the 2022 Workshop on Toolkits & Wearables: Developing Toolkits for Exploring Wearable Designs, 2022.
@inproceedings{eska2022designing,
title = {Designing a Wearable Sensor-Fusion Toolkit for Motor Skill Learning},
author = {Bettina Eska and Steeven Villa and Sven Mayer and Jasmin Niess},
url = {https://sven-mayer.com/wp-content/uploads/2022/05/eska2022designing.pdf},
year = {2022},
date = {2022-04-30},
urldate = {2022-04-30},
booktitle = {Proceedings of the 2022 Workshop on Toolkits \& Wearables: Developing Toolkits for Exploring Wearable Designs},
abstract = {User movement data is essential for providing feedback in the area of motor-skill learning. For instance, when learning a new sport such as dancing, people can benefit from meaningful technology-based feedback. However, movement tracking equipment for real-time feedback is costly and challenging to implement. In contrast, wearable devices tracking users\' movements are accessible and lightweight. While their lower cost makes them available to a broader audience, several open issues include sensor placement, sensor count, and data synchronization. To address these issues, we propose a wearable sensor-fusion approach for motor skill learning that allows researchers and developers to use one or multiple body-worn sensors for motion tracking. The extracted motion can then be used to deliver real-time feedback on the user\'s performance, supporting positive learning experiences.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
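
To make the data-synchronization problem named in the abstract above concrete, here is a minimal sketch in Python, assuming NumPy: two hypothetical body-worn IMU streams with independent, jittery clocks are resampled onto one shared time base by linear interpolation. All names and rates are illustrative, not the toolkit's API.

# Minimal sketch (not the toolkit's actual API): aligning two body-worn
# IMU streams with independent clocks onto a common time base, one of the
# open issues the abstract names. Uses linear interpolation.
import numpy as np

def resample(timestamps, samples, common_t):
    """Linearly interpolate one sensor channel onto a shared clock."""
    return np.interp(common_t, timestamps, samples)

# Hypothetical recordings: wrist IMU at ~100 Hz, ankle IMU at ~52 Hz,
# both reporting acceleration magnitude with jittery timestamps.
rng = np.random.default_rng(0)
t_wrist = np.sort(rng.uniform(0, 10, 1000))
t_ankle = np.sort(rng.uniform(0, 10, 520))
a_wrist = np.sin(2 * np.pi * 0.5 * t_wrist) + 0.05 * rng.standard_normal(1000)
a_ankle = np.cos(2 * np.pi * 0.5 * t_ankle) + 0.05 * rng.standard_normal(520)

# Fuse on a fixed 50 Hz grid so downstream feedback logic sees one stream.
common_t = np.arange(0.0, 10.0, 0.02)
fused = np.stack([resample(t_wrist, a_wrist, common_t),
                  resample(t_ankle, a_ankle, common_t)], axis=1)
print(fused.shape)  # (500, 2): one synchronized row per 20 ms tick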

Gruenefeld, Uwe; Auda, Jonas; Mathis, Florian; Schneegass, Stefan; Khamis, Mohamed; Gugenheimer, Jan; Mayer, Sven
VRception: Rapid Prototyping of Cross-Reality Systems in Virtual Reality Honorable Mention Proceedings Article
In: Proceedings of the 41st ACM Conference on Human Factors in Computing Systems, Association for Computing Machinery, New Orleans, United States, 2022.
@inproceedings{gruenefeld2022vrception,
title = {VRception: Rapid Prototyping of Cross-Reality Systems in Virtual Reality},
author = {Uwe Gruenefeld and Jonas Auda and Florian Mathis and Stefan Schneegass and Mohamed Khamis and Jan Gugenheimer and Sven Mayer},
url = {https://sven-mayer.com/wp-content/uploads/2022/03/gruenefeld2022vrception.pdf
https://github.com/UweGruenefeld/VRception
https://www.youtube.com/watch?v=siG4WCnz4u8},
doi = {10.1145/3491102.3501821},
year = {2022},
date = {2022-04-01},
urldate = {2022-04-01},
booktitle = {Proceedings of the 41st ACM Conference on Human Factors in Computing Systems},
publisher = {Association for Computing Machinery},
address = {New Orleans, United States},
series = {CHI \'22},
abstract = {Cross-reality systems empower users to transition along the reality-virtuality continuum or collaborate with others experiencing different manifestations of it. However, prototyping these systems is challenging, as it requires sophisticated technical skills, time, and often expensive hardware. We present VRception, a concept and toolkit for quick and easy prototyping of cross-reality systems. By simulating all levels of the reality-virtuality continuum entirely in Virtual Reality, our concept overcomes the asynchronicity of realities, eliminating technical obstacles. Our VRception Toolkit leverages this concept to allow rapid prototyping of cross-reality systems and easy remixing of elements from all continuum levels. We replicated six cross-reality papers using our toolkit and presented them to their authors. Interviews with them revealed that our toolkit sufficiently replicates their core functionalities and allows quick iterations. Additionally, remote participants used our toolkit in pairs to collaboratively implement prototypes in about eight minutes that they would have otherwise expected to take days.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
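
A minimal sketch of the core idea as the abstract describes it: every level of the reality-virtuality continuum is simulated as a layer inside VR, and a single continuum position cross-fades between adjacent layers. The layer names and blending scheme below are assumptions for illustration, not the toolkit's actual implementation.

# Minimal sketch (hypothetical, not the VRception code): blend weights
# for continuum "layers" rendered inside VR, driven by one slider value.
LAYERS = ["physical", "augmented", "augmented_virtuality", "virtual"]

def layer_opacities(position):
    """Map a continuum position in [0, 1] to per-layer blend weights.

    Only the two layers adjacent to the position are visible, which
    approximates a smooth transition along the continuum.
    """
    x = position * (len(LAYERS) - 1)          # scale into layer index space
    lower = min(int(x), len(LAYERS) - 2)
    frac = x - lower
    weights = [0.0] * len(LAYERS)
    weights[lower] = 1.0 - frac
    weights[lower + 1] = frac
    return dict(zip(LAYERS, weights))

print(layer_opacities(0.0))   # fully physical
print(layer_opacities(0.5))   # halfway: blend of the two middle layers
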
Gruenefeld, Uwe; Auda, Jonas; Mathis, Florian; Khamis, Mohamed; Gugenheimer, Jan; Mayer, Sven; Nebeling, Michael; Billinghurst, Mark
1st Workshop on Prototyping Cross-Reality Systems Proceedings Article
In: 2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct, IEEE, New York, NY, USA, 2022.
@inproceedings{gruenefeld2022workshop,
title = {1st Workshop on Prototyping Cross-Reality Systems},
author = {Uwe Gruenefeld and Jonas Auda and Florian Mathis and Mohamed Khamis and Jan Gugenheimer and Sven Mayer and Michael Nebeling and Mark Billinghurst},
url = {https://sven-mayer.com/wp-content/uploads/2023/03/gruenefeld2022workshop.pdf
https://crossreality.hcigroup.de/},
year = {2022},
date = {2022-10-16},
urldate = {2022-10-16},
booktitle = {2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct},
publisher = {IEEE},
address = {New York, NY, USA},
organization = {IEEE},
series = {ISMAR\'22},
abstract = {Cross-Reality (CR) systems offer different levels of virtuality to their users, enabling them to either transition along the reality-virtuality continuum or collaborate with each other across different manifestations. Many Augmented (AR) and Virtual Reality (VR) systems are inherently cross-reality since the amount of augmentation of the physical world (AR) or the influence of the physical environment (VR) varies over time. However, traditional prototyping approaches often focus on one specific manifestation, and so are less feasible for prototyping cross-reality systems. In this workshop, we aim to discuss current challenges, solutions, and opportunities that arise from prototyping CR systems and their interactions. We offer attendees a balanced mix of presentation and interactive sessions, including (provocative) research positions and video demonstrations of existing CR prototyping tools. Ultimately, the workshop aims to start a discussion inside the ISMAR community about the current challenges and novel concepts around prototyping CR systems.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}

Hirsch, Linda; Li, Jingyi; Mayer, Sven; Butz, Andreas
A Survey of Natural Design for Interaction Proceedings Article
In: Proceedings of Mensch Und Computer 2022, Association for Computing Machinery, Darmstadt, Germany, 2022.
@inproceedings{hirsch2022survery,
title = {A Survey of Natural Design for Interaction},
author = { Linda Hirsch and Jingyi Li and Sven Mayer and Andreas Butz},
url = {https://sven-mayer.com/wp-content/uploads/2022/07/hirsch2022survey.pdf},
doi = {10.1145/3543758.3543773},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
booktitle = {Proceedings of Mensch Und Computer 2022},
publisher = {Association for Computing Machinery},
address = {Darmstadt, Germany},
series = {MuC\'22},
abstract = {The term "Natural Design" has various meanings and applications within and beyond the human-computer interaction community. Yet, there is no consensus on whether it is a relevant design approach or only a descriptive term without profound meaning. We investigated the current understanding and design potential of "Natural Design" for interaction in a systematic literature review. By analyzing and rating 113 papers, we identified 47 relevant papers that applied Natural Design in different contexts. The understanding of the approach changes from nature-related inspirations to context-dependent naturalness based on increasing familiarity or expectations. We present a structured overview of these relevant papers, contribute a systematic Natural Design model for interaction and add 20 implications for applying Natural Design to natural user interfaces, natural interaction, or computation. We identified "Natural Design" as a relevant design approach to create intuitive and embedded interfaces that can profit from related concepts outside human-computer interaction.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Li, Yanhong; Kothiyal, Aditi; Weber, Thomas; Rossmy, Beat; Mayer, Sven; Hussmann, Heinrich
Designing Tangible as an Orchestration Tool for Collaborative Activities Journal Article
In: Multimodal Technologies and Interaction, 2022.
@article{li2022designing,
title = {Designing Tangible as an Orchestration Tool for Collaborative Activities},
author = {Yanhong Li and Aditi Kothiyal and Thomas Weber and Beat Rossmy and Sven Mayer and Heinrich Hussmann},
url = {https://sven-mayer.com/wp-content/uploads/2022/04/li2022designing.pdf},
doi = {10.3390/mti6050030},
year = {2022},
date = {2022-04-20},
urldate = {2022-04-20},
journal = {Multimodal Technologies and Interaction},
publisher = {MDPI},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Li, Yanhong; Liang, Meng; Preissing, Julian; Bachl, Nadine; Weber, Thomas; Mayer, Sven; Hussmann, Heinrich
A Meta-Analysis of Tangible Learning Studies from the TEI Conference Proceedings Article
In: Sixteenth International Conference on Tangible, Embedded, and Embodied Interaction, Association for Computing Machinery, Daejeon, Republic of Korea, 2022.
@inproceedings{li2022meta,
title = {A Meta-Analysis of Tangible Learning Studies from the TEI Conference},
author = {Yanhong Li and Meng Liang and Julian Preissing and Nadine Bachl and Thomas Weber and Sven Mayer and Heinrich Hussmann},
url = {https://sven-mayer.com/wp-content/uploads/2022/02/amy2022meta.pdf},
doi = {10.1145/3490149.3501313},
year = {2022},
date = {2022-02-13},
urldate = {2022-02-13},
booktitle = {Sixteenth International Conference on Tangible, Embedded, and Embodied Interaction},
publisher = {Association for Computing Machinery},
address = {Daejeon, Republic of Korea},
series = {TEI '22},
abstract = {Tangible learning has received increasing attention. However, there has been no comprehensive overview in the recent decade. This study aimed to fill the gap and reviewed 92 publications from all the TEI conference proceedings (2007\textendash2021). We analysed previous studies' characteristics (e.g., study purpose and interactive modalities) and elaborated on three common topics: collaborative tangible learning, tangibles' impacts on learning, and comparisons between tangibles and other interfaces. Three key findings were: (1)~Tangibles impact learning because they can scaffold learning, change learning behaviour, and improve learning emotion; (2)~We should see the effectiveness of tangibles with rational and critical minds. Finally, some studies emphasised the interaction of tangibles too much and ignored their metaphorical meanings. For future work, we suggest avoiding an intensive cluster on collaboration and children and, thus, considering other valuable areas, e.g., tangibles for teachers, tangibles' social and emotional impacts on students, and tangible interaction's meaning and metaphor.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}

Li, Yanhong; Gao, Zhenhan; Egger, Sabrina; Mayer, Sven; Hussmann, Heinrich
Tangible Interfaces Support Young Children's Goal Interdependence Proceedings Article
In: Proceedings of Mensch Und Computer 2022, Association for Computing Machinery, Darmstadt, Germany, 2022.
@inproceedings{li2022tangible,
title = {Tangible Interfaces Support Young Children\'s Goal Interdependence},
author = {Yanhong Li and Zhenhan Gao and Sabrina Egger and Sven Mayer and Heinrich Hussmann},
doi = {10.1145/3543758.3543782},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
booktitle = {Proceedings of Mensch Und Computer 2022},
publisher = {Association for Computing Machinery},
address = {Darmstadt, Germany},
series = {MuC\'22},
abstract = {Understanding how to contribute to group work is challenging, especially for young children. To have a productive group process, we need to know the mechanism of positive interdependence, which is a fundamental element of successful collaboration. Unfortunately, although there are many suggestions for promoting positive interdependence with tangible technologies, there are few guidelines for structuring children\'s interdependent collaboration. Therefore, we designed two tangible games, UnitRry and CollabMaze, using weak and strong goal interdependent designs. We conducted two user studies with 32 children. Our investigation revealed three main findings. First, weak and strong goal interdependent interfaces had high enjoyment and interdependence. Second, tangible interfaces help young children have more idea communication and need less task time to solve the tasks. Finally, young children using tangible interfaces were more engaged in the tasks. In the long run, our results can improve the design of tangible interfaces for young children\'s collaboration and help them have a better collaborative experience. Furthermore, our findings showed the value of tangible technologies compared with tablet applications in facilitating children\'s collaboration.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}

Li, Jingyi; Hirsch, Linda; Lu, Tianyang; Mayer, Sven; Butz, Andreas
A Touch of Realities: Car-Interior-Based Haptic Interaction Supports In-Car VR Recovery from Interruptions Proceedings Article
In: Proceedings of Mensch Und Computer 2022, Association for Computing Machinery, Darmstadt, Germany, 2022.
@inproceedings{li2022touch,
title = {A Touch of Realities: Car-Interior-Based Haptic Interaction Supports In-Car VR Recovery from Interruptions},
author = { Jingyi Li and Linda Hirsch and Tianyang Lu and Sven Mayer and Andreas Butz},
url = {https://sven-mayer.com/wp-content/uploads/2023/01/li2022touch.pdf},
doi = {10.1145/3543758.3543768},
year = {2022},
date = {2022-09-05},
urldate = {2022-09-05},
booktitle = {Proceedings of Mensch Und Computer 2022},
publisher = {Association for Computing Machinery},
address = {Darmstadt, Germany},
series = {MuC\'22},
abstract = {Real-world interruptions will challenge virtual reality (VR) users in future everyday transport. For example, while passengers are immersed at a virtual beach, an incoming phone call might interrupt their presence and relaxation. We investigated how to help users recover from such interruptions by exploring haptic and visual cues that help them recall their prior presence in VR. We approached this by developing a passive haptic display for rear-seat passengers using an interactive armrest. In a lab study (N=30), participants played with virtual sand to relax, feeling the changes in the real armrest and seeing them on the virtual beach. We compared this multi-sensory experience to the single modalities (just visuals or just haptics). The results showed that the multi-modal experience lowered awareness of the armrest more and fostered a feeling of connectedness to the virtual world after real-world interruptions. We propose using car-interior-based haptic displays to support in-car VR recovery from interruptions.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}

Oechsner, Carl; Mayer, Sven; Butz, Andreas
Challenges and Opportunities of Cooperative Robots as Cooking Appliances Proceedings Article
In: Proceedings of the 2022 Workshop on Engaging with Automation, 2022.
@inproceedings{oechsner2022challenges,
title = {Challenges and Opportunities of Cooperative Robots as Cooking Appliances},
author = {Carl Oechsner and Sven Mayer and Andreas Butz},
url = {https://sven-mayer.com/wp-content/uploads/2022/05/oechsner2022challenges.pdf},
year = {2022},
date = {2022-04-30},
urldate = {2022-04-30},
booktitle = {Proceedings of the 2022 Workshop on Engaging with Automation},
series = {AutomationXP22},
abstract = {Robots allow humans to offload repetitive tasks to be executed with high precision. However, when we enter the space of collaboration with robots, this opens up the great potential to directly support humans. In the future, we envision that cooperative robots are integrated into kitchens as every other cooking appliance. In this work, we present several scenarios where the robot supports the human in the kitchen. We then outline the opportunities of cooperative robots in kitchens and the challenges that such a setup brings.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}

Ou, Changkun; Buschek, Daniel; Mayer, Sven; Butz, Andreas
The Human in the Infinite Loop: A Case Study on Revealing and Explaining Human-AI Interaction Loop Failures Honorable Mention Proceedings Article
In: Proceedings of Mensch Und Computer 2022, Association for Computing Machinery, Darmstadt, Germany, 2022.
@inproceedings{ou2022human,
title = {The Human in the Infinite Loop: A Case Study on Revealing and Explaining Human-AI Interaction Loop Failures},
author = { Changkun Ou and Daniel Buschek and Sven Mayer and Andreas Butz},
url = {https://sven-mayer.com/wp-content/uploads/2022/07/ou2022human.pdf
https://arxiv.org/abs/2207.12761
},
doi = {10.1145/3543758.3543761},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
booktitle = {Proceedings of Mensch Und Computer 2022},
publisher = {Association for Computing Machinery},
address = {Darmstadt, Germany},
series = {MuC\'22},
abstract = {Interactive AI systems increasingly employ a human-in-the-loop strategy. This creates new challenges for the HCI community when designing such systems. We reveal and investigate some of these challenges in a case study with an industry partner, and developed a prototype human-in-the-loop system for preference-guided 3D model processing. Two 3D artists used it in their daily work for 3 months. We found that the human-AI loop often did not converge towards a satisfactory result and designed a lab study (N=20) to investigate this further. We analyze interaction data and user feedback through the lens of theories of human judgment to explain the observed human-in-the-loop failures with two key insights: 1) optimization using preferential choices lacks mechanisms to deal with inconsistent and contradictory human judgments; 2) machine outcomes, in turn, influence future user inputs via heuristic biases and loss aversion. To mitigate these problems, we propose descriptive UI design guidelines. Our case study draws attention to challenging and practically relevant imperfections in human-AI loops that need to be considered when designing human-in-the-loop systems.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}

Steuerlein, Benedict; Mayer, Sven
Conductive Fiducial Tangibles for Everyone: A Data Simulation-Based Toolkit using Deep Learning Journal Article
In: Proc. ACM Hum.-Comput. Interact., iss. 6, no. MobileHCI, 2022.
@article{steuerlein2022conductive,
title = {Conductive Fiducial Tangibles for Everyone: A Data Simulation-Based Toolkit using Deep Learning},
author = { Benedict Steuerlein and Sven Mayer},
url = {https://github.com/mimuc/Conductive-Fiducial-Marker-Simulation-Toolkit
https://sven-mayer.com/wp-content/uploads/2022/07/steuerlein2022conductive.pdf
https://youtu.be/39SGX0k20Mo},
doi = {10.1145/3546718},
year = {2022},
date = {2022-09-28},
urldate = {2022-09-28},
journal = {Proc. ACM Hum.-Comput. Interact.},
number = {MobileHCI},
issue = {6},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
abstract = {While tangibles enrich the interaction with touchscreens, with projected capacitive screens being mainstream, the recognition possibilities of tangibles are nearly lost. Deep learning approaches to improve the recognition of conductive tangibles require collecting huge amounts of data and domain-specific knowledge for hyperparameter tuning. To overcome this drawback, we present a toolkit that allows everyone to train a deep learning tangible recognizer based on simulated data. Our toolkit uses a pre-trained Generative Adversarial Network to simulate the imprint of fiducial tangibles, which we then use to train a deployable recognizer based on our pre-defined neural network architecture. Our evaluation shows that our approach can recognize fiducial tangibles such as AprilTags with an average accuracy of 99.3% and an average rotation error of only 4.9°. Thus, our toolkit is a plug-and-play solution requiring no domain knowledge and no data collection but allows designers to use deep learning approaches in their design process.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
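
A rough illustration of the simulation-based training idea, assuming NumPy; the real toolkit uses a pre-trained GAN and a deep recognizer, whereas this sketch degrades a binary tag layout into a coarse, noisy capacitive "imprint" and recognizes it by nearest template.

# Minimal sketch of the data-simulation idea (not the toolkit's pretrained
# GAN): render a binary fiducial layout, degrade it to a coarse, noisy
# capacitive "imprint", and use such simulated imprints for recognition.
import numpy as np

rng = np.random.default_rng(42)

def simulate_imprint(tag, grid=16):
    """Downsample a high-res conductive layout to a sensor-sized grid,
    then add sensor noise -- a crude stand-in for the learned simulator."""
    h, w = tag.shape
    s = h // grid
    coarse = tag.reshape(grid, s, grid, s).mean(axis=(1, 3))
    return coarse + 0.08 * rng.standard_normal(coarse.shape)

# Two hypothetical 64x64 tag layouts (the real toolkit targets AprilTags).
tag_a = (rng.random((64, 64)) > 0.5).astype(float)
tag_b = (rng.random((64, 64)) > 0.5).astype(float)

# Nearest-template recognition over simulated imprints.
templates = {"A": simulate_imprint(tag_a), "B": simulate_imprint(tag_b)}
probe = simulate_imprint(tag_a)  # a fresh noisy observation of tag A
best = min(templates, key=lambda k: np.linalg.norm(templates[k] - probe))
print("recognized:", best)  # expected: A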

Ullerich, Jamie; Windl, Maximiliane; Bulling, Andreas; Mayer, Sven
ThumbPitch: Enriching Thumb Interaction on Mobile Touchscreens using Deep Learning Proceedings Article
In: Proceedings of the 34th Australian Conference on Human-Computer Interaction Proceedings, Association for Computing Machinery, Canberra, NSW, Australia, 2022.
@inproceedings{ullerich2022thumbpitch,
title = {ThumbPitch: Enriching Thumb Interaction on Mobile Touchscreens using Deep Learning},
author = { Jamie Ullerich and Maximiliane Windl and Andreas Bulling and Sven Mayer},
url = {https://sven-mayer.com/wp-content/uploads/2022/08/ullerich2022thumbpitch.pdf},
doi = {10.1145/3572921.3572925},
year = {2022},
date = {2022-11-29},
urldate = {2022-11-29},
booktitle = {Proceedings of the 34th Australian Conference on Human-Computer Interaction Proceedings},
publisher = {Association for Computing Machinery},
address = {Canberra, NSW, Australia},
series = {OzCHI\'22},
abstract = {Today touchscreens are one of the most common input devices for everyday ubiquitous interaction. Yet, capacitive touchscreens are limited in expressiveness; thus, a large body of work has focused on extending the input capabilities of touchscreens. One promising approach is to use index finger orientation; however, this requires a two-handed interaction and poses ergonomic constraints. We propose using the thumb\'s pitch as an additional input dimension to counteract these limitations, enabling one-handed interaction scenarios. Our deep convolutional neural network detecting the thumb\'s pitch is trained on more than 230,000 ground truth images recorded using a motion tracking system. We highlight the potential of ThumbPitch by proposing several use cases that exploit the higher expressiveness, especially for one-handed scenarios. We tested three use cases in a validation study and validated our model. Our model achieved a mean error of only 11.9°.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
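
A minimal sketch, assuming PyTorch, of a convolutional regressor that maps a low-resolution capacitive frame to a single pitch angle, in the spirit of the model the abstract describes; the layer sizes and input resolution here are illustrative, not the paper's architecture.

# Minimal sketch (illustrative architecture, not the paper's): a small
# CNN regressing the thumb's pitch angle from one capacitive image.
import torch
import torch.nn as nn

class PitchNet(nn.Module):
    def __init__(self):
        super().__init__()
        self.features = nn.Sequential(
            nn.Conv2d(1, 16, 3, padding=1), nn.ReLU(), nn.MaxPool2d(2),
            nn.Conv2d(16, 32, 3, padding=1), nn.ReLU(), nn.MaxPool2d(2),
        )
        self.head = nn.Sequential(
            nn.Flatten(), nn.Linear(32 * 7 * 3, 64), nn.ReLU(),
            nn.Linear(64, 1),  # predicted pitch in degrees
        )

    def forward(self, x):
        return self.head(self.features(x))

# One hypothetical 28x12 capacitive frame (batch, channel, rows, cols).
model = PitchNet()
frame = torch.randn(1, 1, 28, 12)
print(model(frame).shape)  # torch.Size([1, 1])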

Villa, Steeven; Mayer, Sven
Cobity: A Plug-And-Play Toolbox to Deliver Haptics in Virtual Reality Proceedings Article
In: Proceedings of Mensch Und Computer 2022, Association for Computing Machinery, Darmstadt, Germany, 2022.
@inproceedings{villa2022cobity,
title = {Cobity: A Plug-And-Play Toolbox to Deliver Haptics in Virtual Reality},
author = { Steeven Villa and Sven Mayer},
url = {https://github.com/xteeven/Cobity
https://sven-mayer.com/wp-content/uploads/2022/06/villa2022cobity.pdf},
doi = {10.1145/3543758.3543775},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
booktitle = {Proceedings of Mensch Und Computer 2022},
publisher = {Association for Computing Machinery},
address = {Darmstadt, Germany},
series = {MuC\'22},
abstract = {Haptics increase the presence in virtual reality applications. However, providing room-scale haptics is an open challenge. Cobots (robotic systems that are safe for human use) are a promising approach, requiring in-depth engineering skills. Control is done on a low abstraction level and requires complex procedures and implementations. In contrast, 3D tools such as Unity allow to quickly prototype a wide range of environments for which cobots could deliver haptic feedback. To overcome this disconnect, we present Cobity, an open-source plug-and-play solution to control the cobot using the virtual environment, enabling fast prototyping of a wide range of haptic experiences. We present a Unity plugin that allows controlling the cobot using the end-effector\'s target pose (cartesian position and angles); the values are then converted into velocities and streamed to the cobot inverse kinematic solver using a specially designed C++ library. Our results show that Cobity enables rapid prototyping with high precision for haptics. We argue that Cobity simplifies the creation of a wide range of haptic feedback applications enabling designers and researchers in human-computer interaction without robotics experience to quickly prototype virtual reality experiences with haptic sensations. We highlight this potential by presenting four different show cases.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
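
The abstract outlines a pose-to-velocity mapping between Unity and the cobot's inverse-kinematics solver. Below is a minimal sketch of such a mapping in Python with NumPy, assuming a simple proportional controller with a safety clamp; all gains, limits, and names are hypothetical, and angle wrap-around is ignored for brevity.

# Minimal sketch (hypothetical controller, not Cobity's C++ library):
# turn the pose error between cobot and VR target into a velocity command.
import numpy as np

GAIN_POS, GAIN_ROT = 2.0, 1.0   # hypothetical proportional gains
V_MAX = 0.25                    # m/s clamp, since a human shares the space

def velocity_command(current_pos, target_pos, current_rpy, target_rpy):
    """6D twist (vx, vy, vz, wx, wy, wz) from the current pose error."""
    v = GAIN_POS * (np.asarray(target_pos) - np.asarray(current_pos))
    speed = np.linalg.norm(v)
    if speed > V_MAX:               # never exceed the safety speed limit
        v *= V_MAX / speed
    # Naive roll/pitch/yaw difference; a real controller would handle
    # angle wrap-around and use a proper orientation representation.
    w = GAIN_ROT * (np.asarray(target_rpy) - np.asarray(current_rpy))
    return np.concatenate([v, w])

# The VR side would stream this at a fixed rate to the IK solver.
print(velocity_command([0.3, 0.0, 0.4], [0.5, 0.1, 0.4],
                       [0.0, 0.0, 0.0], [0.0, 0.0, 0.5]))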

Villa, Steeven; Mayer, Sven; Hartcher-O'Brien, Jess; Schmidt, Albrecht; Machulla, Tonja-Katrin
Extended Mid-air Ultrasound Haptics for Virtual Reality Journal Article
In: Proc. ACM Hum.-Comput. Interact., 2022.
@article{villa2022extended,
title = {Extended Mid-air Ultrasound Haptics for Virtual Reality},
author = {Steeven Villa and Sven Mayer and Jess Hartcher-O\'Brien and Albrecht Schmidt and Tonja-Katrin Machulla},
url = {https://sven-mayer.com/wp-content/uploads/2022/10/villa2022extended.pdf
https://www.youtube.com/watch?v=Ahr2ZLrFbFE
https://www.youtube.com/watch?v=hFRNFVL8wOI},
doi = {10.1145/3567731},
year = {2022},
date = {2022-11-20},
urldate = {2022-11-20},
journal = {Proc. ACM Hum.-Comput. Interact.},
abstract = {Mid-air haptics allow bare-hand tactile stimulation; however, it has a constrained workspace, making it unsuitable for room-scale haptics. We present a novel approach to rendering mid-air haptic sensations in a large rendering volume by turning a static array into a dynamic array following the user\'s hand. We used a 6DOF robot to drive a haptic ultrasound array over a large 3D space. Our system enables rendering room-scale mid-air experiences while preserving bare-hand interaction, thus, providing tangibility for virtual environments. To evaluate our approach, we performed three evaluations. First, we performed a technical system evaluation, showcasing the feasibility of such a system. Next, we conducted three psychophysical experiments, showing that the motion does not affect the user\'s perception with high likelihood. Lastly, we explored seven use cases that showcase our system\'s potential using a user study. We discuss challenges and opportunities in how large-scale mid-air haptics can contribute toward room-scale haptic feedback. Thus, with our system, we contribute to general haptic mid-air feedback on a large scale.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}

Völkel, Sarah Theres; Schoedel, Ramona; Kaya, Lale; Mayer, Sven
User Perceptions of Extraversion in Chatbots after Repeated Use Proceedings Article
In: Proceedings of the 41st ACM Conference on Human Factors in Computing Systems, Association for Computing Machinery, New Orleans, United States, 2022.
@inproceedings{volkel2022user,
title = {User Perceptions of Extraversion in Chatbots after Repeated Use},
author = {Sarah Theres V\"{o}lkel and Ramona Schoedel and Lale Kaya and Sven Mayer},
url = {https://sven-mayer.com/wp-content/uploads/2022/04/voelkel2022user.pdf
https://www.youtube.com/watch?v=he9d989wthw
https://www.medien.ifi.lmu.de/extraversion-chatbots/},
doi = {10.1145/3491102.3502058},
year = {2022},
date = {2022-04-01},
urldate = {2022-04-01},
booktitle = {Proceedings of the 41st ACM Conference on Human Factors in Computing Systems},
publisher = {Association for Computing Machinery},
address = {New Orleans, United States},
series = {CHI\'22},
abstract = {Whilst imbuing robots and voice assistants with personality has been found to positively impact user experience, little is known about user perceptions of personality in purely text-based chatbots. In a within-subjects study, we asked N=34 participants to interact with three chatbots with different levels of Extraversion (extraverted, average, introverted), each over the course of four days. We systematically varied the chatbots\' responses to manipulate Extraversion based on work in the psycholinguistics of human behaviour. Our results show that participants perceived the extraverted and average chatbots as such, whereas verbal cues transferred from human behaviour were insufficient to create an introverted chatbot. Whilst most participants preferred interacting with the extraverted chatbot, participants engaged significantly more with the introverted chatbot as indicated by the users\' average number of written words. We discuss implications for researchers and practitioners on how to design chatbot personalities that can adapt to user preferences.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Jiang, Weiwei; Yang, Kangning; Windl, Maximiliane; Chiossi, Francesco; Tag, Benjamin; Mayer, Sven; Sarsenbayeva, Zhanna
Current Challenges of Using Wearable Devices for Online Emotion Sensing Proceedings Article
In: Proceedings of the 2022 Workshop on Future of Emotion in Human-Computer Interaction, 2022.
@inproceedings{weiwei2022current,
title = {Current Challenges of Using Wearable Devices for Online Emotion Sensing},
author = {Weiwei Jiang and Kangning Yang and Maximiliane Windl and Francesco Chiossi and Benjamin Tag and Sven Mayer and Zhanna Sarsenbayeva},
url = {https://sven-mayer.com/wp-content/uploads/2022/04/weiwei2022current.pdf
https://arxiv.org/abs/2208.05206},
doi = {10.48550/arXiv.2208.05206},
year = {2022},
date = {2022-04-30},
urldate = {2022-04-30},
booktitle = {Proceedings of the 2022 Workshop on Future of Emotion in Human-Computer Interaction},
abstract = {A growing number of wearable devices is becoming increasingly non-invasive, readily available, and versatile for measuring different physiological signals. This renders them ideal for inferring the emotional states of their users. Despite the success of wearable devices in recent emotion studies, there are still several challenges to be addressed. In this position paper, we compare currently available wearables that can be used for emotion sensing and identify the challenges and opportunities for future researchers. Our investigation opens the discussion of what is missing for in-the-wild emotion-sensing studies.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}

Windl, Maximiliane; Mayer, Sven
The Skewed Privacy Concerns of Bystanders in Smart Environments Journal Article
In: Proc. ACM Hum.-Comput. Interact., iss. 6, no. MobileHCI, 2022.
@article{windl2022skewed,
title = {The Skewed Privacy Concerns of Bystanders in Smart Environments},
author = { Maximiliane Windl and Sven Mayer},
url = {https://sven-mayer.com/wp-content/uploads/2022/07/windl2022skewed.pdf
https://maximiliane-windl.com/skewed-bystanders/},
doi = {10.1145/3546719},
year = {2022},
date = {2022-09-28},
urldate = {2022-09-28},
journal = {Proc. ACM Hum.-Comput. Interact.},
number = {MobileHCI},
issue = {6},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
abstract = {As ubiquitous computing brings sensors and actuators directly into our homes, they introduce privacy concerns for the owners and bystanders. However, privacy concerns may vary among devices and depend on the bystanders\' social relation to the owner. In this work, we hypothesize 1) that bystanders assign more privacy concerns to smart home devices than personal computing devices such as smartphones, even though they have the same capabilities, and 2) a stronger social relationship mitigates some of the bystanders\' privacy concerns. By conducting an online survey (n=170), we found that personal computing devices are perceived as significantly less privacy concerning than smart home devices while having equal capabilities. By varying the assumed social relationship, we further found that a stronger connection to the owner reduces privacy concerns. Thus, as bystanders underestimate the risk of personal computing devices and are generally concerned about smart home devices, it is essential to alert the user about the presence of both. We argue that bystanders have to be informed about the privacy risks while entering a new space, in the best case, already in the entrance area.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Zagermann, Johannes; Hubenschmid, Sebastian; Balestrucci, Priscilla; Feuchtner, Tiare; Mayer, Sven; Ernst, Marc O; Schmidt, Albrecht; Reiterer, Harald
Complementary Interfaces for Visual Computing Journal Article
In: it - Information Technology, vol. 64, no. 5, 2022.
@article{zagermann2022complementary,
title = {Complementary Interfaces for Visual Computing},
author = {Johannes Zagermann and Sebastian Hubenschmid and Priscilla Balestrucci and Tiare Feuchtner and Sven Mayer and Marc O Ernst and Albrecht Schmidt and Harald Reiterer},
url = {https://sven-mayer.com/wp-content/uploads/2022/11/zagermann2022complementary.pdf},
doi = {10.1515/itit-2022-0031},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {it - Information Technology},
volume = {64},
number = {5},
publisher = {De Gruyter Oldenbourg},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Zhang, Guanhua; Hindennach, Susanne; Leusmann, Jan; Bühler, Felix; Steuerlein, Benedict; Mayer, Sven; Bâce, Mihai; Bulling, Andreas
Predicting Next Actions and Latent Intents during Text Formatting Proceedings Article
In: Proceedings of the CHI Workshop Computational Approaches for Understanding, Generating, and Adapting User Interfaces, pp. 1–6, 2022.
@inproceedings{zhang2022predicting,
title = {Predicting Next Actions and Latent Intents during Text Formatting},
author = { Guanhua Zhang and Susanne Hindennach and Jan Leusmann and Felix B\"{u}hler and Benedict Steuerlein and Sven Mayer and Mihai B\^{a}ce and Andreas Bulling},
url = {https://sven-mayer.com/wp-content/uploads/2022/08/zhang2022predicting.pdf
https://perceptualui.org/publications/zhang22_caugaui/},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
booktitle = {Proceedings of the CHI Workshop Computational Approaches for Understanding, Generating, and Adapting User Interfaces},
pages = {1--6},
abstract = {In this work we investigate the challenging task of predicting user intents from mouse and keyboard input as well as gaze behaviour. In contrast to prior work we study intent prediction at two different resolutions on the behavioural timeline: predicting future input actions as well as latent intents to achieve a high-level interaction goal. Results from a user study (N=15) on a sample text formatting task show that the sequence of prior actions is more informative for intent prediction than gaze. Only using the action sequence, we can predict the next action and the high-level intent with an accuracy of 66% and 96%, respectively. In contrast, accuracy when using features extracted from gaze behaviour was significantly lower, at 41% and 46%. This finding is important for the development of future anticipatory user interfaces that aim to proactively adapt to user intents and interaction goals.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
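
Since the study found the sequence of prior actions most informative, a bigram frequency model already illustrates the prediction task; the sketch below, with made-up action labels, predicts the action most often observed after the current one. The paper's actual models are richer.

# Minimal sketch of action-sequence-based next-action prediction: a
# bigram model over hypothetical logged formatting actions.
from collections import Counter, defaultdict

def train_bigram(sequences):
    counts = defaultdict(Counter)
    for seq in sequences:
        for prev, nxt in zip(seq, seq[1:]):
            counts[prev][nxt] += 1
    return counts

def predict_next(counts, action):
    follow = counts.get(action)
    return follow.most_common(1)[0][0] if follow else None

# Hypothetical logged formatting sessions (mouse/keyboard action labels).
logs = [
    ["select_text", "click_bold", "select_text", "click_italic"],
    ["select_text", "click_bold", "select_text", "click_bold"],
    ["scroll", "select_text", "click_bold"],
]
model = train_bigram(logs)
print(predict_next(model, "select_text"))  # -> click_bold
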
2021

Ahuja, Karan; Mayer, Sven; Goel, Mayank; Harrison, Chris
Pose-on-the-Go: Approximating User Pose with Smartphone Sensor Fusion and Inverse Kinematics Proceedings Article
In: Proceedings of the 2021 CHI Conference on Human Factors in Computing Systems, Association for Computing Machinery, New York, New York, USA, 2021.
@inproceedings{ahuja2021pose,
title = {Pose-on-the-Go: Approximating User Pose with Smartphone Sensor Fusion and Inverse Kinematics},
author = {Karan Ahuja and Sven Mayer and Mayank Goel and Chris Harrison},
url = {https://sven-mayer.com/wp-content/uploads/2021/05/ahuja2021pose.pdf
https://youtu.be/msU1M8Z3mTU},
doi = {10.1145/3411764.3445582},
year = {2021},
date = {2021-05-08},
urldate = {2021-05-08},
booktitle = {Proceedings of the 2021 CHI Conference on Human Factors in Computing Systems},
publisher = {Association for Computing Machinery},
address = {New York, New York, USA},
series = {CHI \'21},
abstract = {We present Pose-on-the-Go, a full-body pose estimation system that uses sensors already found in today’s smartphones. This stands in contrast to prior systems, which require worn or external sensors. We achieve this result via extensive sensor fusion, leveraging a phone’s front and rear cameras, the user-facing depth camera, touchscreen, and IMU. Even still, we are missing data about a user’s body (e.g., angle of the elbow joint), and so we use inverse kinematics to estimate and animate probable body poses. We provide a detailed evaluation of our system, benchmarking it against a professional-grade Vicon tracking system. We conclude with a series of demonstration applications that underscore the unique potential of our approach, which could be enabled on many modern smartphones with a simple software update.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
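
One step the abstract mentions is using inverse kinematics to estimate unsensed joints such as the elbow. As a hedged illustration (not the paper's solver), the sketch below recovers an interior elbow angle from shoulder and wrist positions and assumed segment lengths via the law of cosines.

# Minimal sketch: 2-link inverse kinematics for an unsensed elbow joint.
# Segment lengths and positions are purely illustrative numbers.
import math

def elbow_angle(shoulder, wrist, upper_arm=0.30, forearm=0.27):
    """Interior elbow angle (radians) of a 2-link arm reaching `wrist`."""
    d = math.dist(shoulder, wrist)           # shoulder-to-wrist distance
    d = min(d, upper_arm + forearm - 1e-9)   # clamp unreachable targets
    # Law of cosines: d^2 = a^2 + b^2 - 2ab*cos(theta)
    cos_t = (upper_arm**2 + forearm**2 - d**2) / (2 * upper_arm * forearm)
    return math.acos(max(-1.0, min(1.0, cos_t)))

angle = elbow_angle((0.0, 0.0, 0.0), (0.45, 0.10, 0.0))
print(round(math.degrees(angle)))  # ~108 deg: a moderately bent elbow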

Auda, Jonas; Verheyen, Nils; Mayer, Sven; Schneegass, Stefan
Flyables: Haptic Input Devices for Virtual Reality using Quadcopters Honorable Mention Proceedings Article
In: ACM Symposium on Virtual Reality Software and Technology, 2021.
@inproceedings{auda2021flyables,
title = {Flyables: Haptic Input Devices for Virtual Reality using Quadcopters},
author = {Jonas Auda and Nils Verheyen and Sven Mayer and Stefan Schneegass},
url = {https://sven-mayer.com/wp-content/uploads/2021/10/auda2021flyables.pdf},
doi = {10.1145/3489849.3489855},
year = {2021},
date = {2021-12-08},
urldate = {2021-12-08},
booktitle = {ACM Symposium on Virtual Reality Software and Technology},
series = {VRST \'21},
abstract = {Virtual Reality (VR) has made its way into everyday life. While VR delivers an ever-increasing level of immersion, controls and their haptics are still limited. Current VR headsets come with dedicated controllers that are used to control every virtual interface element. However, the controller input mostly differs from the virtual interface. This reduces immersion. To provide a more realistic input, we present Flyables, a toolkit that provides matching haptics for virtual user interface elements using quadcopters. We took five common virtual UI elements and built their physical counterparts. We attached them to quadcopters to deliver on-demand haptic feedback. In a user study, we compared Flyables to controller-based VR input. While controllers still outperform Flyables in terms of precision and task completion time, we found that Flyables present a more natural and playful way to interact with VR environments. Based on the results from the study, we outline research challenges that could improve interaction with Flyables in the future.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}

Choi, Frederick; Mayer, Sven; Harrison, Chris
3D Hand Pose Estimation on Conventional Capacitive Touchscreens Proceedings Article
In: Proceedings of the 23rd International Conference on Mobile Human-Computer Interaction, 2021.
@inproceedings{choi:2021:hand,
title = {3D Hand Pose Estimation on Conventional Capacitive Touchscreens},
author = {Frederick Choi and Sven Mayer and Chris Harrison},
url = {https://youtu.be/9ghxsNzqkSw
https://github.com/figlab/3DHandPose
https://sven-mayer.com/wp-content/uploads/2021/09/choi2021hand.pdf},
doi = {10.1145/3447526.3472045},
year = {2021},
date = {2021-09-27},
urldate = {2021-09-27},
booktitle = {Proceedings of the 23rd International Conference on Mobile Human-Computer Interaction},
series = {MobileHCI\'21},
abstract = {Contemporary mobile devices with touchscreens capture the X/Y position of finger tips on the screen and pass these coordinates to applications as though the input were points in space. Of course, human hands are much more sophisticated, able to form rich 3D poses capable of far more complex interactions than poking at a screen. In this paper, we describe how conventional capacitive touchscreens can be used to estimate 3D hand pose, enabling richer interaction opportunities. Importantly, our software-only approach requires no special or new sensors, either internal or external. As a proof of concept, we use an off-the-shelf Samsung Tablet flashed with a custom kernel. After describing our software pipeline, we report findings from our user study, which shows our 3D joint tracking accuracy is around. We conclude with several example applications we built to illustrate the potential of our approach. },
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}

Toure, Donovan; Welsch, Robin; Mayer, Sven
The Future of Proxemic Interaction in Smart Factories Proceedings Article
In: Proceedings of the Automation Experience at the Workplace, 2021.
@inproceedings{donovan2021future,
title = {The Future of Proxemic Interaction in Smart Factories},
author = {Donovan Toure and Robin Welsch and Sven Mayer },
url = {https://sven-mayer.com/wp-content/uploads/2021/05/toure2021future.pdf},
year = {2021},
date = {2021-05-07},
urldate = {2021-05-07},
booktitle = {Proceedings of the Automation Experience at the Workplace},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}

Le, Huy Viet; Mayer, Sven; Henze, Niels
Deep Learning for Human-Computer Interaction Journal Article
In: Interactions, 2021.
@article{huy2021deep,
title = {Deep Learning for Human-Computer Interaction},
author = {Huy Viet Le and Sven Mayer and Niels Henze},
url = {https://sven-mayer.com/wp-content/uploads/2021/01/huy2021deep.pdf},
doi = {10.1145/3436958},
year = {2021},
date = {2021-01-01},
urldate = {2021-01-01},
journal = {Interactions},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
institution = {ACM},
keywords = {},
pubstate = {published},
tppubtype = {article}
}

Mayer, Sven; Xu, Xiangyu; Harrison, Chris
Super-Resolution Capacitive Touchscreens Proceedings Article
In: Proceedings of the 2021 CHI Conference on Human Factors in Computing Systems, Association for Computing Machinery, New York, New York, USA, 2021.
@inproceedings{mayer2021super,
title = {Super-Resolution Capacitive Touchscreens},
author = {Sven Mayer and Xiangyu Xu and Chris Harrison},
url = {https://sven-mayer.com/wp-content/uploads/2021/01/mayer2021super.pdf
https://youtu.be/vyLD2CSOMCE
https://github.com/FIGLAB/Super-Resolution-Dataset},
doi = {10.1145/3411764.3445703},
year = {2021},
date = {2021-05-08},
urldate = {2021-05-08},
booktitle = {Proceedings of the 2021 CHI Conference on Human Factors in Computing Systems},
publisher = {Association for Computing Machinery},
address = {New York, New York, USA},
series = {CHI \'21},
abstract = {Capacitive touchscreens are near-ubiquitous in today’s touch-driven devices, such as smartphones and tablets. By using rows and columns of electrodes, specialized touch controllers are able to capture a 2D image of capacitance at the surface of a screen. For over a decade, capacitive "pixels" have been around 4 millimeters in size \textendash a surprisingly low resolution that precludes a wide range of interesting applications. In this paper, we show how super-resolution techniques, long used in fields such as biology and astronomy, can be applied to capacitive touchscreen data. By integrating data from many frames, our software-only process is able to resolve geometric details finer than the original sensor resolution. This opens the door to passive tangibles with higher-density fiducials and also recognition of every-day metal objects, such as keys and coins. We built several applications to illustrate the potential of our approach and report the findings of a multipart evaluation.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
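
A toy version of the multi-frame super-resolution family the paper draws on: "shift-and-add" accumulates many low-resolution frames with known sub-pixel offsets onto a finer grid. The data and offsets below are synthetic, and the paper's actual pipeline is considerably more sophisticated.

# Minimal sketch of classic shift-and-add super-resolution on synthetic
# data (not the paper's pipeline): many coarse frames, one finer image.
import numpy as np

def shift_and_add(frames, offsets, scale=4):
    """Accumulate LR frames on a grid `scale`x finer, using each frame's
    known sub-pixel (dy, dx) offset, then average the per-cell hits."""
    h, w = frames[0].shape
    acc = np.zeros((h * scale, w * scale))
    hits = np.zeros_like(acc)
    for frame, (dy, dx) in zip(frames, offsets):
        ys = (np.arange(h) * scale + int(round(dy * scale))) % (h * scale)
        xs = (np.arange(w) * scale + int(round(dx * scale))) % (w * scale)
        acc[np.ix_(ys, xs)] += frame
        hits[np.ix_(ys, xs)] += 1
    return acc / np.maximum(hits, 1)

rng = np.random.default_rng(1)
truth = rng.random((32, 32))      # hypothetical fine-grained scene
offsets = [(rng.random(), rng.random()) for _ in range(64)]
# Each toy 8x8 "capacitive frame" observes the scene under its offset.
frames = [np.roll(truth, (-int(round(dy * 4)), -int(round(dx * 4))),
                  (0, 1))[::4, ::4] for dy, dx in offsets]
sr = shift_and_add(frames, offsets)
print(sr.shape)  # (32, 32) -- finer than any single 8x8 frame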

Penzkofer, Anna; Müller, Philipp; Bühler, Felix; Mayer, Sven; Bulling, Andreas
ConAn: A Usable Tool for Multimodal Conversation Analysis Proceedings Article
In: Proceedings of the 2021 International Conference on Multimodal Interaction, 2021.
@inproceedings{penzkofer2021conan,
title = {ConAn: A Usable Tool for Multimodal Conversation Analysis},
author = {Anna Penzkofer and Philipp M\"{u}ller and Felix B\"{u}hler and Sven Mayer and Andreas Bulling},
url = {https://youtu.be/H2KfZNgx6CQ
https://perceptualui.org/publications/penzkofer21_icmi/
https://sven-mayer.com/wp-content/uploads/2022/02/penzkofer2021conan.pdf},
doi = {10.1145/3462244.3479886},
year = {2021},
date = {2021-10-18},
urldate = {2021-10-18},
booktitle = {Proceedings of the 2021 International Conference on Multimodal Interaction},
series = {ICMI'21},
abstract = {Multimodal analysis of group behavior is a key task in human-computer interaction, and in the social and behavioral sciences, but is often limited to more easily controllable laboratory settings or requires elaborate multi-sensor setups and time-consuming manual data annotation. We present ConAn \textendash a usable tool to explore and automatically analyze non-verbal behavior of multiple persons during natural group conversations. In contrast to traditional multi-sensor setups, our tool only requires a single 360° camera and uses state-of-the-art computer vision methods to automatically extract behavioral indicators, such as gaze direction, facial expressions, and speaking activity. As such, our tool allows for easy and fast deployment and supports researchers in understanding individual behavior, group interaction dynamics, and in quantifying user-object interactions. We illustrate the benefits of ConAn on three sample use cases: conversation analysis, assessment of collaboration quality, and impact of technology on audience behavior. Taken together, ConAn represents an important step towards democratizing automatic conversation analysis in HCI and beyond.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}

Prange, Sarah; Mayer, Sven; Bittl, Maria-Lena; Hassib, Mariam; Alt, Florian
Investigating User Perceptions Towards Wearable Mobile Electromyography Proceedings Article
In: Human-Computer Interaction -- INTERACT 2021, pp. 339–360, Springer International Publishing, Cham, 2021, ISBN: 978-3-030-85610-6.
@inproceedings{prange2021investigatingb,
title = {Investigating User Perceptions Towards Wearable Mobile Electromyography},
author = {Sarah Prange and Sven Mayer and Maria-Lena Bittl and Mariam Hassib and Florian Alt },
url = {https://sven-mayer.com/wp-content/uploads/2021/08/prange2021investigating.pdf},
doi = {10.1007/978-3-030-85610-6_20},
isbn = {978-3-030-85610-6},
year = {2021},
date = {2021-08-30},
urldate = {2021-08-30},
booktitle = {Human-Computer Interaction -- INTERACT 2021},
pages = {339--360},
publisher = {Springer International Publishing},
address = {Cham},
abstract = {Wearables capture physiological user data, enabling novel user interfaces that can identify users, adapt to the user state, and contribute to the quantified self. At the same time, little is known about users' perception of this new technology. In this paper, we present findings from a user study (N = 36) in which participants used an electromyography (EMG) wearable and a visualization of data collected from EMG wearables. We found that participants are highly unaware of what EMG data can reveal about them. Allowing them to explore their physiological data makes them more reluctant to share this data. We conclude with deriving guidelines, to help designers of physiological data-based user interfaces to (a) protect users' privacy, (b) better inform them, and (c) ultimately support the uptake of this technology.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}

Rothe, Sylvia; Welsch, Robin; Mayer, Sven
Spatial Sound Concepts for F-Formations in Social VR Proceedings Article
In: Proceedings of the 2021 Social VR Workshop – A New Medium for Remote Communication and Collaboration, 2021.
@inproceedings{rothe2021spatial,
title = {Spatial Sound Concepts for F-Formations in Social VR},
author = {Sylvia Rothe and Robin Welsch and Sven Mayer},
url = {https://sven-mayer.com/wp-content/uploads/2021/05/rothe2021spatial.pdf},
year = {2021},
date = {2021-05-07},
urldate = {2021-05-07},
booktitle = {Proceedings of the 2021 Social VR Workshop \textendash A New Medium for Remote Communication and Collaboration},
abstract = {Directional audio is key for fluent conversations in the virtual world. To allow directional audio, F-formations (facing formations) are core to understand the constellation between conversation partners. We present our approach to developing a sound concept in Social VR based on F-formations. For this, we introduce several F-formations and explain requirements for the sound design. We discuss our first experiences in observing several communication situations. },
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Schmidt, Albrecht; Mayer, Sven; Buschek, Daniel
Introduction to Intelligent User Interfaces Proceedings Article
In: Conference on Human Factors in Computing Systems Extended Abstracts, Association for Computing Machinery, 2021.
@inproceedings{schmidt2021introduction,
title = {Introduction to Intelligent User Interfaces},
author = {Albrecht Schmidt and Sven Mayer and Daniel Buschek},
url = {https://sven-mayer.com/wp-content/uploads/2021/01/schmidt2021introduction.pdf
https://iui-lecture.org/},
doi = {10.1145/3411763.3445021},
year = {2021},
date = {2021-05-08},
urldate = {2021-05-08},
booktitle = {Conference on Human Factors in Computing Systems Extended Abstracts},
publisher = {Association for Computing Machinery},
series = {CHI EA\'21},
abstract = {Recent advancements in artificial intelligence (AI) create new opportunities for implementing a wide range of intelligent user interfaces. Speech-based interfaces, chatbots, visual recognition of users and objects, recommender systems, and adaptive user interfaces are examples that have matured over the last 10 years due to new approaches in machine learning (ML). Modern ML techniques outperform previous approaches in many domains and enable new applications. Today, it is possible to run models efficiently on various devices, including PCs, smartphones, and embedded systems. Leveraging the potential of artificial intelligence and combining it with human-computer interaction approaches allows developing intelligent user interfaces that support users better than ever before. This course introduces participants to terms and concepts relevant in AI and ML. Using examples and application scenarios, we practically show how intelligent user interfaces can be designed and implemented. In particular, we look at how to create optimized keyboards, use natural language processing for text and speech-based interaction, and how to implement a recommender system for movies. Thus, this course aims to introduce participants to a set of machine learning tools that will enable them to build their own intelligent user interfaces. This course will include video-based lectures to introduce concepts and algorithms supported by practical and interactive exercises using python notebooks.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
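
One exercise the course description names is implementing a movie recommender. As a sketch of one underlying idea (user-based collaborative filtering with cosine similarity), assuming NumPy and a made-up rating matrix:

# Minimal sketch: user-based collaborative filtering on a tiny,
# hypothetical rating matrix (rows = users, columns = movies).
import numpy as np

ratings = np.array([  # 0 = unrated, otherwise 1-5 stars (made up)
    [5, 4, 0, 1],
    [4, 5, 1, 0],
    [1, 0, 5, 4],
])

def recommend(user, k=1):
    """Score unrated movies by similarity-weighted ratings of other users."""
    norms = np.linalg.norm(ratings, axis=1)
    sims = ratings @ ratings[user] / (norms * norms[user])
    sims[user] = 0.0                         # ignore self-similarity
    scores = sims @ ratings                  # weighted vote per movie
    scores[ratings[user] > 0] = -np.inf      # only recommend unseen movies
    return np.argsort(scores)[::-1][:k]

print(recommend(0))  # -> [2]: the movie favored by the most similar user
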
Strohm, Florian; Sood, Ekta; Mayer, Sven; Müller, Philipp; Bâce, Mihai; Bulling, Andreas
Neural Photofit: Gaze-based Mental Image Reconstruction Proceedings Article
In: Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), 2021.
@inproceedings{strohm2021neural,
title = {Neural Photofit: Gaze-based Mental Image Reconstruction},
author = {Florian Strohm and Ekta Sood and Sven Mayer and Philipp M\"{u}ller and Mihai B\^{a}ce and Andreas Bulling},
url = {https://perceptualui.org/publications/strohm21_iccv/
https://sven-mayer.com/wp-content/uploads/2021/10/strohm21neural.pdf
https://arxiv.org/abs/2108.07524},
year = {2021},
date = {2021-10-11},
urldate = {2021-10-11},
booktitle = {Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV)},
series = {ICCV'21},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}

Vogelsang, Jonas; Kiss, Francisco; Mayer, Sven
A Design Space for User Interface Elements using Finger Orientation Input Proceedings Article
In: Proceedings of Mensch und Computer 2021, 2021.
@inproceedings{vogelsang2021design,
title = {A Design Space for User Interface Elements using Finger Orientation Input},
author = {Jonas Vogelsang and Francisco Kiss and Sven Mayer},
url = {https://sven-mayer.com/wp-content/uploads/2021/09/vogelsang2021design.pdf},
doi = {10.1145/3473856.3473862},
year = {2021},
date = {2021-09-05},
urldate = {2021-09-05},
booktitle = {Proceedings of Mensch und Computer 2021},
series = {MuC '21},
abstract = {Despite touchscreens being used by billions of people every day, today’s touch-based interactions are limited in their expressiveness as they mostly reduce the rich information of the finger down to a single 2D point. Researchers have proposed using finger orientation as input to overcome these limitations, adding two extra dimensions - the finger’s pitch and yaw angles. While finger orientation has been studied in-depth over the last decade, we describe an updated design space. Therefore, we present expert interviews combined with a literature review to describe the wide range of finger orientation input opportunities. First, we present a comprehensive set of finger orientation input enhanced user interface elements supported by expert interviews. Second, we extract design implications as a result of the additional input parameters. Finally, we introduce a design space for finger orientation input.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Welsch, Robin; Rothe, Sylvia; Mayer, Sven
Proxemics in Virtual Reality: What Should We Put to the Test in Social VR? Proceedings Article
In: Proceedings of the 2021 Social VR Workshop – A New Medium for Remote Communication and Collaboration, 2021.
@inproceedings{welsch2021proxemics,
title = {Proxemics in Virtual Reality: What Should We Put to the Test in Social VR?},
author = {Robin Welsch and Sylvia Rothe and Sven Mayer},
url = {https://sven-mayer.com/wp-content/uploads/2021/05/welsch2021proxemics.pdf},
year = {2021},
date = {2021-05-07},
urldate = {2021-05-07},
booktitle = {Proceedings of the 2021 Social VR Workshop \textendash A New Medium for Remote Communication and Collaboration},
abstract = {When approaching another user in social VR, there comes the point where we start to feel uncomfortable and intruded upon. Therefore, users maintain a personal space that is kept clear from others, much like in the real world. Although many determinants on the size of personal space have been identified, the process of maintaining and constructing a personal space in social VR is not well investigated, especially in multi-user environments. In the following, we will present the most important developments within the proxemic research field and raise opportunities and challenges for proxemics in social VR. },
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}

Zhang, Yang; Mayer, Sven; Gonzalez, Jesse T.; Harrison, Chris
Vibrosight++: City-Scale Sensing Using Existing Retroreflective Signs and Markers Proceedings Article
In: Proceedings of the 2021 CHI Conference on Human Factors in Computing Systems, Association for Computing Machinery, New York, New York, USA, 2021.
@inproceedings{zhang2021vibrosight,
title = {Vibrosight++: City-Scale Sensing Using Existing Retroreflective Signs and Markers},
author = {Yang Zhang and Sven Mayer and Jesse T. Gonzalez and Chris Harrison},
url = {https://sven-mayer.com/wp-content/uploads/2021/05/zhang2021vibrosight.pdf
https://www.youtube.com/watch?v=jcXpEXoanJE},
doi = {10.1145/3411764.3445054},
year = {2021},
date = {2021-05-08},
urldate = {2021-05-08},
booktitle = {Proceedings of the 2021 CHI Conference on Human Factors in Computing Systems},
publisher = {Association for Computing Machinery},
address = {New York, New York, USA},
series = {CHI \'21},
abstract = {Today’s smart cities use thousands of physical sensors distributed across the urban landscape to support decision making in areas such as infrastructure monitoring, public health, and resource management. These weather-hardened devices require power and connectivity, and often cost thousands just to install, let alone maintain. In this paper, we show how long-range laser vibrometry can be used for low-cost, city-scale sensing. Although typically limited to just a few meters of sensing range, the use of retroreflective markers can boost this to 1km or more. Fortuitously, cities already make extensive use of retroreflective materials for street signs, construction barriers, road studs, license plates, and many other markings. We describe how our prototype system can co-opt these existing markers at very long ranges and use them as unpowered accelerometers for use in a wide variety of sensing applications.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
2020

Auda, Jonas; Gruenefeld, Uwe; Mayer, Sven
It Takes Two To Tango: Conflicts Between Users on the Reality-Virtuality Continuum and Their Bystanders Proceedings Article
In: Proceedings of the International Workshop on Cross-Reality (XR) Interaction, 2020.
@inproceedings{auda2020tango,
title = {It Takes Two To Tango: Conflicts Between Users on the Reality-Virtuality Continuum and Their Bystanders},
author = {Jonas Auda and Uwe Gruenefeld and Sven Mayer},
url = {https://sven-mayer.com/wp-content/uploads/2020/11/auda2020tango.pdf
https://www.youtube.com/watch?v=h_Qe0et2dlU},
year = {2020},
date = {2020-11-08},
urldate = {2020-11-08},
booktitle = {Proceedings of the International Workshop on Cross-Reality (XR) Interaction},
series = {XR \'20},
abstract = {Over the last years, Augmented and Virtual Reality technology has become more immersive. However, when users immerse themselves in these digital realities, they detach from their real-world environments. This detachment creates conflicts that are problematic in public spaces such as planes but also in private settings. Consequently, on the one hand, detaching from the world creates an immersive experience for the user; on the other hand, it creates a social conflict with bystanders. With this work, we highlight and categorize social conflicts caused by using immersive digital realities. We first present different social settings in which social conflicts arise and then provide an overview of investigated scenarios. Finally, we present research opportunities that help to address social conflicts between immersed users and bystanders.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}

Le, Huy Viet; Mayer, Sven; Henze, Niels
Imprint-Based Input Techniques for Touch-Based Mobile Devices Proceedings Article
In: 19th International Conference on Mobile and Ubiquitous Multimedia, 2020.
@inproceedings{huy2020imprint,
title = {Imprint-Based Input Techniques for Touch-Based Mobile Devices},
author = {Huy Viet Le and Sven Mayer and Niels Henze},
url = {https://sven-mayer.com/wp-content/uploads/2020/10/le2020imprint.pdf},
doi = {10.1145/3428361.3428393},
year = {2020},
date = {2020-11-22},
urldate = {2020-11-22},
booktitle = {19th International Conference on Mobile and Ubiquitous Multimedia},
series = {MUM 2020},
abstract = {Touchscreens translate touches of all kinds into 2D coordinates. This limits the input vocabulary and constrains effective interaction to touches by the fingertip. Previous tabletop research extended the input vocabulary with a myriad of promising input techniques using the shape of fingers and hands. However, these techniques are not applicable to mobile devices due to differences in size, ergonomics, and technology. We conducted ideation sessions (N=17) to explore novel input techniques and use cases for imprint-based touch sensing on mobile devices. As a case study, we present FlexionTouch, a novel input technique that recognizes the finger flexion on a touchscreen. Using the finger flexion as an additional input dimension, FlexionTouch provides an always-available shortcut and can be used for value inputs, document previews, and gestures. We propose five example use cases for FlexionTouch input which we evaluated in a second user study (N=20). While the low resolution of the capacitive images leads to a less accurate input compared to tabletops, participants still find the presented use cases helpful. As our input technique is purely software-based, it can be readily deployed to every mobile device with a capacitive touchscreen.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}

Kiss, Francisco; Mayer, Sven; Schwind, Valentin
Audio VR: Did Video Kill the Radio Star? Journal Article
In: Interactions, vol. 27, no. 3, pp. 46–51, 2020, ISSN: 1072-5520.
@article{Kiss2020AudioVR,
title = {Audio VR: Did Video Kill the Radio Star?},
author = { Francisco Kiss and Sven Mayer and Valentin Schwind},
url = {http://sven-mayer.com/wp-content/uploads/2020/04/kiss2020audiovr.pdf},
doi = {10.1145/3386385},
issn = {1072-5520},
year = {2020},
date = {2020-01-01},
journal = {Interactions},
volume = {27},
number = {3},
pages = {46\textendash51},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
abstract = {Today we see a global trend emphasizing the visual over other media. Visual media pervades our lives in the form of text, images, and video. Already in the mid-18th century, humans were fascinated by visual and often animated projections, such as magic lantern shows. With the advent of cinematography, silent films pulled crowds into the first movie theaters. However, the dominant media for information exchange was the ubiquitous newspaper, created in the 17th century. Starting in the 1950s, television gained dominance in media for both entertainment and information exchange, its dominion challenged only recently by digital streaming platforms. Thus, visual information is a longstanding, major source of knowledge transfer for a large portion of humankind, and both its ubiquity and the public it reaches grow as technology advances.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}

Le, Huy Viet; Mayer, Sven; Weiß, Max; Vogelsang, Jonas; Weingärtner, Henrike; Henze, Niels
Shortcut Gestures for Mobile Text Editing on Fully Touch Sensitive Smartphones Journal Article
In: ACM Trans. Comput.-Hum. Interact., vol. 27, no. 5, pp. 38, 2020, ISSN: 1073-0516.
@article{le2020shortcuts,
title = {Shortcut Gestures for Mobile Text Editing on Fully Touch Sensitive Smartphones},
author = { Huy Viet Le and Sven Mayer and Max Wei\ss and Jonas Vogelsang and Henrike Weing\"{a}rtner and Niels Henze},
url = {http://sven-mayer.com/wp-content/uploads/2020/09/le2020shortcuts.pdf},
doi = {10.1145/3396233},
issn = {1073-0516},
year = {2020},
date = {2020-08-01},
urldate = {2020-08-01},
journal = {ACM Trans. Comput.-Hum. Interact.},
volume = {27},
number = {5},
pages = {38},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
abstract = {While advances in mobile text entry enable smartphone users to type almost as fast as on hardware keyboards, text-heavy activities are still not widely adopted. One reason is the lack of shortcut mechanisms. In this article, we determine shortcuts for text-heavy activities, elicit shortcut gestures, implement them for a fully touch-sensitive smartphone, and conduct an evaluation with potential users. We found that experts perform around 800 keyboard shortcuts per day, which are not available on smartphones. Interviews revealed the lack of shortcuts as a major limitation that prevents mobile text editing. Therefore, we elicited gestures for the 22 most important shortcuts for smartphones that are touch-sensitive on the whole device surface. We implemented the gestures for a fully touch-sensitive smartphone using deep learning and evaluated them in realistic scenarios to gather feedback. We show that the developed prototype is perceived as intuitive and faster than recent commercial approaches.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}

Mayer, Sven; Laput, Gierad; Harrison, Chris
Enhancing Mobile Voice Assistants with WorldGaze Proceedings Article
In: Proceedings of the 2020 CHI Conference on Human Factors in Computing Systems, ACM, New York, NY, USA, 2020.
@inproceedings{Mayer:2020:EMV,
title = {Enhancing Mobile Voice Assistants with WorldGaze},
author = {Sven Mayer and Gierad Laput and Chris Harrison},
url = {http://sven-mayer.com/wp-content/uploads/2020/03/mayer2020worldgaze.pdf
https://www.youtube.com/watch?v=kjACtQK3D-k},
doi = {10.1145/3313831.3376479},
year = {2020},
date = {2020-04-25},
booktitle = {Proceedings of the 2020 CHI Conference on Human Factors in Computing Systems},
publisher = {ACM},
address = {New York, NY, USA},
series = {CHI '20},
abstract = {Contemporary voice assistants require that objects of interest be specified in spoken commands. Of course, users are often looking directly at the object or place of interest \textendash fine-grained, contextual information that is currently unused. We present WorldGaze, a software-only method for smartphones that provides the real-world gaze location of a user that voice agents can utilize for rapid, natural, and precise interactions. We achieve this by simultaneously opening the front and rear cameras of a smartphone. The front-facing camera is used to track the head in 3D, including estimating its direction vector. As the geometry of the front and back cameras is fixed and known, we can raycast the head vector into the 3D world scene as captured by the rear-facing camera. This allows the user to intuitively define an object or region of interest using their head gaze. We started our investigations with a qualitative exploration of competing methods, before developing a functional, real-time implementation. We conclude with an evaluation that shows WorldGaze can be quick and accurate, opening new multimodal gaze+voice interactions for mobile voice agents.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}

Mayer, Sven; Ładoński, Piotr; Dominiak, Julia; Romanowski, Andrzej; Wozniak, Paweł W.; Lischke, Lars
Enhancing Social Experiences with Shared Drones Proceedings Article
In: Interdisciplinary Workshop on Human-Drone Interaction, pp. 1–7, CEUR Workshop Proceedings, CEUR-WS.org, 2020, ISSN: 1613-0073.
@inproceedings{Mayer:2020:SharedDrones,
title = {Enhancing Social Experiences with Shared Drones},
author = {Sven Mayer and Piotr {\L}ado\'{n}ski and Julia Dominiak and Andrzej Romanowski and Pawe{\l} W. Wozniak and Lars Lischke},
url = {http://sven-mayer.com/wp-content/uploads/2020/03/mayer2020shareddrones.pdf
http://ceur-ws.org/Vol-2617/paper1.pdf
http://ceur-ws.org/Vol-2617/},
issn = {1613-0073},
year = {2020},
date = {2020-04-25},
booktitle = {Interdisciplinary Workshop on Human-Drone Interaction},
pages = {1-7},
publisher = {CEUR Workshop Proceedings},
address = {CEUR-WS.org},
series = {iHDI 2020},
abstract = {Over the last 10 years, drones have become smaller, more durable, affordable, and easier to fly. Their photo and video-taking capabilities have significantly improved. In fact, the proliferation of drones is already happening, with new no-fly zones being developed in more territories. Given current developments, we envision that everyone will be able to carry a pocket drone with them at all times, just like we do with smartphones today. Drones appeal to users as they offer, among other things, a unique view of landscapes, wildlife, the user, and other people. We conducted a survey in which we asked participants about their video and photo-taking habits and how they could envision using a drone for these purposes. Based on our findings, we envision systems in which nearby drones are available for personal and shared usage. This allows having all the advantages of drones but leaves control over the airspace to regulators, thus enabling safe and respectful flying over areas such as national parks or zoos. Moreover, we envision shared drones as a means of sparking new social interactions.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}

Mayer, Sven; Reinhardt, Jens; Schweigert, Robin; Jelke, Brighten; Schwind, Valentin; Wolf, Katrin; Henze, Niels
Improving Humans’ Ability to Interpret Deictic Gestures in Virtual Reality Proceedings Article
In: Proceedings of the 2020 CHI Conference on Human Factors in Computing Systems, ACM, New York, NY, USA, 2020.
@inproceedings{mayer2020improving,
title = {Improving Humans’ Ability to Interpret Deictic Gestures in Virtual Reality},
author = {Sven Mayer and Jens Reinhardt and Robin Schweigert and Brighten Jelke and Valentin Schwind and Katrin Wolf and Niels Henze},
url = {http://sven-mayer.com/wp-content/uploads/2020/01/mayer2020deictic.pdf
https://www.youtube.com/watch?v=Afi4TPzHdlM
https://github.com/interactionlab/Deictic-Pointing-in-VR},
doi = {10.1145/3313831.3376340},
year = {2020},
date = {2020-04-25},
urldate = {2020-04-25},
booktitle = {Proceedings of the 2020 CHI Conference on Human Factors in Computing Systems},
publisher = {ACM},
address = {New York, NY, USA},
series = {CHI \'20},
abstract = {Collaborative Virtual Environments (CVEs) offer unique opportunities for human communication. Humans can interact with each other over a distance in any environment and visual embodiment they want. Although deictic gestures are especially important as they can guide other humans’ attention, humans make systematic errors when using and interpreting them. Recent work suggests that the interpretation of vertical deictic gestures can be significantly improved by warping the pointing arm. In this paper, we extend previous work by showing that such models can also improve the interpretation of deictic gestures at targets all around the user. Through a study with 28 participants in a CVE, we analyzed the errors users make when interpreting deictic gestures. We derived a model that rotates the arm of a pointing user’s avatar to improve the observing users’ accuracy. A second study with 24 participants shows that we can improve observers’ accuracy by 22.9%. As our approach is not noticeable for users, it improves their accuracy without requiring them to learn a new interaction technique or distracting from the experience.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}

Moore, Nathan; Molloy, Kevin; Lovo, William; Mayer, Sven; Wozniak, Paweł W.; Stewart, Michael
POST: A Machine Learning Based Paper Organization and Scheduling Tool Proceedings Article
In: Companion of the 2020 ACM International Conference on Supporting Group Work, pp. 135–138, ACM, New York, NY, USA, 2020.
@inproceedings{Moore:2020:POST,
title = {POST: A Machine Learning Based Paper Organization and Scheduling Tool},
author = {Nathan Moore and Kevin Molloy and William Lovo and Sven Mayer and Pawe\l W. Wozniak and Michael Stewart},
url = {http://sven-mayer.com/wp-content/uploads/2020/01/moore2020post.pdf},
doi = {10.1145/3323994.3369892},
year = {2020},
date = {2020-01-06},
booktitle = {Companion of the 2020 ACM International Conference on Supporting Group Work},
pages = {135\textendash138},
publisher = {ACM},
address = {New York, NY, USA},
series = {GROUP'20},
abstract = {Organizing and assigning sessions within a large conference is a formidable challenge. Some conference organizers, who are typically volunteers, have utilized event planning software to ensure simple constraints, such as that two people cannot be scheduled to talk at the same time. In this work, we propose utilizing natural language processing to find the topics within a corpus of conference submissions and then cluster them together into sessions. As a preliminary evaluation of this technique, we compare session assignments from previous conferences to ones generated with our proposed techniques.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}

Weiß, Maximilian; Angerbauer, Katrin; Voit, Alexandra; Schwarzl, Magdalena; Sedlmair, Michael; Mayer, Sven
Revisited: Comparison of Empirical Methods to Evaluate Visualizations Supporting Crafting and Assembly Purposes Journal Article
In: IEEE Transactions on Visualization and Computer Graphics, 2020.
@article{weiss2020revisited,
title = {Revisited: Comparison of Empirical Methods to Evaluate Visualizations Supporting Crafting and Assembly Purposes},
author = {Maximilian Wei\ss and Katrin Angerbauer and Alexandra Voit and Magdalena Schwarzl and Michael Sedlmair and Sven Mayer},
url = {http://sven-mayer.com/wp-content/uploads/2020/10/weis2020revisited_preprint.pdf
https://www.youtube.com/watch?v=P02Wfj1WlOk},
doi = {10.1109/TVCG.2020.3030400},
year = {2020},
date = {2020-10-25},
urldate = {2020-10-25},
journal = {IEEE Transactions on Visualization and Computer Graphics},
abstract = {Ubiquitous, situated, and physical visualizations create entirely new possibilities for tasks contextualized in the real world, such as doctors inserting needles. During the development of situated visualizations, evaluating visualizations is a core requirement. However, performing such evaluations is intrinsically hard as the real scenarios are safety-critical or expensive to test. To overcome these issues, researchers and practitioners adapt classical approaches from ubiquitous computing and use surrogate empirical methods such as Augmented Reality (AR), Virtual Reality (VR) prototypes, or merely online demonstrations. This approach’s primary assumption is that meaningful insights can also be gained from different, usually cheaper and less cumbersome empirical methods. Nevertheless, recent efforts in the Human-Computer Interaction (HCI) community have found evidence against this assumption, which would impede the use of surrogate empirical methods. Currently, these insights rely on a single investigation of four interactive objects. The goal of this work is to investigate if these prior findings also hold for situated visualizations. Therefore, we first created a scenario where situated visualizations support users in do-it-yourself (DIY) tasks such as crafting and assembly. We then set up five empirical study methods to evaluate the four tasks using an online survey, as well as VR, AR, laboratory, and in-situ studies. Using this study design, we conducted a new study with 60 participants. Our results show that the situated visualizations we investigated in this study are not prone to the same dependency on the empirical method, as found in previous work. Our study provides the first evidence that analyzing situated visualizations through different empirical (surrogate) methods might lead to comparable results.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}

Xiao, Robert; Mayer, Sven; Harrison, Chris
VibroComm: Using Commodity Gyroscopes for Vibroacoustic Data Reception Proceedings Article
In: Proceedings of the 22nd International Conference on Human-Computer Interaction with Mobile Devices and Services, ACM, New York, NY, USA, 2020.
@inproceedings{xiao2020vibrocomm,
title = {VibroComm: Using Commodity Gyroscopes for Vibroacoustic Data Reception},
author = {Robert Xiao and Sven Mayer and Chris Harrison},
url = {http://sven-mayer.com/wp-content/uploads/2020/08/xiao2020vibrocomm.pdf
https://www.youtube.com/watch?v=gCA-0cPS1eM},
doi = {10.1145/3379503.3403540},
year = {2020},
date = {2020-10-05},
urldate = {2020-10-05},
booktitle = {Proceedings of the 22nd International Conference on Human-Computer Interaction with Mobile Devices and Services},
publisher = {ACM},
address = {New York, NY, USA},
series = {MobileHCI\'20},
abstract = {Inertial Measurement Units (IMUs) with gyroscopic sensors are standard in today’s mobile devices. We show that these sensors can be co-opted for vibroacoustic data reception. Our approach, called VibroComm, requires direct physical contact to a transmitting (i.e., vibrating) surface. This makes interactions targeted and explicit in nature, making it well suited for contexts with many targets or requiring explicit intent. It also offers an orthogonal dimension of physical security to wireless technologies like Bluetooth and NFC. Using our implementation, we achieve a transfer rate over 2000 bits/sec with less than 5% packet loss \textendash an order of magnitude faster than prior IMU-based approaches at a quarter of the loss rate, opening new, powerful and practical use cases that could be enabled on mobile devices with a simple software update.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
2019

Boceck, Tobias; Sprott, Sascha; Le, Huy Viet; Mayer, Sven
Force Touch Detection on Capacitive Sensors using Deep Neural Networks Proceedings Article
In: Proceedings of the 21st International Conference on Human-Computer Interaction with Mobile Devices and Services, pp. 6, ACM, New York, NY, USA, 2019, ISBN: 978-1-4503-6825-4/19/10.
@inproceedings{Boceck:2019:ForceTouch,
title = {Force Touch Detection on Capacitive Sensors using Deep Neural Networks},
author = {Tobias Boceck and Sascha Sprott and Huy Viet Le and Sven Mayer},
url = {http://sven-mayer.com/wp-content/uploads/2019/07/boceck2019forcetouch.pdf
https://github.com/interactionlab/ForceTouchDetection},
doi = {10.1145/3338286.3344389},
isbn = {978-1-4503-6825-4/19/10},
year = {2019},
date = {2019-10-01},
booktitle = {Proceedings of the 21st International Conference on Human-Computer Interaction with Mobile Devices and Services},
pages = {6},
publisher = {ACM},
address = {New York, NY, USA},
series = {MobileHCI'19},
abstract = {As the touchscreen is the most successful input method of current mobile devices, the importance of transmitting more information per touch is rising. A wide range of approaches has been presented to enhance the richness of a single touch. With 3D Touch, Apple successfully introduced pressure as a new input dimension into consumer devices. However, it relies on an additional sensing layer, which increases production cost and hardware complexity. Moreover, users have to upgrade their phones to use the new feature. In contrast, with this work, we introduce a strategy to acquire pressure measurements from the mutual capacitive sensor, which is used in the majority of today’s touch devices. We present a data collection study in which we collect capacitive images where participants apply different pressure levels. We then train a Deep Neural Network (DNN) to estimate the pressure allowing for force touch detection. As a result, we present a model which enables estimating the pressure with a mean error of 369.0g.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}

Colley, Ashley; Mayer, Sven; Henze, Niels
Investigating the Effect of Orientation and Visual Style on Touchscreen Slider Performance Proceedings Article
In: Proceedings of the 2019 CHI Conference on Human Factors in Computing Systems, pp. 406:1–406:14, ACM, Glasgow, Scotland UK, 2019, ISBN: 978-1-4503-5970-2.
@inproceedings{Colley:2019:ITE,
title = {Investigating the Effect of Orientation and Visual Style on Touchscreen Slider Performance},
author = { Ashley Colley and Sven Mayer and Niels Henze},
url = {http://sven-mayer.com/wp-content/uploads/2019/01/colley2018slider.pdf
https://www.youtube.com/watch?v=owsbVm5mu3k},
doi = {10.1145/3290605.3300419},
isbn = {978-1-4503-5970-2},
year = {2019},
date = {2019-05-04},
booktitle = {Proceedings of the 2019 CHI Conference on Human Factors in Computing Systems},
pages = {406:1--406:14},
publisher = {ACM},
address = {Glasgow, Scotland UK},
series = {CHI '19},
abstract = {Sliders are one of the most fundamental components used in touchscreen user interfaces (UIs). When entering data using a slider, errors occur due to, e.g., visual perception, resulting in inputs not matching what is intended by the user. However, it is unclear if the errors occur uniformly across the full range of the slider or if there are systematic offsets. We conducted a study to assess the errors occurring when entering values with horizontal and vertical sliders as well as two common visual styles. Our results reveal significant effects of slider orientation and style on the precision of the entered values. Furthermore, we identify systematic offsets that depend on the visual style and the target value. As the errors are partially systematic, they can be compensated for to improve users’ precision. Our findings provide UI designers with data to optimize user experiences in the wide variety of application areas where slider-based touchscreen input is used.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}

Kumar, Abinaya; Radjesh, Aishwarya; Mayer, Sven; Le, Huy Viet
Improving the Input Accuracy of Touchscreens using Deep Learning Proceedings Article
In: Extended Abstracts of the 2019 CHI Conference on Human Factors in Computing Systems, ACM, New York, NY, USA, 2019.
@inproceedings{Kumar:2019:ITI,
title = {Improving the Input Accuracy of Touchscreens using Deep Learning},
author = {Abinaya Kumar and Aishwarya Radjesh and Sven Mayer and Huy Viet Le},
url = {http://sven-mayer.com/wp-content/uploads/2019/02/kumar2018accuracy.pdf
https://github.com/interactionlab/improving-touch-accuracy},
doi = {10.1145/3290607.3312928},
year = {2019},
date = {2019-05-04},
booktitle = {Extended Abstracts of the 2019 CHI Conference on Human Factors in Computing Systems},
publisher = {ACM},
address = {New York, NY, USA},
series = {CHI'19 EA},
abstract = {Touchscreens combine input and output in a single interface. While this enables an intuitive interaction and dynamic user interfaces, the fat-finger problem and the resulting occlusions still impact the input accuracy. Previous work presented approaches to improve the touch accuracy by involving visual features on the top side of fingers, as well as static compensation functions. While the former is not applicable on recent mobile devices as the top side of a finger cannot be tracked, compensation functions do not take properties such as finger angle into account. In this work, we present a data-driven approach to estimate the 2D touch position on commodity mutual capacitive touchscreens which increases the touch accuracy by 23.0 % over recently implemented approaches.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}

Le, Huy Viet; Mayer, Sven; Steuerlein, Benedict; Henze, Niels
Investigating Unintended Inputs for One-Handed Touch Interaction Beyond the Touchscreen Proceedings Article
In: Proceedings of the 21st International Conference on Human-Computer Interaction with Mobile Devices and Services, 2019.
@inproceedings{Le:2019:IUI,
title = {Investigating Unintended Inputs for One-Handed Touch Interaction Beyond the Touchscreen},
author = {Huy Viet Le and Sven Mayer and Benedict Steuerlein and Niels Henze },
url = {http://sven-mayer.com/wp-content/uploads/2019/06/le2019implicit.pdf},
doi = {10.1145/3338286.3340145},
year = {2019},
date = {2019-10-01},
booktitle = {Proceedings of the 21st International Conference on Human-Computer Interaction with Mobile Devices and Services},
series = {MobileHCI'19},
abstract = {Additional input controls such as fingerprint scanners, physical buttons, and Back-of-Device (BoD) touch panels improve the input capabilities on smartphones. While previous work showed the benefits of input beyond the touchscreen, unfavorably designed input controls force detrimental grip changes and increase the likelihood of unintended inputs. Researchers investigated all fingers’ comfortable areas to avoid grip changes. However, there is no understanding of unintended BoD inputs which frustrate users and lead to embarrassing mistakes. In this paper, we study the BoD areas in which unintended inputs occur during interaction with the touchscreen. Participants performed common tasks on four smartphones which they held in the prevalent single-handed grip while sitting and walking. We recorded finger movements with a motion capture system and analyzed the unintended inputs. We identified comfortable areas on the back in which no unintended inputs occur and found that the least unintended inputs occurred on 5′′ devices. We derive three design implications for BoD input to help designers considering reachability and unintended input.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}

Le, Huy Viet; Mayer, Sven; Henze, Niels
Investigating the Feasibility of Finger Identification on Capacitive Touchscreens using Deep Learning Proceedings Article
In: Proceedings of the 24th International Conference on Intelligent User Interfaces, ACM, Marina del Ray, CA, USA, 2019.
@inproceedings{le2019investigating,
title = {Investigating the Feasibility of Finger Identification on Capacitive Touchscreens using Deep Learning},
author = { Huy Viet Le and Sven Mayer and Niels Henze},
url = {http://sven-mayer.com/wp-content/uploads/2019/01/le2019investigating.pdf
https://github.com/interactionlab/CapFingerId
https://www.youtube.com/watch?v=pJltIcZskao
https://www.youtube.com/watch?v=8m2SSpiR4xM},
doi = {10.1145/3301275.3302295},
year = {2019},
date = {2019-03-17},
booktitle = {Proceedings of the 24th International Conference on Intelligent User Interfaces},
publisher = {ACM},
address = {Marina del Ray, CA, USA},
series = {IUI '19},
abstract = {Touchscreens enable intuitive mobile interaction. However, touch input is limited to 2D touch locations which makes it challenging to provide shortcuts and secondary actions similar to hardware keyboards and mice. Previous work presented a wide range of approaches to provide secondary actions by identifying which finger touched the display. While these approaches are based on external sensors which are inconvenient, we use capacitive images from mobile touchscreens to investigate the feasibility of finger identification. We collected a dataset of low-resolution fingerprints and trained convolutional neural networks that classify touches from eight combinations of fingers. We focused on combinations that involve the thumb and index finger as these are mainly used for interaction. As a result, we achieved an accuracy of over 92 % for a position-invariant differentiation between left and right thumbs. We evaluated the model and two use cases that users find useful and intuitive. We publicly share our data set (CapFingerId) comprising 455,709 capacitive images of touches from each finger on a representative mutual capacitive touchscreen and our models to enable future work using and improving them.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}

Mayer, Sven; Lischke, Lars; Wozniak, Paweł W.
Drones for Search and Rescue Proceedings Article
In: International Workshop on Human-Drone Interaction, CHI ’19 Extended Abstracts, pp. 6, Glasgow, Scotland, UK, 2019.
@inproceedings{Mayer:2019:DFS,
title = {Drones for Search and Rescue},
author = {Sven Mayer and Lars Lischke and Pawe\l W. Wozniak },
url = {http://sven-mayer.com/wp-content/uploads/2019/04/mayer2019drones.pdf
https://hal.archives-ouvertes.fr/hal-02128385},
year = {2019},
date = {2019-05-04},
booktitle = {International Workshop on Human-Drone Interaction, CHI ’19 Extended Abstracts},
pages = {6},
address = {Glasgow, Scotland, UK},
series = {iHDI'19},
abstract = {Natural disasters are increasingly common as climate change becomes more severe. Search and rescue operations become more and more important to societies worldwide. Rescue services are often engaged in missions in rural areas, treating the injured or searching for missing persons. Often, time is an essential factor for a positive outcome of search and rescue missions. Due to their capacity for flexible deployment, drones have a great potential to be deployed in search and rescue scenarios and thus reduce the rescue time. In this work, we discuss how drones can effectively assist rescue crews in their mission to save human life.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}

Mayer, Sven; Schwind, Valentin; Le, Huy Viet; Weber, Dominik; Vogelsang, Jonas; Wolf, Johannes; Henze, Niels
Effect of Orientation on Unistroke Touch Gestures Proceedings Article
In: Proceedings of the 2019 CHI Conference on Human Factors in Computing Systems, pp. 406:1–406:14, ACM, Glasgow, Scotland UK, 2019, ISBN: 978-1-4503-5970-2.
@inproceedings{Mayer:2019:EOO,
title = {Effect of Orientation on Unistroke Touch Gestures},
author = { Sven Mayer and Valentin Schwind and Huy Viet Le and Dominik Weber and Jonas Vogelsang and Johannes Wolf and Niels Henze},
url = {http://sven-mayer.com/wp-content/uploads/2019/04/mayer2019orientation.pdf
https://www.youtube.com/watch?v=PSyYVskhIW8},
doi = {10.1145/3290605.3300928},
isbn = {978-1-4503-5970-2},
year = {2019},
date = {2019-05-04},
booktitle = {Proceedings of the 2019 CHI Conference on Human Factors in Computing Systems},
pages = {406:1--406:14},
publisher = {ACM},
address = {Glasgow, Scotland UK},
series = {CHI '19},
abstract = {As touchscreens are the most successful input method of current mobile devices, touch gestures became a widely used input technique. While gestures provide users with advantages to express themselves, they also introduce challenges regarding accuracy and memorability. In this paper, we investigate the effect of a gesture’s orientation on how well the gesture can be performed. We conducted a study in which participants performed systematically rotated unistroke gestures. For straight lines as well as for compound lines, we found that users tend to align gestures with the primary axes. We show that the error can be described by a Clausen function with R² = .93. Based on our findings, we suggest design implications and highlight the potential for recognizing flick gestures, visualizing gestures and improving recognition of compound gestures.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}

Mayer, Sven; Le, Huy Viet; Funk, Markus; Henze, Niels
Finding the Sweet Spot: Analyzing Unrestricted Touchscreen Interaction In-the-Wild Proceedings Article
In: Proceedings of the 2019 ACM International Conference on Interactive Surfaces and Spaces, 2019.
@inproceedings{Mayer:2019:SweetSpot,
title = {Finding the Sweet Spot: Analyzing Unrestricted Touchscreen Interaction In-the-Wild},
author = {Sven Mayer and Huy Viet Le and Markus Funk and Niels Henze},
url = {http://sven-mayer.com/wp-content/uploads/2019/09/mayer2019sweetspot.pdf
https://www.youtube.com/watch?v=MirqESUmmp4},
doi = {10.1145/3343055.3359705},
year = {2019},
date = {2019-11-10},
booktitle = {Proceedings of the 2019 ACM International Conference on Interactive Surfaces and Spaces},
series = {ISS'19},
abstract = {With smartphones being a prime example, touchscreens became one of the most widely used interfaces to interact with computing systems. Compared to other touchscreen devices, smartphones pose additional challenges as the hand that interacts with the device is commonly used to also hold the device. Consequently, determining how fingers of the hand holding the device can interact with the screen is a non-trivial challenge. A body of recent work investigated the comfortable area in controlled lab studies. This poses limitations as it is based on the assumption that the grips used in the studies are representative of normal smartphone use. In this paper, we extend previous work by providing insights from in-the-wild studies using two different apps that were deployed in the Android App Store. Comparing our results with previous work, we confirm that our data fits previously proposed models. Further analyzing the data, we highlight the sweet spot, the position that is touched if the input can be performed on the whole screen.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}

Mayer, Sven; Lischke, Lars; Schwind, Valentin; Gärtner, Markus; Hämmerle, Eric; Turcan, Emine; Rheinwald, Florin; Murawski, Gustav; Kuhn, Jonas; Henze, Niels
Text Analysis Using Large High-Resolution Displays Proceedings Article
In: Mensch und Computer, 2019.
@inproceedings{Mayer:2019:TAU,
title = {Text Analysis Using Large High-Resolution Displays},
author = {Sven Mayer and Lars Lischke and Valentin Schwind and Markus G\"{a}rtner and Eric H\"{a}mmerle and Emine Turcan and Florin Rheinwald and Gustav Murawski and Jonas Kuhn and Niels Henze},
url = {http://sven-mayer.com/wp-content/uploads/2019/06/mayer2019textanalysis.pdf
https://www.youtube.com/watch?v=rBqLhCx6OHA},
doi = {10.1145/3340764.3340768},
year = {2019},
date = {2019-09-08},
booktitle = {Mensch und Computer},
series = {MuC '19},
abstract = {Large high-resolution displays are entering into our daily life. Today, we already see them in installations where they display tailored applications, e.g. in exhibitions. However, while heavily studied under lab conditions, real-world applications for personal use, which utilize the extended screen space, are rarely available. Thus, today's studies of Large High-Resolution Displays (LHRD) are particularly designed to embrace the large screen space. In contrast, in this paper, we investigate a real-world application designed for researchers working on large text corpora to support them in deep text understanding. We conducted a study with 14 experts from the humanities and computational linguistics who solved a text analysis task using a standard desktop version on a 24 inch screen and an LHRD version on three 50 inch screens. Surprisingly, the smaller display condition outperformed the LHRD in terms of task completion time and error rate. While participants appreciated the overview provided by the large screen, qualitative feedback also revealed that the need for head movement and the scrolling mechanism decreased the usability of the LHRD condition.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}

Mayer, Sven
Finger Orientation as an Additional Input Dimension for Touchscreens PhD Thesis
University of Stuttgart, 2019.
@phdthesis{Mayer:2019:thesis,
title = {Finger Orientation as an Additional Input Dimension for Touchscreens},
author = {Sven Mayer},
url = {http://sven-mayer.com/wp-content/uploads/2019/04/mayer2019thesis.pdf},
doi = {10.18419/opus-10397},
year = {2019},
date = {2019-03-01},
address = {Pfaffenwaldring 5a, 70569 Stuttgart, Germany},
school = {University of Stuttgart},
abstract = {Since the first digital computer in 1941 and the first personal computer back in 1975, the way we interact with computers has radically changed. The keyboard is still one of the two main input devices for desktop computers, accompanied most of the time by a mouse or trackpad. However, the interaction with desktop and laptop computers today only makes up a small percentage of current interaction with computing devices. Today, we mostly interact with ubiquitous computing devices, and while the first ubiquitous devices were controlled via buttons, this changed with the invention of touchscreens. Moreover, the phone as the most prominent ubiquitous computing device relies heavily on touch interaction as the dominant input mode. Through direct touch, users can directly interact with graphical user interfaces (GUIs). GUI controls can directly be manipulated by simply touching them. However, current touch devices reduce the richness of touch input to two-dimensional positions on the screen.
In this thesis, we investigate the potential of enriching a simple touch with additional information about the finger touching the screen. We propose to use the user's finger orientation as two additional input dimensions. We investigate four key areas which make up the foundation to fully understand finger orientation as an additional input technique. With these insights, we provide designers with the foundation to design new gesture sets and use cases which take finger orientation into account. We first investigate approaches to recognize finger orientation input and provide ready-to-deploy models to recognize the orientation. Second, we present design guidelines for a comfortable use of finger orientation. Third, we present a method to analyze applications in social settings to design use cases with possible conversation disruption in mind. Lastly, we present three ways how new interaction techniques like finger orientation input can be communicated to the user. This thesis contributes these four key insights to fully understand finger orientation as an additional input technique. Moreover, we combine the key insights to lay the foundation to evaluate every new interaction technique based on the same in-depth evaluation.},
keywords = {},
pubstate = {published},
tppubtype = {phdthesis}
}

Rzayev, Rufat; Mayer, Sven; Krauter, Christian; Henze, Niels
Notification in VR: The Effect of Notification Placement, Task, and Environment Proceedings Article
In: Proceedings of the Annual Symposium on Computer-Human Interaction in Play, ACM, New York, NY, USA, 2019.
@inproceedings{Rzayev:2019:NVR,
title = {Notification in VR: The Effect of Notification Placement, Task, and Environment},
author = {Rufat Rzayev and Sven Mayer and Christian Krauter and Niels Henze},
url = {https://www.youtube.com/watch?v=DTK-17OwZrc
http://sven-mayer.com/wp-content/uploads/2019/08/rzayev2019notification.pdf},
doi = {10.1145/3311350.3347190},
year = {2019},
date = {2019-10-22},
booktitle = {Proceedings of the Annual Symposium on Computer-Human Interaction in Play},
publisher = {ACM},
address = {New York, NY, USA},
series = {CHI PLAY '19},
abstract = {Virtual reality (VR) is commonly used for entertainment applications but is also increasingly employed for a large number of use cases such as digital prototyping or training workers. Here, VR is key to present an immersive secondary world. VR enables experiences that are close to reality, regardless of time and place. However, highly immersive VR can result in missing digital information from the real world, such as important notifications. For efficient notification presentation in VR, it is necessary to understand how notifications should be integrated in VR without breaking the immersion. Thus, we conducted a study with 24 participants to investigate notification placement in VR while playing games, learning, and solving problems. We compared placing notifications using a Head-Up Display, On-Body, Floating, and In-Situ in open, semi-open, and closed VR environments. We found significant effects of notification placement and task on how notifications are perceived in VR. Insights from our study inform the design of VR applications that support digital notifications.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}

Schweigert, Robin; Schwind, Valentin; Mayer, Sven
EyePointing: A Gaze-Based Selection Technique Proceedings Article
In: Proceedings of Mensch und Computer 2019, ACM, New York, NY, USA, 2019.
@inproceedings{Schweigert:2019:EyePointing,
title = {EyePointing: A Gaze-Based Selection Technique},
author = {Robin Schweigert and Valentin Schwind and Sven Mayer},
url = {http://sven-mayer.com/wp-content/uploads/2019/07/schweigert2019eyepointing.pdf},
doi = {10.1145/3340764.3344897},
year = {2019},
date = {2019-09-08},
booktitle = {Proceedings of Mensch und Computer 2019},
publisher = {ACM},
address = {New York, NY, USA},
series = {MuC '19},
abstract = {Interacting with objects from a distance is not only challenging in the real world but also a common problem in virtual reality (VR). One issue concerns the distinction between attention for exploration and attention for selection - also known as the Midas-touch problem. Researchers proposed numerous approaches to overcome that challenge using additional devices, gaze input cascaded pointing, and using eye blinks to select the remote object. While techniques such as MAGIC pointing still require additional input for confirming a selection using eye gaze and, thus, force the user to perform unnatural behavior, there is still no solution enabling a truly natural and unobtrusive device-free interaction for selection. In this paper, we propose EyePointing: a technique which combines the MAGIC pointing technique and the referential mid-air pointing gesture to select objects at a distance. While the eye gaze is used for referencing the object, the pointing gesture is used as a trigger. Our technique counteracts the Midas-touch problem.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}

Schweigert, Robin; Leusmann, Jan; Hagenmayer, Simon; Weiß, Maximilian; Le, Huy Viet; Mayer, Sven; Bulling, Andreas
KnuckleTouch: Enabling Knuckle Gestures on Capacitive Touchscreens using Deep Learning Proceedings Article
In: Mensch und Computer, 2019.
@inproceedings{Schweigert:2019:KTE,
title = {KnuckleTouch: Enabling Knuckle Gestures on Capacitive Touchscreens using Deep Learning},
author = {Robin Schweigert and Jan Leusmann and Simon Hagenmayer and Maximilian Wei\ss and Huy Viet Le and Sven Mayer and Andreas Bulling},
url = {http://sven-mayer.com/wp-content/uploads/2019/07/schweigert2019knuckletouch.pdf
https://www.youtube.com/watch?v=4U1daa7fCbY
https://git.perceptualui.org/public-projects/knuckletouch},
doi = {10.1145/3340764.3340767},
year = {2019},
date = {2019-09-08},
booktitle = {Mensch und Computer},
series = {MuC '19},
abstract = {While mobile devices have become essential for social communication and have paved the way for work on the go, their interactive capabilities are still limited to simple touch input. A promising enhancement for touch interaction is knuckle input but recognizing knuckle gestures robustly and accurately remains challenging. We present a method to differentiate between 17 finger and knuckle gestures based on a long short-term memory (LSTM) machine learning model. Furthermore, we introduce an open source approach that is ready-to-deploy on commodity touch-based devices. The model was trained on a new dataset that we collected in a mobile interaction study with 18 participants. We show that our method can achieve an accuracy of 86.8% on recognizing one of the 17 gestures and an accuracy of 94.6% to differentiate between finger and knuckle. In our evaluation study, we validated our models and found that the LSTM gesture recognition achieved an accuracy of 88.6%. We show that KnuckleTouch can be used to improve the input expressiveness and to provide shortcuts to frequently used functions.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}

Voit, Alexandra; Mayer, Sven; Schwind, Valentin; Henze, Niels
Online, VR, AR, Lab, and In-Situ: Comparison of Research Methods to Evaluate Smart Artifacts Proceedings Article
In: Proceedings of the 2019 CHI Conference on Human Factors in Computing Systems, pp. 406:1–406:14, ACM, Glasgow, Scotland UK, 2019, ISBN: 978-1-4503-5970-2.
@inproceedings{Voit:2019:OVA,
title = {Online, VR, AR, Lab, and In-Situ: Comparison of Research Methods to Evaluate Smart Artifacts},
author = { Alexandra Voit and Sven Mayer and Valentin Schwind and Niels Henze},
url = {http://sven-mayer.com/wp-content/uploads/2019/01/voit2019comparison.pdf
https://github.com/interactionlab/CHI19-Comparison-of-Research-Methods
https://www.youtube.com/watch?v=BqLQo4kTL9Y},
doi = {10.1145/3290605.3300737},
isbn = {978-1-4503-5970-2},
year = {2019},
date = {2019-05-04},
booktitle = {Proceedings of the 2019 CHI Conference on Human Factors in Computing Systems},
pages = {406:1--406:14},
publisher = {ACM},
address = {Glasgow, Scotland UK},
series = {CHI '19},
abstract = {Empirical studies are a cornerstone of HCI research. Technical progress constantly enables new study methods. Online surveys, for example, make it possible to collect feedback from remote users. Progress in augmented and virtual reality enables collecting feedback on early designs. In-situ studies enable researchers to gather feedback in natural environments. While these methods have unique advantages and disadvantages, it is unclear if and how using a specific method affects the results. Therefore, we conducted a study with 60 participants comparing five different methods (online, virtual reality, augmented reality, lab setup, and in-situ) to evaluate early prototypes of smart artifacts. We asked participants to assess four different smart artifacts using standardized questionnaires. We show that the method significantly affects the study result and discuss implications for HCI research. Finally, we highlight further directions to overcome the effect of the used methods.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}

Wisiecka, Katarzyna; Mayer, Sven; Schweigert, Robin; Krejtz, Izabela; Nielek, Radosław; Bulling, Andreas; Krejtz, Krzysztof
Joint and Self-Focused Attention in Computer-Supported Collaboration - the Role of Gaze Visualisation Proceedings Article
In: Adjunct Proceedings of the 20th European Conference on Eye Movements - Abstracts, 2019.
@inproceedings{Wisiecka:2019:JSF,
title = {Joint and Self-Focused Attention in Computer-Supported Collaboration - the Role of Gaze Visualisation},
author = {Katarzyna Wisiecka and Sven Mayer and Robin Schweigert and Izabela Krejtz and Rados{\l}aw Nielek and Andreas Bulling and Krzysztof Krejtz},
url = {http://sven-mayer.com/wp-content/uploads/2019/10/wisiecka2019jointattention.pdf},
year = {2019},
date = {2019-08-18},
booktitle = {Adjunct Proceedings of the 20th European Conference on Eye Movements - Abstracts},
series = {ECEM 2019},
abstract = {Following another person’s gaze to a new focus of visual attention creates a situation of joint attention (Carpenter et al., 1998). Joint attention is possible to achieve if the conversation partners have the ability to inhibit their self-perspective (Samson et al., 2005). To fully engage in the process of collaboration, gaze communication enables a transition from a self-focused perspective to focusing on another person’s gaze direction. Gaze visualisations provide a means of directing attention to crucial information during computer-supported collaboration (Zhang et al., 2017). However, it remains unclear if gaze visualisation could reduce difficulties in reaching joint attention among self-focused people. In our study, participants pre-screened with the Self-Consciousness Scale (SCS-R) solve two tasks requiring mutual problem solving and joint visual search in two conditions and in two experimental settings. We use a 2x2 within-subjects experimental design (Setting x GazeVis). The setting is divided into a co-located and a remote computer collaboration, while the collaboration is enhanced with or without the partner’s gaze visualization. While the data collection is on-going, we hypothesise that (1) more self-focused attention is correlated with the level of achieved joint attention, (2) gaze visualisation enhances joint attention as well as efficiency, and (3) gaze visualisation is more effective in the remote setting compared to the co-located setting. Regardless of the outcome, our results will contribute to a greater understanding of the role of gaze communication in computer-supported collaboration within different settings and kinds of tasks.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
2018

Funk, Markus; Lischke, Lars; Mayer, Sven; Shirazi, Alireza Sahami; Schmidt, Albrecht
Teach Me How! Interactive Assembly Instructions Using Demonstration and In-Situ Projection Book Chapter
In: Huber, Jochen; Shilkrot, Roy; Maes, Pattie; Nanayakkara, Suranga (Ed.): pp. 49–73, Springer Singapore, Singapore, 2018, ISBN: 978-981-10-6404-3.
@inbook{Funk2018,
title = {Teach Me How! Interactive Assembly Instructions Using Demonstration and In-Situ Projection},
author = { Markus Funk and Lars Lischke and Sven Mayer and Alireza Sahami Shirazi and Albrecht Schmidt},
editor = { Jochen Huber and Roy Shilkrot and Pattie Maes and Suranga Nanayakkara},
doi = {10.1007/978-981-10-6404-3_4},
isbn = {978-981-10-6404-3},
year = {2018},
date = {2018-01-01},
pages = {49--73},
publisher = {Springer Singapore},
address = {Singapore},
abstract = {When ordering a product the options to personalize or customize the items have increased over the last years. This flexibility has led to an increasing number of variants in manufactured products. As storing produced items is expensive, companies tend to produce their products lean, i.e. in smaller lot sizes just when they are needed. This lean manufacturing creates more flexible production environments. In our work, we investigate how human workers can be assisted in such demanding environments. To this end, Augmented Reality systems can be used to provide work instructions. First, in this chapter we provide a comprehensive overview of Augmented Reality approaches to support workers directly at the workplace and introduce an assistive system for providing in-situ instructions. Through three user studies, we evaluate the general impact of in-situ instructions, evaluate three instruction creation strategies, and finally evaluate the created instructions using a real product assembly task.},
keywords = {},
pubstate = {published},
tppubtype = {inbook}
}

Gärtner, Markus; Mayer, Sven; Schwind, Valentin; Hämmerle, Eric; Turcan, Emine; Rheinwald, Florin; Murawski, Gustav; Lischke, Lars; Kuhn, Jonas
NLATool: An Application for Enhanced Deep Text Understanding Proceedings Article
In: Proceedings of the 27th International Conference on Computational Linguistics: System Demonstrations, pp. 4, 2018.
@inproceedings{Gartner:2018:NLA,
title = {NLATool: An Application for Enhanced Deep Text Understanding},
author = {Markus G\"{a}rtner and Sven Mayer and Valentin Schwind and Eric H\"{a}mmerle and Emine Turcan and Florin Rheinwald and Gustav Murawski and Lars Lischke and Jonas Kuhn},
url = {http://sven-mayer.com/wp-content/uploads/2018/06/gartner2018nlatool.pdf
https://github.com/interactionlab/NLATool},
year = {2018},
date = {2018-08-20},
booktitle = {Proceedings of the 27th International Conference on Computational Linguistics: System Demonstrations},
pages = {4},
series = {COLING'18},
abstract = {Today, we see an ever-growing number of tools supporting text annotation. Each of these tools is optimized for specific use-cases such as named entity recognition. At the same time, we see large, growing knowledge bases such as Wikipedia or the Google Knowledge Graph. In this paper, we introduce NLATool, a web application developed using a human-centered design process. The application combines supporting text annotation and enriching the text with additional information from a number of sources directly within the application. The tool assists users to efficiently recognize named entities and annotate text, and automatically provides users with additional information while solving deep text understanding tasks.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}

Le, Huy Viet; Mayer, Sven; Kosch, Thomas; Henze, Niels
Demonstrating PalmTouch: The Palm as An Additional Input Modality on Commodity Smartphones Proceedings Article
In: Proceedings of the 20th International Conference on Human-Computer Interaction with Mobile Devices and Services Adjunct, ACM, New York, NY, USA, 2018.
@inproceedings{Le:2018:DPT,
title = {Demonstrating PalmTouch: The Palm as An Additional Input Modality on Commodity Smartphones},
author = {Huy Viet Le and Sven Mayer and Thomas Kosch and Niels Henze},
url = {http://sven-mayer.com/wp-content/uploads/2018/09/le2018demonstrating.pdf
https://www.youtube.com/watch?v=dAo3uYnZywA},
doi = {10.1145/3236112.3236163},
year = {2018},
date = {2018-09-03},
booktitle = {Proceedings of the 20th International Conference on Human-Computer Interaction with Mobile Devices and Services Adjunct},
publisher = {ACM},
address = {New York, NY, USA},
series = {MobileHCI'18},
abstract = {Touchscreens are the most successful input method for smartphones. Despite their flexibility, touch input is limited to the location of taps and gestures. We present PalmTouch, an additional input modality that differentiates between touches of fingers and the palm. Touching the display with the palm can be a natural gesture since moving the thumb towards the device’s top edge implicitly places the palm on the touchscreen. We developed a model that differentiates between finger and palm touch with an accuracy of 99.53 % in realistic scenarios. In this demonstration, we exhibit different use cases for PalmTouch, including the use as a shortcut and for improving reachability. In a previous evaluation, we showed that participants perceive the input modality as intuitive and natural to perform. Moreover, they appreciate PalmTouch as an easy and fast solution to address the reachability issue during one-handed smartphone interaction compared to thumb stretching or grip changes.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}

Le, Huy Viet; Mayer, Sven; Bader, Patrick; Henze, Niels
Fingers' Range and Comfortable Area for One-Handed Smartphone Interaction Beyond the Touchscreen Proceedings Article
In: Proceedings of the 2018 CHI Conference on Human Factors in Computing Systems, pp. 31:1–31:12, ACM, New York, NY, USA, 2018.
@inproceedings{Le:2018:FRC,
title = {Fingers' Range and Comfortable Area for One-Handed Smartphone Interaction Beyond the Touchscreen},
author = {Huy Viet Le and Sven Mayer and Patrick Bader and Niels Henze},
url = {http://sven-mayer.com/wp-content/uploads/2018/01/le2018fingers.pdf
https://www.youtube.com/watch?v=Kzp-sO7mIbo},
doi = {10.1145/3173574.3173605},
year = {2018},
date = {2018-04-21},
booktitle = {Proceedings of the 2018 CHI Conference on Human Factors in Computing Systems},
pages = {31:1--31:12},
publisher = {ACM},
address = {New York, NY, USA},
series = {CHI'18},
abstract = {Previous research and recent smartphone development presented a wide range of input controls beyond the touchscreen. Fingerprint scanners, silent switches, and Back-of-Device (BoD) touch panels offer additional ways to perform input. However, with the increasing amount of input controls on the device, unintentional input or limited reachability can hinder interaction. In a one-handed scenario, we conducted a study to investigate the areas that can be reached without losing grip stability (comfortable area), and with stretched fingers (maximum range) using four different phone sizes. We describe the characteristics of the comfortable area and maximum range for different phone sizes and derive four design implications for the placement of input controls to support one-handed BoD and edge interaction. Amongst others, we show that the index and middle finger are the most suited fingers for BoD interaction and that the grip shifts towards the top edge with increasing phone sizes.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}

Le, Huy Viet; Mayer, Sven; Henze, Niels
InfiniTouch: Finger-Aware Interaction on Fully Touch Sensitive Smartphones Proceedings Article
In: Proceedings of the 31st Annual ACM Symposium on User Interface Software and Technology, ACM, New York, NY, USA, 2018.
@inproceedings{Le:2018:IFA,
title = {InfiniTouch: Finger-Aware Interaction on Fully Touch Sensitive Smartphones},
author = {Huy Viet Le and Sven Mayer and Niels Henze },
url = {http://sven-mayer.com/wp-content/uploads/2018/08/le2018infinitouch.pdf
https://github.com/interactionlab/InfiniTouch
https://www.youtube.com/watch?v=0XlF1kenRp8
https://www.youtube.com/watch?v=OvvZwMJCyVU},
doi = {10.1145/3242587.3242605},
year = {2018},
date = {2018-10-14},
booktitle = {Proceedings of the 31st Annual ACM Symposium on User Interface Software and Technology},
publisher = {ACM},
address = {New York, NY, USA},
series = {UIST'18},
abstract = {Smartphones are the most successful mobile devices and offer intuitive interaction through touchscreens. Current devices treat all fingers equally and only sense touch contacts on the front of the device. In this paper, we present InfiniTouch, the first system that enables touch input on the whole device surface and identifies the fingers touching the device without external sensors while keeping the form factor of a standard smartphone. We first developed a prototype with capacitive sensors on the front, the back and on three sides. We then conducted a study to train a convolutional neural network that identifies fingers with an accuracy of 95.78% while estimating their position with a mean absolute error of 0.74cm. We demonstrate the usefulness of multiple use cases made possible with InfiniTouch, including finger-aware gestures and finger flexion state as an action modifier.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}

Le, Huy Viet; Mayer, Sven; Ali, Abdallah El; Henze, Niels
Machine Learning for Intelligent Mobile User Interfaces using Keras Miscellaneous
2018.
@misc{Le:2018:MLI,
title = {Machine Learning for Intelligent Mobile User Interfaces using Keras},
author = {Huy Viet Le and Sven Mayer and Abdallah El Ali and Niels Henze},
url = {http://sven-mayer.com/wp-content/uploads/2018/09/le2018tutorial.pdf},
year = {2018},
date = {2018-09-03},
abstract = {High-level APIs such as Keras facilitate the development of deep learning models through a simple interface and enable users to train neural networks within a few lines of code. Building on top of TensorFlow, trained models can be exported and run efficiently on mobile devices. This enables a wide range of opportunities for researchers and developers. In this tutorial, we teach attendees three basic steps to run neural networks on a mobile phone: Firstly, we will teach how to develop neural network architectures and train them with Keras based on the TensorFlow backend. Secondly, we show the process to run the trained models on a mobile phone. In the final part, we demonstrate how to perform Human Activity Recognition using existing mobile device sensor datasets.},
keywords = {},
pubstate = {published},
tppubtype = {misc}
}
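
As a rough illustration of the workflow this tutorial teaches, namely defining a neural network in Keras on the TensorFlow backend and training it for Human Activity Recognition, consider the following minimal Python sketch. The window length, sensor channels, class count, and the synthetic data are illustrative assumptions, not the tutorial's actual material; the export step for mobile deployment is sketched after the TensorFlow tutorial entry further below.

import numpy as np
import tensorflow as tf

# Illustrative stand-in for an accelerometer dataset: windows of 128
# samples with 3 axes, labeled with one of 6 activities (all assumed).
rng = np.random.default_rng(0)
x = rng.normal(size=(1000, 128, 3)).astype("float32")
y = rng.integers(0, 6, size=1000)

# A small 1D convolutional network, defined in a few lines of Keras.
model = tf.keras.Sequential([
    tf.keras.layers.Conv1D(16, 5, activation="relu", input_shape=(128, 3)),
    tf.keras.layers.GlobalAveragePooling1D(),
    tf.keras.layers.Dense(6, activation="softmax"),
])
model.compile(optimizer="adam", loss="sparse_categorical_crossentropy",
              metrics=["accuracy"])
model.fit(x, y, epochs=3, batch_size=32, verbose=0)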

Le, Huy Viet; Kosch, Thomas; Bader, Patrick; Mayer, Sven; Henze, Niels
PalmTouch: Using the Palm as an Additional Input Modality on Commodity Smartphones Proceedings Article
In: Proceedings of the 2018 CHI Conference on Human Factors in Computing Systems, pp. 360:1–360:13, ACM, New York, NY, USA, 2018.
@inproceedings{Le:2018:PUP,
title = {PalmTouch: Using the Palm as an Additional Input Modality on Commodity Smartphones},
author = {Huy Viet Le and Thomas Kosch and Patrick Bader and Sven Mayer and Niels Henze},
url = {http://sven-mayer.com/wp-content/uploads/2018/01/le2018palmtouch.pdf
https://github.com/interactionlab/PalmTouch
https://www.youtube.com/watch?v=dAo3uYnZywA},
doi = {10.1145/3173574.3173934},
year = {2018},
date = {2018-04-21},
booktitle = {Proceedings of the 2018 CHI Conference on Human Factors in Computing Systems},
pages = {360:1--360:13},
publisher = {ACM},
address = {New York, NY, USA},
series = {CHI'18},
abstract = {Touchscreens are the most successful input method for smartphones. Despite their flexibility, touch input is limited to the location of taps and gestures. We present PalmTouch, an additional input modality that differentiates between touches of fingers and the palm. Touching the display with the palm can be a natural gesture since moving the thumb towards the device's top edge implicitly places the palm on the touchscreen. We present different use cases for PalmTouch, including the use as a shortcut and for improving reachability. To evaluate these use cases, we have developed a model that differentiates between finger and palm touch with an accuracy of 99.53% in realistic scenarios. Results of the evaluation show that participants perceive the input modality as intuitive and natural to perform. Moreover, they appreciate PalmTouch as an easy and fast solution to address the reachability issue during one-handed smartphone interaction compared to thumb stretching or grip changes.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}

Lischke, Lars; Mayer, Sven; Preikschat, Andreas; Schweizer, Markus; Vu, Ba; Woźniak, Paweł W.; Henze, Niels
Understanding Large Display Environments: Contextual Inquiry in a Control Room Proceedings Article
In: Extended Abstracts of the 2018 CHI Conference on Human Factors in Computing Systems, pp. LBW134:1–LBW134:6, ACM, New York, NY, USA, 2018.
@inproceedings{Lischke:2018:ULD,
title = {Understanding Large Display Environments: Contextual Inquiry in a Control Room},
author = {Lars Lischke and Sven Mayer and Andreas Preikschat and Markus Schweizer and Ba Vu and Pawe\l W. Wo\'{z}niak and Niels Henze},
url = {http://sven-mayer.com/wp-content/uploads/2018/03/lischke2018controllroom.pdf},
doi = {10.1145/3170427.3188621},
year = {2018},
date = {2018-04-21},
booktitle = {Extended Abstracts of the 2018 CHI Conference on Human Factors in Computing Systems},
pages = {LBW134:1--LBW134:6},
publisher = {ACM},
address = {New York, NY, USA},
series = {CHI EA '18},
abstract = {Research has identified benefits of large high-resolution displays (LHRDs) for exploring and understanding visual information. However, these displays are still not commonplace in work environments. Control rooms are one of the rare cases where LHRD workplaces are used in practice. To understand the challenges in developing LHRD workplaces, we conducted a contextual inquiry in a public transport control room. In this work, we present the physical arrangement of the control room workplaces and describe work routines with a focus on the interaction with visually displayed content. While staff members stated that they would prefer to use even more display space, we identified critical challenges for input on LHRDs and for designing graphical user interfaces (GUIs) for LHRDs.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}

Martin, Ullrich; von Molo, Carlo; Henze, Niels; Lischke, Lars; Mayer, Sven; Rieger, Monika A.; Steinhilber, Benjamin; Wagenblast, Florestan
Ethnographic Analysis of the Dispatchers’ Workplace in a Rail-Based Transport Control Center Proceedings Article
In: Third German Workshop on Rail Human Factors 2018, pp. 7, 2018.
@inproceedings{Martin:2018:Ethnographic,
title = {Ethnographic Analysis of the Dispatchers’ Workplace in a Rail-Based Transport Control Center},
author = {Ullrich Martin and Carlo von Molo and Niels Henze and Lars Lischke and Sven Mayer and Monika A. Rieger and Benjamin Steinhilber and Florestan Wagenblast},
url = {http://sven-mayer.com/wp-content/uploads/2018/02/martin2018ethnographic.pdf},
year = {2018},
date = {2018-04-18},
booktitle = {Third German Workshop on Rail Human Factors 2018},
volume = {3},
pages = {7},
series = {RHF'18},
abstract = {In traffic control centers, a large number of complex processes are monitored by few dispatchers. Dispatchers are assisted by computer-aided systems that support their operations and decisions by filtering task-relevant from task-irrelevant aspects. With the increased use of computer-aided systems, the area controlled by a dispatcher grows, and so does the risk of non-normal operations which require additional dispatcher action. In case of these non-normal operations, monotonous surveillance work can quickly change to a complex operation requiring sustained attention, cognitive effort, and responsible decisions, since they might impact railway traffic safety. The level of dispatchers' attention and alertness is crucial for adequate decisions in non-normal operations. A computer-aided system that supports these abilities, for example by measures of attention control, could be a key element for optimizing the work in traffic control centers towards fewer mistakes and lower mental demands on the dispatcher. In this paper, we identify potential improvements in traffic control centers for promoting dispatchers' attention and alertness that can be included in a computer-aided system. To this end, we first conducted an ethnographic analysis of rail-based transport control centers to gain knowledge about work conditions, work processes, and their impact on the dispatcher. In a second step, we developed a scenario classification to distinguish actions; this classification structures a detailed description of potential improvements to promote dispatchers' attention and alertness. Our classification revealed eleven scenarios in which the dispatcher has no safety responsibility. We clustered these eleven scenarios into three high-level groups: peak hours, special service times, and off-peak and night-owl service times. Further, we highlight potential options for utilizing the latest technology to enable efficient operation in control rooms in the future.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}

Mayer, Sven; Lischke, Lars; Woźniak, Paweł W.; Henze, Niels
Evaluating the Disruptiveness of Mobile Interactions: A Mixed-Method Approach Proceedings Article
In: Proceedings of the 2018 CHI Conference on Human Factors in Computing Systems, pp. 406:1–406:14, ACM, New York, NY, USA, 2018.
@inproceedings{Mayer:2018:Disruptiveness,
title = {Evaluating the Disruptiveness of Mobile Interactions: A Mixed-Method Approach},
author = { Sven Mayer and Lars Lischke and Pawe\l W. Wo\'{z}niak and Niels Henze},
url = {http://sven-mayer.com/wp-content/uploads/2018/01/mayer2018mixedmethod.pdf
https://www.youtube.com/watch?v=6-HGXW6bLPw
https://www.youtube.com/watch?v=IJe2e-Ax3RU},
doi = {10.1145/3173574.3173980},
year = {2018},
date = {2018-04-21},
booktitle = {Proceedings of the 2018 CHI Conference on Human Factors in Computing Systems},
pages = {406:1--406:14},
publisher = {ACM},
address = {New York, NY, USA},
series = {CHI'18},
abstract = {While the proliferation of mobile devices has rendered mobile notifications ubiquitous, researchers are only slowly beginning to understand how these technologies affect everyday social interactions. In particular, the negative social influence of mobile interruptions remains unexplored from a methodological perspective. This paper contributes a mixed-method evaluation procedure for assessing the disruptive impact of mobile interruptions in conversation. The approach combines quantitative eye tracking, qualitative analysis, and a simulated conversation environment to enable fast assessment of disruptiveness. It is intended to be used as a part of an iterative interaction design process. We describe our approach in detail, present an example of its use to study a new call declining technique, and reflect upon the pros and cons of our approach.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}

Mayer, Sven; Schwind, Valentin; Schweigert, Robin; Henze, Niels
The Effect of Offset Correction and Cursor on Mid-Air Pointing in Real and Virtual Environments Proceedings Article
In: Proceedings of the 2018 CHI Conference on Human Factors in Computing Systems, pp. 653:1–653:13, ACM, New York, NY, USA, 2018.
@inproceedings{Mayer:2018:EOC,
title = {The Effect of Offset Correction and Cursor on Mid-Air Pointing in Real and Virtual Environments},
author = {Sven Mayer and Valentin Schwind and Robin Schweigert and Niels Henze},
url = {http://sven-mayer.com/wp-content/uploads/2018/01/mayer2018vrpointing.pdf
https://www.youtube.com/watch?v=Mu_8iJer2BM},
doi = {10.1145/3173574.3174227},
year = {2018},
date = {2018-04-21},
urldate = {2018-04-21},
booktitle = {Proceedings of the 2018 CHI Conference on Human Factors in Computing Systems},
pages = {653:1--653:13},
publisher = {ACM},
address = {New York, NY, USA},
series = {CHI'18},
abstract = {Pointing at remote objects to direct others' attention is a fundamental human ability. Previous work explored methods for remote pointing to select targets. Absolute pointing techniques that cast a ray from the user to a target are affected by humans' limited pointing accuracy. Recent work suggests that accuracy can be improved by compensating systematic offsets between targets a user aims at and rays cast from the user to the target. In this paper, we investigate mid-air pointing in the real world and virtual reality. Through a pointing study, we model the offsets to improve pointing accuracy and show that being in a virtual environment affects how users point at targets. In the second study, we validate the developed model and analyze the effect of compensating systematic offsets. We show that the provided model can significantly improve pointing accuracy when no cursor is provided. We further show that a cursor improves pointing accuracy but also increases the selection time.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
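
The offset-compensation idea summarized in this abstract, learning the systematic error between the cast ray and the intended target and then subtracting the prediction from new rays, can be sketched as follows. The polynomial degree, the angular features, and the synthetic data are assumptions made for illustration, not the paper's actual model.

import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import PolynomialFeatures

# Synthetic stand-in data: raw ray angles (azimuth, elevation) in degrees
# and the measured angular offset to the target the user aimed at.
rng = np.random.default_rng(0)
angles = rng.uniform(-40, 40, size=(1000, 2))
offsets = 0.1 * angles + 2.0 + rng.normal(scale=0.5, size=(1000, 2))

# Fit a low-degree polynomial model of the systematic offset.
model = make_pipeline(PolynomialFeatures(degree=2), LinearRegression())
model.fit(angles, offsets)

# Compensation at runtime: subtract the predicted offset from a raw ray.
raw_ray = np.array([[10.0, -5.0]])
corrected_ray = raw_ray - model.predict(raw_ray)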

Mayer, Sven; Le, Huy Viet; Henze, Niels
Designing Finger Orientation Input for Mobile Touchscreens Proceedings Article
In: Proceedings of the 20th International Conference on Human-Computer Interaction with Mobile Devices and Services, ACM, New York, NY, USA, 2018.
@inproceedings{Mayer:2018:Ergonomic,
title = {Designing Finger Orientation Input for Mobile Touchscreens},
author = {Sven Mayer and Huy Viet Le and Niels Henze},
url = {http://sven-mayer.com/wp-content/uploads/2018/05/mayer2018ergonomics.pdf},
doi = {10.1145/3229434.3229444},
year = {2018},
date = {2018-09-03},
booktitle = {Proceedings of the 20th International Conference on Human-Computer Interaction with Mobile Devices and Services},
publisher = {ACM},
address = {New York, NY, USA},
series = {MobileHCI'18},
abstract = {A large number of today's systems use interactive touch surfaces as the main input channel. Current devices reduce the richness of touch input to two-dimensional positions on the screen. A growing body of work develops methods that enrich touch input to provide additional degrees of freedom for touch interaction. In particular, previous work proposed to use the finger's orientation as additional input. To efficiently implement new input techniques which make use of the new input dimensions, we need to understand the limitations of the input. Therefore, we conducted a study to derive the ergonomic constraints for using finger orientation as additional input in a two-handed smartphone scenario. We show that for both hands, the comfort and the non-comfort zone depend on how the user interacts with a touch surface. For two-handed smartphone scenarios, the range is 33.3% larger than for tabletop scenarios. We further show that the phone orientation correlates with the finger orientation. Finger orientations which are harder to perform result in phone orientations where the screen does not directly face the user.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}

Mayer, Sven; Lischke, Lars; Lanksweirt, Adrian; Le, Huy Viet; Henze, Niels
How to Communicate New Input Techniques Proceedings Article
In: Proceedings of the 10th Nordic Conference on Human-Computer Interaction, pp. 13, ACM, New York, NY, USA, 2018.
@inproceedings{Mayer:2018:HCN,
title = {How to Communicate New Input Techniques},
author = {Sven Mayer and Lars Lischke and Adrian Lanksweirt and Huy Viet Le and Niels Henze},
url = {http://sven-mayer.com/wp-content/uploads/2018/07/mayer2018communicate.pdf},
doi = {10.1145/3240167.3240176},
year = {2018},
date = {2018-10-01},
booktitle = {Proceedings of the 10th Nordic Conference on Human-Computer Interaction},
pages = {13},
publisher = {ACM},
address = {New York, NY, USA},
series = {NordiCHI '18},
abstract = {Touchscreens are among the most ubiquitous input technologies. Commercial devices typically limit the input to 2D touch points. While a body of work enhances the interaction through finger recognition and diverse gestures, advanced input techniques have had a limited commercial impact. A major challenge is explaining new input techniques to users. In this paper, we investigate how to communicate novel input techniques for smartphones. Through interviews with 12 UX experts, we identified three potential approaches: Depiction uses an icon to visualize the input technique, Pop-up shows a modal dialog when the input technique is available, and Tutorial explains all available input techniques in a centralized way. To understand which approach is most preferred by users we conducted a study with 36 participants that introduced novel techniques using one of the communication methods. While Depiction was preferred, we found that the approach should be selected based on the complexity of the interaction, novelty to the user, and the device size.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}

Mayer, Sven
SimTech Milestone Report: Finger Orientation as an Additional Input Dimension for Touchscreens Technical Report
2018.
@techreport{Mayer:2018:Milestone,
title = {SimTech Milestone Report: Finger Orientation as an Additional Input Dimension for Touchscreens},
author = {Sven Mayer},
url = {http://sven-mayer.com/wp-content/uploads/2019/02/mayer2018milestone.pdf},
year = {2018},
date = {2018-06-05},
abstract = {The age of ubiquitous computing has brought a large number of interactive surfaces into our lives. Interactive surfaces are present in various forms and contexts, from tabletops to mobile devices, and touchscreens remain the main input technique. In current systems, a finger touching a screen or surface is typically reduced to simple two-dimensional coordinates. To overcome the limited expressiveness when using touchscreens, a large body of research proposes to enrich such touch interaction. In particular, previous work suggests using the finger orientation as a means of input to address this fundamental problem of touchscreens.},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}

Mayer, Sven; Lischke, Lars; Grønbæk, Jens Emil; Sarsenbayeva, Zhanna; Vogelsang, Jonas; Woźniak, Paweł W.; Henze, Niels; Jacucci, Giulio
Pac-Many: Movement Behavior when Playing Collaborative and Competitive Games on Large Displays Proceedings Article
In: Proceedings of the 2018 CHI Conference on Human Factors in Computing Systems, pp. 539:1–539:10, ACM, New York, NY, USA, 2018.
@inproceedings{Mayer:2018:PMB,
title = {Pac-Many: Movement Behavior when Playing Collaborative and Competitive Games on Large Displays},
author = { Sven Mayer and Lars Lischke and Jens Emil Gr\onb\aek and Zhanna Sarsenbayeva and Jonas Vogelsang and Pawe\l W. Wo\'{z}niak and Niels Henze and Giulio Jacucci},
url = {http://sven-mayer.com/wp-content/uploads/2018/01/mayer2018pacmany.pdf
https://github.com/interactionlab/pacmany
https://www.youtube.com/watch?v=FsyBKALvAw8},
doi = {10.1145/3173574.3174113},
year = {2018},
date = {2018-04-21},
booktitle = {Proceedings of the 2018 CHI Conference on Human Factors in Computing Systems},
pages = {539:1--539:10},
publisher = {ACM},
address = {New York, NY, USA},
series = {CHI'18},
abstract = {Previous work has shown that large high-resolution displays (LHRDs) can enhance collaboration between users. As LHRDs allow free movement in front of the screen, an understanding of movement behavior is required to build successful interfaces for these devices. This paper presents Pac-Many, a multiplayer version of the classical computer game Pac-Man, to study group dynamics when using LHRDs. We utilized smartphones as game controllers to enable free movement while playing the game. In a lab study, using a 4m × 1m LHRD, 24 participants (12 pairs) played Pac-Many in collaborative and competitive conditions. The results show that players in the collaborative condition divided screen space evenly. In contrast, competing players stood closer together to avoid benefits for the other player. We discuss how the nature of the task is important when designing and analyzing collaborative interfaces for LHRDs. Our work shows how to account for the spatial aspects of interaction with LHRDs to build immersive experiences.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}

Mayer, Sven; Le, Huy Viet; Nesti, Alessandro; Henze, Niels; Bülthoff, Heinrich H.; Chuang, Lewis L.
The Effect of Road Bumps on Touch Interaction in Cars Proceedings Article
In: 10th International Conference on Automotive User Interfaces and Interactive Vehicular Applications Proceedings, 2018.
@inproceedings{Mayer:2018:TouchInACar,
title = {The Effect of Road Bumps on Touch Interaction in Cars},
author = {Sven Mayer and Huy Viet Le and Alessandro Nesti and Niels Henze and Heinrich H. B\"{u}lthoff and Lewis L. Chuang},
url = {http://sven-mayer.com/wp-content/uploads/2018/09/mayer2018touchinacar.pdf
https://github.com/interactionlab/Touch-Interaction-with-Road-Bumps},
doi = {10.1145/3239060.3239071},
year = {2018},
date = {2018-09-23},
booktitle = {10th International Conference on Automotive User Interfaces and Interactive Vehicular Applications Proceedings},
series = {AutomotiveUI '18},
abstract = {Touchscreens are a common fixture in current vehicles. With autonomous driving, we can expect touch interaction with such in-vehicle media systems to exponentially increase. In spite of vehicle suspension systems, road perturbations will continue to exert forces that can render in-vehicle touch interaction challenging. Using a motion simulator, we investigate how different vehicle speeds interact with road features (i.e., speed bumps) to influence touch interaction. We determine their effect on pointing accuracy and task completion time. We show that road bumps have a significant effect on touch input and can decrease accuracy by 19%. In light of this, we developed a Random Forest (RF) model that improves touch accuracy by 32.0% on our test set and by 22.5% on our validation set. As the lightweight model uses only features that can easily be determined through inertial measurement units, this model could be easily deployed in current automobiles.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
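
The correction model described in this abstract, a Random Forest that maps easily available inertial measurements to the expected touch offset, might look roughly like the sketch below. The feature count, offset units, and training data are invented for illustration; the paper's actual feature set is not reproduced here (the entry links the authors' repository).

import numpy as np
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split

# Invented training data: per touch event, IMU-derived features (e.g.,
# recent acceleration statistics) and the observed offset (dx, dy) in mm.
rng = np.random.default_rng(0)
features = rng.normal(size=(5000, 12))
offsets = rng.normal(scale=3.0, size=(5000, 2))

x_train, x_test, y_train, y_test = train_test_split(
    features, offsets, random_state=0)

# Multi-output regression: predict the 2D offset caused by road bumps.
model = RandomForestRegressor(n_estimators=100, random_state=0)
model.fit(x_train, y_train)

# Correct a raw touch by subtracting the predicted offset.
raw_touch = np.array([[120.0, 340.0]])
corrected = raw_touch - model.predict(x_test[:1])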

Schwind, Valentin; Mayer, Sven; Comeau-Vermeersch, Alexandre; Schweigert, Robin; Henze, Niels
Up to the Finger Tip: The Effect of Avatars on Mid-Air Pointing Accuracy in Virtual Reality Book Section
In: Proceedings of the 2018 Annual Symposium on Computer-Human Interaction in Play, 2018.
@incollection{Schwind:2018:FingerTip,
title = {Up to the Finger Tip: The Effect of Avatars on Mid-Air Pointing Accuracy in Virtual Reality},
author = {Valentin Schwind and Sven Mayer and Alexandre Comeau-Vermeersch and Robin Schweigert and Niels Henze},
url = {http://sven-mayer.com/wp-content/uploads/2018/09/schwind2018handsinvr.pdf},
doi = {10.1145/3242671.3242675},
year = {2018},
date = {2018-10-28},
booktitle = {Proceedings of the 2018 Annual Symposium on Computer-Human Interaction in Play},
series = {CHIPlay'18},
abstract = {Avatars in virtual reality (VR) increase the immersion and provide an interface between the user's physical body and the virtual world. Thus, avatars enable referential gestures, which are essential for targeting, selection, locomotion, and collaboration in VR. However, players of immersive games can have a virtual appearance that deviates from human-likeness, and previous work suggests that avatars can have an effect on the accuracy of referential gestures in VR. One of the most important referential gestures is mid-air pointing. It has been shown that mid-air pointing is affected by systematic errors, which can be compensated using different methods. However, it is unknown whether the avatar must be considered in corrections of the systematic error. In this paper, we investigate the effect of the avatar on pointing accuracy. We show that the systematic error in pointing is significantly affected by the virtual appearance but does not correlate with the degree to which the appearance deviates from the perceived human-likeness. Moreover, we confirm that people only rely on their fingertip and not on their forearm or index finger orientation. We present compensation models and contribute design implications to increase the accuracy of pointing in VR.},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
2017

Le, Huy Viet; Mayer, Sven; Bader, Patrick; Bastian, Frank; Henze, Niels
Interaction Methods and Use Cases for a Full-Touch Sensing Smartphone Proceedings Article
In: Proceedings of the 2017 CHI Conference Extended Abstracts on Human Factors in Computing Systems (CHI EA '17), 2017.
@inproceedings{full-touchable-phone,
title = {Interaction Methods and Use Cases for a Full-Touch Sensing Smartphone},
author = { Huy Viet Le and Sven Mayer and Patrick Bader and Frank Bastian and Niels Henze},
url = {http://sven-mayer.com/wp-content/uploads/2017/03/le2017full-touchable-phone.pdf},
doi = {10.1145/3027063.3053196},
year = {2017},
date = {2017-01-01},
booktitle = {Proceedings of the 2017 CHI Conference Extended Abstracts on Human Factors in Computing Systems (CHI EA '17)},
abstract = {Touchscreens are successful in recent smartphones because they combine input and output in a single interface. Despite their advantages, touch input still suffers from common limitations such as the fat-finger problem. To address these limitations, prior work proposed a variety of interaction techniques based on input sensors beyond the touchscreen, which were evaluated from a technical perspective. In contrast, we envision a smartphone that senses touch input on the whole device. Through interviews with experienced interaction designers, we elicited interaction methods to address touch input limitations from a different perspective. In this work, we focus on the interview results and present a smartphone prototype which senses touch input on the whole device. It has dimensions similar to regular phones and can be used to evaluate the presented findings under realistic conditions in future work.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}

Funk, Markus; Lischke, Lars; Mayer, Sven
Neue Impulse für visuelle Kommissionierassistenzsysteme aus der Mensch-Computer-Interaktion Book Chapter
In: Warehousing 4.0, pp. 223–236, B+G Wissenschaftsverlag, Lauda-Königshofen, Germany, 2017.
@inbook{funk2017neue_preprint,
title = {Neue Impulse f\"{u}r visuelle Kommissionierassistenzsysteme aus der Mensch-Computer-Interaktion},
author = { Markus Funk and Lars Lischke and Sven Mayer},
url = {http://sven-mayer.com/wp-content/uploads/2017/03/funk2017.pdf},
year = {2017},
date = {2017-01-01},
booktitle = {Warehousing 4.0},
pages = {223--236},
publisher = {B+G Wissenschaftsverlag},
address = {Lauda-K\"{o}nigshofen, Germany},
abstract = {Durch den Einzug von computergest\"{u}tzten Systemen in industrielle Produktionsprozesse (Industrie 4.0) werden mehr und mehr Anwendungen m\"{o}glich, die Arbeitern w\"{a}hrend komplexen Aufgaben helfen k\"{o}nnen. Dieser Beitrag besch\"{a}ftigt sich mit der manuellen Kommissionierung und stellt einen \"{U}berblick \"{u}ber computergest\"{u}tzte Assistenzsysteme f\"{u}r diese T\"{a}tigkeit vor. Hierbei liegt der Fokus auf der Mensch-Computer Schnittstelle, welche im Zuge der Industrie 4.0 eine zunehmend gr\"{o}{\ss}ere Bedeutung erf\"{a}hrt. Zuerst wird ein \"{U}berblick \"{u}ber die Mensch-Computer Schnittstellen gegeben, die in Industrie und Forschung vorgeschlagen wurden. Danach werden projektionsbasierte Kommissionierassistenzsysteme vorgestellt, die im Rahmen des Forschungsprojektes motionEAP entworfen wurden.},
keywords = {},
pubstate = {published},
tppubtype = {inbook}
}

Henze, Niels; Le, Huy Viet; Mayer, Sven; Schwind, Valentin
Improving Software-Reduced Touchscreen Latency Proceedings Article
In: Proceedings of the 19th International Conference on Human-Computer Interaction with Mobile Devices and Services Adjunct, 2017.
@inproceedings{Henze:2017:Improving,
title = {Improving Software-Reduced Touchscreen Latency},
author = {Niels Henze and Huy Viet Le and Sven Mayer and Valentin Schwind},
url = {http://sven-mayer.com/wp-content/uploads/2017/08/henze2017latency.pdf
https://github.com/interactionlab/MobileHCI17-Touch-Extrapolation},
doi = {10.1145/3098279.3122150},
year = {2017},
date = {2017-09-04},
booktitle = {Proceedings of the 19th International Conference on Human-Computer Interaction with Mobile Devices and Services Adjunct},
series = {mobileHCI'17},
abstract = {The latency of current mobile devices' touchscreens is around 100ms and has been widely explored. Latency down to 2ms is noticeable, and latency as low as 25ms reduces users' performance. Previous work reduced touch latency by extrapolating a finger's movement using an ensemble of shallow neural networks and showed that predicting 33ms into the future increases users' performance. Unfortunately, this prediction has a high error. Predicting beyond 33ms did not increase participants' performance, and the error affected the subjective assessment. We use more recent machine learning techniques to reduce the prediction error. We train LSTM networks and multilayer perceptrons using a large data set and regularization. We show that linear extrapolation causes a 116.7% higher error and the previously proposed ensembles of shallow networks cause a 26.7% higher error compared to the LSTM networks. The trained models, the data used for testing, and the source code are available on GitHub.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
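
The approach summarized here, training an LSTM to extrapolate a finger's trajectory a fixed horizon into the future, can be sketched as below. Sequence length, network size, and the synthetic trajectories are assumptions for illustration; the authors' actual models and data are linked from the entry's GitHub repository.

import numpy as np
import tensorflow as tf

# Synthetic trajectories: the last 10 touch samples (x, y) as input,
# the position one horizon (~33 ms) ahead as the regression target.
rng = np.random.default_rng(0)
x = rng.random((2000, 10, 2)).astype("float32")
y = (x[:, -1, :] + rng.normal(scale=0.01, size=(2000, 2))).astype("float32")

model = tf.keras.Sequential([
    tf.keras.layers.LSTM(64, input_shape=(10, 2)),
    tf.keras.layers.Dense(2),  # predicted future (x, y)
])
model.compile(optimizer="adam", loss="mse")
model.fit(x, y, epochs=3, batch_size=64, verbose=0)

# At runtime, feed the most recent samples and render at the prediction.
future_xy = model.predict(x[:1])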

Henze, Niels; Martin, Ullrich; Rieger, Monika A.; Lischke, Lars; Mayer, Sven; von Molo, Carlo; Steinhilber, Benjamin; Wagenblast, Florestan
Konzept zur Entwicklung moderner Bedienformen für Betriebszentralen Journal Article
In: ETR - Eisenbahntechnische Rundschau, vol. 1, pp. 26–30, 2017.
@article{Henze2017a,
title = {Konzept zur Entwicklung moderner Bedienformen f\"{u}r Betriebszentralen},
author = {Niels Henze and Ullrich Martin and Monika A. Rieger and Lars Lischke and Sven Mayer and Carlo von Molo and Benjamin Steinhilber and Florestan Wagenblast},
url = {http://sven-mayer.com/wp-content/uploads/2017/03/Henze_Martin_Rieger_ua_Betriebszentralen.pdf},
year = {2017},
date = {2017-01-01},
journal = {ETR - Eisenbahntechnische Rundschau},
volume = {1},
pages = {26--30},
publisher = {Eurailpress Deutscher Verkehrs-Verlag},
address = {Deutschland},
abstract = {Ein gemeinsames Projekt der Institute f\"{u}r Eisenbahn- und Verkehrswesen sowie f\"{u}r Visualisierung und Interaktive Systeme der Universit\"{a}t Stuttgart und dem Institut f\"{u}r Arbeitsmedizin, Sozialmedizin und Versorgungsforschung des Universit\"{a}tsklinikums T\"{u}bingen wird in diesem Beitrag vorgestellt. Zun\"{a}chst werden Kontrollr\"{a}ume, Vorg\"{a}nge und T\"{a}tigkeiten sowie die damit in Zusammenhang stehenden Belastungen von Disponenten in Betriebszentralen von Verkehrsunternehmen analysiert. Basierend auf den Ergebnissen werden moderne Bedienformen und Benutzungsschnittstellen untersucht.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}

Kiss, Francisco; Kucharski, Konrad; Mayer, Sven; Lischke, Lars; Knierim, Pascal; Romanowski, Andrzej; Woźniak, Paweł W.
RunMerge: Towards Enhanced Proprioception for Advanced Amateur Runners Proceedings Article
In: Proceedings of the 2017 ACM Conference Companion Publication on Designing Interactive Systems, pp. 192–196, ACM, New York, NY, USA, 2017.
@inproceedings{Kiss:2017:RTE,
title = {RunMerge: Towards Enhanced Proprioception for Advanced Amateur Runners},
author = {Francisco Kiss and Konrad Kucharski and Sven Mayer and Lars Lischke and Pascal Knierim and Andrzej Romanowski and Pawe\l W. Wo\'{z}niak},
url = {http://sven-mayer.com/wp-content/uploads/2018/03/kiss2017runmerge.pdf},
doi = {10.1145/3064857.3079144},
year = {2017},
date = {2017-01-01},
booktitle = {Proceedings of the 2017 ACM Conference Companion Publication on Designing Interactive Systems},
pages = {192--196},
publisher = {ACM},
address = {New York, NY, USA},
series = {DIS '17 Companion},
abstract = {While amateur running is one of the most popular recreational sports activities, it also produces many injuries, which are often caused by improper technique or shoe choice. In this paper, we present the design and initial evaluation of RunMerge \textemdash a mobile application that integrates data from location and motion sensors to give runners a better understanding of their running. With RunMerge, we investigate how technologically enhanced bodily awareness can help amateur runners achieve a better running experience. We present the design of RunMerge and the insights from its user study. Our work indicates that enhanced proprioception (i.e., the awareness of one's body parts and movement) can be beneficial for everyday running training. Finally, we reflect on future work on increased bodily awareness for endurance sports.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}

Le, Huy Viet; Mayer, Sven; Henze, Niels
Machine Learning with Tensorflow for Mobile and Ubiquitous Interaction Proceedings Article
In: Proceedings of the 16th International Conference on Mobile and Ubiquitous Multimedia, pp. 567–572, ACM, Stuttgart, Germany, 2017, ISBN: 978-1-4503-5378-6.
@inproceedings{Le:2017:MLT,
title = {Machine Learning with Tensorflow for Mobile and Ubiquitous Interaction},
author = { Huy Viet Le and Sven Mayer and Niels Henze},
url = {http://sven-mayer.com/wp-content/uploads/2018/01/le2017tuttf.pdf},
doi = {10.1145/3152832.3156559},
isbn = {978-1-4503-5378-6},
year = {2017},
date = {2017-01-01},
booktitle = {Proceedings of the 16th International Conference on Mobile and Ubiquitous Multimedia},
pages = {567--572},
publisher = {ACM},
address = {Stuttgart, Germany},
series = {MUM '17},
abstract = {Due to the increasing number of sensors integrated into the environment and worn by the user, a vast amount of context-sensitive data becomes available. While interpreting these data with traditional methods (e.g., formulas and simple heuristics) is challenging, the latest machine learning techniques require only a set of labeled data. TensorFlow is an open-source library for machine learning which implements a wide range of neural network models. With TensorFlow Mobile, researchers and developers can further deploy the trained models on low-end mobile devices for ubiquitous scenarios. This facilitates the model export and offers techniques to optimize the model for a mobile deployment. In this tutorial, we teach attendees the two basic steps of deploying neural networks on smartphones: Firstly, we will teach how to develop neural network architectures and train them in TensorFlow. Secondly, we show the process to run the trained models on a mobile phone using TensorFlow Mobile.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
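
The two tutorial steps, training a model in TensorFlow and then deploying it for on-device inference, map onto today's API roughly as follows. The 2017 tutorial used TensorFlow Mobile; this sketch substitutes the current TensorFlow Lite converter and interpreter as an analogue, and the tiny model and file name are placeholders.

import numpy as np
import tensorflow as tf

# Placeholder for the tutorial's trained network.
model = tf.keras.Sequential([
    tf.keras.layers.Dense(16, activation="relu", input_shape=(4,)),
    tf.keras.layers.Dense(3, activation="softmax"),
])
model.compile(optimizer="adam", loss="sparse_categorical_crossentropy")

# Step 1: export the model for mobile deployment.
converter = tf.lite.TFLiteConverter.from_keras_model(model)
open("model.tflite", "wb").write(converter.convert())

# Step 2: run the exported model (on a device this would be the Android
# or iOS interpreter; here the Python interpreter simulates it).
interpreter = tf.lite.Interpreter(model_path="model.tflite")
interpreter.allocate_tensors()
inp = interpreter.get_input_details()[0]
out = interpreter.get_output_details()[0]
interpreter.set_tensor(inp["index"], np.zeros((1, 4), dtype=np.float32))
interpreter.invoke()
prediction = interpreter.get_tensor(out["index"])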

Le, Huy Viet; Mayer, Sven; Bader, Patrick; Henze, Niels
A Smartphone Prototype for Touch Interaction on the Whole Device Surface Proceedings Article
In: Proceedings of the 19th International Conference on Human-Computer Interaction with Mobile Devices and Services Adjunct, pp. 8, ACM, New York, NY, USA, 2017.
@inproceedings{Le:2017:SPT,
title = {A Smartphone Prototype for Touch Interaction on the Whole Device Surface},
author = {Huy Viet Le and Sven Mayer and Patrick Bader and Niels Henze},
url = {http://sven-mayer.com/wp-content/uploads/2017/07/le2017smartphone.pdf
https://github.com/interactionlab/full-touch-smartphone},
doi = {10.1145/3098279.3122143},
year = {2017},
date = {2017-09-04},
booktitle = {Proceedings of the 19th International Conference on Human-Computer Interaction with Mobile Devices and Services Adjunct},
pages = {8},
publisher = {ACM},
address = {New York, NY, USA},
series = {MobileHCI'17},
abstract = {Previous research proposed a wide range of interaction methods and use cases based on the previously unused back side and edge of a smartphone. Common approaches to implementing Back-of-Device (BoD) interaction include attaching two smartphones back to back and building a prototype completely from scratch. Changes in the device's form factor can influence hand grip and input performance as shown in previous work. Further, the lack of an established operating system and SDK requires more effort to implement novel interaction methods. In this work, we present a smartphone prototype that runs Android and has a form factor nearly identical to an off-the-shelf smartphone. It further provides capacitive images of the hand holding the device for use cases such as grip-pattern recognition. We describe technical details and share source files so that others can re-build our prototype. We evaluated the prototype with 8 participants to demonstrate the data that can be retrieved for an exemplary grip classification.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}

Lischke, Lars; Mayer, Sven; Hoffmann, Jan; Kratzer, Philipp; Roth, Stephan; Wolf, Katrin; Woźniak, Paweł
Interaction Techniques for Window Management on Large High-resolution Displays Proceedings Article
In: Proceedings of the 16th International Conference on Mobile and Ubiquitous Multimedia, pp. 241–247, ACM, Stuttgart, Germany, 2017, ISBN: 978-1-4503-5378-6.
@inproceedings{Lischke:2017:ITW,
title = {Interaction Techniques for Window Management on Large High-resolution Displays},
author = { Lars Lischke and Sven Mayer and Jan Hoffmann and Philipp Kratzer and Stephan Roth and Katrin Wolf and Pawe\l Wo\'{z}niak},
url = {http://sven-mayer.com/wp-content/uploads/2017/12/lischke2017window.pdf},
doi = {10.1145/3152832.3152852},
isbn = {978-1-4503-5378-6},
year = {2017},
date = {2017-01-01},
booktitle = {Proceedings of the 16th International Conference on Mobile and Ubiquitous Multimedia},
pages = {241--247},
publisher = {ACM},
address = {Stuttgart, Germany},
series = {MUM '17},
abstract = {Large high-resolution displays (LHRDs) present new opportunities for interaction design in areas such as interactive visualization and data analytics. Design processes for graphical interfaces for LHRDs are still challenging. In this paper, we explore the design space of graphical interfaces for LHRDs by engaging in the creation of four prototypes for supporting office work. Specifically, we investigate how users can effectively manage application windows on LHRDs using four window alignment techniques: curved zooming, window grouping, window spinning and side pane navigation. We present the design and implementation of these window alignment techniques in a sample office application. Based on a mixed-methods user study of our prototypes, we contribute insights on designing future graphical interfaces for LHRDs. We show that potential users appreciate techniques that enhance focus switching without changing the spatial relation between related windows.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}

Lischke, Lars; Woźniak, Paweł W.; Mayer, Sven; Preikschat, Andreas; Fjeld, Morten
Using Variable Movement Resistance Sliders for Remote Discrete Input Proceedings Article
In: Proceedings of the 2017 ACM International Conference on Interactive Surfaces and Spaces, pp. 116–125, ACM, Brighton, United Kingdom, 2017, ISBN: 978-1-4503-4691-7.
@inproceedings{Lischke:2017:UVM,
title = {Using Variable Movement Resistance Sliders for Remote Discrete Input},
author = { Lars Lischke and Pawe\l W. Wo\'{z}niak and Sven Mayer and Andreas Preikschat and Morten Fjeld},
url = {http://sven-mayer.com/wp-content/uploads/2017/10/lischke2017slider.pdf},
doi = {10.1145/3132272.3134135},
isbn = {978-1-4503-4691-7},
year = {2017},
date = {2017-01-01},
booktitle = {Proceedings of the 2017 ACM International Conference on Interactive Surfaces and Spaces},
pages = {116--125},
publisher = {ACM},
address = {Brighton, United Kingdom},
series = {ISS '17},
abstract = {Despite the proliferation of screens in everyday environments, providing values to remote displays for exploring complex data sets is still challenging. Enhanced input for remote screens can increase their utility and enable the construction of rich data-driven environments. Here, we investigate the opportunities provided by a variable movement resistance slider (VMRS), based on a motorized slide potentiometer. These devices are often used in professional soundboards as an effective way to provide discrete input. We designed, built and evaluated a remote input device using a VMRS that facilitates choosing a number on a discrete scale. By comparing our prototype to a traditional slide potentiometer and a software slider, we determined that for conditions where users are not looking at the slider, VMRS can offer significantly better performance and accuracy. Our findings contribute to the understanding of discrete input and enable building new interaction scenarios for large display environments.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}

Mayer, Sven; Knierim, Pascal; Woźniak, Paweł W.; Funk, Markus
How Drones Can Support Backcountry Activities Proceedings Article
In: Proceedings of the 2017 natureCHI workshop, in conjunction with ACM mobileHCI'17, pp. 6, 2017.
@inproceedings{Mayer:2017:Drones,
title = {How Drones Can Support Backcountry Activities},
author = {Sven Mayer and Pascal Knierim and Pawe\l W. Wo\'{z}niak and Markus Funk},
url = {http://sven-mayer.com/wp-content/uploads/2017/07/mayer2017drones.pdf},
year = {2017},
date = {2017-09-03},
booktitle = {Proceedings of the 2017 natureCHI workshop, in conjunction with ACM mobileHCI'17},
volume = {2},
pages = {6},
series = {NatureCHI'17},
abstract = {Recent technology advances allow larger groups of people to reach more remote places. However, threats like sudden changes in weather, dangerous animals, or rough terrain are still present and sometimes underestimated. We propose autonomous personal drones, specifically quadcopters, as a supportive ubiquitous interface during backcountry activities. Due to their flexibility, quadcopters have the potential to support nature activities like hiking, cycling, or climbing. Drones can assist mountaineers in pathfinding and photo taking and protect them from threats; possible applications include monitoring weather changes, watching for dangerous animals, and calling for emergency support. We provide an overview of current technologies supporting outdoor activity and recent progress in human-drone interaction. Further, we describe our vision of drones as personal outdoor assistants, including a perspective on future scenarios as well as a discussion of related challenges.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}

Mayer, Sven; Le, Huy Viet; Henze, Niels
Estimating the Finger Orientation on Capacitive Touchscreens Using Convolutional Neural Networks Proceedings Article
In: Proceedings of the 2017 ACM International Conference on Interactive Surfaces and Spaces, pp. 220–229, ACM, Brighton, United Kingdom, 2017, ISBN: 978-1-4503-4691-7.
@inproceedings{Mayer:2017:EFO,
title = {Estimating the Finger Orientation on Capacitive Touchscreens Using Convolutional Neural Networks},
author = { Sven Mayer and Huy Viet Le and Niels Henze},
url = {http://sven-mayer.com/wp-content/uploads/2017/08/mayer2017orientation.pdf
https://github.com/interactionlab/Capacitive-Finger-Orientation-Estimation
https://www.youtube.com/watch?v=BLdynD9A23s},
doi = {10.1145/3132272.3134130},
isbn = {978-1-4503-4691-7},
year = {2017},
date = {2017-10-18},
booktitle = {Proceedings of the 2017 ACM International Conference on Interactive Surfaces and Spaces},
pages = {220--229},
publisher = {ACM},
address = {Brighton, United Kingdom},
series = {ISS '17},
abstract = {In recent years, touchscreens became the most common input device for a wide range of computers. While touchscreens are truly pervasive, commercial devices reduce the richness of touch input to two-dimensional positions on the screen. Recent work proposed interaction techniques to extend the richness of the input vocabulary using the finger orientation. Approaches for determining a finger's orientation using off-the-shelf capacitive touchscreens proposed in previous work already enable compelling use cases. However, the low estimation accuracy limits the usability and restricts the usage of finger orientation to non-precise input. With this paper, we provide a ground truth data set for capacitive touchscreens recorded with a high-precision motion capture system. Using this data set, we show that a Convolutional Neural Network can outperform approaches proposed in previous work. Instead of relying on hand-crafted features, we trained the model based on the raw capacitive images. Thereby, we reduce the pitch error by 9.8% and the yaw error by 45.7%.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
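
A model of the kind this abstract describes, a CNN regressing pitch and yaw directly from raw capacitive images, could be sketched as follows. The 27x15 input resolution, the architecture, the angle ranges, and the synthetic data are illustrative assumptions; the authors' actual data set and code are linked from the entry.

import numpy as np
import tensorflow as tf

# Synthetic stand-ins for low-resolution capacitive images around the
# touch blob, labeled with (pitch, yaw) from a motion capture system.
rng = np.random.default_rng(0)
x = rng.random((4000, 27, 15, 1)).astype("float32")
y = rng.uniform([0.0, -90.0], [90.0, 90.0], size=(4000, 2)).astype("float32")

# A small CNN trained on the raw images instead of hand-crafted features.
model = tf.keras.Sequential([
    tf.keras.layers.Conv2D(32, 3, activation="relu", input_shape=(27, 15, 1)),
    tf.keras.layers.Conv2D(64, 3, activation="relu"),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(128, activation="relu"),
    tf.keras.layers.Dense(2),  # regressed (pitch, yaw) in degrees
])
model.compile(optimizer="adam", loss="mae")
model.fit(x, y, epochs=3, batch_size=64, verbose=0)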

Mayer, Sven; Gad, Perihan; Wolf, Katrin; Woźniak, Paweł W.; Henze, Niels
Understanding the Ergonomic Constraints in Designing for Touch Surfaces Proceedings Article
In: Proceedings of the 19th International Conference on Human-Computer Interaction with Mobile Devices and Services, pp. 9, ACM, Vienna, Austria, 2017, ISBN: 978-1-4503-5075-4, (Honourable Mention Award).
@inproceedings{Mayer:2017:Ergonomic,
title = {Understanding the Ergonomic Constraints in Designing for Touch Surfaces},
author = {Sven Mayer and Perihan Gad and Katrin Wolf and Pawe\l W. Wo\'{z}niak and Niels Henze },
url = {http://sven-mayer.com/wp-content/uploads/2017/06/mayer2017ergonomic.pdf},
doi = {10.1145/3098279.3098537},
isbn = {978-1-4503-5075-4},
year = {2017},
date = {2017-09-04},
urldate = {2017-09-04},
booktitle = {Proceedings of the 19th International Conference on Human-Computer Interaction with Mobile Devices and Services},
pages = {9},
publisher = {ACM},
address = {Vienna, Austria},
series = {MobileHCI '17},
abstract = {While most current interactive surfaces use only the position of the finger on the surface as the input source, previous work suggests using the finger orientation for enriching the input space. Thus, an understanding of the physiological restrictions of the hand is required to build effective interactive techniques that use finger orientation. We conducted a study to derive the ergonomic constraints for using finger orientation as an effective input source. In a controlled experiment, we systematically manipulated finger pitch and yaw while performing a touch action. Participants were asked to rate the feasibility of the touch action. We found that finger pitch and yaw do significantly affect perceived feasibility and 21.1% of the touch actions were perceived as impossible to perform. Our results show that the finger yaw input space can be divided into the comfort and non-comfort zones. We further present design considerations for future interfaces using finger orientation.},
note = {Honourable Mention Award},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}

Mayer, Sven; Mayer, Michael; Henze, Niels
Feasibility Analysis of Detecting the Finger Orientation with Depth Cameras Proceedings Article
In: Proceedings of the 19th International Conference on Human-Computer Interaction with Mobile Devices and Services Adjunct, pp. 82:1–82:8, ACM, New York, NY, USA, 2017, ISBN: 978-1-4503-5075-4.
@inproceedings{Mayer:2017:FAD,
title = {Feasibility Analysis of Detecting the Finger Orientation with Depth Cameras},
author = {Sven Mayer and Michael Mayer and Niels Henze},
url = {http://sven-mayer.com/wp-content/uploads/2017/07/mayer2017depth.pdf},
doi = {10.1145/3098279.3122125},
isbn = {978-1-4503-5075-4},
year = {2017},
date = {2017-09-04},
booktitle = {Proceedings of the 19th International Conference on Human-Computer Interaction with Mobile Devices and Services Adjunct},
pages = {82:1--82:8},
publisher = {ACM},
address = {New York, NY, USA},
series = {MobileHCI'17},
abstract = {Over the last decade, a body of research investigated enriching touch actions by using finger orientation as an additional input. Beyond new interaction techniques, we envision new user interface elements that make use of the additional input information. We define the finger's orientation by the pitch, roll, and yaw on the touch surface. Determining the finger orientation is not possible using current state-of-the-art devices. As a first step, we built a system that can determine the finger orientation. We developed a working prototype with a depth camera mounted on a tablet. We conducted a study with 12 participants to record ground truth data for the index, middle, ring, and little finger to evaluate the accuracy of our prototype, using the PointPose algorithm to estimate the pitch and yaw of the finger. By applying 2D linear correction models, we further show a reduction of the RMSE by 45.4% for pitch and 21.83% for yaw.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}

Mayer, Sven; Le, Huy Viet; Henze, Niels
Machine Learning for Intelligent Mobile User Interfaces using TensorFlow Proceedings Article
In: Proceedings of the 19th International Conference on Human-Computer Interaction with Mobile Devices and Services, pp. 5, ACM, Vienna, AT, 2017.
@inproceedings{Mayer:2017:IntelligentMobileUserInterfaces,
title = {Machine Learning for Intelligent Mobile User Interfaces using TensorFlow},
author = {Sven Mayer and Huy Viet Le and Niels Henze},
url = {http://sven-mayer.com/wp-content/uploads/2017/06/mayer2017tensorflow.pdf
http://interactionlab.io/imui-mobilehci17/},
doi = {10.1145/3098279.3119915},
year = {2017},
date = {2017-09-04},
booktitle = {Proceedings of the 19th International Conference on Human-Computer Interaction with Mobile Devices and Services},
pages = {5},
address = {Vienna, AT},
organization = {ACM},
series = {mobileHCI'17},
abstract = {One key feature of TensorFlow is the possibility to compile the trained model so it runs efficiently on mobile phones. This enables a wide range of opportunities for researchers and developers. In this tutorial, we teach attendees two basic steps to run neural networks on a mobile phone: Firstly, we will teach how to develop neural network architectures and train them in TensorFlow. Secondly, we show the process to run the trained models on a mobile phone.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}

Romanowski, Andrzej; Mayer, Sven; Lischke, Lars; Grudzień, Krzysztof; Jaworski, Tomasz; Perenc, Izabela; Kucharski, Przemysław; Obaid, Mohammad; Kosinski, Tomasz; Woźniak, Paweł W.
Towards Supporting Remote Cheering during Running Races with Drone Technology Proceedings Article
In: Proceedings of the 2017 CHI Conference Extended Abstracts on Human Factors in Computing Systems (CHI EA '17), ACM, 2017.
@inproceedings{Romanowski:2017:TSR,
title = {Towards Supporting Remote Cheering during Running Races with Drone Technology},
author = {Andrzej Romanowski and Sven Mayer and Lars Lischke and Krzysztof Grudzie\'{n} and Tomasz Jaworski and Izabela Perenc and Przemys\law Kucharski and Mohammad Obaid and Tomasz Kosinski and Pawe\l W. Wo\'{z}niak},
url = {http://sven-mayer.com/wp-content/uploads/2017/06/romanowski2017dronerun.pdf},
doi = {10.1145/3027063.3053218},
year = {2017},
date = {2017-01-01},
booktitle = {Proceedings of the 2017 CHI Conference Extended Abstracts on Human Factors in Computing Systems (CHI EA '17)},
publisher = {ACM},
abstract = {The increasing availability of drones produces a number of opportunities for integrating them in everyday settings and using drones to create engaging experiences for users. In this paper, we investigate how drones can support amateur runners in their endeavours. We explore the possible roles for drones during amateur running races. Through two field studies and multiple semi-structured interviews, we gain new insights on how drones could augment the experience of both runners and supporters during organised races. Finally, we contribute a set of future directions for integrating drones into the sports experience. },
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}

Woźniak, Paweł W.; Lischke, Lars; Mayer, Sven; Preikschat, Andreas; Schweizer, Markus; Vu, Ba; von Molo, Carlo; Henze, Niels
Understanding Work in Public Transport Management Control Rooms Proceedings Article
In: Companion of the 2017 ACM Conference on Computer Supported Cooperative Work and Social Computing, pp. 339–342, ACM, Portland, Oregon, USA, 2017, ISBN: 978-1-4503-4688-7.
@inproceedings{Wozniak:2017:UWP:3022198.3026341b,
title = {Understanding Work in Public Transport Management Control Rooms},
author = {Pawe\l W. Wo\'{z}niak and Lars Lischke and Sven Mayer and Andreas Preikschat and Markus Schweizer and Ba Vu and Carlo von Molo and Niels Henze},
url = {http://sven-mayer.com/wp-content/uploads/2017/03/wozniak2017control-room.pdf},
doi = {10.1145/3022198.3026341},
isbn = {978-1-4503-4688-7},
year = {2017},
date = {2017-01-01},
booktitle = {Companion of the 2017 ACM Conference on Computer Supported Cooperative Work and Social Computing},
pages = {339--342},
publisher = {ACM},
address = {Portland, Oregon, USA},
series = {CSCW '17 Companion},
abstract = {Urban transport systems are increasingly important for modern cities as they provide sustainable transport and a positive social environment. The systems that allow controlling transport infrastructures integrate many legacy systems and require increasing resources for maintenance. Authorities managing public transport facilities not only need to dynamically adapt to the daily fluctuations in city life, but they also strive to be in constant dialogue with the citizens. In this poster paper, we present our preliminary insights from a study where we looked closely at the operations centre of a light rail and bus operator in a major German city. Through contextual inquiry, we chart emerging issues and design challenges. We showcase how urban facility managers negotiate legacy systems and cooperate with each other to keep transport systems functioning. },
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
2016

Funk, Markus; Kosch, Thomas; Wolf, Katrin; Knierim, Pascal; Mayer, Sven; Schmidt, Albrecht
Automatic Projection Positioning Based on Surface Suitability Proceedings Article
In: Proceedings of the 5th ACM International Symposium on Pervasive Displays, pp. 75–79, ACM, Oulu, Finland, 2016, ISBN: 978-1-4503-4366-4.
@inproceedings{Funk:2016:APP,
title = {Automatic Projection Positioning Based on Surface Suitability},
author = { Markus Funk and Thomas Kosch and Katrin Wolf and Pascal Knierim and Sven Mayer and Albrecht Schmidt},
url = {http://sven-mayer.com/wp-content/uploads/2017/03/funk2016automatic.pdf},
doi = {10.1145/2914920.2915014},
isbn = {978-1-4503-4366-4},
year = {2016},
date = {2016-01-01},
booktitle = {Proceedings of the 5th ACM International Symposium on Pervasive Displays},
pages = {75--79},
publisher = {ACM},
address = {Oulu, Finland},
series = {PerDis '16},
abstract = {Projectors will most likely be embedded into the next generation of mobile devices, and thus projecting pictures and videos into the physical world will be an important use case. However, projection positioning in the real world is challenged by surface reflections or objects in the environment. As adjusting the projected image manually is cumbersome, in this work, we introduce a camera-based algorithm to measure the projection suitability of arbitrary surfaces using three classifiers that can be derived automatically and in real-time from a camera image. Through a user study, in which participants rated the projection quality of different surfaces, we developed guidelines for indicating suitable projection spots using the three classifiers. As a proof of concept, we implemented a mobile prototype applying the proposed guidelines. This prototype automatically scales and places projected content at the position in everyday environments that promises the best projection quality.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
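
The abstract does not spell out the three classifiers, so as a hedged sketch of the general idea, the snippet below scores a camera patch with three plausible stand-in heuristics computed in real time with OpenCV: projections tend to read best on bright, smooth, color-neutral surfaces. The function name and the heuristics are assumptions for illustration, not the authors' classifiers.

    import cv2

    def surface_suitability(patch_bgr):
        """Score a candidate projection surface patch in [0, 1] using
        three simple heuristics (stand-ins for the paper's classifiers)."""
        gray = cv2.cvtColor(patch_bgr, cv2.COLOR_BGR2GRAY)
        hsv = cv2.cvtColor(patch_bgr, cv2.COLOR_BGR2HSV)
        brightness = gray.mean() / 255.0                # prefer light surfaces
        # Low Laplacian variance means little visible texture.
        smoothness = 1.0 / (1.0 + cv2.Laplacian(gray, cv2.CV_64F).var() / 100.0)
        neutrality = 1.0 - hsv[:, :, 1].mean() / 255.0  # prefer unsaturated colors
        return (brightness + smoothness + neutrality) / 3.0

Sliding such a score across the camera frame and selecting the best-scoring window would approximate the prototype's automatic scaling and placement of projected content.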

Funk, Markus; Mayer, Sven; Nistor, Michael; Schmidt, Albrecht
Mobile In-Situ Pick-by-Vision: Order Picking Support Using a Projector Helmet Proceedings Article
In: Proceedings of the 9th ACM International Conference on PErvasive Technologies Related to Assistive Environments, pp. 45:1–45:4, ACM, Corfu Island, Greece, 2016, ISBN: 978-1-4503-4337-4.
@inproceedings{Funk:2016:MIP:2910674.2910730b,
title = {Mobile In-Situ Pick-by-Vision: Order Picking Support Using a Projector Helmet},
author = { Markus Funk and Sven Mayer and Michael Nistor and Albrecht Schmidt},
url = {http://sven-mayer.com/wp-content/uploads/2017/03/funk2016helmet.pdf},
doi = {10.1145/2910674.2910730},
isbn = {978-1-4503-4337-4},
year = {2016},
date = {2016-01-01},
booktitle = {Proceedings of the 9th ACM International Conference on PErvasive Technologies Related to Assistive Environments},
pages = {45:1--45:4},
publisher = {ACM},
address = {Corfu Island, Greece},
series = {PETRA '16},
abstract = {Order picking is one of the most complex and error-prone tasks that can be found in the industry. To support the workers, many order picking instruction systems have been proposed. A large number of systems focus on equipping the user with head-mounted displays or equipping the environment with projectors to support the workers. However, combining the user-worn design dimension with in-situ projection has not been investigated in the area of order picking yet. With this paper, we aim to close this gap by introducing HelmetPickAR: a body-worn helmet using in-situ projection for supporting order picking. Through a user study with 16 participants, we compare HelmetPickAR against a state-of-the-art Pick-by-Paper approach. The results reveal that HelmetPickAR leads to significantly less cognitive effort for the worker during order picking tasks. While no difference was found in errors and picking time, the placing time increases.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}

Le, Huy Viet; Mayer, Sven; Wolf, Katrin; Henze, Niels
Finger Placement and Hand Grasp During Smartphone Interaction Proceedings Article
In: Proceedings of the 2016 CHI Conference Extended Abstracts on Human Factors in Computing Systems, pp. 2576–2584, ACM, Santa Clara, California, USA, 2016, ISBN: 978-1-4503-4082-3.
@inproceedings{Le:2016:FPH:2851581.2892462,
title = {Finger Placement and Hand Grasp During Smartphone Interaction},
author = { Huy Viet Le and Sven Mayer and Katrin Wolf and Niels Henze},
url = {http://sven-mayer.com/wp-content/uploads/2017/03/le2016placement.pdf},
doi = {10.1145/2851581.2892462},
isbn = {978-1-4503-4082-3},
year = {2016},
date = {2016-01-01},
booktitle = {Proceedings of the 2016 CHI Conference Extended Abstracts on Human Factors in Computing Systems},
pages = {2576--2584},
publisher = {ACM},
address = {Santa Clara, California, USA},
series = {CHI EA '16},
abstract = {Smartphones are currently the most successful mobile devices. Through their touchscreens, they combine input and output in a single interface. A body of work investigated interaction beyond direct touch. In particular, previous work proposed using the device’s rear as an interaction surface and the grip of the hands that hold the device as a means of input. While previous work provides a categorization of grip styles, a detailed understanding of the fingers’ preferred positions during different tasks is missing. This understanding is needed to develop ergonomic grasp-based and Back-of-Device interaction techniques. We report on a study to understand users’ finger positions during three representative tasks. We highlight the areas that are already covered by the users’ hands while using the on-screen keyboard, reading a text, and watching a video. Furthermore, we present the position of each of the user’s fingers during these tasks. From the results, we derive interaction possibilities from an ergonomic perspective.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}

Lischke, Lars; Mayer, Sven; Wolf, Katrin; Henze, Niels; Reiterer, Harald; Schmidt, Albrecht
Screen Arrangements and Interaction Areas for Large Display Work Places Proceedings Article
In: Proceedings of the 5th ACM International Symposium on Pervasive Displays, pp. 228–234, ACM, Oulu, Finland, 2016, ISBN: 978-1-4503-4366-4.
@inproceedings{Lischke:2016:SAI:2914920.2915027b,
title = {Screen Arrangements and Interaction Areas for Large Display Work Places},
author = { Lars Lischke and Sven Mayer and Katrin Wolf and Niels Henze and Harald Reiterer and Albrecht Schmidt},
url = {http://doi.acm.org/10.1145/2914920.2915027
http://sven-mayer.com/wp-content/uploads/2017/03/lischke2016screen-arrangement.pdf},
doi = {10.1145/2914920.2915027},
isbn = {978-1-4503-4366-4},
year = {2016},
date = {2016-01-01},
booktitle = {Proceedings of the 5th ACM International Symposium on Pervasive Displays},
pages = {228--234},
publisher = {ACM},
address = {Oulu, Finland},
series = {PerDis '16},
abstract = {Size and resolution of computer screens are constantly increasing. Individual screens can easily be combined to wall-sized displays. This enables computer displays that are folded, straight, bow-shaped, or even spread. As the possibilities for arranging the screens are manifold, it is unclear which arrangements are appropriate. Moreover, it is unclear how content and applications should be arranged on such large displays. To determine guidelines for the arrangement of multiple screens and for content and application layouts, we conducted a design study. In the study, we asked 16 participants to arrange a large screen setup as well as to create layouts of multiple common application windows. Based on the results, we provide a classification for screen arrangements and interaction areas. We identified that screen space should be divided into a central area for interactive applications and peripheral areas, mainly for displaying additional content.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}

Kötteritzsch, Anna; Weyers, Benjamin; Lischke, Lars; Mayer, Sven; Woźniak, Paweł W.; Fietkau, Julian; Koch, Michael
Workshopband Urban und Digital – Gemeinsam auf interaktiven Wegen Proceedings Article
In: Weyers, Benjamin; Dittmar, Anke (Ed.): Mensch und Computer 2016 – Workshopband, Gesellschaft für Informatik e.V., Aachen, 2016.
@inproceedings{mci/koetteritzsch2016,
title = {Workshopband Urban und Digital \textendash Gemeinsam auf interaktiven Wegen},
author = {Anna K\"{o}tteritzsch and Benjamin Weyers and Lars Lischke and Sven Mayer and Pawe\l W. Wo\'{z}niak and Julian Fietkau and Michael Koch},
editor = {Benjamin Weyers AND Anke Dittmar},
url = {http://sven-mayer.com/wp-content/uploads/2017/03/koetteritzsch2016urban.pdf},
doi = {10.18420/muc2016-ws14-0000},
year = {2016},
date = {2016-01-01},
booktitle = {Mensch und Computer 2016 \textendash Workshopband},
publisher = {Gesellschaft f\"{u}r Informatik e.V.},
address = {Aachen},
abstract = {Urban spaces are characterized by rapid demographic change. Many young people move to large cities for their studies or vocational training, and entire professions send their employees to other cities for periods of time. On the other hand, whole population groups leave large cities, for example because of sharply rising rents. At the same time, a dense mobility infrastructure of well-developed local and long-distance public transport and motorway networks makes it easy to cover greater distances. A diverse range of activities, art, and culture, as well as good career and educational prospects in metropolitan areas, leads to a growing influx of residents and increased traffic. Thanks to advancing digitalization, information about these offerings is also ubiquitously available. Digital communication technologies such as social networks, VoIP, and messenger services make it possible to stay connected across distances. Furthermore, digital services such as Google Maps enable fast, flexible, and easy navigation. The resulting fast pace of life and the growing demands on society can pose risks to human health and place an increased burden on nature. As awareness of these risks grows, work and life models are changing. Concepts from human-computer interaction accompany this process towards a balanced and healthy way of life. Persuasive applications on smartwatches or fitness trackers provide ad hoc, personalized, and context-sensitive recommendations for health-promoting behavior in order to minimize risks. Intelligent support for the population's mobility has the potential to address societal challenges such as an aging population and to reduce the growing environmental burden.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}

Schneegass, Stefan; Olsson, Thomas; Mayer, Sven; van Laerhoven, Kristof
Mobile Interactions Augmented by Wearable Computing: A Design Space and Vision Journal Article
In: Int. J. Mob. Hum. Comput. Interact., vol. 8, no. 4, pp. 104–114, 2016, ISSN: 1942-390X.
@article{Schneegass:2016:MIA:2984931.2984937b,
title = {Mobile Interactions Augmented by Wearable Computing: A Design Space and Vision},
author = { Stefan Schneegass and Thomas Olsson and Sven Mayer and Kristof van Laerhoven},
url = {http://dx.doi.org/10.4018/IJMHCI.2016100106
http://sven-mayer.com/wp-content/uploads/2017/03/schneegass2016designspace.pdf},
doi = {10.4018/IJMHCI.2016100106},
issn = {1942-390X},
year = {2016},
date = {2016-01-01},
journal = {Int. J. Mob. Hum. Comput. Interact.},
volume = {8},
number = {4},
pages = {104--114},
publisher = {IGI Global},
address = {Hershey, PA, USA},
abstract = {Wearable computing has a huge potential to shape the way we interact with mobile devices in the future. Interaction with mobile devices is still mainly limited to visual output and tactile finger-based input. Despite the visions of next-generation mobile interaction, the hand-held form factor hinders new interaction techniques becoming commonplace. In contrast, wearable devices and sensors are intended for more continuous and close-to-body use. This makes it possible to design novel wearable-augmented mobile interaction methods \textendash both explicit and implicit. For example, the EEG signal from a wearable breast strap could be used to identify user status and change the device state accordingly (implicit) and the optical tracking with a head-mounted camera could be used to recognize gestural input (explicit). In this paper, we outline the design space for how the existing and envisioned wearable devices and sensors could augment mobile interaction techniques. Based on designs and discussions in a recently organized workshop on the topic as well as other related work, we present an overview of this design space and highlight some use cases that underline the potential therein.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}

Weber, Dominik; Mayer, Sven; Voit, Alexandra; Fierro, Rodrigo Ventura; Henze, Niels
Design Guidelines for Notifications on Smart TVs Proceedings Article
In: Proceedings of the ACM International Conference on Interactive Experiences for TV and Online Video, pp. 13–24, ACM, Chicago, Illinois, USA, 2016, ISBN: 978-1-4503-4067-0.
@inproceedings{Weber:2016:DGN,
title = {Design Guidelines for Notifications on Smart TVs},
author = { Dominik Weber and Sven Mayer and Alexandra Voit and Rodrigo Ventura Fierro and Niels Henze},
url = {http://sven-mayer.com/wp-content/uploads/2017/03/weber2016notifications-tv.pdf},
doi = {10.1145/2932206.2932212},
isbn = {978-1-4503-4067-0},
year = {2016},
date = {2016-01-01},
booktitle = {Proceedings of the ACM International Conference on Interactive Experiences for TV and Online Video},
pages = {13--24},
publisher = {ACM},
address = {Chicago, Illinois, USA},
series = {TVX '16},
abstract = {Notifications are among the core mechanisms of most smart devices. Smartphones, smartwatches, tablets, and smart glasses all provide similar means to notify the user. For smart TVs, however, no standard notification mechanism has been established. Smart TVs are unlike other smart devices because they are used by multiple people - often at the same time. It is unclear how notifications on smart TVs should be designed and which information users need. From a set of focus groups, we derive a design space for notifications on smart TVs. By further studying selected design alternatives in an online survey and a lab study, we show, for example, that users demand different information when they are watching TV with others and that privacy is a major concern. We derive corresponding design guidelines for notifications on smart TVs that developers can use to gain the user’s attention in a meaningful way.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}

Wolf, Katrin; Mayer, Sven; Meyer, Stephan
Microgesture Detection for Remote Interaction with Mobile Devices Proceedings Article
In: Proceedings of the 18th International Conference on Human-Computer Interaction with Mobile Devices and Services Adjunct, pp. 783–790, ACM, Florence, Italy, 2016, ISBN: 978-1-4503-4413-5.
@inproceedings{Wolf:2016:MDR:2957265.2961865b,
title = {Microgesture Detection for Remote Interaction with Mobile Devices},
author = { Katrin Wolf and Sven Mayer and Stephan Meyer},
url = {http://sven-mayer.com/wp-content/uploads/2017/03/wolf2016microgesture.pdf},
doi = {10.1145/2957265.2961865},
isbn = {978-1-4503-4413-5},
year = {2016},
date = {2016-01-01},
urldate = {2016-01-01},
booktitle = {Proceedings of the 18th International Conference on Human-Computer Interaction with Mobile Devices and Services Adjunct},
pages = {783--790},
publisher = {ACM},
address = {Florence, Italy},
series = {MobileHCI \'16},
abstract = {The rise of smart rings enables ubiquitous control of wearable and mobile computers. We developed a ring interface using a 9 DOF IMU to detect microgestures that can be executed while performing another task that involves the hands, e.g., riding a bicycle. For gesture classification, we implemented 4 classifiers that run on the Android operating system without the need for clutch events. In a user study, we compared the success of the 4 classifiers in a cycling scenario. We found that Random Forest (RF) works better for microgesture detection on Android than Dynamic Time Warping (DTW), K-Nearest-Neighbor (KNN), and a Threshold (TH)-based approach, as it has the best detection rate while running in real-time on Android. This work should encourage other researchers to develop further mobile applications using remote microgesture control in encumbered contexts.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
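
As a minimal sketch of the winning Random Forest approach, assuming windowed 9-DOF IMU samples and simple statistical features: the paper's actual feature set and its on-device Android implementation are not given in the abstract, so the data, window size, and features below are invented for illustration, and scikit-learn stands in for the Android-side classifier.

    import numpy as np
    from sklearn.ensemble import RandomForestClassifier
    from sklearn.model_selection import train_test_split

    def extract_features(window):
        # window: (n_samples, 9) - accelerometer, gyroscope, magnetometer axes.
        return np.concatenate([window.mean(axis=0),
                               window.std(axis=0),
                               np.abs(window).max(axis=0)])

    # Synthetic stand-in for labeled IMU windows (4 microgesture classes).
    rng = np.random.default_rng(0)
    windows = rng.normal(size=(400, 50, 9))
    labels = rng.integers(0, 4, size=400)

    X = np.array([extract_features(w) for w in windows])
    X_train, X_test, y_train, y_test = train_test_split(X, labels, random_state=0)

    clf = RandomForestClassifier(n_estimators=100, random_state=0)
    clf.fit(X_train, y_train)
    print("detection accuracy:", clf.score(X_test, y_test))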

Woźniak, Paweł W.; Goyal, Nitesh; Kucharski, Przemysław; Lischke, Lars; Mayer, Sven; Fjeld, Morten
RAMPARTS: Supporting Sensemaking with Spatially-Aware Mobile Interactions Proceedings Article
In: Proceedings of the 2016 CHI Conference on Human Factors in Computing Systems, pp. 2447–2460, ACM, Santa Clara, California, USA, 2016, ISBN: 978-1-4503-3362-7.
@inproceedings{Wozniak:2016:RSS:2858036.2858491b,
title = {RAMPARTS: Supporting Sensemaking with Spatially-Aware Mobile Interactions},
author = {Pawe\l W. Wo\'{z}niak and Nitesh Goyal and Przemys\law Kucharski and Lars Lischke and Sven Mayer and Morten Fjeld},
url = {http://sven-mayer.com/wp-content/uploads/2017/03/wozniak2016ramparts.pdf},
doi = {10.1145/2858036.2858491},
isbn = {978-1-4503-3362-7},
year = {2016},
date = {2016-01-01},
booktitle = {Proceedings of the 2016 CHI Conference on Human Factors in Computing Systems},
pages = {2447--2460},
publisher = {ACM},
address = {Santa Clara, California, USA},
series = {CHI '16},
abstract = {Synchronous colocated collaborative sensemaking requires that analysts share their information and insights with each other. The challenge is to know when is the right time to share what information without disrupting the present state of analysis. This is crucial in ad-hoc sensemaking sessions with mobile devices because small screen space limits information display. To address these tensions, we propose and evaluate RAMPARTS\textemdasha spatially aware sensemaking system for collaborative crime analysis that aims to support faster information sharing, clue-finding, and analysis. We compare RAMPARTS to an interactive tabletop and a paper-based method in a controlled laboratory study. We found that RAMPARTS significantly decreased task completion time compared to paper, without affecting cognitive load or task completion time adversely compared to an interactive tabletop. We conclude that designing for ad-hoc colocated sensemaking on mobile devices could benefit from spatial awareness. In particular, spatial awareness could be used to identify relevant information, support diverse alignment styles for visual comparison, and enable alternative rhythms of sensemaking.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
2015

Funk, Markus; Shirazi, Alireza Sahami; Mayer, Sven; Lischke, Lars; Schmidt, Albrecht
Pick from Here!: An Interactive Mobile Cart Using In-situ Projection for Order Picking Proceedings Article
In: Proceedings of the 2015 ACM International Joint Conference on Pervasive and Ubiquitous Computing, pp. 601–609, ACM, Osaka, Japan, 2015, ISBN: 978-1-4503-3574-4.
@inproceedings{Funk:2015:PHI:2750858.2804268b,
title = {Pick from Here!: An Interactive Mobile Cart Using In-situ Projection for Order Picking},
author = { Markus Funk and Alireza Sahami Shirazi and Sven Mayer and Lars Lischke and Albrecht Schmidt},
url = {http://sven-mayer.com/wp-content/uploads/2017/03/funk2015cart.pdf},
doi = {10.1145/2750858.2804268},
isbn = {978-1-4503-3574-4},
year = {2015},
date = {2015-01-01},
urldate = {2015-01-01},
booktitle = {Proceedings of the 2015 ACM International Joint Conference on Pervasive and Ubiquitous Computing},
pages = {601--609},
publisher = {ACM},
address = {Osaka, Japan},
series = {UbiComp \'15},
abstract = {Order picking is not only one of the most important but also one of the most mentally demanding and error-prone tasks that can be found in the industry. Both stationary and wearable systems have been introduced to facilitate this task. Existing stationary systems are not scalable because of their high cost, and wearable systems have issues with acceptance by the workers. In this paper, we introduce a mobile camera-projector cart called OrderPickAR, which combines the benefits of both stationary and mobile systems to support order picking through Augmented Reality. Our system dynamically projects in-situ picking information into the storage system and automatically detects when a picking task is done. In a lab study, we compare our system to existing approaches, i.e., Pick-by-Paper, Pick-by-Voice, and Pick-by-Vision. The results show that using the proposed system, order picking is almost twice as fast as with the other approaches, the error rate is decreased by up to 9 times, and mental demands are reduced by up to 50%.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}

Funk, Markus; Mayer, Sven; Schmidt, Albrecht
Using In-Situ Projection to Support Cognitively Impaired Workers at the Workplace Proceedings Article
In: Proceedings of the 17th International ACM SIGACCESS Conference on Computers & Accessibility, pp. 185–192, ACM, Lisbon, Portugal, 2015, ISBN: 978-1-4503-3400-6.
@inproceedings{Funk:2015:UIP:2700648.2809853b,
title = {Using In-Situ Projection to Support Cognitively Impaired Workers at the Workplace},
author = { Markus Funk and Sven Mayer and Albrecht Schmidt},
url = {http://sven-mayer.com/wp-content/uploads/2017/03/funk2015support.pdf},
doi = {10.1145/2700648.2809853},
isbn = {978-1-4503-3400-6},
year = {2015},
date = {2015-01-01},
booktitle = {Proceedings of the 17th International ACM SIGACCESS Conference on Computers \& Accessibility},
pages = {185--192},
publisher = {ACM},
address = {Lisbon, Portugal},
series = {ASSETS '15},
abstract = {Today’s working society tries to integrate more and more impaired workers into everyday working processes. One major scenario for integrating impaired workers is the assembly of products. However, the tasks that are assigned to cognitively impaired workers are easy tasks that consist of only a small number of assembly steps. For tasks with a higher number of steps, cognitively impaired workers need instructions to help them with assembly. Although supervisors provide general support and assist new workers while learning new assembly steps, sheltered work organizations often provide additional printed pictorial instructions that actively guide the workers. To further improve continuous instructions, we built a system that uses in-situ projection and a depth camera to provide context-sensitive instructions. To explore the effects of in-situ instructions, we compared them to state-of-the-art pictorial instructions in a user study with 15 cognitively impaired workers at a sheltered work organization. The results show that using in-situ instructions, cognitively impaired workers can assemble more complex products up to 3 times faster and with up to 50% fewer errors. Further, the workers liked the in-situ instructions provided by our assistive system and would use it for everyday assembly.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}

Lischke, Lars; Mayer, Sven; Wolf, Katrin; Shirazi, Alireza Sahami; Henze, Niels
Subjective and Objective Effects of Tablet's Pixel Density Proceedings Article
In: Proceedings of the 33rd Annual ACM Conference on Human Factors in Computing Systems, pp. 2769–2772, ACM, Seoul, Republic of Korea, 2015, ISBN: 978-1-4503-3145-6.
@inproceedings{Lischke:2015:SOE:2702123.2702390b,
title = {Subjective and Objective Effects of Tablet's Pixel Density},
author = { Lars Lischke and Sven Mayer and Katrin Wolf and Alireza Sahami Shirazi and Niels Henze},
url = {http://sven-mayer.com/wp-content/uploads/2017/03/lischke2015pixel-density.pdf
https://www.youtube.com/watch?v=VNfwBHzxu5g},
doi = {10.1145/2702123.2702390},
isbn = {978-1-4503-3145-6},
year = {2015},
date = {2015-01-01},
booktitle = {Proceedings of the 33rd Annual ACM Conference on Human Factors in Computing Systems},
pages = {2769--2772},
publisher = {ACM},
address = {Seoul, Republic of Korea},
series = {CHI '15},
abstract = {Pixel densities are increasing rapidly. We can observe this trend in particular for mobile devices like smartphones and tablets. Previous work revealed an effect of pixel density on subjective feedback and objective performance only for low-resolution cathode ray tube screens. It is unclear whether this effect persists for the four times higher pixel densities of current mobile devices. Therefore, we conducted a study to compare four pixel densities with 359, 180, 120, and 90 pixels per inch. While participants performed three tasks involving images, text, and videos on a tablet, we measured perceived effort, perceived visual quality, task completion time, error rate, and body pose. Our results show that the effect of pixel density highly depends on the content. We found that only for text do the four pixel densities have clearly different perceived media qualities. Pixel density seems to have a smaller effect on perceived media quality for images and videos, and we found no effect on objective measures. The results show that text should be displayed in high resolution, while this is less important for images and videos.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}

Lischke, Lars; Mayer, Sven; Wolf, Katrin; Henze, Niels; Schmidt, Albrecht; Leifert, Svenja; Reiterer, Harald
Using Space: Effect of Display Size on Users' Search Performance Proceedings Article
In: Proceedings of the 33rd Annual ACM Conference Extended Abstracts on Human Factors in Computing Systems, pp. 1845–1850, ACM, Seoul, Republic of Korea, 2015, ISBN: 978-1-4503-3146-3.
@inproceedings{Lischke:2015:USE:2702613.2732845b,
title = {Using Space: Effect of Display Size on Users' Search Performance},
author = { Lars Lischke and Sven Mayer and Katrin Wolf and Niels Henze and Albrecht Schmidt and Svenja Leifert and Harald Reiterer},
url = {http://sven-mayer.com/wp-content/uploads/2017/03/lischke2015large-screen.pdf},
doi = {10.1145/2702613.2732845},
isbn = {978-1-4503-3146-3},
year = {2015},
date = {2015-01-01},
booktitle = {Proceedings of the 33rd Annual ACM Conference Extended Abstracts on Human Factors in Computing Systems},
pages = {1845--1850},
publisher = {ACM},
address = {Seoul, Republic of Korea},
series = {CHI EA '15},
abstract = {Due to advances in technology, large displays with very high resolution have started to become affordable for daily work. Today it is possible to build display walls with a pixel density that is comparable to standard office screens. Previous work indicates that physical navigation enables a deeper engagement with the data set. In particular, the visibility of detailed data subsets on large screens supports users’ work and understanding of large data. In contrast to previous work, we explore how users’ performance scales with an increasing amount of large display space when working with text documents. In a controlled experiment, we determined participants’ performance when searching for titles and images in large text documents using one to six 50” 4K monitors. Our results show that users’ visual search performance does not increase linearly with an increasing amount of display space.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}

Mayer, Sven; Lischke, Lars; Kucharski, Przemysław; Woźniak, Paweł W.
Towards Enhancing Data Exploration with Multiple Mobile Devices Proceedings Article
In: Proceedings of the 2015 Cross-Surface Workshop, in conjunction with ACM ITS'15, pp. 4, 2015.
@inproceedings{Mayer:2015:Exploration,
title = {Towards Enhancing Data Exploration with Multiple Mobile Devices},
author = {Sven Mayer and Lars Lischke and Przemys\law Kucharski and Pawe\l W. Wo\'{z}niak},
url = {http://sven-mayer.com/wp-content/uploads/2018/03/mayer2015data-exploration.pdf},
year = {2015},
date = {2015-11-15},
booktitle = {Proceedings of 2015 Cross-Surface workshop, in conjunction with ACM ITS'15},
volume = {1},
pages = {4},
abstract = {In a world with an increasing number of mobile devices in everyday life, people are dealing with large amounts of data every minute. There is an emerging need to create interfaces for multiple devices that support the process of data exploration and understanding. New sensors, enabling mobile devices to be spatially aware, inspire the design of context-aware adaptive interfaces. We indicate a possible direction for further research, where we treat the spatiotemporal relationships between different subsets of a given data set as part of the information communicated by the system. This gives us the opportunity to create more effective visualizations to enhance perception. This approach builds on a natural human tendency to organize information spatially, as shown in previous research in cognitive science.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}

Mayer, Sven; Wolf, Katrin; Schneegass, Stefan; Henze, Niels
Modeling Distant Pointing for Compensating Systematic Displacements Proceedings Article
In: Proceedings of the 33rd Annual ACM Conference on Human Factors in Computing Systems, pp. 4165–4168, ACM, Seoul, Republic of Korea, 2015, ISBN: 978-1-4503-3145-6.
@inproceedings{Mayer:2015:MDP,
title = {Modeling Distant Pointing for Compensating Systematic Displacements},
author = {Sven Mayer and Katrin Wolf and Stefan Schneegass and Niels Henze},
url = {http://sven-mayer.com/wp-content/uploads/2017/03/mayer2015.pdf
https://www.youtube.com/watch?v=4rsKRQIYyrg
https://www.youtube.com/watch?v=aT20tAONiJA},
doi = {10.1145/2702123.2702332},
isbn = {978-1-4503-3145-6},
year = {2015},
date = {2015-01-01},
urldate = {2015-01-01},
booktitle = {Proceedings of the 33rd Annual ACM Conference on Human Factors in Computing Systems},
pages = {4165--4168},
publisher = {ACM},
address = {Seoul, Republic of Korea},
series = {CHI \'15},
abstract = {Distant pointing at objects and persons is a highly expressive gesture that is widely used in human communication. Pointing is also used to control a range of interactive systems. For determining where a user is pointing, different ray casting methods have been proposed. In this paper, we assess how accurately humans point over distance and how to improve it. Participants pointed at projected targets on a wall display from 2m and 3m while standing and sitting. Testing three common ray casting methods, we found that even with the most accurate one the average error is 61.3cm. We found that all tested ray casting methods are affected by systematic displacements. Therefore, we trained a polynomial to compensate for this displacement. We show that using a user-, pose-, and distance-independent quartic polynomial can reduce the average error by 37.3%.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
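
The compensation idea can be sketched on synthetic 1-D data: fit a quartic polynomial that maps the ray-cast estimate back toward the true target position, mirroring the paper's user-, pose-, and distance-independent correction. The displacement model and noise below are invented for illustration; the paper fits its polynomial to motion-captured pointing data.

    import numpy as np

    # raw_x: where ray casting says the user points (e.g., cm on the wall);
    # true_x: the actual target position; the gap is the systematic displacement.
    rng = np.random.default_rng(1)
    true_x = rng.uniform(-150.0, 150.0, 200)
    raw_x = true_x + 0.002 * true_x**2 + 20.0 + rng.normal(0.0, 5.0, 200)

    # Fit a quartic polynomial mapping raw estimates back to target positions.
    coeffs = np.polyfit(raw_x, true_x, 4)
    corrected = np.polyval(coeffs, raw_x)

    print("mean abs error before:", np.abs(raw_x - true_x).mean())
    print("mean abs error after: ", np.abs(corrected - true_x).mean())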

Schneegass, Stefan; Mayer, Sven; Olsson, Thomas; Laerhoven, Kristof Van
From Mobile to Wearable: Using Wearable Devices to Enrich Mobile Interaction Proceedings Article
In: Proceedings of the 17th International Conference on Human-Computer Interaction with Mobile Devices and Services Adjunct, pp. 936–939, ACM, Copenhagen, Denmark, 2015, ISBN: 978-1-4503-3653-6.
@inproceedings{Schneegass:2015:MWU:2786567.2795396b,
title = {From Mobile to Wearable: Using Wearable Devices to Enrich Mobile Interaction},
author = { Stefan Schneegass and Sven Mayer and Thomas Olsson and Kristof Van Laerhoven},
url = {http://doi.acm.org/10.1145/2786567.2795396
http://sven-mayer.com/wp-content/uploads/2017/03/schneegass2015mobile-wearable.pdf},
doi = {10.1145/2786567.2795396},
isbn = {978-1-4503-3653-6},
year = {2015},
date = {2015-01-01},
booktitle = {Proceedings of the 17th International Conference on Human-Computer Interaction with Mobile Devices and Services Adjunct},
pages = {936--939},
publisher = {ACM},
address = {Copenhagen, Denmark},
series = {MobileHCI '15},
abstract = {In the last decades, mobile phones have turned into sensor-rich devices that use different built-in sensors such as accelerometers or gyroscopes. The sensors have enriched the interaction possibilities, allowing, for example, gestural interaction. With the prevalence of wearable devices and peripherals, such as fitness bracelets and chest straps, the input and output possibilities can be further extended with both new sensors and actuators. Current applications could benefit from them, and entirely new applications could be designed. The design space for new applications needs to be identified, which will again drive advances in mobile and wearable computing. This workshop sets its focus on wearable devices as a means to enrich smartphones and their interaction capabilities. We will discuss the new design space and generate ideas for new applications. Furthermore, we will provide sensors and actuators allowing the participants to implement rapid prototypes of their novel application ideas.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}

Wolf, Katrin; Schneegass, Stefan; Henze, Niels; Weber, Dominik; Schwind, Valentin; Knierim, Pascal; Mayer, Sven; Dingler, Tilman; Abdelrahman, Yomna; Kubitza, Thomas; Funk, Markus; Mebus, Anja; Schmidt, Albrecht
TUIs in the Large: Using Paper Tangibles with Mobile Devices Proceedings Article
In: Proceedings of the 33rd Annual ACM Conference Extended Abstracts on Human Factors in Computing Systems, pp. 1579–1584, ACM, Seoul, Republic of Korea, 2015, ISBN: 978-1-4503-3146-3.
@inproceedings{Wolf:2015:TLU:2702613.2732863b,
title = {TUIs in the Large: Using Paper Tangibles with Mobile Devices},
author = { Katrin Wolf and Stefan Schneegass and Niels Henze and Dominik Weber and Valentin Schwind and Pascal Knierim and Sven Mayer and Tilman Dingler and Yomna Abdelrahman and Thomas Kubitza and Markus Funk and Anja Mebus and Albrecht Schmidt},
url = {http://sven-mayer.com/wp-content/uploads/2017/03/wolf2015tui.pdf},
doi = {10.1145/2702613.2732863},
isbn = {978-1-4503-3146-3},
year = {2015},
date = {2015-01-01},
booktitle = {Proceedings of the 33rd Annual ACM Conference Extended Abstracts on Human Factors in Computing Systems},
pages = {1579--1584},
publisher = {ACM},
address = {Seoul, Republic of Korea},
series = {CHI EA '15},
abstract = {Tangible user interfaces (TUIs) have been proposed to interact with digital information through physical objects. However, despite being investigated for decades, TUIs still play a marginal role compared to other UI paradigms. This is at least partially because TUIs often involve complex hardware elements, which make prototyping and production in quantities difficult and expensive. In this paper, we present our work towards paper TUIs (pTUIs) \textendash easily makeable interactive TUIs using laser-cut paper, brass fasteners, metal bands, mirror foils, and touch screen devices as a platform. Through three examples, we highlight the flexibility of the approach. We rebuilt the seminal work URP to show that pTUIs can replicate existing TUIs in DIY manufacturing. We implemented tangible Pong controlled by paper rackets to show that pTUIs can be used in highly interactive systems. Finally, we manufactured an interactive Christmas card and distributed it to 300 recipients by mail to show that pTUIs can be used as an apparatus to explore how pTUIs are used outside the lab in real life.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
2014

Mayer, Sven
Modeling Distant Pointing for Compensating Systematic Displacements Masters Thesis
University of Stuttgart, 2014.
@mastersthesis{Mayer:2014:MDP,
title = {Modeling Distant Pointing for Compensating Systematic Displacements},
author = {Sven Mayer},
url = {http://sven-mayer.com/wp-content/uploads/2019/02/mayer2014master.pdf},
doi = {10.18419/opus-3329},
year = {2014},
date = {2014-06-19},
school = {University of Stuttgart},
abstract = {People use gestures to give verbal communication more expression and also to replace speech. One of the most concise and expressive gestures is the pointing gesture. Pointing gestures can be observed in early childhood. In these early years, they are used to point at objects or people. Later, people use pointing gestures even for more complex things, such as visualizing directions. Increasingly, pointing gestures are also used for interacting with computers. For example, gestures can be used to remotely interact with a display without using an input tool. In this work, we investigated how people point at objects and how the recognition accuracy can be improved by a gesture recognition system. We performed a user study where participants had to point at projected pointing targets. These gestures were recorded as reference data with the help of a motion capture system. The study was carried out from a variety of starting positions: participants were placed at a distance of 2 to 3 meters from the pointing targets, and at each of these two positions they had to point at the targets while sitting and while standing. From the recorded reference data we derived a pointing vector. Each vector describes the direction in which the gesture is directed. We generated these vectors from different body parts to show that there are different ways to create them, but that they all behave the same. In the optimal case, this vector would describe the path from the pointing person to the object, in this case the projected point. Through mathematical analyses we show that, on average over several experiments and several participants, a systematic deviation from this optimal vector can be detected. We specify models which can compensate for this systematic deviation. These models shift the pointing vector by the average distance between the optimal and the average study vector. Consumer-market products, for example the Kinect, can be used to detect pointing gestures, and their gesture recognition can be improved with the generated models.},
keywords = {},
pubstate = {published},
tppubtype = {mastersthesis}
}