% Invited talk (see abstract: "In this talk, Helge will present...").
% Venue moved from the misused `journal` field to `howpublished`, the
% correct field for @misc; author normalised to "Last, First" form; the
% technique name in the title is braced so sentence-casing styles keep it.
% Citation key kept as-is so existing \cite{18001} references still resolve.
@misc{18001,
  author       = {Spieker, Helge},
  title        = {Trustworthy Automated Driving through {Qualitative Explainable Graphs}},
  howpublished = {Trustworthy Systems Laboratory, University of Bristol, UK},
  year         = {2025},
  abstract     = {Understanding driving scenes and communicating automated vehicle decisions are key requirements for trustworthy automated driving in connected, cooperative automated mobility (CCAM). In this talk, Helge will present the Qualitative Explainable Graph (QXG), which is a unified symbolic and qualitative representation for scene understanding in urban mobility. The QXG enables interpreting an automated vehicle{\textquoteright}s environment using sensor data and machine learning models. It utilises spatio-temporal graphs and qualitative constraints to extract scene semantics from raw sensor inputs, such as LiDAR and camera data, offering an interpretable scene model. A QXG can be incrementally constructed in real-time, making it a versatile tool for in-vehicle explanations across various sensor types. Experiments have shown that 1) QXGs can be used as an action explanation mechanism, i.e. highlighting which object interactions caused actions taken by other participants; 2) Scene understanding can be strengthened when QXGs are augmented with human-labelled information about objects and relations relevance. This leads to a powerful explanation technique and fully interpretable due to the end-to-end reliance on symbolic representations and inherently interpretable machine learning techniques such as decision trees.},
}