@conference{243,
	title = {Doctor {XAI}: an ontology-based approach to black-box sequential data classification explanations},
	booktitle = {Proceedings of the 2020 Conference on Fairness, Accountability, and Transparency},
	year = {2020},
	abstract = {Several recent advancements in Machine Learning involve black-box models: algorithms that do not provide human-understandable explanations in support of their decisions. This limitation hampers the fairness, accountability, and transparency of these models; the field of eXplainable Artificial Intelligence (XAI) tries to solve this problem by providing human-understandable explanations for black-box models. However, healthcare datasets (and the related learning tasks) often present peculiar features, such as sequential data, multi-label predictions, and links to structured background knowledge. In this paper, we introduce Doctor XAI, a model-agnostic explainability technique able to deal with multi-labeled, sequential, ontology-linked data. We focus on explaining Doctor AI, a multi-label classifier that takes the clinical history of a patient as input in order to predict the next visit. Furthermore, we show how exploiting the temporal dimension of the data and the domain knowledge encoded in the medical ontology improves the quality of the mined explanations.},
	doi = {10.1145/3351095.3372855},
	url = {https://dl.acm.org/doi/abs/10.1145/3351095.3372855},
	author = {Panigutti, Cecilia and Perotti, Alan and Pedreschi, Dino}
}

@conference{241,
	title = {Explaining multi-label black-box classifiers for health applications},
	booktitle = {International Workshop on Health Intelligence},
	year = {2019},
	publisher = {Springer},
	organization = {Springer},
	abstract = {Today the state-of-the-art performance in classification is achieved by the so-called ``black boxes{''}, i.e., decision-making systems whose internal logic is obscure. Such models could revolutionize the healthcare system; however, their deployment in real-world diagnosis decision support systems is subject to several risks and limitations due to the lack of transparency. The typical classification problem in healthcare requires a multi-label approach, since the possible labels, e.g., diagnoses, are not mutually exclusive. We propose MARLENA, a model-agnostic method that explains multi-label black-box decisions. MARLENA explains an individual decision in three steps. First, it generates a synthetic neighborhood around the instance to be explained, using a strategy suitable for multi-label decisions. It then learns a decision tree on this neighborhood and finally derives from it a decision rule that explains the black-box decision. Our experiments show that MARLENA performs well in terms of mimicking the black-box behavior while at the same time gaining a notable amount of interpretability through compact decision rules, i.e., rules of limited length.},
	keywords = {Explainable Machine Learning, Healthcare},
	doi = {10.1007/978-3-030-24409-5_9},
	url = {https://link.springer.com/chapter/10.1007/978-3-030-24409-5_9},
	author = {Panigutti, Cecilia and Guidotti, Riccardo and Monreale, Anna and Pedreschi, Dino}
}
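
The MARLENA abstract above outlines a concrete three-step local-surrogate pipeline: generate a synthetic neighborhood around the instance, fit a decision tree on the black box's labels for that neighborhood, and read a decision rule off the tree. Below is a minimal Python sketch of that general idea, not the authors' implementation: the random forest merely stands in for a black box, the Gaussian perturbation is a placeholder for MARLENA's multi-label neighborhood-generation strategy, and all names and parameters are illustrative assumptions.

# Hypothetical sketch of a MARLENA-style local surrogate explanation.
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier

rng = np.random.default_rng(0)

# Toy multi-label task: two non-mutually-exclusive binary labels over 5 features.
X = rng.normal(size=(500, 5))
Y = np.column_stack([X[:, 0] + X[:, 1] > 0, X[:, 2] > 0.5]).astype(int)
black_box = RandomForestClassifier(random_state=0).fit(X, Y)  # stand-in black box

def explain(instance, n_samples=1000, scale=0.3):
    # Step 1: synthetic neighborhood around the instance to explain
    # (Gaussian noise here; MARLENA uses a multi-label-aware strategy).
    Z = instance + rng.normal(scale=scale, size=(n_samples, instance.size))
    # Step 2: label the neighborhood with the black box and fit a shallow
    # multi-output decision tree as the local surrogate.
    surrogate = DecisionTreeClassifier(max_depth=3, random_state=0).fit(
        Z, black_box.predict(Z)
    )
    # Step 3: the root-to-leaf path followed by the instance gives a
    # compact conjunctive decision rule.
    tree, node, conditions = surrogate.tree_, 0, []
    while tree.children_left[node] != -1:  # -1 marks a leaf
        f, t = tree.feature[node], tree.threshold[node]
        if instance[f] <= t:
            conditions.append(f"x{f} <= {t:.2f}")
            node = tree.children_left[node]
        else:
            conditions.append(f"x{f} > {t:.2f}")
            node = tree.children_right[node]
    return " AND ".join(conditions) or "surrogate is a single leaf"

print(explain(X[0]))

The shallow tree (max_depth=3) caps the rule length, mirroring the abstract's emphasis on compact rules of limited length; in practice one would also check the surrogate's fidelity to the black box on the neighborhood before trusting the rule.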