
@comment{{This file has been generated by bib2bib 1.99}}
@comment{{Command line: bib2bib -oc lm2023.keys -ob lm2023.bib -c 'export = "yes" and year=2023' lm.bib ../euprovenance.bib ../ops.bib}}
@comment{{This file has been generated by bib2bib 1.99}}
@comment{{Command line: bib2bib -ob lm.bib -oc lm.keys -c 'export = "yes"' ../lm.bib}}
@misc{Kohan:arxiv2023,
  title = {Provenance Graph Kernel},
  abstract = {Provenance is a record that describes how entities, activities, and agents have influenced a piece of data. Such provenance information is commonly represented in graphs with relevant labels on both their nodes and edges. With the growing adoption of provenance in a wide range of application domains, increasingly, users are confronted with an abundance of graph data, which may prove challenging to analyse. Graph kernels, on the other hand, have been consistently and successfully used to efficiently classify graphs. In this paper, we introduce a novel graph kernel called \emph{provenance kernel}, which is inspired by and tailored for provenance data. It decomposes a provenance graph into tree-patterns rooted at a given node and considers the labels of edges and nodes up to a certain distance from the root. We employ provenance kernels to classify provenance graphs from three application domains. Our evaluation shows that they perform well in terms of classification accuracy and yield competitive results when compared against standard graph kernel methods and the provenance network analytics method while taking significantly less time. Moreover, we illustrate how the provenance types used in provenance kernels help improve the explainability of predictive models.},
  keywords = {kernel methods, data provenance, graph classification, provenance analytics, interpretable machine learning},
  author = {{Kohan Marzag{\~a}o}, David and Dong Huynh and Ayah Helal and Luc Moreau},
  year = {2023},
  language = {English},
  export = {yes},
  eprints = {https://arxiv.org/abs/2010.10343},
  local = {papers/Kohan-arxiv20.pdf}
}
@misc{Huynh:EBD22,
  doi = {10.48550/ARXIV.2206.06251},
  url = {https://arxiv.org/abs/2206.06251},
  author = {Huynh, Trung Dong and Tsakalakis, Niko and Helal, Ayah and Stalla-Bourdillon, Sophie and Moreau, Luc},
  title = {Explainability-by-Design: A Methodology to Support Explanations in Decision-Making Systems},
  publisher = {arXiv},
  year = {2023},
  export = {yes},
  eprints = {https://kclpure.kcl.ac.uk/portal/en/publications/explainabilitybydesign-a-methodology-to-support-explanations-in-decisionmaking-systems(ee23aa54-1b5f-4432-a41c-e59efc135c40).html},
  copyright = {Creative Commons Attribution Share Alike 4.0 International},
  abstract = {Algorithms play a key role nowadays in many technological systems that control or affect various aspects of our lives. As a result, providing explanations to address the needs of users and organisations is increasingly expected by laws and regulations, codes of conduct, and the public. However, as laws and regulations do not prescribe how to meet such expectations, organisations are often left to devise their own approaches to explainability, inevitably increasing the cost of compliance and good governance. Hence, we put forth "Explainability-by-Design", a holistic methodology characterised by proactive measures to include explanation capability in the design of decision-making systems. This paper describes the technical steps of the Explainability-by-Design methodology in a software engineering workflow to implement explanation capability from requirements elicited by domain experts for a specific application context. Outputs of the Explainability-by-Design methodology are a set of configurations, allowing a reusable service, called the Explanation Assistant, to exploit logs provided by applications and create provenance traces that can be queried to extract relevant data points, which in turn can be used in explanation plans to construct explanations personalised to their consumers. Following those steps, organisations will be able to design their decision-making systems to produce explanations that meet the specified requirements, be it from laws, regulations, or business needs. We apply the methodology to two applications, resulting in a deployment of the Explanation Assistant demonstrating explanation capabilities. Finally, the associated development costs are measured, showing that the approach to construct explanations is tractable in terms of development time, which can be as low as two hours per explanation sentence.}
}
@inproceedings{Moreau:IPAW23,
  author = {Luc Moreau and Nicola Hogan and Nick O'Donnell},
  title = {Implementing an Environmental Management System Using Provenance-By-Design},
  optcrossref = {},
  optkey = {},
  booktitle = {The Web Conference Companion, Provenance Week'23 --- International Provenance and Annotation Workshop (IPAW'23)},
  year = {2023},
  opteditor = {},
  optvolume = {},
  optnumber = {},
  optseries = {},
  optpages = {},
  optmonth = {},
  optaddress = {Austin, Texas, USA},
  optorganization = {King's College London},
  optpublisher = {},
  optnote = {},
  optannote = {},
  abstract = {Organisations have to comply with environmental regulations to protect the environment and meet internationally agreed climate change targets. To assist organisations, processes and standards are being defined to manage these compliance obligations. They typically rely on a notion of Environmental Management System (EMS), defined as a reflective framework allowing organisations to set and manage their goals, and demonstrate they follow due processes in order to comply with prevailing regulations. The importance of these obligations can be highlighted by the fact that failing to comply may lead to significant liabilities for organisations. An EMS framework, typically structured as a set of documents and spreadsheets, contains a record of continuously evolving regulations, teams, stakeholders, actions and updates. However, the maintenance of an EMS is often human-driven, and therefore is error-prone despite the meticulousness of environmental officers, and further requires external human auditing to check its validity. To avoid greenwashing, but also to contain the burden and cost of compliance, it is desirable for these claims to be checked by trusted automated means. Provenance is ideally suited to track the changes occurring in an EMS, allowing queries to determine precisely which compliance objective is prevailing at any point in time, whether it is being met, and who is responsible for it. Thus, this paper has a dual aim: first, it investigates the benefits of provenance for EMS; second, it presents the application of an emerging approach, "Provenance-By-Design", which automatically converts a specification of an EMS data model and its provenance to a data backend, a service for processing and querying of EMS provenance data, a client-side library to interact with such a service, and a simple user interface allowing developers to navigate the provenance. The application of a Provenance-By-Design approach to EMS applications results in novel opportunities for a provenance-based EMS; we present our preliminary reflection on their potential.},
  eprints = {https://kclpure.kcl.ac.uk/portal/en/publications/implementing-an-environmental-management-system-using-provenancebydesign(785726a5-54a0-4e18-af23-29925e4efcec).html},
  local = {papers/ipaw2023.pdf},
  export = {yes},
  doi = {10.1145/3543873.3587560}
}
@article{Abeywickrama:CACM23,
  title = {On Specifying for Trustworthiness},
  abstract = {As autonomous systems (AS) increasingly become part of our daily lives, ensuring their trustworthiness is crucial. In order to demonstrate the trustworthiness of an AS, we first need to specify what is required for an AS to be considered trustworthy. This roadmap paper identifies key challenges for specifying for trustworthiness in AS, as identified during the "Specifying for Trustworthiness" workshop held as part of the UK Research and Innovation (UKRI) Trustworthy Autonomous Systems (TAS) programme. We look across a range of AS domains with consideration of the resilience, trust, functionality, verifiability, security, and governance and regulation of AS and identify some of the key specification challenges in these domains. We then highlight the intellectual challenges that are involved with specifying for trustworthiness in AS that cut across domains and are exacerbated by the inherent uncertainty involved with the environments in which AS need to operate.},
  author = {Abeywickrama, {Dhaminda B.} and Amel Bennaceur and Greg Chance and Yiannis Demiris and Anastasia Kordoni and Mark Levine and Luke Moffat and Luc Moreau and Mohammadreza Mousavi and Bashar Nuseibeh and Subramanian Ramamoorthy and Ringert, {Jan Oliver} and James Wilson and Shane Windsor and Kerstin Eder},
  year = {2023},
  month = jun,
  day = {22},
  language = {English},
  journal = {Communications of the ACM},
  local = {papers/cacm2023.pdf},
  export = {yes},
  eprints = {https://kclpure.kcl.ac.uk/portal/en/publications/on-specifying-for-trustworthiness},
  url = {https://arxiv.org/abs/2206.11421},
  doi = {TBD},
  issn = {0001-0782},
  publisher = {Association for Computing Machinery (ACM)},
}
@inproceedings{Huynh:CUI23,
  title = {Why Are Conversational Assistants Still Black Boxes? The Case For Transparency},
  abstract = {Much has been written about privacy in the context of conversational and voice assistants. Yet, there have been remarkably few developments in terms of the actual privacy offered by these devices. But how much of this is due to the technical and design limitations of speech as an interaction modality? In this paper, we set out to reframe the discussion on why commercial conversational assistants do not offer meaningful privacy and transparency by demonstrating how they could. By instrumenting the open-source voice assistant Mycroft to capture audit trails for data access, we demonstrate how such functionality could be integrated into big players in the sector like Alexa and Google Assistant. We show that this problem can be solved with existing technology and open standards and is thus fundamentally a business decision rather than a technical limitation.},
  keywords = {conversational assistants, voice assistants, provenance, audit trails, personal data, Mycroft, privacy, transparency},
  author = {Dong Huynh and William Seymour and Luc Moreau and Jose Such},
  year = {2023},
  month = jun,
  day = {8},
  doi = {10.1145/3571884.3604319},
  language = {English},
  booktitle = {ACM Conversational User Interfaces (CUI)},
  eprints = {https://kclpure.kcl.ac.uk/ws/portalfiles/portal/219453338/Provenance_in_Voice_Assistants.pdf},
  local = {papers/huynh-cui23.pdf},
  export = {yes}
}
@inproceedings{Akintunde:TAS23,
  title = {Verifiably Safe and Trusted Human-AI Systems: A Socio-technical Perspective (extended abstract)},
  abstract = {Replacing human decision-making with machine decision-making results in challenges associated with stakeholders' trust in AI systems that interact with and keep the human user in the loop. We refer to such systems as Human-AI Systems (HAIS) and argue that technical safety and social trustworthiness of a HAIS are key to its widespread adoption by society. To develop a verifiably safe and trusted HAIS, it is important to understand how different stakeholders perceive an autonomous system (AS) as trusted, and how the context of application affects their perceptions. Technical approaches to meet trust and safety concerns are widely investigated but under-used in the context of measuring users' trust in autonomous AI systems. Interdisciplinary socio-technical approaches, grounded in social science (trust) and computer science (safety), are less considered in HAIS investigations. This paper aims to elaborate on the need for the application of formal methods, for ensuring safe behaviour of HAIS, based on the real-life understanding of users about trust, and analysing trust dynamics. This work puts forward core challenges in this area and presents a research agenda on verifiably safe and trusted human-AI systems.},
  keywords = {Trust, Human-AI Systems, Safety, Verification},
  author = {Michael Akintunde and Luc Moreau and Victoria Young and Asieh Salehi and Vahid Yazdanpanah and Pauline Leonard and Michael Butler},
  year = {2023},
  month = jul,
  doi = {10.1145/3597512.3599719},
  booktitle = {First International Symposium on Trustworthy Autonomous Systems (TAS'23)},
  eprints = {https://kclpure.kcl.ac.uk/portal/en/publications/verifiably-safe-and-trusted-human-ai-systems-a-socio-technical-pe},
  local = {papers/vestas-tas23.pdf},
  export = {yes}
}
@inproceedings{Masters:TAS23,
  title = {A Practical Taxonomy of {TAS}-related Usecase Scenarios (extended abstract)},
  abstract = {This paper proposes a taxonomy of experimental usecase scenarios to facilitate research into trustworthy autonomous systems (TAS). Unable to identify an open-access repository of usecases to support our research, the project team embarked on development of an online library where fellow researchers would be able to find, share and recommend usecases to other practitioners in the field. To organise the library's content, we needed a taxonomy and, informed by a commitment to responsible research and innovation (RRI), we prioritised stakeholder involvement to shape its development. Conflict arose, however, between the project team's objective --- a rigorous taxonomy focused on surfacing genuine "benchmarks" that can be used to test a multiplicity of variables in a range of domains under differing experimental conditions --- and stakeholder expectation that the library would provide details of particular studies and results. How then can we reconcile project requirements with stakeholder preferences? A practical solution has to be found.},
  keywords = {taxonomy, trust, autonomous systems},
  author = {Peta Masters and Victoria Young and Alan Chamberlain and Sachini Weerawardhana and McKenna, {Peter E.} and Yang Lu and Liz Dowthwaite and Paul Luff and Luc Moreau},
  year = {2023},
  month = jul,
  doi = {10.1145/3597512.3597523},
  booktitle = {First International Symposium on Trustworthy Autonomous Systems (TAS'23)},
  eprints = {https://kclpure.kcl.ac.uk/portal/en/publications/a-practical-taxonomy-of-tas-related-usecase-scenarios},
  local = {papers/masters-tas23.pdf},
  export = {yes}
}