% Encoding: UTF-8 @Misc{2016, title = {{AskNature}}, url = {http://www.asknature.org}, publisher = {The Biomimicry Institute}, volume = {2016}, year = {2016}, } @Online{kickstartAI, title = {{Kickstart AI}}, url = {https://www.kickstartai.nl/}, groups = {Ethical AI}, year = {2019}, } @Misc{Bruton2012, author = {Bruton, John et al.}, title = {{Manifesto for a resource-efficient Europe}}, url = {http://europa.eu/rapid/press-release{\_}MEMO-12-989{\_}en.htm}, publisher = {European Commission}, year = {2012}, } @Unpublished{NS-AI-0991, title = {{Ethische Benadering van AI bij NS}}, groups = {Ethical AI}, institution = {Nederlandse Spoorwegen}, pages = {1--10}, year = {2021}, } @misc{ProjectGutenberg, title = {{Project Gutenberg}}, url = {https://www.gutenberg.org/}, urldate = {2020-11-25} } @Misc{NLAICoalitie, title = {{NL AI Coalitie}}, url = {https://nlaic.com/over-nl-aic/}, urldate = {2021-03-01}, groups = {Ethical AI}, } @article{Abbasi2018, abstract = {Ontologies have gained a lot of popularity and recognition in the semantic web because of their extensive use in Internet-based applications. Ontologies are often considered a fine source of semantics and interoperability in all artificially smart systems. Exponential increase in unstructured data on the web has made automated acquisition of ontology from unstructured text a most prominent research area. Several methodologies exploiting numerous techniques of various fields (machine learning, text mining, knowledge representation and reasoning, information retrieval and natural language processing) are being proposed to bring some level of automation in the process of ontology acquisition from unstructured text. This paper describes the process of ontology learning and further classification of ontology learning techniques into three classes (linguistics, statistical and logical) and discusses many algorithms under each category. 
This paper also explores ontology evaluation techniques by highlighting their pros and cons. Moreover, it describes the scope and use of ontology learning in several industries. Finally, the paper discusses challenges of ontology learning along with their corresponding future directions.}, author = {Abbasi, Hafiza Mahnoor and Wasim, Muhammad and Mahmood, Waqar and Asim, Muhammad Nabeel and Khan, Muhammad Usman Ghani}, doi = {10.1093/database/bay101}, issn = {1758-0463}, journal = {Database}, month = {oct}, title = {{A survey of ontology learning techniques and applications}}, url = {https://dx.doi.org/10.1093/database/bay101}, volume = {2018}, year = {2018} } @InProceedings{Abebe2010, author = {Abebe, S L and Tonella, P}, booktitle = {2010 IEEE 18th International Conference on Program Comprehension}, title = {{Natural Language Parsing of Program Element Names for Concept Extraction}}, doi = {10.1109/ICPC.2010.29}, isbn = {1092-8138 VO -}, pages = {156--159}, keywords = {Cognitive science, Natural languages, Navigation, Ontologies, Postal services, Programming profession, Protection, Software engineering, concept extraction, concept location, natural language parsing, natural languages, ontology extraction, program compilers, program element identifiers, program maintenance}, year = {2010}, } @Article{Acquisti2009, author = {Acquisti, A}, title = {{Nudging Privacy: The Behavioral Economics of Personal Information}}, doi = {10.1109/MSP.2009.163}, issn = {1558-4046 VO - 7}, number = {6}, pages = {82--85}, volume = {7}, abstract = {Privacy decisions often involve balancing competing interests. As such, they're a natural field of study for economics. But traditional economic models have made overly restrictive assumptions about the stability and nature of individual privacy preferences. 
Approaches drawing on existing research in behavioral economics and psychology can offer complementary tools for understanding privacy decision making.}, journal = {IEEE Security {\&} Privacy}, keywords = {Decision making, Privacy, Psychology, Stability, asymmetric paternalism, behavioral economics, data privacy, economic model, economics, personal information, privacy, privacy decision making, privacy decisions, privacy preferences}, year = {2009}, } @article{Adams2012, author = {Adams, Sam and Arel, Itmar and Bach, Joscha and Coop, Robert and Furlan, Rod and Goertzel, Ben and Hall, J Storrs and Samsonovich, Alexei and Scheutz, Matthias and Schlesinger, Matthew}, issn = {0738-4602}, journal = {AI magazine}, number = {1}, pages = {25--42}, title = {{Mapping the landscape of human-level artificial general intelligence}}, volume = {33}, year = {2012} } @incollection{Agamben1999e, address = {Stanford}, author = {Agamben, Giorgio}, booktitle = {Potentialities: Collected Essays in Philosophy}, editor = {Heller-Roazen, Daniel}, pages = {177--184}, publisher = {Stanford University Press}, title = {{On Potentiality}}, year = {1999} } @incollection{Agamben1999i, address = {Stanford}, author = {Agamben, Giorgio}, booktitle = {Potentialities: Collected Essays in Philosophy}, editor = {Heller-Roazen, Daniel}, pages = {116--137}, publisher = {Stanford University Press}, title = {{*Se: Hegel's Absolute and Heidegger's Ereignis}}, year = {1999} } @incollection{Agamben1999, address = {Stanford}, author = {Agamben, Giorgio}, booktitle = {Potentialities: Collected Essays in Philosophy}, editor = {Heller-Roazen, Daniel}, pages = {104--115}, publisher = {Stanford University Press}, title = {{Tradition of the Immemorial}}, year = {1999} } @incollection{Agamben1999d, address = {Stanford}, author = {Agamben, Giorgio}, booktitle = {Potentialities: Collected Essays in Philosophy}, editor = {Heller-Roazen, Daniel}, pages = {220--242}, 
publisher = {Stanford University Press}, title = {{Absolute Immanence}}, year = {1999} } @InCollection{Agamben1999c, author = {Agamben, Giorgio}, booktitle = {Potentialities: Collected Essays in Philosophy}, date = {1999}, title = {{Bartleby, or On Contingency}}, editor = {Heller-Roazen, Daniel}, location = {Stanford}, pages = {243--274}, publisher = {Stanford University Press}, } @incollection{Agamben1999h, address = {Stanford}, author = {Agamben, Giorgio}, booktitle = {Potentialities: Collected Essays in Philosophy}, editor = {Heller-Roazen, Daniel}, pages = {39--47}, publisher = {Stanford University Press}, title = {{The Idea of Language}}, year = {1999} } @misc{Agamben1999f, address = {New York}, author = {Agamben, Giorgio}, publisher = {Zone Books}, title = {{Remnants of Auschwitz. The Witness and the Archive}}, year = {1999} } @incollection{Agamben1999g, address = {Stanford}, author = {Agamben, Giorgio}, booktitle = {Potentialities: Collected Essays in Philosophy}, editor = {Heller-Roazen, Daniel}, pages = {204--219}, publisher = {Stanford University Press}, title = {{Pardes: The Writing of Potentiality}}, year = {1999} } @incollection{Agamben1999j, author = {Agamben, Giorgio}, booktitle = {Potentialities: Collected Essays in Philosophy}, editor = {Heller-Roazen, Daniel}, pages = {39--47}, publisher = {Stanford University Press}, title = {{No Title}}, year = {1999} } @incollection{Agamben1999b, address = {Stanford}, author = {Agamben, Giorgio}, booktitle = {Potentialities: Collected Essays in Philosophy}, editor = {Heller-Roazen, Daniel}, pages = {138--159}, publisher = {Stanford University Press}, title = {{Walter Benjamin and the Demonic: Happiness and Historical Redemption}}, year = {1999} } @misc{Agamben1998, address = {Stanford}, author = {Agamben, Giorgio}, publisher = {Stanford University Press}, title = {{Homo Sacer: Sovereign Power and Bare Life}}, year = {1998} } @misc{Agamben2002, address = {Amsterdam}, author = 
{Agamben, Giorgio}, publisher = {Boom/Parr{\`{e}}sia}, title = {{Homo Sacer: De souvereine macht en het naakte leven}}, year = {2002} } @article{Agre1994, abstract = {Two models of privacy issues are contrasted. The surveillance model employs visual metaphors (e.g., "Big Brother is watching") and derives from historical experiences of secret police surveillance. The less familiar capture model employs linguistic metaphors and has deep roots in the practices of applied computing through which human activities are systematically reorganized to allow computers to track them in real time. The capture model is discussed with reference to systems in numerous domains.}, annote = {doi: 10.1080/01972243.1994.9960162}, author = {Agre, Philip E}, doi = {10.1080/01972243.1994.9960162}, journal = {The Information Society}, number = {2}, pages = {101--127}, publisher = {Routledge}, title = {{Surveillance and capture: Two models of privacy}}, volume = {10}, year = {1994} } @article{Ajunwa2017, abstract = {From the Pinkerton private detectives of the 1850s, to the closed-circuit cameras and email monitoring of the 1990s, to new apps that quantify the productivity of workers, and to the collection of health data as part of workplace wellness programs, American employers have increasingly sought to track the activities of their employees. Starting with Taylorism and Fordism, American workers have become accustomed to heightened levels of monitoring that have only been mitigated by the legal counterweight of organized unions and labor laws. Thus, along with economic and technological limits, the law has always been presumed as a constraint on these surveillance activities. Recently, technological advancements in several fields-big data analytics, communications capture, mobile device design, DNA testing, and biometrics-have dramatically expanded capacities for worker surveillance both on and off the job. 
While the cost of many forms of surveillance has dropped significantly, new technologies make the surveillance of workers even more convenient and accessible, and labor unions have become much less powerful in advocating for workers. The American worker must now contend with an all-seeing Argus Panoptes built from technology that allows for the trawling of employee data from the Internet and the employer collection of productivity data and health data, with the ostensible consent of the worker. This raises the question of whether the law still remains a meaningful avenue to delineate boundaries for worker surveillance.}, author = {Ajunwa, Ifeoma and Crawford, K and Schultz, J}, doi = {10.15779/Z38BR8MF94}, journal = {California Law Review}, month = {jun}, pages = {735--776}, title = {{Limitless worker surveillance}}, volume = {105}, year = {2017} } @misc{Allyn2020, author = {Allyn, Bobby}, title = {{'The Computer Got It Wrong': How Facial Recognition Led To False Arrest Of Black Man}}, url = {https://www.npr.org/2020/06/24/882683463/the-computer-got-it-wrong-how-facial-recognition-led-to-a-false-arrest-in-michig?t=1593547453415}, urldate = {2020-06-30}, year = {2020} } @incollection{AlSayyad2004, address = {New York}, author = {AlSayyad, Nezar}, booktitle = {The End of Tradition?}, editor = {AlSayyad, Nezar}, pages = {1--28}, publisher = {Routledge}, title = {{The end of tradition, or the tradition of endings?}}, year = {2004} } @techreport{Amoroso2019, author = {Amoroso, Daniele and Tamburrini, Guglielmo}, month = {aug}, title = {{What makes human control over weapon systems "meaningful"?}}, year = {2019} } @article{Anderson2008a, abstract = {So proclaimed statistician George Box 30 years ago, and he was right. But what choice did we have? Only models, from cosmological equations to theories of human behavior, seemed to be able to consistently, if imperfectly, explain the world around us. Until now. 
Today companies like Google, which have grown up in an era of massively abundant data, don't have to settle for wrong models. Indeed, they don't have to settle for models at all.}, archivePrefix = {arXiv}, arxivId = {arXiv:1011.1669v3}, author = {Anderson, Chris}, doi = {10.1016/j.ecolmodel.2009.09.008}, eprint = {arXiv:1011.1669v3}, isbn = {0304-3800}, issn = {1059-1028}, journal = {Wired Magazine}, pmid = {812}, title = {{The End of Theory: The Data Deluge Makes the Scientific Method Obsolete}}, year = {2008} } @book{Andreus2007, address = {Haarlem}, author = {Andreus, Hans}, publisher = {Uitgeverij Holland}, title = {{De sonnetten van de kleine waanzin}}, year = {2007} } @misc{Article362014, author = {{Article36}}, title = {{Autonomous weapons, meaningful human control and the CCW}}, url = {http://www.article36.org/weapons/weapons-review/autonomous-weapons-meaningful-human-control-and-the-ccw/}, urldate = {2019-10-29}, year = {2014} } @Article{AswaniKumar2010, author = {{Aswani Kumar}, Ch. and Srinivas, S.}, title = {{Concept lattice reduction using fuzzy K-Means clustering}}, doi = {10.1016/j.eswa.2009.09.026}, issn = {0957-4174}, number = {3}, pages = {2696--2704}, url = {http://www.sciencedirect.com/science/article/pii/S0957417409008070}, volume = {37}, abstract = {During the design of concept lattices, complexity plays a major role in computing all the concepts from the huge incidence matrix. Hence for reducing the size of the lattice, methods based on matrix decompositions like SVD are available in the literature. However, SVD computation is known to have large time and memory requirements. In this paper, we propose a new method based on Fuzzy K-Means clustering for reducing the size of the concept lattices. 
We demonstrate the implementation of proposed method on two application areas: information retrieval and information visualization.}, journal = {Expert Systems with Applications}, keywords = {Concept lattice, Formal concept analysis, Fuzzy K-Means clustering, Singular value decomposition}, year = {2010}, } @book{Bachelard2014, address = {New York}, author = {Bachelard, Gaston}, publisher = {Penguin Group}, title = {{The Poetics of Space}}, year = {2014} } @article{Backman2012, author = {Backman, Jussi}, doi = {10.1163/156916412X628757}, journal = {Research in Phenomenology}, number = {1}, pages = {67--91}, title = {{Logocentrism and the Gathering Logos: Heidegger, Derrida, and the Contextual Centers of Meaning}}, volume = {42}, year = {2012} } @article{Bahl2017, abstract = {PurposeTo develop a machine learning model that allows high-risk breast lesions (HRLs) diagnosed with image-guided needle biopsy that require surgical excision to be distinguished from HRLs that are at low risk for upgrade to cancer at surgery and thus could be surveilled.Materials and MethodsConsecutive patients with biopsy-proven HRLs who underwent surgery or at least 2 years of imaging follow-up from June 2006 to April 2015 were identified. A random forest machine learning model was developed to identify HRLs at low risk for upgrade to cancer. Traditional features such as age and HRL histologic results were used in the model, as were text features from the biopsy pathologic report.ResultsOne thousand six HRLs were identified, with a cancer upgrade rate of 11.4{\%} (115 of 1006). A machine learning random forest model was developed with 671 HRLs and tested with an independent set of 335 HRLs. Among the most important traditional features were age and HRL histologic results (eg, atypical ductal hyperplasia). An important text feature from the pathologic reports was "severely atypical." 
Instead of surgical excision of all HRLs, if those categorized with the model to be at low risk for upgrade were surveilled and the remainder were excised, then 97.4{\%} (37 of 38) of malignancies would have been diagnosed at surgery, and 30.6{\%} (91 of 297) of surgeries of benign lesions could have been avoided.ConclusionThis study provides proof of concept that a machine learning model can be applied to predict the risk of upgrade of HRLs to cancer. Use of this model could decrease unnecessary surgery by nearly one-third and could help guide clinical decision making with regard to surveillance versus surgical excision of HRLs. {\copyright} RSNA, 2017}, annote = {doi: 10.1148/radiol.2017170549}, author = {Bahl, Manisha and Barzilay, Regina and Yedidia, Adam B and Locascio, Nicholas J and Yu, Lili and Lehman, Constance D}, doi = {10.1148/radiol.2017170549}, issn = {0033-8419}, journal = {Radiology}, month = {oct}, number = {3}, pages = {810--818}, publisher = {Radiological Society of North America}, title = {{High-Risk Breast Lesions: A Machine Learning Model to Predict Pathologic Upgrade and Reduce Unnecessary Surgical Excision}}, url = {https://doi.org/10.1148/radiol.2017170549}, volume = {286}, year = {2017} } @inproceedings{Bats2013, author = {Bats, Jan and Valkenburg, Rianne and Verbeek, Peter-Paul}, booktitle = {DS 75-7: Proceedings of the 19th International Conference on Engineering Design (ICED13), Design for Harmonies, Vol. 7: Human Behaviour in Design, Seoul, Korea, 19-22.08. 
2013}, pages = {397--406}, title = {{Mediating technology: How ICT influences the morality of the digital generation}}, year = {2013} } @book{Bauman1991, address = {Cambridge}, author = {Bauman, Zygmunt}, isbn = {0745605737 9780745605739 0745612423 9780745612423}, language = {English}, publisher = {Polity Press}, title = {{Modernity and ambivalence}}, year = {1991} } @article{Bauman1990, annote = {doi: 10.1177/026327690007002010}, author = {Bauman, Zygmunt}, doi = {10.1177/026327690007002010}, issn = {0263-2764}, journal = {Theory, Culture {\&} Society}, month = {jun}, number = {2-3}, pages = {143--169}, publisher = {SAGE Publications Ltd}, title = {{Modernity and Ambivalence}}, url = {https://doi.org/10.1177/026327690007002010}, volume = {7}, year = {1990} } @book{Baumeister2014, address = {Missoula}, author = {Baumeister, Dayna and Tocke, Rose and Dwyer, Jamie and Ritter, Sherry and Benyus, Janine M}, publisher = {Biomimicry 3.8}, title = {{Biomimicry: Recourse Handbook: A Seed Bank of Best Practices}}, year = {2014} } @incollection{Beers1976, address = {Amsterdam}, author = {Beers, Paul}, booktitle = {De Revisor}, pages = {28--34}, publisher = {Athenaeum-Polak {\&} Van Gennep}, title = {{Paul Beers in gesprek met Hans Andreus. 
Overlevende van een tweeling}}, volume = {3}, year = {1976} } @Article{Belohlavek2009, author = {Belohlavek, R and Vychodil, V}, title = {{Formal Concept Analysis With Background Knowledge: Attribute Priorities}}, doi = {10.1109/TSMCC.2008.2012168}, issn = {1094-6977 VO - 39}, number = {4}, pages = {399--409}, volume = {39}, journal = {IEEE Transactions on Systems, Man, and Cybernetics, Part C (Applications and Reviews)}, keywords = {Application software, Attribute priorities, Computer science, Data engineering, Data mining, Data visualization, Industrial engineering, Information analysis, Machinery, Spatial databases, Web search, background knowledge, binary data, clustering, computational tractability, formal concept analysis, formal concept analysis (FCA), knowledge acquisition, knowledge extraction, knowledge representation, mathematical feasibility, modeling background knowledge, pattern clustering}, year = {2009}, } @misc{Bengali.AI2020, abstract = {Classify the components of handwritten Bengali}, author = {Bengali.AI}, title = {{Bengali.AI Handwritten Grapheme Classification}}, url = {https://www.kaggle.com/c/bengaliai-cv19}, urldate = {2020-03-19}, year = {2020} } @Article{Benitez-Quiroz2018, author = {Benitez-Quiroz, Carlos F and Srinivasan, Ramprakash and Martinez, Aleix M}, title = {{Facial color is an efficient mechanism to visually transmit emotion.}}, doi = {10.1073/pnas.1716084115}, issn = {1091-6490}, number = {14}, pages = {3581--3586}, url = {http://www.ncbi.nlm.nih.gov/pubmed/29555780 http://www.pubmedcentral.nih.gov/articlerender.fcgi?artid=PMC5889636}, volume = {115}, abstract = {Facial expressions of emotion in humans are believed to be produced by contracting one's facial muscles, generally called action units. However, the surface of the face is also innervated with a large network of blood vessels. Blood flow variations in these vessels yield visible color changes on the face. 
Here, we study the hypothesis that these visible facial colors allow observers to successfully transmit and visually interpret emotion even in the absence of facial muscle activation. To study this hypothesis, we address the following two questions. Are observable facial colors consistent within and differential between emotion categories and positive vs. negative valence? And does the human visual system use these facial colors to decode emotion from faces? These questions suggest the existence of an important, unexplored mechanism of the production of facial expressions of emotion by a sender and their visual interpretation by an observer. The results of our studies provide evidence in favor of our hypothesis. We show that people successfully decode emotion using these color features, even in the absence of any facial muscle activation. We also demonstrate that this color signal is independent from that provided by facial muscle movements. These results support a revised model of the production and perception of facial expressions of emotion where facial color is an effective mechanism to visually transmit and decode emotion.}, journal = {Proceedings of the National Academy of Sciences of the United States of America}, keywords = {affect, categorization, computer vision, face perception}, month = {apr}, pmid = {29555780}, publisher = {National Academy of Sciences}, year = {2018}, } @misc{Benjamin1968, author = {Benjamin, Walter}, title = {{{\"{U}}ber den Begriff der Geschichte}}, url = {http://www.textlog.de/benjamin-begriff-geschichte.html}, volume = {2018}, year = {1968} } @incollection{Bennington1999, address = {London}, author = {Bennington, Geoffrey}, booktitle = {Jacques Derrida}, pages = {3--316}, publisher = {The University of Chicago Press}, title = {{Derridabase}}, year = {1999} } @article{Bensaude-Vincent, author = {Bensaude-Vincent, Bernadette and Arribart, Hervé and Bouligand, Yves and Sanchez, Clément}, issn = {1144-0546}, journal = {New Journal of 
Chemistry}, number = {1}, pages = {1--5}, title = {{Chemists and the school of nature}}, volume = {26} } @book{Benyus2002, address = {New York}, author = {Benyus, Janine M}, publisher = {Harper Perennial}, title = {{Biomimicry: Innovation Inspired by Nature}}, year = {2002} } @incollection{Berghs1991, address = {Nijmegen}, author = {Berghs, Harry}, booktitle = {Wonen: Architectuur in het denken van Martin Heidegger}, editor = {{De Visscher}, Jacques and {De Saeger}, Raf}, pages = {28--63}, publisher = {SUN}, title = {{Martin Heideggers bijdrage aan een filosofie van het wonen en het milieu}}, year = {1991} } @misc{Berners-Lee2017, abstract = {It has taken all of us to build the web we have, and now it is up to all of us to build the web we want – for everyone}, author = {Berners-Lee, Tim}, booktitle = {The Guardian}, title = {{Tim Berners-Lee: I invented the web. Here are three things we need to change to save it}}, url = {https://www.theguardian.com/technology/2017/mar/11/tim-berners-lee-web-inventor-save-internet}, urldate = {2020-01-03}, year = {2017} } @article{Beverungen2007, author = {Beverungen, Armin and Dunne, Stephen}, journal = {Culture and Organization}, number = {2}, pages = {171--183}, title = {{'I'd Prefer Not To'. 
Bartleby and the Excesses of Interpretation}}, volume = {13}, year = {2007} } @article{Blok2014, author = {Blok, Vincent}, doi = {10.5840/envirophil20149913}, issn = {1718-0918}, journal = {Environmental Philosophy}, number = {2}, pages = {307--332}, title = {{Reconnecting with Nature in the Age of Technology: The Heidegger and Radical Environmentalism Debate Revisited}}, volume = {11}, year = {2014} } @article{Blok2016, author = {Blok, Vincent and Gremmen, Bart}, journal = {Journal of Agricultural and Environmental Ethics}, pages = {203--217}, title = {{Ecological Innovation: Biomimicry as a New Way of Thinking and Acting Ecologically}}, volume = {29}, year = {2016} } @article{Blum2014a, author = {Blum, Mark E}, journal = {Journal of the Philosophy of History}, pages = {39--77}, title = {{Phenomenological Time, Historical Time, and The Writing of History}}, volume = {8}, year = {2014} } @article{Boeve2005a, abstract = {The category of detraditionalization, in combination with the category of pluralization, it is argued, offers a conceptual framework to think anew the 'transformation of religion' in so-called postsecular Europe. Subsequently the impact of this transformation on Christian faith is investigated, and what the appropriate theological response may be. 
Beyond mere continuity and discontinuity between faith and contemporary context, the main lines are sketched of what the author calls a 'theology of interruption', understood as both 'interrupted theology' as well as 'interrupting theology'.}, annote = {doi: 10.1177/002114000507000201}, author = {Boeve, Lieven}, doi = {10.1177/002114000507000201}, issn = {0021-1400}, journal = {Irish Theological Quarterly}, month = {jun}, number = {2}, pages = {99--122}, publisher = {SAGE Publications Ltd}, title = {{Religion after Detraditionalization: Christian Faith in a Post-Secular Europe}}, url = {https://doi.org/10.1177/002114000507000201}, volume = {70}, year = {2005} } @article{Bollnow1961, author = {Bollnow, O F}, journal = {Philosophy Today TA - Gerlach, Dominic}, number = {1}, pages = {31--39}, title = {{Lived Space}}, volume = {5}, year = {1961} } @book{Bollnow, address = {Stuttgart}, author = {Bollnow, Otto Friedrich}, language = {Duits}, publisher = {Kohlhammer}, title = {{Mensch und Raum}} } @Article{Borgesius2018, author = {Borgesius, Frederik J.Zuiderveen and M{\"{o}}ller, Judith and Kruikemeier, Sanne and Fathaigh, Ronan and Irion, Kristina and Dobber, Tom and Bodo, Balazs and de Vreese, Claes}, title = {{Online political microtargeting: Promises and threats for democracy}}, number = {1}, pages = {82--96}, volume = {14}, abstract = {Online political microtargeting involves monitoring people's online behaviour, and using the collected data, sometimes enriched with other data, to show people-targeted political advertisements. Online political microtargeting is widely used in the US; Europe may not be far behind. This paper maps microtargeting's promises and threats to democracy. For example, microtargeting promises to optimise the match between the electorate's concerns and political campaigns, and to boost campaign engagement and political participation. But online microtargeting could also threaten democracy. 
For instance, a political party could, misleadingly, present itself as a different one-issue party to different individuals. And data collection for microtargeting raises privacy concerns. We sketch possibilities for policymakers if they seek to regulate online political microtargeting. We discuss which measures would be possible, while complying with the right to freedom of expression under the European Convention on Human Rights.}, journal = {Utrecht Law Review}, keywords = {Democracy, Elections, Microtargeting, Political campaigns, Privacy, Profiling}, year = {2018}, } @article{Bovens2002, abstract = {The use of information and communication technology (ICT) is rapidly changing the structure of a number of large, executive public agencies. They used to be machine bureaucracies in which street-level officials exercised ample administrative discretion in dealing with individual clients. In some realms, the street-level bureaucrats have vanished. Instead of street-level bureaucracies, they have become system-level bureaucracies. System analysts and software designers are the key actors in these executive agencies. This article explores the implications of this transformation from the perspective of the constitutional state. Thanks to ICT, the implementation of the law has virtually been perfected. However, some new issues rise: What about the discretionary power of the system-level bureaucrats? How can we guarantee due process and fairness in difficult cases? 
The article ends with several institutional innovations that may help to embed these system-level bureaucracies in the constitutional state.}, author = {Bovens, Mark and Zouridis, Stavros}, doi = {10.1111/0033-3352.00168}, issn = {00333352}, journal = {Public Administration Review}, title = {{From street-level to system-level bureaucracies: How information and communication technology is transforming administrative discretion and constitutional control}}, year = {2002} } @article{Boyd2020, abstract = {Scholars across disciplines have long debated the existence of a common structure that underlies narratives. Using computer-based language analysis methods, several structural and psychological categories of language were measured across {\~{}}40,000 traditional narratives (e.g., novels and movie scripts) and {\~{}}20,000 nontraditional narratives (science reporting in newspaper articles, TED talks, and Supreme Court opinions). Across traditional narratives, a consistent underlying story structure emerged that revealed three primary processes: staging, plot progression, and cognitive tension. No evidence emerged to indicate that adherence to normative story structures was related to the popularity of the story. Last, analysis of fact-driven texts revealed structures that differed from story-based narratives.}, author = {Boyd, Ryan L. and Blackburn, Kate G. and Pennebaker, James W.}, doi = {10.1126/sciadv.aba2196}, journal = {Science Advances}, month = {aug}, number = {32}, pages = {eaba2196}, title = {{The narrative arc: Revealing core narrative structures through text analysis}}, volume = {6}, year = {2020} } @book{Braver2012, address = {Cambridge}, author = {Braver, Lee}, publisher = {The MIT Press}, title = {{Groundless Grounds. 
A Study of Wittgenstein and Heidegger}}, year = {2012} } @article{Brewster2004, author = {Brewster, Christopher and Alani, Harith and Dasmahapatra, Srinandan and Wilks, Yorick}, title = {{Data driven ontology evaluation}}, year = {2004} } @book{Buchanan2008, address = {London}, author = {Buchanan, Ian}, publisher = {Continuum International Publishing Group}, title = {{Deleuze and Guattari's Anti-Oedipus. A Reader's Guide. }}, year = {2008} } @book{Buchanan2000, address = {Edinburgh}, author = {Buchanan, Ian}, publisher = {Edinburgh University Press}, title = {{Deleuzism. A Metacommentary.}}, year = {2000} } @article{Buitelaar2005, author = {Buitelaar, Paul and Cimiano, Philipp and Magnini, Bernardo}, issn = {1586035231}, journal = {Ontology learning from text: Methods, evaluation and applications}, pages = {3--12}, publisher = {IOS Press, Amsterdam}, title = {{Ontology learning from text: An overview}}, volume = {123}, year = {2005} } @inproceedings{Buolamwini2018, author = {Buolamwini, Joy and Gebru, Timnit}, booktitle = {Proceedings of Machine Learning Research 81}, pages = {1--15}, title = {{Gender Shades: Intersectional Accuracy Disparities in Commercial Gender Classification}}, year = {2018} } @inproceedings{Buzmakov2015, author = {Buzmakov, A. and Kuznetsov, S. and Napoli, A.}, booktitle = {Proceedings of the 4th International Conference on What can FCA do for Artificial Intelligence?-Volume 1430}, pages = {27--34}, publisher = {CEUR-WS. org}, title = {{Sofia: how to make FCA polynomial?}}, year = {2015} } @article{Byron2010, abstract = {Luciano Floridi has proposed that we are on the cusp of a fourth revolution in human self-understanding. The information revolution with its prospect of digitally enhancing human beings opens the door to engineering human nature. Floridi has emphasized the importance of making this transition as ethically smooth as possible. He is quite right to worry about ethics after the fourth revolution. 
The coming revolution, if it unfolds as he envisions, spells the demise of traditional ethical theorizing.}, author = {Byron, Michael}, doi = {10.1007/s12130-010-9103-y}, journal = {Knowledge, Technology {\&} Policy}, number = {1}, pages = {135--147}, title = {{Floridi's Fourth Revolution and the Demise of Ethics}}, volume = {23}, year = {2010} } @article{Cabrera2017, abstract = {In this paper, I consider the relationship between Inference to the Best Explanation (IBE) and Bayesianism, both of which are well-known accounts of the nature of scientific inference. In Sect. 2, I give a brief overview of Bayesianism and IBE. In Sect. 3, I argue that IBE in its most prominently defended forms is difficult to reconcile with Bayesianism because not all of the items that feature on popular lists of “explanatory virtues”—by means of which IBE ranks competing explanations—have confirmational import. Rather, some of the items that feature on these lists are “informational virtues”—properties that do not make a hypothesis {\$}{\$}$\backslash$hbox {\{}H{\}}{\_}{\{}1{\}}{\$}{\$}H1more probable than some competitor {\$}{\$}$\backslash$hbox {\{}H{\}}{\_}{\{}2{\}}{\$}{\$}H2given evidence E, but that, roughly-speaking, give that hypothesis greater informative content. In Sect. 4, I consider as a response to my argument a recent version of compatibilism which argues that IBE can provide further normative constraints on the objectively correct probability function. I argue that this response does not succeed, owing to the difficulty of defending with any generality such further normative constraints. Lastly, in Sect. 5, I propose that IBE should be regarded, not as a theory of scientific inference, but rather as a theory of when we ought to “accept” H, where the acceptability of H is fixed by the goals of science and concerns whether H is worthy of commitment as research program. 
In this way, IBE and Bayesianism, as I will show, can be made compatible, and thus the Bayesian and the proponent of IBE can be friends.}, author = {Cabrera, Frank}, doi = {10.1007/s11229-015-0990-z}, issn = {1573-0964}, journal = {Synthese}, number = {4}, pages = {1245--1272}, title = {{Can there be a Bayesian explanationism? On the prospects of a productive partnership}}, url = {https://doi.org/10.1007/s11229-015-0990-z}, volume = {194}, year = {2017} } @misc{canziani2016, abstract = {Since the emergence of Deep Neural Networks (DNNs) as a prominent technique in the field of computer vision, the ImageNet classification challenge has played a major role in advancing the state-of-the-art. While accuracy figures have steadily increased, the resource utilisation of winning models has not been properly taken into account. In this work, we present a comprehensive analysis of important metrics in practical applications: accuracy, memory footprint, parameters, operations count, inference time and power consumption. Key findings are: (1) power consumption is independent of batch size and architecture; (2) accuracy and inference time are in a hyperbolic relationship; (3) energy constraint is an upper bound on the maximum achievable accuracy and model complexity; (4) the number of operations is a reliable estimate of the inference time. 
We believe our analysis provides a compelling set of information that helps design and engineer efficient DNNs.}, archivePrefix = {arXiv}, arxivId = {cs.CV/1605.07678}, author = {Canziani, Alfredo and Paszke, Adam and Culurciello, Eugenio}, eprint = {1605.07678}, primaryClass = {cs.CV}, title = {{An Analysis of Deep Neural Network Models for Practical Applications}}, year = {2016} } @article{Capurro2010a, author = {Capurro, Rafael}, doi = {10.1007/s00146-009-0255-9}, journal = {AI {\&} Society}, number = {1}, pages = {35--42}, title = {{Digital hermeneutics: an outline}}, volume = {25}, year = {2010} } @article{Capurro2017, author = {Capurro, Rafael}, doi = {10.1007/s00146-016-0686-z}, journal = {AI {\&} Society}, number = {2}, pages = {277--283}, title = {{Digitization as an ethical challenge}}, volume = {32}, year = {2017} } @article{Capurro2019, abstract = {The paper deals with the difference between who and what we are in order to take an ethical perspective on algorithms and their regulation. The present casting of ourselves as homo digitalis implies the possibility of projecting who we are as social beings sharing a world, into the digital medium, thereby engendering what can be called digital whoness, or a digital reification of ourselves. A main ethical challenge for the evolving digital age consists in unveiling this ethical difference, particularly when dealing with algorithms and their regulation in the context of human relationships. 
The paper addresses by way of example some issues raised by autonomous cars.}, author = {Capurro, Rafael}, doi = {10.1007/s11569-019-00340-9}, journal = {NanoEthics}, number = {2}, pages = {131--137}, title = {{Enculturating Algorithms}}, volume = {13}, year = {2019} } @article{Caputo2002, author = {Caputo, John D}, journal = {American Catholic Philosophical Quarterly}, number = {2}, pages = {341--344}, title = {{Auto-Deconstructing or Constructing a Bridge?: A Reply to Thomas AF Kelly}}, volume = {76}, year = {2002} } @book{Caputo1986, address = {New York}, author = {Caputo, John D}, publisher = {Fordham University Press}, title = {{The Mystical Element in Heidegger's Thought}}, year = {1986} } @article{Caputo1986a, annote = {doi: 10.1080/00071773.1986.11007774}, author = {Caputo, John D}, doi = {10.1080/00071773.1986.11007774}, journal = {Journal of the British Society for Phenomenology}, number = {3}, pages = {252--274}, publisher = {Routledge}, title = {{Heidegger and Derrida: Cold Hermeneutics}}, volume = {17}, year = {1986} } @inproceedings{Caridakis2007, abstract = {In this paper we present a multimodal approach for the recognition of eight emotions that integrates information from facial expressions, body movement and gestures and speech. We trained and tested a model with a Bayesian classifier, using a multimodal corpus with eight emotions and ten subjects. First individual classifiers were trained for each modality. Then data were fused at the feature level and the decision level. Fusing multimodal data increased very much the recognition rates in comparison with the unimodal systems: the multimodal approach gave an improvement of more than 10{\%} with respect to the most successful unimodal system. 
Further, the fusion performed at the feature level showed better results than the one performed at the decision level.}, address = {Boston, MA}, author = {Caridakis, George and Castellano, Ginevra and Kessous, Loic and Raouzaiou, Amaryllis and Malatesta, Lori and Asteriadis, Stelios and Karpouzis, Kostas}, editor = {Boukis, Christos and Pnevmatikakis, Aristodemos and Polymenakos, Lazaros}, isbn = {978-0-387-74161-1}, pages = {375--388}, publisher = {Springer US}, title = {{Multimodal emotion recognition from expressive faces, body gestures and speech BT - Artificial Intelligence and Innovations 2007: from Theory to Applications}}, year = {2007} } @article{Carpineto1996, author = {Carpineto, Claudio and Romano, Giovanni}, issn = {0885-6125}, journal = {Machine learning}, number = {2}, pages = {95--122}, publisher = {Springer}, title = {{A lattice conceptual clustering system and its application to browsing retrieval}}, volume = {24}, year = {1996} } @article{Carr2009, author = {Carr, David}, journal = {Journal of the Philosophy of History}, pages = {335--354}, title = {{Experience, Temporality and History}}, volume = {3}, year = {2009} } @book{Carr1991a, address = {Bloomington}, author = {Carr, David}, publisher = {Indiana University Press}, title = {{Time, Narrative and History}}, year = {1991} } @misc{Carr2008, abstract = {"Dave, stop. Stop, will you? Stop, Dave. Will you stop, Dave?” So the supercomputer HAL pleads with the implacable astronaut Dave Bowman in a famous and weirdly poignant scene toward the end of Stanley Kubrick's 2001: A Space Odyssey. Bowman, having nearly been sent to a deep-space death by the malfunctioning machine, is calmly, coldly disconnecting the memory circuits that control its artificial “ brain. “Dave, my mind is going,” HAL says, forlornly. “I can feel it. I can feel it.”$\backslash$r$\backslash$n$\backslash$r$\backslash$nI can feel it, too. 
Over the past few years I've had an uncomfortable sense that someone, or something, has been tinkering with my brain, remapping the neural circuitry, reprogramming the memory. My mind isn't going—so far as I can tell—but it's changing. I'm not thinking the way I used to think. I can feel it most strongly when I'm reading. Immersing myself in a book or a lengthy article used to be easy. My mind would get caught up in the narrative or the turns of the argument, and I'd spend hours strolling through long stretches of prose. That's rarely the case anymore. Now my concentration often starts to drift after two or three pages. I get fidgety, lose the thread, begin looking for something else to do. I feel as if I'm always dragging my wayward brain back to the text. The deep reading that used to come naturally has become a struggle.}, author = {Carr, Nicholas}, booktitle = {The Atlantic}, title = {{Is Google Making Us Stupid?}}, url = {https://www.theatlantic.com/magazine/archive/2008/07/is-google-making-us-stupid/306868/}, urldate = {2019-01-08}, year = {2008} } @book{Cazeaux2007, address = {New York}, author = {Cazeaux, C}, isbn = {9780415324007}, publisher = {Routledge}, title = {{Metaphor and Continental Philosophy: From Kant to Derrida}}, url = {https://books.google.nl/books?id=IozCbAf323UC}, year = {2007} } @misc{CCEP, author = {CCEP}, title = {{Center for Contemporary European Philosophy}}, url = {https://www.ru.nl/ptrs/ccep/}, urldate = {2020-11-25} } @book{Celan1967, address = {Frankfurt am Main}, author = {Celan, Paul}, publisher = {Suhrkamp}, title = {{Atemwende}}, year = {1967} } @article{Ceze2019, abstract = {Molecular data storage is an attractive alternative for dense and durable information storage, which is sorely needed to deal with the growing gap between information production and the ability to store data. DNA is a clear example of effective archival data storage in molecular form. 
In this Review, we provide an overview of the process, the state of the art in this area and challenges for mainstream adoption. We also survey the field of in vivo molecular memory systems that record and store information within the DNA of living cells, which, together with in vitro DNA data storage, lie at the growing intersection of computer systems and biotechnology.}, author = {Ceze, Luis and Nivala, Jeff and Strauss, Karin}, doi = {10.1038/s41576-019-0125-3}, journal = {Nature Reviews Genetics}, number = {8}, pages = {456--466}, title = {{Molecular digital data storage using DNA}}, volume = {20}, year = {2019} } @misc{chatterjee2019, archivePrefix = {arXiv}, arxivId = {cs.CV/1902.11133}, author = {Chatterjee, Swagato and Dutta, Rwik Kumar and Ganguly, Debayan and Chatterjee, Kingshuk and Roy, Sudipta}, eprint = {1902.11133}, primaryClass = {cs.CV}, title = {{Bengali Handwritten Character Classification using Transfer Learning on Deep Convolutional Neural Network}}, year = {2019} } @article{Chesney2019, annote = {This is really weird, but it's forthcoming in volume 107, but the site of the California Law Review does not show the issue yet. It seems delayed. So now I cite this as "forthcoming" even though it is 2020 at the moment of writing. Issue number and page numbers are thus currently missing.}, author = {Chesney, Robert and {Keats Citron}, Danielle}, doi = {10.2139/ssrn.3213954}, journal = {California Law Review}, number = {6}, title = {{Deep Fakes: A Looming Challenge for Privacy, Democracy, and National Security}}, volume = {107}, year = {2019} } @article{Cheung2005, author = {Cheung, K.S.K. and Vogel, D.}, doi = {10.1007/s10791-005-5663-y}, journal = {Information Retrieval}, number = {2}, pages = {285--299}, title = {{Complexity Reduction in Lattice-Based Information Retrieval}}, volume = {8}, year = {2005} } @article{Cimiano2005, author = {Cimiano, P. and Hotho, A. 
and Staab, S.}, doi = {10.1613/jair.1648}, journal = {Journal of Artificial Intelligence Research}, pages = {305--339}, title = {{Learning Concept Hierarchies from Text Corpora using Formal Concept Analysis}}, volume = {24}, year = {2005} } @inproceedings{Cimiano2003, author = {Cimiano, P. and Staab, S. and Tane, J.}, booktitle = {Proceedings of the International Workshop {\&} Tutorial on Adaptive Text Extraction and Mining held in conjunction with the 14th European Conference on Machine Learning and the 7th European Conference on Principles and Practice of Knowledge Discovery in Databases}, title = {{Automatic acquisition of taxonomies from text: FCA meets NLP}}, year = {2003} } @incollection{Ciurazzi2010, address = {Evanston}, author = {Ciurazzi, Gaetano}, booktitle = {Consequences of Hermeneutics}, editor = {Malpas, Jeff and Zabala, Santiago}, pages = {244--260}, publisher = {Northwestern University Press}, title = {{The Condition of Hermeneutics: The Implicative Structure of Understanding}}, year = {2010} } @inproceedings{Codocedo2011, author = {Codocedo, V. and Taramasco, C. and Astudillo, H.}, booktitle = {The Eighth International Conference on Concept Lattices and their Applications-CLA 2011}, pages = {349--362}, title = {{Cheating to achieve formal concept analysis over a large formal context}}, year = {2011} } @online{Cole2018, author = {Cole, Samantha}, title = {{We Are Truly Fucked: Everyone Is Making AI-Generated Fake Porn Now}}, url = {https://motherboard.vice.com/en{\_}us/article/bjye8a/reddit-fake-porn-app-daisy-ridley}, urldate = {2019-03-21}, year = {2018} } @book{Coliva2010, address = {London}, author = {Coliva, Annalisa}, publisher = {Palgrave Macmillan}, title = {{Moore and Wittgenstein. 
Scepticism, Certainty and Common Sense}}, year = {2010} } @incollection{Colombat2000, address = {Edinburgh}, author = {Colombat, Andr{\'{e}} Pierre}, booktitle = {Deleuze and Literature}, editor = {Buchanan, Ian and Marks, John}, pages = {14--33}, publisher = {Edinburgh University Press}, title = {{Deleuze and Signs}}, year = {2000} } @article{Conrad2018, author = {Conrad, J. G. and Branting, L. K.}, doi = {10.1007/s10506-018-9227-z}, issn = {0924-8463}, journal = {Artificial Intelligence and Law}, number = {2}, pages = {99--102}, title = {{Introduction to the special issue on legal text analytics}}, volume = {26}, year = {2018} } @article{Contissa2017, abstract = {Accidents involving autonomous vehicles (AVs) raise difficult ethical dilemmas and legal issues. It has been argued that self-driving cars should be programmed to kill, that is, they should be equipped with pre-programmed approaches to the choice of what lives to sacrifice when losses are inevitable. Here we shall explore a different approach, namely, giving the user/passenger the task (and burden) of deciding what ethical approach should be taken by AVs in unavoidable accident scenarios. We thus assume that AVs are equipped with what we call an “Ethical Knob”, a device enabling passengers to ethically customise their AVs, namely, to choose between different settings corresponding to different moral approaches or principles. 
Accordingly, AVs would be entrusted with implementing users' ethical choices, while manufacturers/programmers would be tasked with enabling the user's choice and ensuring implementation by the AV.}, author = {Contissa, Giuseppe and Lagioia, Francesca and Sartor, Giovanni}, doi = {10.1007/s10506-017-9211-z}, issn = {1572-8382}, journal = {Artificial Intelligence and Law}, number = {3}, pages = {365--378}, title = {{The Ethical Knob: ethically-customisable automated vehicles and the law}}, url = {https://doi.org/10.1007/s10506-017-9211-z}, volume = {25}, year = {2017} } @article{Cowie2005, abstract = {There has been rapid development in conceptions of the kind of database that is needed for emotion research. Familiar archetypes are still influential, but the state of the art has moved beyond them. There is concern to capture emotion as it occurs in action and interaction (‘pervasive emotion') as well as in short episodes dominated by emotion, and therefore in a range of contexts, which shape the way it is expressed. Context links to modality-different contexts favour different modalities. The strategy of using acted data is not suited to those aims, and has been supplemented by work on both fully natural emotion and emotion induced by various techniques that allow more controlled records. Applications for that kind of work go far beyond the ‘trouble shooting' that has been the focus for application: ‘really natural language processing' is a key goal. The descriptions included in such a database ideally cover quality, emotional content, emotion-related signals and signs, and context. Several schemes are emerging as candidates for describing pervasive emotion. 
The major contemporary databases are listed, emphasising those which are naturalistic or induced, multimodal, and influential.}, author = {Cowie, Roddy and Douglas-Cowie, Ellen and Cox, Cate}, doi = {10.1016/j.neunet.2005.03.002}, issn = {08936080}, journal = {Neural Networks}, number = {4}, pages = {371--388}, title = {{Beyond emotion archetypes: Databases for emotion modelling using neural networks}}, volume = {18}, year = {2005} } @book{Critchley1999, address = {Edinburgh}, author = {Critchley, Simon}, publisher = {Edinburgh University Press}, title = {{The Ethics of Deconstruction}}, year = {1999} } @incollection{Crownfield2001, address = {Bloomington}, author = {Crownfield, David}, booktitle = {Companion to Heidegger's Contributions to Philosophy}, editor = {Scott, Charles E. and Schoenbohm, Susan M. and Vallega-Neu, Daniela and Vallega, Alejandro}, pages = {213--229}, publisher = {Indiana University Press}, title = {{The Last God}}, year = {2001} } @article{Dastur2014, author = {Dastur, Fran{\c{c}}oise}, journal = {Continental Philosophy Review}, pages = {399--421}, title = {{Time, event and presence in the late Heidegger}}, volume = {47}, year = {2014} } @article{DeBeistegui2003, author = {{De Beistegui}, Miguel}, doi = {10.1163/15691640360699681}, journal = {Research in Phenomenology}, number = {1}, pages = {221--246}, title = {{The Transformation of the Sense of Dasein in Heidegger's Beitr{\"{a}}ge zur Philosophie (Vom Ereignis)}}, volume = {33}, year = {2003} } @inproceedings{DeGraaf2017, author = {{De Graaf}, Maartje and Malle, Bertram}, booktitle = {AAAI Fall Symposium: Artificial Intelligence for Human-Robot Interaction}, month = {nov}, title = {{How people explain action (and AIS should too)}}, year = {2017} } @Article{DeKnijff2013, author = {de Knijff, J. and Frasincar, F. 
and Hogenboom, F.}, title = {{Domain taxonomy learning from text: The subsumption method versus hierarchical clustering}}, doi = {10.1016/j.datak.2012.10.002}, issn = {0169-023X}, pages = {54--69}, url = {http://www.sciencedirect.com/science/article/pii/S0169023X12000973}, volume = {83}, abstract = {This paper proposes a framework to automatically construct taxonomies from a corpus of text documents. This framework first extracts terms from documents using a part-of-speech parser. These terms are then filtered using domain pertinence, domain consensus, lexical cohesion, and structural relevance. The remaining terms represent concepts in the taxonomy. These concepts are arranged in a hierarchy with either the extended subsumption method that accounts for concept ancestors in determining the parent of a concept or a hierarchical clustering algorithm that uses various text-based window and document scopes for concept co-occurrences. Our evaluation in the field of management and economics indicates that a trade-off between taxonomy quality and depth must be made when choosing one of these methods. The subsumption method is preferable for shallow taxonomies, whereas the hierarchical clustering algorithm is recommended for deep taxonomies.}, journal = {Data {\&} Knowledge Engineering}, keywords = {Association rules, Classification, Clustering, Ontologies, Text mining}, year = {2013}, } @misc{Deleuze1995, address = {New York}, author = {Deleuze, Gilles}, publisher = {Columbia University Press}, title = {{Difference {\&} Repetition}}, year = {1995} } @misc{Deleuze2013, address = {London}, author = {Deleuze, Gilles and Guattari, Felix}, editor = {Seem, Mark and Lane, Helen R}, publisher = {Bloomsbury Academic}, title = {{Anti-Oedipus.}}, year = {2013} } @misc{Deleuze2012, address = {Minneapolis}, author = {Deleuze, Gilles and Guattari, Felix}, publisher = {University of Minnesota Press}, title = {{Kafka. 
Toward a Minor Literature}}, year = {2012} } @misc{Deleuze2013a, address = {London}, author = {Deleuze, Gilles and Guattari, Felix}, publisher = {Bloomsbury Academic}, title = {{A Thousand Plateaus}}, year = {2013} } @book{Derrida1997, address = {Baltimore}, author = {Derrida, Jacques}, publisher = {The Johns Hopkins University Press}, title = {{Of Grammatology}}, translator = {Spivak, Gayatri Chakravorty}, year = {1997} } @incollection{Derrida2008, address = {Stanford}, author = {Derrida, Jacques}, booktitle = {Psyche. Inventions of the Other, Volume II}, pages = {1--6}, publisher = {Stanford University Press}, title = {{Letter to a Japanese Friend}}, year = {2008} } @incollection{Derrida2005a, address = {New York}, author = {Derrida, Jacques}, booktitle = {Sovereignties in Question: The Poetics of Paul Celan}, editor = {Dutoit, Thomas and Pasanen, Outi}, pages = {65--96}, publisher = {Fordham University Press}, title = {{Poetics and Politics of Witnessing}}, year = {2005} } @misc{Derrida, address = {Chicago}, author = {Derrida, Jacques}, publisher = {University of Chicago Press}, title = {{Positions}} } @incollection{Derrida2007b, address = {Stanford}, author = {Derrida, Jacques}, booktitle = {Psyche. Inventions of the Other, Volume I}, pages = {48--80}, publisher = {Stanford University Press}, title = {{The Retrait of Metaphor}}, year = {2007} } @incollection{Derrida1992a, address = {New York}, author = {Derrida, Jacques}, booktitle = {Acts of Literature}, editor = {Attridge, Derek}, pages = {33--75}, publisher = {Routledge}, title = {{"This Strange Institution Called Literature": An Interview with Jacques Derrida}}, year = {1992} } @incollection{Derrida2007a, address = {Stanford}, author = {Derrida, Jacques}, booktitle = {Psyche. 
Inventions of the Other, Volume I}, editor = {Kamuf, Peggy and Rottenberg, Elizabeth}, pages = {1--48}, publisher = {Stanford University Press}, title = {{Psyche: Invention of the Other}}, year = {2007} } @incollection{Derrida2007c, address = {Stanford}, author = {Derrida, Jacques}, booktitle = {Psyche. Inventions of the Other, Volume I}, editor = {Kamuf, Peggy and Rottenberg, Elizabeth}, pages = {226--261}, publisher = {Stanford University Press}, title = {{Telepathy}}, year = {2007} } @misc{Derrida1988a, address = {Evanston}, author = {Derrida, Jacques}, publisher = {Northwestern University Press}, title = {{Limited Inc}}, year = {1988} } @incollection{Derrida2008a, address = {Stanford}, author = {Derrida, Jacques}, booktitle = {Psyche. Inventions of the Other, Volume II}, editor = {Kamuf, Peggy and Rottenberg, Elizabeth}, pages = {196--230}, publisher = {Stanford University Press}, title = {{D{\'{e}}sistance}}, year = {2008} } @incollection{Derrida1987, address = {Chicago}, author = {Derrida, Jacques}, booktitle = {The Post Card: From Socrates to Freud and Beyond}, pages = {1--258}, publisher = {The University of Chicago Press}, title = {{Envois}}, translator = {Bass, Alan}, year = {1987} } @incollection{Derrida2007d, address = {Stanford}, author = {Derrida, Jacques}, booktitle = {Psyche. 
Inventions of the Other, Volume I}, editor = {Kamuf, Peggy and Rottenberg, Elizabeth}, pages = {94--128}, publisher = {Stanford University Press}, title = {{Envoi}}, year = {2007} } @book{Derrida1980, address = {Paris}, author = {Derrida, Jacques}, publisher = {Flammarion}, title = {{La Carte Postale: de Socrate {\`{a}} Freud et au-del{\`{a}}}}, year = {1980} } @misc{Derrida1992, address = {Leuven}, author = {Derrida, Jacques}, publisher = {Garant}, title = {{Sjibbolet: Voor Paul Celan}}, year = {1992} } @misc{Derrida1978, address = {London}, author = {Derrida, Jacques}, publisher = {Routledge}, title = {{Writing and Difference}}, year = {1978} } @misc{Derrida1989, address = {Hilversum}, author = {Derrida, Jacques}, booktitle = {Marges van de filosofie}, publisher = {Gooi en Sticht}, title = {{Witte mythologie. De metafoor in de filosofische tekst}}, year = {1989} } @incollection{Derrida1982a, address = {Brighton}, author = {Derrida, Jacques}, booktitle = {Margins of Philosophy}, pages = {307--330}, publisher = {The Harvester Press}, title = {{Signature Event Context}}, year = {1982} } @incollection{Derrida1993a, address = {Bloomington}, author = {Derrida, Jacques}, booktitle = {Reading Heidegger}, editor = {Sallis, John}, pages = {163--218}, publisher = {Indiana University Press}, title = {{Heidegger's Ears: Philopolemology (Geschlecht IV)}}, translator = {Leavey, John. P. 
Jr.}, year = {1993} } @misc{Derrida2000, address = {Stanford}, author = {Derrida, Jacques}, publisher = {Stanford University Press}, title = {{Demeure: Fiction and Testimony}}, year = {2000} } @misc{Derrida1993, address = {Stanford}, author = {Derrida, Jacques}, publisher = {Stanford University Press}, title = {{Aporias}}, year = {1993} } @book{Derrida1989a, address = {Lincoln and London}, author = {Derrida, Jacques}, publisher = {University of Nebraska Press}, title = {{Edmund Husserl's Origin of Geometry: An Introduction}}, translator = {Leavey, John P.}, year = {1989} } @misc{Derrida2003a, address = {Chicago}, author = {Derrida, Jacques}, publisher = {The University of Chicago Press}, title = {{The Problem of Genesis in Husserl's Philosophy}}, year = {2003} } @incollection{Derrida2005, address = {New York}, author = {Derrida, Jacques}, booktitle = {Sovereignties in Question: The Poetics of Paul Celan}, editor = {Dutoit, Thomas and Pasanen, Outi}, pages = {97--107}, publisher = {Fordham University Press}, title = {{Language is Never Owned: An Interview}}, year = {2005} } @incollection{Derrida1982, address = {Brighton}, author = {Derrida, Jacques}, booktitle = {Margins of Philosophy}, pages = {207--272}, publisher = {The Harvester Press}, title = {{White Mythology: Metaphor in the Text of Philosophy}}, translator = {Bass, Alan}, year = {1982} } @Article{Desain2009a, author = {van Gerven, Marcel and Farquhar, Jason and Schaefer, Rebecca and Vlek, Rutger and Geuze, Jeroen and Nijholt, Anton and Ramsey, Nick and Haselager, Pim and Vuurpijl, Louis and Gielen, Stan and Desain, Peter}, title = {{The brain–computer interface cycle}}, issn = {1741-2552}, number = {4}, pages = {41001}, url = {http://stacks.iop.org/1741-2552/6/i=4/a=041001}, volume = {6}, abstract = {Brain–computer interfaces (BCIs) have attracted much attention recently, triggered by new scientific progress in understanding brain function and by impressive applications. 
The aim of this review is to give an overview of the various steps in the BCI cycle, i.e., the loop from the measurement of brain activity, classification of data, feedback to the subject and the effect of feedback on brain activity. In this article we will review the critical steps of the BCI cycle, the present issues and state-of-the-art results. Moreover, we will develop a vision on how recently obtained results may contribute to new insights in neurocognition and, in particular, in the neural representation of perceived stimuli, intended actions and emotions. Now is the right time to explore what can be gained by embracing real-time, online BCI and by adding it to the set of experimental tools already available to the cognitive neuroscientist. We close by pointing out some unresolved issues and present our view on how BCI could become an important new tool for probing human cognition.}, journal = {Journal of Neural Engineering}, year = {2009}, } @article{Devillers2005, abstract = {Since the early studies of human behavior, emotion has attracted the interest of researchers in many disciplines of Neurosciences and Psychology. More recently, it is a growing field of research in computer science and machine learning. We are exploring how the expression of emotion is perceived by listeners and how to represent and automatically detect a subject's emotional state in speech. In contrast with most previous studies, conducted on artificial data with archetypal emotions, this paper addresses some of the challenges faced when studying real-life non-basic emotions. We present a new annotation scheme allowing the annotation of emotion mixtures. Our studies of real-life spoken dialogs from two call center services reveal the presence of many blended emotions, dependent on the dialog context. 
Several classification methods (SVM, decision trees) are compared to identify relevant emotional states from prosodic, disfluency and lexical cues extracted from the real-life spoken human-human interactions.}, author = {Devillers, Laurence and Vidrascu, Laurence and Lamel, Lori}, doi = {10.1016/J.NEUNET.2005.03.007}, issn = {0893-6080}, journal = {Neural Networks}, month = {may}, number = {4}, pages = {407--422}, publisher = {Pergamon}, title = {{Challenges in real-life emotion annotation and machine learning based detection}}, url = {https://www.sciencedirect.com/science/article/pii/S0893608005000407}, volume = {18}, year = {2005} } @Article{Dias2015, author = {Dias, S. M. and Vieira, N. J.}, title = {{Concept lattices reduction: Definition, analysis and classification}}, doi = {10.1016/j.eswa.2015.04.044}, issn = {0957-4174}, number = {20}, pages = {7084--7097}, url = {http://www.sciencedirect.com/science/article/pii/S0957417415002869}, volume = {42}, abstract = {Formal concept analysis (FCA) is currently considered an important formalism for knowledge representation, extraction and analysis with applications in different areas. A problem identified in several applications is the computational cost due to the large number of formal concepts generated. Even when that number is not very large, the essential aspects, those effectively needed, can be immersed in a maze of irrelevant details. In fact, the problem of obtaining a concept lattice of appropriate complexity and size is one of the most important problems of FCA. In literature, several different approaches to control the complexity and size of a concept lattice have been described, but so far they have not been properly analyzed, compared and classified. We propose the classification of techniques for concept lattice reduction in three groups: redundant information removal, simplification, and selection. 
The main techniques to reduce concept lattice are analyzed and classified based on seven dimensions, each one composed of a set of characteristics. Considerations are made about the applicability and computational complexity of approaches of different classes.}, journal = {Expert Systems with Applications}, keywords = {Concept lattices, Formal concept analysis, Reduction}, year = {2015}, } @Article{Dias2017, author = {Dias, S. M. and Vieira, N. J.}, title = {{A methodology for analysis of concept lattice reduction}}, doi = {10.1016/j.ins.2017.02.037}, issn = {0020-0255}, pages = {202--217}, url = {http://www.sciencedirect.com/science/article/pii/S0020025517305388}, volume = {396}, abstract = {Formal concept analysis (FCA) is a mathematical theory of data analysis with applications in many areas. The problem of obtaining a concept lattice of an appropriate size was identified in several applications as one of the most important problems of FCA. In order to deal with this problem several techniques with different characteristics were proposed for concept lattice reduction. However, there are currently no adequate methods to assess what types of knowledge transformations can result from a reduction. A methodology for analysis of concept lattice reduction is presented here. It is based on the use of sets of proper implications holding in the original and reduced formal contexts or concept lattices. Working with both sets of implications, the methodology is able to show what is preserved, eliminated, inserted or transformed by a reduction technique. Three classes of reduction techniques are analyzed from the standpoint of the methodology in order to highlight what techniques of each class have in common with respect to the transformations performed. 
Such analysis is followed by specific examples in each class.}, journal = {Information Sciences}, keywords = {Formal concept analysis, Lattice reduction, Proper implications}, year = {2017}, } @article{Dicks2015, abstract = {The philosophy of biomimicry, I argue, consists of four main areas of inquiry. The first, which has already been explored by Freya Mathews (2011), concerns the “deep” question of what Nature ultimately is. The second, third, and fourth areas correspond to the three basic principles of biomimicry as laid out by Janine Benyus (1997). “Nature as model” is the poetic principle of biomimicry, for it tells us how it is that things are to be “brought forth” (poiēsis). “Nature as measure” is the ethical principle of biomimicry, for it tells us that Nature places ethical limits or standards on what it is possible for us to accomplish. And “Nature as mentor” is the epistemological principle of biomimicry, for it affirms that Nature is the ultimate source of truth, wisdom, and freedom from error. Within this overall framework, I argue that seeing Nature as physis—understood as “self-production” or “self-placing-into-the-open”—constitutes the requisite ground for the poetic, ethical, and epistemological principles of biomimicry, and that biomimicry thus conceived involves a new philosophical paradigm, which I call “enlightened naturalism”.}, author = {Dicks, Henry}, doi = {10.1007/s13347-015-0210-2}, issn = {2210-5441}, journal = {Philosophy {\&} Technology}, pages = {1--21}, title = {{The Philosophy of Biomimicry}}, url = {http://dx.doi.org/10.1007/s13347-015-0210-2}, year = {2015} } @misc{DigitalEnlightenmentForum, author = {{Digital Enlightenment Forum}}, title = {{About Us}}, url = {https://digitalenlightenment.org/about-us}, urldate = {2020-01-05} } @article{Donahue2015, abstract = {What today divides analytical from Continental philosophy? This paper argues that the present divide is not what it once was. 
Today, the divide concerns the styles in which philosophers deal with intellectual problems: solving them, pressing them, resolving them, or dissolving them. Using ‘the boundary problem’, or ‘the democratic paradox’, as an example, we argue for two theses. First, the difference between most analytical and most Continental philosophers today is that Continental philosophers find intelligible two styles of dealing with problems that most analytical philosophers find unintelligible: pressing them and resolving them. Second, when it comes to a genuine divide in which not understanding the other side’s basic philosophical purposes combines with disagreement on fundamental questions of doctrine, the only such divide today is that between those analytical philosophers who tend to solve problems and those Continental philosophers who tend to press problems (roughly, the heirs of Derrida). It is among these subgroups that there is a real philosophical divide today. So the analytical–Continental divide is more a matter of style than of substance; but as we try to show, differences in style shape differences over substance.}, annote = {doi: 10.1177/1474885115585324}, author = {Donahue, Thomas J and {Ochoa Espejo}, Paulina}, doi = {10.1177/1474885115585324}, issn = {1474-8851}, journal = {European Journal of Political Theory}, month = {may}, number = {2}, pages = {138--154}, publisher = {SAGE Publications}, title = {{The analytical–Continental divide: Styles of dealing with problems}}, url = {https://doi.org/10.1177/1474885115585324}, volume = {15}, year = {2015} } @article{Dong2018, archivePrefix = {arXiv}, arxivId = {1804.04589}, author = {Dong, Yue}, eprint = {1804.04589}, journal = {CoRR}, title = {{A Survey on Neural Network-Based Summarization Methods}}, url = {http://arxiv.org/abs/1804.04589}, volume = {abs/1804.04589}, year = {2018} } @incollection{Dostal2002, author = {Dostal, Robert J}, booktitle = {The Cambridge Companion to Gadamer}, editor = {Dostal, Robert J}, pages 
= {247--266}, title = {{Gadamer's Relation to Heidegger and Phenomenology}}, year = {2002} } @inproceedings{Drymonas2010, author = {Drymonas, E. and Zervanou, K. and Petrakis, E. G. M.}, booktitle = {International Conference on Application of Natural Language to Information Systems}, pages = {277--287}, publisher = {Springer}, title = {{Unsupervised ontology acquisition from plain texts: the OntoGain system}}, year = {2010} } @book{Durantaye2009, address = {Stanford}, author = {de la Durantaye, Leland}, publisher = {Stanford University Press}, title = {{Giorgio Agamben: A Critical Introduction}}, year = {2009} } @article{Easwaran2011, author = {Easwaran, Kenny}, journal = {Philosophy Compass}, number = {5}, pages = {312--320}, title = {{Bayesianism I: Introduction and arguments in favor}}, volume = {6}, year = {2011} } @article{Easwaran2011a, author = {Easwaran, Kenny}, journal = {Philosophy Compass}, number = {5}, pages = {321--332}, title = {{Bayesianism II: Applications and Criticisms}}, volume = {6}, year = {2011} } @incollection{Eldred2016, abstract = {Through a lengthy e-mail conversation in 1999, Rafael Capurro and I undertook the maieutics of a hermeneutic approach to a digital phenomenon that we dubbed digital ontology. The present paper employs this ontology to deepen discussion of the idea of the Universal Turing Machine, which serves as the ontological blueprint for the basic unit of today's artificial cyberworld. Its way of working therefore also serves as a guide to investigating the spatiality and temporality of this artificial dimension to which humanity is today more than willingly exposed. In particular, an investigation of the Turing machine's linear, logically causal ‘temporality' shows up a contrast with the three-dimensional, ‘ecstatic' temporality of the world shared by human beings. 
Properly speaking, a Turing machine is a contraption for copulating bit-strings timelessly; hence a digital ‘copulator’. Only by virtue of being nested in the existential world of human beings is the cyberworld in time.}, address = {Wiesbaden}, author = {Eldred, Michael}, booktitle = {Information Cultures in the Digital Age: A Festschrift in Honor of Rafael Capurro}, doi = {10.1007/978-3-658-14681-8_4}, editor = {Kelly, Matthew and Bielby, Jared}, pages = {65--81}, publisher = {Springer Fachmedien}, title = {{Turing's Cyberworld}}, year = {2016} } @incollection{Emad1993, address = {Bloomington}, author = {Emad, Parvis}, booktitle = {Reading Heidegger}, editor = {Sallis, John}, pages = {323--340}, publisher = {Indiana University Press}, title = {{Thinking More Deeply into the Question of Translation: Essential Translation and the Unfolding of Language}}, year = {1993} } @article{Emery1976, author = {Emery, Allan Moore}, journal = {Nineteenth-Century Fiction}, publisher = {University of California Press}, number = {2}, pages = {170--187}, title = {{The Alternatives of Melville's "Bartleby"}}, url = {http://www.jstor.org/stable/2933500}, volume = {31}, year = {1976} } @misc{Espinoza2020, author = {Espinoza, Javier and Murgia, Madhumita}, booktitle = {Financial Times}, title = {{EU backs away from call for blanket ban on facial recognition tech}}, url = {https://www.ft.com/content/ff798944-4cc6-11ea-95a0-43d18ec715f5}, urldate = {2020-06-30}, year = {2020} } @TechReport{Commission2020, author = {{European Commission}}, institution = {European Commission}, title = {{White Paper On Artificial Intelligence - A European approach to excellence and trust}}, pages = {27}, url = {https://ec.europa.eu/info/sites/info/files/commission-white-paper-artificial-intelligence-feb2020{\_}en.pdf}, address = {Brussels}, groups = {Ethical AI}, year = {2020}, } @misc{EuropeanDigitalRights2020, author = {{European Digital Rights}}, title = {{Ban biometric mass surveillance!}}, url = 
{https://edri.org/blog-ban-biometric-mass-surveillance/}, urldate = {2020-07-01}, year = {2020} } @techreport{Eynde2004, author = {Eynde, F.}, institution = {Centrum voor Computerlingu{\"{i}}stiek, K.U. Leuven.}, month = {jan}, pages = {74}, title = {{Part of Speech Tagging en Lemmatisering van het Corpus Gesproken Nederlands}}, year = {2004} } @book{Faverey1993, address = {Amsterdam}, author = {Faverey, Hans}, publisher = {Uitgeverij De Bezige Bij}, title = {{Verzamelde Gedichten}}, year = {1993} } @article{Felman1985, author = {Felman, Shoshana}, journal = {Yale French Studies}, number = {69}, pages = {49--72}, title = {{Postal Survival, or the Question of the Navel}}, year = {1985} } @book{Felman1992, address = {London}, author = {Felman, Shoshana and Laub, Dori}, publisher = {Routledge}, title = {{Testimony: crises of witnessing in literature, psychoanalysis, and history}}, year = {1992} } @incollection{Figal2002, address = {Cambridge}, author = {Figal, G{\"{u}}nter}, booktitle = {The Cambridge Companion to Gadamer}, editor = {Dostal, Robert J}, pages = {102--125}, publisher = {Cambridge University Press}, title = {{The Doing of the Thing Itself: Gadamer's Hermeneutic Ontology of Language}}, year = {2002} } @article{Fiorillo2012, author = {Fiorillo, Christopher D.}, journal = {Information}, pages = {175--203}, title = {{Beyond Bayes: On the need for a unified and Jaynesian definition of probability and information within neuroscience}}, volume = {3}, year = {2012} } @article{Floridi2005, abstract = {The paper outlines a new interpretation of informational privacy and of its moral value. 
The main theses defended are: (a) informational privacy is a function of the ontological friction in the infosphere, that is, of the forces that oppose the information flow within the space of information; (b) digital ICTs (information and communication technologies) affect the ontological friction by changing the nature of the infosphere (re-ontologization); (c) digital ICTs can therefore both decrease and protect informational privacy but, most importantly, they can also alter its nature and hence our understanding and appreciation of it; (d) a change in our ontological perspective, brought about by digital ICTs, suggests considering each person as being constituted by his or her information and hence regarding a breach of one's informational privacy as a form of aggression towards one's personal identity.}, author = {Floridi, Luciano}, doi = {10.1007/s10676-006-0001-7}, journal = {Ethics and Information Technology}, number = {4}, pages = {185--200}, title = {{The Ontological Interpretation of Informational Privacy}}, url = {https://doi.org/10.1007/s10676-006-0001-7}, volume = {7}, year = {2005} } @book{Floridi2015c, abstract = {What is the impact of information and communication technologies (ICTs) on the human condition? In order to address this question, in 2012 the European Commission organized a research project entitled The Onlife Initiative: concept reengineering for rethinking societal concerns in the digital transition. This volume collects the work of the Onlife Initiative. It explores how the development and widespread use of ICTs have a radical impact on the human condition. ICTs are not mere tools but rather social forces that are increasingly affecting our self-conception (who we are), our mutual interactions (how we socialise); our conception of reality (our metaphysics); and our interactions with reality (our agency). 
In each case, ICTs have a huge ethical, legal, and political significance, yet one with which we have begun to come to terms only recently. The impact exercised by ICTs is due to at least four major transformations: the blurring of the distinction between reality and virtuality; the blurring of the distinction between human, machine and nature; the reversal from information scarcity to information abundance; and the shift from the primacy of stand-alone things, properties, and binary relations, to the primacy of interactions, processes and networks. Such transformations are testing the foundations of our conceptual frameworks. Our current conceptual toolbox is no longer fitted to address new ICT-related challenges. This is not only a problem in itself. It is also a risk, because the lack of a clear understanding of our present time may easily lead to negative projections about the future. The goal of The Manifesto, and of the whole book that contextualises, is therefore that of contributing to the update of our philosophy. It is a constructive goal. The book is meant to be a positive contribution to rethinking the philosophy on which policies are built in a hyperconnected world, so that we may have a better chance of understanding our ICT-related problems and solving them satisfactorily. The Manifesto launches an open debate on the impacts of ICTs on public spaces, politics and societal expectations toward policymaking in the Digital Agenda for Europe's remit. 
More broadly, it helps start a reflection on the way in which a hyperconnected world calls for rethinking the referential frameworks on which policies are built.}, booktitle = {The Onlife Manifesto: Being Human in a Hyperconnected Era}, doi = {10.1007/978-3-319-04093-6}, editor = {Floridi, Luciano}, title = {{The onlife manifesto: Being human in a hyperconnected era}}, year = {2015} } @incollection{Floridi2014b, address = {London}, author = {Floridi, Luciano}, booktitle = {Philosophy, Computing and Information Science}, chapter = {1}, doi = {10.4324/9781315653938}, editor = {Hagengruber, Ruth and Riss, Uwe V.}, pages = {19--28}, publisher = {Pickering {\&} Chatto}, title = {{The fourth revolution in our self-understanding}}, year = {2014} } @book{Floridi2014, address = {Oxford}, author = {Floridi, Luciano}, pages = {265}, publisher = {Oxford University Press}, title = {{The fourth revolution: How the infosphere is changing human reality}}, year = {2014} } @book{Forbes2005, address = {London}, author = {Forbes, Peter}, publisher = {Fourth Estate}, title = {{The Gecko's Foot: Bio-inspiration: Engineered from Nature}}, year = {2005} } @article{Fourcade2016, abstract = {What do markets see when they look at people? Information dragnets increasingly yield huge quantities of individual-level data, which are analyzed to sort and slot people into categories of taste, riskiness or worth. These tools deepen the reach of the market and define new strategies of profit-making. We present a new theoretical framework for understanding their development. We argue that (a) modern organizations follow an institutional data imperative to collect as much data as possible; (b) as a result of the analysis and use of this data, individuals accrue a form of capital flowing from their positions as measured by various digital scoring and ranking methods; and (c) the facticity of these scoring methods makes them organizational devices with potentially stratifying effects. 
They offer firms new opportunities to structure and price offerings to consumers. For individuals, they create classification situations that identify shared life-chances in product and service markets. We discuss the implications of these processes and argue that they tend toward a new economy of moral judgment, where outcomes are experienced as morally deserved positions based on prior good actions and good tastes, as measured and classified by this new infrastructure of data collection and analysis.}, author = {Fourcade, Marion and Healy, Kieran}, doi = {10.1093/ser/mww033}, issn = {1475-1461}, journal = {Socio-Economic Review}, month = {dec}, number = {1}, pages = {9--29}, title = {{Seeing like a market}}, url = {https://doi.org/10.1093/ser/mww033}, volume = {15}, year = {2016} } @article{Fragopanagos2005, abstract = {In this paper, we outline the approach we have developed to construct an emotion-recognising system. It is based on guidance from psychological studies of emotion, as well as from the nature of emotion in its interaction with attention. A neural network architecture is constructed to be able to handle the fusion of different modalities (facial features, prosody and lexical content in speech). Results from the network are given and their implications discussed, as are implications for future direction for the research.}, author = {Fragopanagos, N. 
and Taylor, J.G.}, doi = {10.1016/j.neunet.2005.03.006}, issn = {08936080}, journal = {Neural Networks}, number = {4}, pages = {389--405}, title = {{Emotion recognition in human–computer interaction}}, volume = {18}, year = {2005} } @incollection{sep-technology, annote = {(accessed: 2019-03-27)}, author = {Franssen, Maarten and Lokhorst, Gert-Jan and van de Poel, Ibo}, booktitle = {The Stanford Encyclopedia of Philosophy}, edition = {Fall 2018}, editor = {Zalta, Edward N}, publisher = {Metaphysics Research Lab, Stanford University}, title = {{Philosophy of Technology}}, url = {https://plato.stanford.edu/archives/fall2018/entries/technology/}, year = {2018} } @article{French2010, author = {French, John R J and Ahmed, Berhan M}, doi = {10.1111/j.1744-7917.2009.01306.x}, journal = {Insect Science}, pages = {154--162}, title = {{The challenge of biomimetic design for carbon-neutral buildings using termite engineering}}, volume = {17}, year = {2010} } @article{Gadamer1981, author = {Gadamer, Hans-Georg}, journal = {The Monist}, number = {4}, pages = {423--433}, title = {{Heidegger und die Geschichte der Philosophie}}, volume = {64}, year = {1981} } @book{Gadamer2004, address = {London and New York}, author = {Gadamer, Hans-Georg}, publisher = {Continuum}, title = {{Truth and Method}}, translator = {Weinsheimer, Joel C. and Marshall, Donald G.}, year = {2004} } @article{Gander2004, author = {Gander, Hans-Helmuth}, journal = {Research in Phenomenology}, pages = {121--136}, title = {{Between Strangeness and Familiarity: Towards Gadamer's Conception of Effective History}}, volume = {34}, year = {2004} } @book{Gasche1997, address = {Cambridge}, author = {Gasch{\'{e}}, Rodolphe}, publisher = {Harvard University Press}, title = {{The Tain of the Mirror. 
Derrida and the Philosophy of Reflection}}, year = {1997} } @misc{OpenDS, author = {{German Research Center for Artificial Intelligence}}, title = {{OpenDS: Open Source Driving Simulation}}, url = {https://opends.dfki.de/}, urldate = {2019-10-31} } @article{Glass2012, abstract = {In the form of inference known as inference to the best explanation there are various ways to characterise what is meant by the best explanation. This paper considers a number of such characterisations including several based on confirmation measures and several based on coherence measures. The goal is to find a measure which adequately captures what is meant by ‘best' and which also yields the truth with a high degree of probability. Computer simulations are used to show that the overlap coherence measure achieves this goal, enabling the true explanation to be identified almost as often as an approach which simply selects the most probable explanation. Further advantages to this approach are also considered in the case where there is uncertainty in the prior probability distribution.}, author = {Glass, David H}, doi = {10.1007/s11229-010-9829-9}, issn = {1573-0964}, journal = {Synthese}, number = {3}, pages = {411--427}, title = {{Inference to the best explanation: does it track truth?}}, url = {https://doi.org/10.1007/s11229-010-9829-9}, volume = {185}, year = {2012} } @article{Gomaa2013, author = {Gomaa, Wael H and Fahmy, Aly A}, journal = {International Journal of Computer Applications}, number = {13}, pages = {13--18}, publisher = {Citeseer}, title = {{A survey of text similarity approaches}}, volume = {68}, year = {2013} } @book{Gonzalez2009, address = {University Park}, author = {Gonzalez, Fransisco J}, publisher = {The Pennsylvania State University Press}, title = {{Plato and Heidegger: A Question of Dialogue}}, year = {2009} } @techreport{Grapperhaus2020, author = {Grapperhaus, Ferdinand}, institution = {Ministerie van Justitie en Veiligheid}, pages = {26}, title = {{Beantwoording 
schriftelijke vragen AI bij de politie}}, url = {https://www.rijksoverheid.nl/regering/bewindspersonen/ferdinand-grapperhaus/documenten/kamerstukken/2020/02/18/tk-beantwoording-schriftelijke-vragen-ai-bij-de-politie}, year = {2020} } @incollection{Grondin2002, address = {Cambridge}, author = {Grondin, Jean}, booktitle = {The Cambridge Companion to Gadamer}, editor = {Dostal, Robert J}, publisher = {Cambridge University Press}, title = {{Gadamer's Basic Understanding of Understanding}}, year = {2002} } @misc{Grondin1994, address = {New Haven and London}, author = {Grondin, Jean}, publisher = {Yale University Press}, title = {{Introduction to Philosophical Hermeneutics}}, year = {1994} } @phdthesis{Grootjen, author = {Grootjen, F. A.}, pages = {245}, school = {Radboud University}, title = {{A Pragmatic Approach to the Conceptualisation of Language}}, year = {2005} } @article{Guglielmo2009, annote = {doi: 10.1080/00201740903302600}, author = {Guglielmo, Steve and Monroe, Andrew E and Malle, Bertram F}, doi = {10.1080/00201740903302600}, issn = {0020-174X}, journal = {Inquiry}, month = {oct}, number = {5}, pages = {449--466}, publisher = {Routledge}, title = {{At the Heart of Morality Lies Folk Psychology}}, url = {https://doi.org/10.1080/00201740903302600}, volume = {52}, year = {2009} } @article{Guignon1990, author = {Guignon, Charles}, journal = {Philosophy and Phenomenological Research}, number = {4}, pages = {649--672}, title = {{Philosophy after Wittgenstein and Heidegger}}, volume = {50}, year = {1990} } @misc{Gutting2012, author = {Gutting, Gary}, booktitle = {The New York Times}, month = {feb}, title = {{Bridging the Analytic-Continental Divide}}, url = {https://opinionator.blogs.nytimes.com/2012/02/19/bridging-the-analytic-continental-divide/}, year = {2012} } @Article{HaCohen-Kerner2014, author = {HaCohen-Kerner, Yaakov and Margaliot, Orr}, date = {2014-08}, journaltitle = {Cybernetics and Systems}, title = {{Authorship Attribution of 
\emph{Responsa} using Clustering}}, doi = {10.1080/01969722.2014.945311}, issn = {0196-9722}, number = {6}, pages = {530--545}, volume = {45}, abstract = {Authorship attribution of text documents is a ‘hot’ domain in research; however, almost all of its applications use supervised machine learning (ML) methods. In this research, we explore authorship attribution as a clustering problem, that is, we attempt to complete the task of authorship attribution using unsupervised machine learning methods. The application domain is responsa, which are answers written by well-known Jewish rabbis in response to various Jewish religious questions. We have built a corpus of 6,079 responsa, composed by five authors who lived mainly in the 20th century and containing almost 10 M words. The clustering tasks that have been performed were according to two or three or four or five authors. Clustering has been performed using three kinds of word lists: most frequent words (FW) including function words (stopwords), most frequent filtered words (FFW) excluding function words, and words with the highest variance values (HVW); and two unsupervised machine learning methods: K-means and Expectation Maximization (EM). The best clustering tasks according to two or three or four authors achieved results above 98{\%}, and the improvement rates were above 40{\%} in comparison to the ‘majority’ (baseline) results. The EM method has been found to be superior to K-means for the discussed tasks. FW has been found as the best word list, far superior to FFW. FW, in contrast to FFW, includes function words, which are usually regarded as words that have little lexical meaning. This might imply that normalized frequencies of function words can serve as good indicators for authorship attribution using unsupervised ML methods. 
This finding supports previous findings about the usefulness of function words for other tasks, such as authorship attribution, using supervised ML methods, and genre and sentiment classification.}, annotation = {doi: 10.1080/01969722.2014.945311}, publisher = {Taylor {\&} Francis}, } @Article{Hagendorff2020, author = {Hagendorff, Thilo}, title = {{The Ethics of AI Ethics: An Evaluation of Guidelines}}, doi = {10.1007/s11023-020-09517-8}, issn = {1572-8641}, number = {1}, pages = {99--120}, url = {https://doi.org/10.1007/s11023-020-09517-8}, volume = {30}, abstract = {Current advances in research, development and application of artificial intelligence (AI) systems have yielded a far-reaching discourse on AI ethics. In consequence, a number of ethics guidelines have been released in recent years. These guidelines comprise normative principles and recommendations aimed to harness the “disruptive” potentials of new AI technologies. Designed as a semi-systematic evaluation, this paper analyzes and compares 22 guidelines, highlighting overlaps but also omissions. As a result, I give a detailed overview of the field of AI ethics. 
Finally, I also examine to what extent the respective ethical principles and values are implemented in the practice of research, development and application of AI systems—and how the effectiveness in the demands of AI ethics can be improved.}, groups = {Ethical AI}, journal = {Minds and Machines}, year = {2020}, } @misc{Hale2020, author = {Hale, Kori}, booktitle = {Forbes}, title = {{Amazon, Microsoft {\&} IBM Slightly Social Distancing From The {\$}8 Billion Facial Recognition Market}}, url = {https://www.forbes.com/sites/korihale/2020/06/15/amazon-microsoft--ibm-slightly-social-distancing-from-the-8-billion-facial-recognition-market/}, urldate = {2020-06-01}, year = {2020} } @incollection{Halpin2013, address = {Malden}, author = {Halpin, Harry and Monnin, Alexandre}, booktitle = {Philosophical Engineering: Toward a Philosophy of the Web}, chapter = {12}, doi = {10.1002/9781118700143.ch12}, editor = {Halpin, Harry and Monnin, Alexandre}, isbn = {9781118700143}, pages = {181--186}, publisher = {Wiley Blackwell}, title = {{Interview with Tim Berners-Lee}}, year = {2014} } @book{Hamilton2014, address = {New York}, author = {Hamilton, Andy}, publisher = {Routledge}, title = {{Wittgenstein and On Certainty}}, year = {2014} } @incollection{Harding2009, address = {Oxford}, author = {Harding, Anthony John}, booktitle = {The Oxford Handbook of Samual Taylor Coleridge}, editor = {Burwick, Frederic}, pages = {455--472}, publisher = {Oxford University Press}, title = {{Coleridge: Biblical and Classical Literature}}, year = {2009} } @article{Harman1965, author = {Harman, Gilbert H}, doi = {10.2307/2183532}, issn = {00318108, 15581470}, journal = {The Philosophical Review}, month = {may}, number = {1}, pages = {88--95}, publisher = {Duke University Press}, title = {{The Inference to the Best Explanation}}, url = {http://www.jstor.org/stable/2183532}, volume = {74}, year = {1965} } @misc{Harper2015, author = {Harper, Douglas}, title = {{Online Etymology Dictionary}}, url = 
{http://www.etymonline.com}, volume = {2015}, year = {2015} } @article{Harries1968, author = {Harries, Karsten}, doi = {10.1007/BF00135944}, issn = {0022-5363}, journal = {The Journal of Value Inquiry}, language = {English}, number = {4}, pages = {281--291}, title = {{Wittgenstein and Heidegger: The relationship of the philosopher to language}}, url = {http://dx.doi.org/10.1007/BF00135944}, volume = {2}, year = {1968} } @incollection{Haselager2008, author = {Haselager, P and {Van Dijk}, J and {Van Rooij}, I}, booktitle = {Handbook of Cognitive Science: An Embodied Approach.}, title = {{A Lazy Brain? Embodied Embedded Cognition and Cognitive Neuroscience}}, year = {2008} } @article{Haugeland1996, author = {Haugeland, John}, journal = {Artificial Intelligence}, pages = {119--128}, title = {{Body and world: a review of What Computers Still Can't Do: A Critique of Artificial Reason (Hubert L. Dreyfus)}}, volume = {80}, year = {1996} } @misc{Hawkins2019, author = {Hawkins, Andrew J.}, title = {{Tesla's ‘Full Self-Driving' feature may get early-access release by the end of 2019}}, url = {https://www.theverge.com/2019/10/23/20929529/tesla-full-self-driving-release-2019-beta}, urldate = {2019-10-30}, year = {2019} } @book{Heelas1996, author = {Heelas, Paul and Lash, Scott and Morris, Paul}, title = {{Detraditionalization: Critical Reflections on Authority and Identity}}, year = {1996} } @incollection{Heidegger1984a, address = {San Francisco}, author = {Heidegger, Martin}, booktitle = {Early Greek Thinking: The Dawn of Western Philosophy}, editor = {Capuzzi, Frank A.}, pages = {59--78}, publisher = {Harper {\&} Row}, title = {{Logos}}, translator = {Krell, David Farrell and Capuzzi, Frank A.}, year = {1984} } @incollection{Heidegger1982, address = {New York}, author = {Heidegger, Martin}, booktitle = {On the way to language}, isbn = {0060638591 9780060638597}, language = {Engels}, pages = {1--56}, publisher = {Harper {\&} Row}, title = {{A Dialogue on Language}}, translator 
= {Hertz, Peter D.}, year = {1982} } @incollection{Heidegger1985, address = {Frankfurt am Main}, author = {Heidegger, Martin}, booktitle = {Gesamtausgabe. 1. Abteilung: Ver{\"{o}}ffentlichte Schriften 1910--1976. Band 12: Unterwegs zur Sprache}, pages = {79--146}, publisher = {Vittorio Klostermann}, title = {{Aus einem Gespr{\"{a}}ch von der Sprache. Zwischen einem Japaner und einem Fragenden}}, year = {1985} } @book{Heidegger2000b, address = {Frankfurt am Main}, author = {Heidegger, Martin}, publisher = {Vittorio Klostermann}, title = {{{\"{U}}ber den Humanismus}}, year = {2000} } @incollection{Heidegger1976, address = {Frankfurt am Main}, author = {Heidegger, Martin}, booktitle = {Wegmarken}, pages = {313--364}, publisher = {Vittorio Klostermann}, series = {Gesamtausgabe}, title = {{Brief {\"{U}}ber den Humanismus}}, volume = {9}, year = {1976} } @book{Heidegger2012, address = {Bloomington}, author = {Heidegger, Martin}, publisher = {Indiana University Press}, title = {{Contributions to Philosophy}}, translator = {Vallega-Neu, Daniela and Rojcewicz, Richard}, year = {2012} } @incollection{Heidegger1972, address = {New York}, author = {Heidegger, Martin}, booktitle = {On Time and Being}, pages = {25--54}, publisher = {Harper {\&} Row}, title = {{Summary of a Seminar on the Lecture "Time and Being"}}, translator = {Stambaugh, Joan}, year = {1972} } @incollection{Heidegger1984, address = {San Francisco}, author = {Heidegger, Martin}, booktitle = {Early Greek Thinking: The Dawn of Western Philosophy}, editor = {Capuzzi, Frank A}, pages = {79--101}, publisher = {Harper {\&} Row}, title = {{Moira}}, year = {1984} } @book{Heidegger2010, address = {Albany}, author = {Heidegger, Martin}, publisher = {State University of New York Press}, title = {{Being and Time}}, translator = {Stambaugh, Joan}, year = {2010} } @misc{Heidegger2001a, address = {New York}, author = {Heidegger, Martin}, booktitle = {Poetry, Language, Thought}, pages = {209--227}, publisher = {Harper 
Perennial}, title = {{". . . Poetically Man Dwells . . ."}}, year = {2001} } @incollection{Heidegger1984b, address = {San Francisco}, author = {Heidegger, Martin}, booktitle = {Early Greek Thinking: The Dawn of Western Philosophy}, editor = {Capuzzi, Frank A}, pages = {13--58}, publisher = {Harper {\&} Row}, title = {{The Anaximander Fragment}}, year = {1984} } @misc{Heidegger2001, address = {New York}, author = {Heidegger, Martin}, booktitle = {Poetry, Language, Thought}, pages = {141--159}, publisher = {Harper Perennial}, title = {{Building Dwelling Thinking}}, year = {2001} } @book{Heidegger1955, address = {Frankfurt am Main}, author = {Heidegger, Martin}, publisher = {Vittorio Klostermann}, title = {{Was ist Metaphysik?}}, year = {1955} } @incollection{Heidegger1993, address = {New York}, author = {Heidegger, Martin}, booktitle = {Basic Writings}, editor = {Krell, David Farrell}, pages = {213--266}, publisher = {Harper {\&} Row}, title = {{Letter on Humanism}}, year = {1993} } @misc{Heidegger1992, address = {Bloomington}, author = {Heidegger, Martin}, editor = {Rojcewicz, Richard}, publisher = {Indiana University Press}, title = {{Parmenides}}, year = {1992} } @book{Heidegger1972a, address = {New York}, author = {Heidegger, Martin}, publisher = {Harper {\&} Row}, title = {{On Time and Being}}, translator = {Stambaugh, Joan}, year = {1972} } @book{Heidegger1997, address = {Frankfurt am Main}, author = {Heidegger, Martin}, booktitle = {Gesamtausgabe}, publisher = {Vittorio Klostermann}, title = {{Der Satz vom Grund}}, volume = {1. 
Abteilung}, year = {1997} } @incollection{Heidegger1972b, address = {New York}, author = {Heidegger, Martin}, booktitle = {On Time and Being}, pages = {55--73}, publisher = {Harper {\&} Row}, title = {{The End of Philosophy and the Task of Thinking}}, translator = {Stambaugh, Joan}, year = {1972} } @incollection{Heidegger1993a, address = {New York}, author = {Heidegger, Martin}, booktitle = {Basic Writings}, editor = {Krell, David Farrell}, pages = {307--341}, publisher = {Harper {\&} Row}, title = {{The Question Concerning Technology}}, year = {1993} } @book{Heidegger1991, address = {Bloomington}, author = {Heidegger, Martin}, publisher = {Indiana University Press}, title = {{The Principle of Reason}}, translator = {Lilly, Reginald}, year = {1991} } @incollection{Heidegger1972c, address = {New York}, author = {Heidegger, Martin}, booktitle = {On Time and Being}, pages = {1--24}, publisher = {Harper {\&} Row}, title = {{Time and Being}}, translator = {Stambaugh, Joan}, year = {1972} } @article{Heidegger1985a, author = {Heidegger, Martin and Harries, Karsten and Heidegger, Hermann}, journal = {The Review of Metaphysics}, number = {3}, pages = {467--502}, publisher = {Philosophy Education Society Inc.}, title = {{The Self-Assertion of the German University: Address, Delivered on the Solemn Assumption of the Rectorate of the University Freiburg the Rectorate 1933/34: Facts and Thoughts}}, url = {http://www.jstor.org/stable/20128182}, volume = {38}, year = {1985} } @article{Heikoop2019, annote = {doi: 10.1080/1463922X.2019.1574931}, author = {Heikoop, Dani{\"{e}}l and Hagenzieker, Marjan and Mecacci, Giulio and Calvert, Simeon and {Santoni De Sio}, Filippo and van Arem, Bart}, doi = {10.1080/1463922X.2019.1574931}, issn = {1463-922X}, journal = {Theoretical Issues in Ergonomics Science}, month = {nov}, number = {6}, pages = {711--730}, publisher = {Taylor {\&} Francis}, title = {{Human behaviour with automated driving systems: a quantitative framework for meaningful 
human control}}, url = {https://doi.org/10.1080/1463922X.2019.1574931}, volume = {20}, year = {2019} } @article{Henderson2014, abstract = {Two of the most influential theories about scientific inference are inference to the best explanation (IBE) and Bayesianism. How are they related? Bas van Fraassen has claimed that IBE and Bayesianism are incompatible rival theories, as any probabilistic version of IBE would violate Bayesian conditionalization. In response, several authors have defended the view that IBE is compatible with Bayesian updating. They claim that the explanatory considerations in IBE are taken into account by the Bayesian because the Bayesian either does or should make use of them in assigning probabilities (priors and/or likelihoods) to hypotheses. I argue that van Fraassen has not succeeded in establishing that IBE and Bayesianism are incompatible, but that the existing compatibilist response is also not satisfactory. I suggest that a more promising approach to the problem is to investigate whether explanatory considerations are taken into account by a Bayesian who assigns priors and likelihoods on his or her own terms. In this case, IBE would emerge from the Bayesian account, rather than being used to constrain priors and likelihoods. I provide a detailed discussion of the case of how the Copernican and Ptolemaic theories explain retrograde motion, and suggest that one of the key explanatory considerations is the extent to which the explanation a theory provides depends on its core elements rather than on auxiliary hypotheses. I then suggest that this type of consideration is reflected in the Bayesian likelihood, given priors that a Bayesian might be inclined to adopt even without explicit guidance by IBE. The aim is to show that IBE and Bayesianism may be compatible, not because they can be amalgamated, but rather because they capture substantially similar epistemic considerations. 
1 Introduction2 Preliminaries3 Inference to the Best Explanation4 Bayesianism5 The Incompatibilist View: Inference to the Best Explanation Contradicts Bayesianism 5.1 Criticism of the incompatibilist view6 Constraint-Based Compatibilism 6.1 Criticism of constraint-based compatibilism7 Emergent Compatibilism 7.1 Analysis of inference to the best explanation 7.1.1 Inference to the best explanation on specific hypotheses 7.1.2 Inference to the best explanation on general theories 7.1.3 Copernicus versus Ptolemy 7.1.4 Explanatory virtues 7.1.5 Summary 7.2 Bayesian account8 Conclusion}, author = {Henderson, Leah}, doi = {10.1093/bjps/axt020}, issn = {0007-0882}, journal = {The British Journal for the Philosophy of Science}, month = {sep}, number = {4}, pages = {687--715}, title = {{Bayesianism and Inference to the Best Explanation}}, url = {https://doi.org/10.1093/bjps/axt020}, volume = {65}, year = {2014} } @InCollection{Henderson2017, author = {Henderson, Leah}, booktitle = {Best Explanations: New Essays on Inference to the Best Explanation}, title = {{Bayesianism and IBE: The Case of Individual vs. Group Selection}}, doi = {10.1093/oso/9780198746904.001.0001}, isbn = {9780198746904}, language = {eng}, pages = {248--261}, publisher = {Oxford University Press}, url = {https://www.oxfordscholarship.com/10.1093/oso/9780198746904.001.0001/oso-9780198746904}, abstract = {Explanatory reasoning is quite common. Not only are rigorous inferences to the best explanation used pervasively in the sciences, explanatory reasoning is virtually ubiquitous in everyday life. Despite its widespread use, inference to the best explanation is still in need of precise formulation, and it remains controversial. On the one hand, supporters of explanationism take inference to the best explanation to be a justifying form of inference—some even take all justification to be a matter of explanatory reasoning. 
On the other hand, critics object that inference to the best explanation is not a fundamental form of inference, and some argue that we should be skeptical of inference to the best explanation in general. This volume brings together top epistemologists and philosophers of science to explore various aspects of inference to the best explanation and the debates surrounding it. The newly commissioned chapters in this volume constitute the cutting edge of research on the role explanatory considerations play in epistemology and philosophy of science.}, address = {Oxford}, keywords = {explanation, explanationism, inference to the best explanation, justification, reasoning ER}, year = {2017}, } @TechReport{HLEG-AI2019, author = {{High-Level Expert Group on Artificial Intelligence}}, date = {2019-04-08}, institution = {European Commission}, title = {{The European Commission's high-level expert group on Artificial Intelligence: Ethics guidelines for trustworthy AI}}, pages = {1--39}, groups = {Ethical AI}, keywords = {HLEG}, year = {2019}, } @TechReport{ALTAI2020, author = {{High-Level Expert Group on Artificial Intelligence}}, title = {{The Assessment List for Trustworthy Artificial Intelligence (ALTAI)}}, groups = {Ethical AI}, year = {2020}, } @book{HillisMiller2009, address = {New York}, author = {{Hillis Miller}, Joseph}, publisher = {Fordham University Press}, title = {{For Derrida}}, year = {2009} } @article{Hitchcock2007, annote = {doi: 10.1111/j.1933-1592.2007.00029.x}, author = {Hitchcock, Christopher}, doi = {10.1111/j.1933-1592.2007.00029.x}, issn = {0031-8205}, journal = {Philosophy and Phenomenological Research}, month = {mar}, number = {2}, pages = {433--440}, publisher = {John Wiley {\&} Sons, Ltd}, title = {{The Lovely and the Probable}}, url = {https://doi.org/10.1111/j.1933-1592.2007.00029.x}, volume = {74}, year = {2007} } @Book{Hobson1998, author = {Hobson, Marian}, title = {{Jacques Derrida: Opening Lines}}, publisher = {Routledge}, address = {London}, 
year = {1998}, } @misc{HOK, author = {HOK}, title = {{Planning + Urban Design}}, url = {http://www.hok.com/design/service/planning-urban-design/}, volume = {2016} } @Article{Hotho2003, author = {Hotho, Andreas and Maedche, Alexander and Staab, Steffen}, title = {{Ontology-based Text Document Clustering}}, pages = {451--452}, url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.14.8083}, abstract = {Text clustering typically involves clustering in a high dimensional space, which appears difficult with regard to virtually all practical settings. In addition, given a particular clustering result it is typically very hard to come up with a good explanation of why the text clusters have been constructed the way they are. In this paper, we propose a new approach for applying background knowledge during preprocessing in order to improve clustering results and allow for selection between results. We preprocess our input data applying an ontology-based heuristics for feature selection and feature aggregation. Thus, we construct a number of alternative text representations. Based on these representations, we compute multiple clustering results using K- Means. The results may be distinguished and explained by the corresponding selection of concepts in the ontology. Our results compare favourably with a sophisticated baseline preprocessing strategy.}, booktitle = {Institute AIFB, University Karlsruhe}, isbn = {3-540-00843-8}, year = {2003}, } @article{Hull2017a, abstract = {Foucault's 1979-80 Coll{\`{e}}ge de France lectures, On the Government of the Living, offer one way to situate the development of his later work, and in particular to understand his supposed turn away from biopolitics and governmentality to ethics and subjectivity. 
In this paper, I argue that (1) a unifying thread in most of Foucault's work from the late 1970s onward is an increasing concern with the centrality of confession as a primary technology of power in the Christian West; and (2) Neoliberalism is deeply confessional, and therefore highly suspect from a Foucauldian standpoint. (3) These connections are particularly evident in a Foucauldian reading of data analytics (“big data”).}, author = {Hull, Gordon}, doi = {10.2139/ssrn.2887480}, journal = {SSRN Electronic Journal}, title = {{Confessing Preferences: What Foucault's Government of the Living Can Tell Us About Neoliberalism and Big Data}}, year = {2017} } @InCollection{Hull2013, author = {Hull, Gordon}, booktitle = {The Nature of Technology}, title = {{Know thy Cyborg-Self: Thoughts on Socrates and Technological Literacy}}, chapter = {2}, doi = {10.1007/978-94-6209-269-3_3}, editor = {Clough, Michael P. and Olson, Joanne K. and Niederhausen, Dale S.}, isbn = {978-94-6209-269-3}, pages = {15--34}, abstract = {There are no doubt many good reasons to encourage a general technological literacy. Here is one: U.S. society purports to aspire to democracy. Insofar as we live in a technologically-mediated society, if we want that democracy to amount to more than the rule of an ignorant mob, the “people” need to have some understanding of the technologies surrounding them. 
At one level, this is clearly a political question, and questions about the limits of popular knowledge, how much technical skill is necessary to qualify as technologically literate, what sorts of reference frames can and should be brought to one's understanding of technology, and so forth, immediately present themselves.}, keywords = {Cognitive Enhancement, Human Nature, Intellectual Property, Technological Literac, y Extended Mind}, mendeley-tags = {Cognitive Enhancement,Human Nature,Intellectual Property,Technological Literac,y Extended Mind}, month = {jan}, year = {2013}, } @article{Hull2015, abstract = {The “privacy paradox” refers to the discrepancy between the concern individuals express for their privacy and the apparently low value they actually assign to it when they readily trade personal information for low-value goods online. In this paper, I argue that the privacy paradox masks a more important paradox: the self-management model of privacy embedded in notice-and-consent pages on websites and other, analogous practices can be readily shown to underprotect privacy, even in the economic terms favored by its advocates. The real question, then, is why privacy self-management occupies such a prominent position in privacy law and regulation. Borrowing from Foucault's late writings, I argue that this failure to protect privacy is also a success in ethical subject formation, as it actively pushes privacy norms and practices in a neoliberal direction. In other words, privacy self-management isn't about protecting people's privacy; it's about inculcating the idea that privacy is an individual, commodified good that can be traded for other market goods. Along the way, the self-management regime forces privacy into the market, obstructs the functioning of other, more social, understandings of privacy, and occludes the various ways that individuals attempt to resist adopting the market-based view of themselves and their privacy. 
Throughout, I use the analytics practices of Facebook and social networking sites as a sustained case study of the point.}, author = {Hull, Gordon}, doi = {10.1007/s10676-015-9363-z}, journal = {Ethics and Information Technology}, month = {may}, title = {{Successful failure: what Foucault can teach us about privacy self-management in a world of Facebook and big data}}, volume = {17}, year = {2015} } @book{Husserl1970, address = {Evanston}, author = {Husserl, Edmund}, publisher = {Northwestern University Press}, title = {{The Crisis of European Sciences and Transcendental Phenomenology: An Introduction to Phenomenological Philosophy}}, translator = {Carr, David}, year = {1970} } @InProceedings{Ignatov2015, author = {Ignatov, D. I.}, booktitle = {Communications in Computer and Information Science}, title = {{Introduction to formal concept analysis and its applications in information retrieval and related fields}}, doi = {10.1007/978-3-319-25485-2_3}, isbn = {9783319254845}, pages = {42--141}, publisher = {Springer Verlag}, volume = {505}, abstract = {This paper is a tutorial on Formal Concept Analysis (FCA) and its applications. FCA is an applied branch of Lattice Theory, a mathematical discipline which enables formalisation of concepts as basic units of human thinking and analysing data in the object-attribute form. Originated in early 80s, during the last three decades, it became a popular human-centred tool for knowledge representation and data analysis with numerous applications. 
Since the tutorial was specially prepared for RuSSIR 2014, the covered FCA topics include Information Retrieval with a focus on visualisation aspects, Machine Learning, Data Mining and Knowledge Discovery, Text Mining and several others.}, issn = {18650929}, keywords = {Biclustering, Concept lattices, Data mining, Formal Concept Analysis, Information retrieval, Knowledge discovery, Machine learning, Multimodal clustering, Text mining}, year = {2015}, } @incollection{Ijsseling1993, address = {Bloomington}, author = {Ijsseling, Samuel}, booktitle = {Reading Heidegger}, editor = {Sallis, John}, pages = {348--351}, publisher = {Indiana University Press}, title = {{Mimesis and Translation}}, year = {1993} } @InCollection{Ilieva2007, author = {Ilieva, M. G. and Ormandjieva, O.}, booktitle = {Proceedings of the 11th IASTED International Conference on Software Engineering and Applications}, title = {{Natural language processing and formal concept analysis technologies for automatic building of domain model}}, isbn = {978-0-88986-706-2}, pages = {445--452}, publisher = {ACTA Press}, url = {http://dl.acm.org/citation.cfm?id=1647636.1647713}, address = {Cambridge, Massachusetts}, keywords = {AI approach to requirements engineering, FCA, NLP, domain model, knowledge representation}, mendeley-tags = {AI approach to requirements engineering,FCA,NLP,domain model,knowledge representation}, year = {2007}, } @article{Ingram, author = {Ingram, David and Heelas, Paul and Lash, Scott and Morris, Paul}, doi = {10.1086/231135}, issn = {00029602, 15375390}, journal = {American Journal of Sociology}, number = {6}, pages = {1727--1729}, publisher = {The University of Chicago Press}, title = {{Book Review}}, url = {http://www.jstor.org.ru.idm.oclc.org/stable/10.1086/231135}, volume = {102} } @techreport{iPRAW2018, annote = {afkorting: iPRAW}, author = {{International Panel on the Regulation of Autonomous Weapons (iPRAW)}}, title = {{Concluding Report: Recommendations to the GGE}}, url = 
{https://www.ipraw.org/wp-content/uploads/2018/12/2018-12-14{\_}iPRAW{\_}Concluding-Report.pdf}, year = {2018} } @article{Ioannou2005, abstract = {Extracting and validating emotional cues through analysis of users' facial expressions is of high importance for improving the level of interaction in man machine communication systems. Extraction of appropriate facial features and consequent recognition of the user's emotional state that can be robust to facial expression variations among different users is the topic of this paper. Facial animation parameters (FAPs) defined according to the ISO MPEG-4 standard are extracted by a robust facial analysis system, accompanied by appropriate confidence measures of the estimation accuracy. A novel neurofuzzy system is then created, based on rules that have been defined through analysis of FAP variations both at the discrete emotional space, as well as in the 2D continuous activation–evaluation one. The neurofuzzy system allows for further learning and adaptation to specific users' facial expression characteristics, measured though FAP estimation in real life application of the system, using analysis by clustering of the obtained FAP values. Experimental studies with emotionally expressive datasets, generated in the EC IST ERMIS project indicate the good performance and potential of the developed technologies.}, author = {Ioannou, Spiros V. and Raouzaiou, Amaryllis T. and Tzouvaras, Vasilis A. and Mailis, Theofilos P. and Karpouzis, Kostas C. and Kollias, Stefanos D.}, doi = {10.1016/j.neunet.2005.03.004}, issn = {08936080}, journal = {Neural Networks}, number = {4}, pages = {423--435}, title = {{Emotion recognition through facial expression analysis based on a neurofuzzy network}}, volume = {18}, year = {2005} } @Article{Iqbal2010, author = {Iqbal, Farkhund and Binsalleeh, Hamad and Fung, Benjamin C. M. 
and Debbabi, Mourad}, title = {{Mining writeprints from anonymous e-mails for forensic investigation}}, doi = {10.1016/j.diin.2010.03.003}, issn = {1742-2876}, number = {1}, pages = {56--64}, url = {http://www.sciencedirect.com/science/article/pii/S1742287610000162}, volume = {7}, abstract = {Many criminals exploit the convenience of anonymity in the cyber world to conduct illegal activities. E-mail is the most commonly used medium for such activities. Extracting knowledge and information from e-mail text has become an important step for cybercrime investigation and evidence collection. Yet, it is one of the most challenging and time-consuming tasks due to special characteristics of e-mail dataset. In this paper, we focus on the problem of mining the writing styles from a collection of e-mails written by multiple anonymous authors. The general idea is to first cluster the anonymous e-mail by the stylometric features and then extract the writeprint, i.e., the unique writing style, from each cluster. We emphasize that the presented problem together with our proposed solution is different from the traditional problem of authorship identification, which assumes training data is available for building a classifier. Our proposed method is particularly useful in the initial stage of investigation, in which the investigator usually have very little information of the case and the true authors of suspicious e-mail collection. Experiments on a real-life dataset suggest that clustering by writing style is a promising approach for grouping e-mails written by the same author.}, journal = {Digital Investigation}, keywords = {Authorship analysis, Classification, Clustering, E-mail, Forensic investigation, Stylometric features, Writeprint, Writing styles}, year = {2010}, } @article{Iranzo2008, abstract = {Bayesianism and Inference to the best explanation (IBE) are two different models of inference. 
Recently there has been some debate about the possibility of "bayesianizing" IBE. Firstly I explore several alternatives to include explanatory considerations in Bayes's Theorem. Then I distinguish two different interpretations of prior probabilities: "IBE-Bayesianism" (IBE-Bay) and "frequentist-Bayesianism" (Freq-Bay). After detailing the content of the latter, I propose a rule for assessing the priors. I also argue that Freq-Bay: (i) endorses a role for explanatory value in the assessment of scientific hypotheses; (ii) avoids a purely subjectivist reading of prior probabilities; and (iii) fits better than IBE-Bayesianism with two basic facts about science, i.e., the prominent role played by empirical testing and the existence of many scientific theories in the past that failed to fulfil their promises and were subsequently abandoned.}, author = {Iranzo, Valeriano}, issn = {04954548, 2171679X}, journal = {Theoria: An International Journal for Theory, History and Foundations of Science}, pages = {89--106}, publisher = {University of the Basque Country (UPV/EHU)}, title = {{Bayesianism and inference to the best explanation}}, url = {http://www.jstor.org/stable/23923920}, volume = {61}, year = {2008} } @article{ZuiderveenBorgesius2016, author = 
{{Zuiderveen Borgesius}, Frederik J. and Trilling, Damian and Moeller, Judith and Bodo, Balazs and de Vreese, Claes and Helberger, Natali}, journal = {Internet Policy Review}, doi = {10.14763/2016.1.401}, month = {mar}, title = {{Should we worry about filter bubbles?}}, volume = {5}, year = {2016} } @incollection{Jabbari2018, author = {Jabbari, Simin and Stoffel, Kilian}, booktitle = {6th International Conference, MIKE 2018, Cluj-Napoca, Romania, December 20--22, 2018, Proceedings}, doi = {10.1007/978-3-030-05918-7_1}, isbn = {978-3-030-05917-0}, month = {dec}, pages = {1--10}, title = {{FCA-Based Ontology Learning from Unstructured Textual Data}}, year = {2018} } @incollection{Jacobs2004, address = {New York}, author = {Jacobs, Jane M}, booktitle = {The End of Tradition?}, editor = {AlSayyad, Nezar}, pages = {29--44}, publisher = {Routledge}, title = {{Tradition is (not) modern: Deterritorializing globalization}}, year = {2004} } @article{Jacobsen1996, author = {Jacobsen, Rockney}, journal = {The Philosophical Quarterly}, number = {182}, pages = {12--30}, title = {{Wittgenstein on Self-Knowledge and Self-Expression}}, volume = {46}, year = {1996} } @incollection{Jeffrey1965, address = {Chicago}, author = {Jeffrey, Richard}, booktitle = {The Logic of Decision}, chapter = {11}, publisher = {University of Chicago Press}, title = {{Probability Kinematics}}, year = {1965} } @article{Jordheim2012, author = {Jordheim, Helge}, journal = {History and Theory}, pages = {151--171}, title = {{Against Periodization: Koselleck's Theory of Multiple Temporalities}}, volume = {51}, year = {2012} } @article{Joronen2013, author = {Joronen, Mikko}, doi = {10.1111/j.1475-5661.2012.00550.x}, issn = {1475-5661}, journal = {Transactions of the Institute of British Geographers}, keywords = {event Heidegger materiality politics of ontology p}, number = {4}, pages = {627--638}, title = {{Heidegger, event and the ontological politics of the site}}, url = 
{http://dx.doi.org/10.1111/j.1475-5661.2012.00550.x}, volume = {38}, year = {2013} } @Misc{Kak2020, author = {Kak, Amba and Richardson, Rashida and Dobbe, Roel}, title = {{Submission to the European Commission on “White Paper on AI - A European Approach”}}, url = {https://ainowinstitute.org/ai-now-comments-to-eu-whitepaper-on-ai.pdf}, groups = {Ethical AI}, institution = {AI Now Institute}, pages = {23}, year = {2020}, } @article{Kaplan2006, abstract = {none}, author = {Kaplan, David}, doi = {10.5195/jffp.2006.182}, journal = {Journal of French and Francophone Philosophy}, number = {1/2}, pages = {42--56}, title = {{Paul Ricoeur and the Philosophy of Technology}}, volume = {16}, year = {2006} } @article{Kaplan2014, abstract = {Google's highly successful business model is based on selling words that appear in search queries. Organizing several million auctions per minute, the company has created the first global linguistic market and demonstrated that linguistic capitalism is a lucrative business domain, one in which billions of dollars can be realized per year. Google's services need to be interpreted from this perspective. This article argues that linguistic capitalism implies not an economy of attention but an economy of expression. As several million users worldwide daily express themselves through one of Google's interfaces, the texts they produce are systematically mediated by algorithms. In this new context, natural languages could progressively evolve to seamlessly integrate the linguistic biases of algorithms and the economical constraints of the global linguistic economy. 
Summer 2014}, author = {Kaplan, Frederic}, doi = {10.1525/rep.2014.127.1.57}, journal = {Representations}, number = {1}, pages = {57--63}, title = {{Linguistic Capitalism and Algorithmic Mediation}}, volume = {127}, year = {2014} } @inproceedings{Karras2019, annote = {(accessed: 2019-03-21)}, author = {Karras, Tero and Laine, Samuli and Aila, Timo}, booktitle = {IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)}, doi = {10.1109/CVPR.2019.00453}, pages = {4396--4405}, title = {{A Style-Based Generator Architecture for Generative Adversarial Networks}}, year = {2019} } @book{Kates2005a, address = {Evanston}, author = {Kates, Joshua}, publisher = {Northwestern University Press}, title = {{Essential History: Jacques Derrida and the Development of Deconstruction}}, year = {2005} } @Article{KLEINBERG2012, author = {Kleinberg, Ethan}, title = {{Back to Where We've Never Been: Heidegger, Levinas, and Derrida on Tradition and History}}, doi = {10.1111/j.1468-2303.2012.00650.x}, issn = {0018-2656}, number = {4}, pages = {114--135}, url = {https://doi.org/10.1111/j.1468-2303.2012.00650.x}, volume = {51}, abstract = {This paper will address the topic of 'tradition' by exploring the ways that Martin Heidegger, Emmanuel Levinas, and Jacques Derrida each looked to return to traditional texts in order to overcome a perceived crisis or delimiting fault in the contemporary thought of their respective presents. For Heidegger, this meant a return to the pre-Socratics of 'early Greek thinking.' For Levinas, it entailed a return to the sacred Jewish texts of the Talmud. For Derrida, it was the return to texts that embodied the 'Western metaphysical tradition,' be it by Plato, Descartes, Rousseau, or Marx. I then want to ask whether these reflections can be turned so as to shed light on three resilient trends in the practice of history that I will label positivist, speculative or teleological, and constructivist. 
By correlating the ways that Heidegger, Levinas, and Derrida utilize and employ 'tradition' with the historical trends of positivism, speculative/teleological history, and constructivism, I hope to produce an engagement between theorists whose concerns implicate history even though they may not be explicitly historical, and historians who may not realize the ways that their work coincides with the claims of these theorists.}, annote = {doi: 10.1111/j.1468-2303.2012.00650.x}, journal = {History and Theory}, keywords = {Emmanuel Levinas, Franz Kafka, Jacques Derrida, Karen Barad, Martin Heidegger, Reinhart Koselleck, constructivism, historical theory, positivism, speculative history}, month = {dec}, publisher = {Wiley/Blackwell (10.1111)}, year = {2012}, } @article{Knaap2007, author = {Knaap, L. and Grootjen, F. A.}, publisher = {[Sl: sn]}, title = {{Author identification in chatlogs using formal concept analysis}}, year = {2007} } @online{Knight2018, annote = {(accessed: 2019-03-23)}, author = {Knight, Will}, title = {{The Defense Department has produced the first tools for catching deepfakes}}, url = {https://thispersondoesnotexist.com/}, year = {2018} } @incollection{Krell2015, address = {Albany, NY}, author = {Krell, David Farrell}, booktitle = {Phantoms of the Other: Four Generations of Derrida's Geschlecht}, pages = {107--130}, publisher = {State University of New York Press}, title = {{Geschlecht IV: Heidegger's Philopolemological Ear}}, year = {2015} } @article{Kuznetsov2018, archivePrefix = {arXiv}, arxivId = {1611.02646}, author = {Kuznetsov, S. O. 
and Makhalova, T. P.}, eprint = {1611.02646}, journal = {Information Sciences}, pages = {202--219}, title = {{On interestingness measures of formal concepts}}, url = {http://arxiv.org/abs/1611.02646}, volume = {442}, year = {2018} } @proceedings{Kuznetsov2014, editor = {Kuznetsov, Sergei and Napoli, Amedeo and Rudolph, Sebastian}, month = {aug}, title = {{Proceedings of the International Workshop "What can FCA do for Artificial Intelligence?" (FCA4AI 2014)}}, year = {2014} } @book{Lacoue-Labarthe1989, address = {Cambridge}, author = {Lacoue-Labarthe, Philippe}, publisher = {Harvard University Press}, title = {{Typography: Mimesis, Philosophy, Politics}}, year = {1989} } @article{Ladyman2005, author = {Ladyman, James}, doi = {10.1007/s11016-005-3431-7}, journal = {Metascience}, number = {3}, pages = {331--361}, title = {{Wouldn't it be Lovely: Explanation and Scientific Realism}}, volume = {14}, year = {2005} } @incollection{Lash1996a, address = {Oxford}, author = {Lash, Scott}, booktitle = {Detraditionalization: critical reflections on authority and identity}, editor = {Heelas, Paul and Lash, Scott and Morris, Paul}, pages = {250--274}, publisher = {Blackwell Publishers}, title = {{Tradition and the Limits of Difference}}, year = {1996} } @book{Lawn2006, address = {London and New York}, author = {Lawn, Chris}, publisher = {Continuum}, title = {{Gadamer. 
A Guide for the Perplexed}}, year = {2006} } @book{Leibniz2007, author = {Leibniz, Gottfried Wilhelm}, publisher = {BiblioBazaar}, title = {{Theodicy: Essays on the Goodness of God, the Freedom of Man and the Origin of Evil}}, year = {2007} } @book{Lemaire2011, address = {Amsterdam}, author = {Lemaire, Ton}, publisher = {Ambo}, title = {{Filosofie van het landschap}}, year = {2011} } @book{Lemaire2011a, address = {Amsterdam}, author = {Lemaire, Ton}, publisher = {Ambo}, title = {{De val van Prometheus: Over de keerzijden van de vooruitgang}}, year = {2011} } @article{Letiche2011, author = {Letiche, Hugo and Kuiper, Chris and Houweling, Loes}, journal = {Culture and Organization}, number = {5}, pages = {389--401}, title = {{Live metaphor: Hardiness and postcards}}, volume = {17}, year = {2011} } @incollection{Lin2015, abstract = {If motor vehicles are to be truly autonomous and able to operate responsibly on our roads, they will need to replicate – or do better than – the human decision-making process. But some decisions are more than just a mechanical application of traffic laws and plotting a safe path. 
They seem to require a sense of ethics, and this is a notoriously difficult capability to reduce into algorithms for a computer to follow.}, address = {Berlin, Heidelberg}, author = {Lin, Patrick}, booktitle = {Autonomous Driving: Technical, Legal and Social Aspects}, doi = {10.1007/978-3-662-45854-9_4}, editor = {Maurer, Markus and Gerdes, J Christian and Lenz, Barbara and Winner, Hermann}, isbn = {978-3-662-45854-9}, pages = {69--85}, publisher = {Springer Berlin Heidelberg}, title = {{Why Ethics Matters for Autonomous Cars}}, url = {https://doi.org/10.1007/978-3-662-45854-9{\_}4}, year = {2015} } @inproceedings{Lindig2000, author = {Lindig, C.}, booktitle = {Working with Conceptual Structures - Contributions to ICCS 2000}, month = {feb}, pages = {152--161}, publisher = {Shaker Verlag}, title = {{Fast Concept Analysis}}, year = {2000} } @incollection{Lipton2004a, address = {London}, author = {Lipton, Peter}, booktitle = {Inference to the Best Explanation}, edition = {2nd ed.}, isbn = {0415242029 9780415242028 0415242037 9780415242035}, language = {Engels}, pages = {103--120}, publisher = {Routledge}, title = {{Bayesian abduction}}, year = {2004} } @incollection{Lipton2001, address = {Dordrecht}, author = {Lipton, Peter}, booktitle = {Explanation: Theoretical Approaches and Applications}, editor = {Hon, G. 
and Rakover, S.}, pages = {92--119}, publisher = {Springer}, title = {{Is explanation a guide to inference?}}, year = {2001} } @incollection{Lipton2000, address = {Oxford, UK}, author = {Lipton, Peter}, booktitle = {A Companion to the Philosophy of Science}, editor = {Newton-Smith, W.H.}, isbn = {0631170243 9780631170242 0631230203 9780631230205}, language = {Engels}, pages = {184--194}, publisher = {Blackwell}, title = {{Inference to the best explanation}}, year = {2000} } @book{Lipton2004, address = {London}, author = {Lipton, Peter}, booktitle = {International library of philosophy}, edition = {2nd ed.}, isbn = {0415242029 9780415242028 0415242037 9780415242035}, language = {Engels}, pages = {219}, publisher = {Routledge}, title = {{Inference to the Best Explanation}}, url = {http://catdir.loc.gov/catdir/enhancements/fy0650/2003018554-d.html http://catdir.loc.gov/catdir/toc/ecip048/2003018554.html}, year = {2004} } @article{Lockton2005, abstract = {Radio Frequency Identification, or RFID, is a technology which has been receiving considerable attention as of late. It is a fairly simple technology involving radio wave communication between a microchip and an electronic reader, in which an identification number stored on the chip is transmitted and processed; it can frequently be found in inventory tracking and access control systems. In this paper, we examine the current uses of RFID, as well as identifying potential future uses of the technology, including item-level tagging, human implants and RFID-chipped passports, while discussing the impacts that each of these uses could potentially have on personal privacy. Possible guidelines for RFID's use, including Fair Information Principles and the RFID Bill of Rights are then presented, as well as technological solutions to personal privacy problems, such as tag killing and blocker tags, as well as simple aluminum foil shields for passports. 
It is then claimed, though, that guidelines and technological solutions will be ineffective for privacy protection, and that legislation will be necessary to guard against the threats posed by the RFID. Finally, we present what we believe to be the most important legislative points that must be addressed.}, author = {Lockton, Vance and Rosenberg, Richard S}, doi = {10.1007/s10676-006-0014-2}, issn = {1572-8439}, journal = {Ethics and Information Technology}, number = {4}, pages = {221--231}, title = {{RFID: The Next Serious Threat to Privacy}}, url = {https://doi.org/10.1007/s10676-006-0014-2}, volume = {7}, year = {2005} } @article{Lomakina2014, abstract = {High performance computing (HPC) systems currently integrate several resources such as multi-cores (CPUs), graphic$\backslash$r$\backslash$nprocessing units (GPUs) and reconfigurable logic devices, like field programmable gate arrays (FPGAs). The role of the latter two has$\backslash$r$\backslash$ntraditionally being confined to act as secondary accelerators rather than as main execution units. We perform a deep survey around$\backslash$r$\backslash$nstate of the art research and implementation of HPC algorithms; we extract features relevant to each family and list them as key factors$\backslash$r$\backslash$nto obtain higher performance. Due to the broad spectra of the survey we only include the most complete references found. We provide$\backslash$r$\backslash$na general classification of the 13 HPC families with respect to their needs and suitability for hardware implementation. In addition, we$\backslash$r$\backslash$npresent an analysis based on current and future technology availability as well as in particular aspects identified in the survey. Finally$\backslash$r$\backslash$nwe list general guidelines and opportunities to be accounted for in future heterogeneous designs that employ FPGAs for HPC.}, author = {Lomakina, L. S. and Rodionov, V. B. and Surkova, A. 
S.}, doi = {10.1134/s000511791407011x}, issn = {0005-1179}, journal = {Automation and Remote Control}, number = {7}, pages = {1309--1315}, title = {{Hierarchical clustering of text documents}}, volume = {75}, year = {2014} } @book{Loscerbo1981, address = {The Hague}, author = {Loscerbo, John}, publisher = {Martinus Nijhoff Publishers}, title = {{Being and Technology. A study in the philosophy of Martin Heidegger}}, year = {1981} } @article{Luger2017, abstract = {It has been just over 100 years since the birth of Alan Turing and more than 65 years since he published in Mind his seminal paper, Computing Machinery and Intelligence (Turing in Computing machinery and intelligence. Oxford University Press, Oxford, 1950). In the Mind paper, Turing asked a number of questions, including whether computers could ever be said to have the power of “thinking” (“I propose to consider the question, Can computers think?” ...Alan Turing, Computing Machinery and Intelligence, Mind, 1950). Turing also set up a number of criteria—including his imitation game—under which a human could judge whether a computer could be said to be “intelligent”. Turing's paper, as well as his important mathematical and computational insights of the 1930s and 1940s led to his popular acclaim as the “Father of Artificial Intelligence”. In the years since his paper was published, however, no computational system has fully satisfied Turing's challenge. In this paper we focus on a different question, ignored in, but inspired by Turing's work: How might the Artificial Intelligence practitioner implement “intelligence” on a computational device? Over the past 60 years, although the AI community has not produced a general-purpose computational intelligence, it has constructed a large number of important artifacts, as well as taken several philosophical stances able to shed light on the nature and implementation of intelligence. 
This paper contends that the construction of any human artifact includes an implicit epistemic stance. In AI this stance is found in commitments to particular knowledge representations and search strategies that lead to a product's successes as well as its limitations. Finally, we suggest that computational and human intelligence are two different natural kinds, in the philosophical sense, and elaborate on this point in the conclusion.}, author = {Luger, George F and Chakrabarti, Chayan}, doi = {10.1007/s00146-016-0646-7}, issn = {1435-5655}, journal = {AI {\&} SOCIETY}, number = {3}, pages = {321--338}, title = {{From Alan Turing to modern AI: practical solutions and an implicit epistemic stance}}, url = {https://doi.org/10.1007/s00146-016-0646-7}, volume = {32}, year = {2017} } @incollection{Luke1996, address = {Oxford}, author = {Luke, Timothy W}, booktitle = {Detraditionalization: Critical reflections on authority and identity}, editor = {Heelas, Paul and Lash, Scott and Morris, Paul}, pages = {109--133}, publisher = {Blackwell Publishers}, title = {{Identity, meaning and globalization: Detraditionalization in postmodern space-time compression}}, year = {1996} } @InProceedings{Lundberg2017, author = {Lundberg, Scott M. and Lee, Su-In}, booktitle = {Advances in Neural Information Processing Systems}, title = {{A Unified Approach to Interpreting Model Predictions}}, editor = {Guyon, I and Luxburg, U V and Bengio, S and Wallach, H and Fergus, R and Vishwanathan, S and Garnett, R}, publisher = {Curran Associates, Inc.}, url = {https://proceedings.neurips.cc/paper/2017/file/8a20a8621978632d76c43dfd28b67767-Paper.pdf}, volume = {30}, groups = {Ethical AI, XAI}, year = {2017}, } @article{Lupton2016, abstract = {The concept of self-tracking has recently begun to emerge in discussions of ways in which people can record specific features of their lives, often using digital technologies, to monitor, evaluate and optimize themselves.
There is evidence that the personal data that are generated by the digital surveillance of individuals (dataveillance) are now used by a range of actors and agencies in diverse contexts. This paper examines the ``function creep'' of self-tracking by outlining five modes that have emerged: private, communal, pushed, imposed and exploited. The analysis draws upon theoretical perspectives on concepts of selfhood, citizenship, dataveillance and the global digital data economy in discussing the wider socio-cultural implications of the emergence and development of these modes of self-tracking.}, annote = {doi: 10.1080/03085147.2016.1143726}, author = {Lupton, Deborah}, doi = {10.1080/03085147.2016.1143726}, journal = {Economy and Society}, month = jan, number = {1}, pages = {101--122}, publisher = {Routledge}, title = {{The diverse domains of quantified selves: self-tracking modes and dataveillance}}, volume = {45}, year = {2016} } @inproceedings{Maedche2002, author = {Maedche, Alexander and Staab, Steffen}, booktitle = {Knowledge Engineering and Knowledge Management: Ontologies and the Semantic Web}, doi = {10.1007/3-540-45810-7_24}, month = oct, pages = {15--21}, title = {{Measuring Similarity between Ontologies}}, volume = {2473}, year = {2002} } @article{Malle2016, abstract = {Robot ethics encompasses ethical questions about how humans should design, deploy, and treat robots; machine morality encompasses questions about what moral capacities a robot should have and how these capacities could be computationally implemented. Publications on both of these topics have doubled twice in the past 10 years but have often remained separate from one another. In an attempt to better integrate the two, I offer a framework for what a morally competent robot would look like (normally considered machine morality) and discuss a number of ethical questions about the design, use, and treatment of such moral robots in society (normally considered robot ethics).
Instead of searching for a fixed set of criteria of a robot's moral competence I identify the multiple elements that make up human moral competence and probe the possibility of designing robots that have one or more of these human elements, which include: moral vocabulary; a system of norms; moral cognition and affect; moral decision making and action; moral communication. Juxtaposing empirical research, philosophical debates, and computational challenges, this article adopts an optimistic perspective: if robotic design truly commits to building morally competent robots, then those robots could be trustworthy and productive partners, caretakers, educators, and members of the human community. Moral competence does not resolve all ethical concerns over robots in society, but it may be a prerequisite to resolve at least some of them.}, author = {Malle, Bertram F}, doi = {10.1007/s10676-015-9367-8}, issn = {1572-8439}, journal = {Ethics and Information Technology}, number = {4}, pages = {243--256}, title = {{Integrating robot ethics and machine morality: the study and design of moral competence in robots}}, url = {https://doi.org/10.1007/s10676-015-9367-8}, volume = {18}, year = {2016} } @incollection{Malpas2010, address = {Evanston}, author = {Malpas, Jeff}, booktitle = {Consequences of Hermeneutics}, editor = {Malpas, Jeff and Zabala, Santiago}, pages = {261--281}, publisher = {Northwestern University Press}, title = {{The Origin of Understanding: Event, Place, Truth}}, year = {2010} } @incollection{Maly1993, address = {Bloomington}, author = {Maly, Kenneth}, booktitle = {Reading Heidegger}, editor = {Sallis, John}, pages = {221--240}, publisher = {Indiana University Press}, title = {{Reading and Thinking: Heidegger and the Hinting Greeks}}, year = {1993} } @incollection{sep-hermeneutics, annote = {(accessed: 2019-03-27)}, author = {Mantzavinos, C}, booktitle = {The Stanford Encyclopedia of Philosophy}, edition = {Winter 2016}, editor = {Zalta, Edward
N}, publisher = {Metaphysics Research Lab, Stanford University}, title = {{Hermeneutics}}, url = {https://plato.stanford.edu/archives/win2016/entries/hermeneutics/}, year = {2016} } @article{Marks1997, author = {Marks, John}, issn = {1035-0330}, journal = {Social Semiotics}, month = {apr}, number = {2}, pages = {233--246}, title = {{Deleuze and literature: Metaphor and indirect discourse}}, url = {http://dx.doi.org/10.1080/10350339709360383}, volume = {7}, year = {1997} } @article{Martinez2016, abstract = {Correctly perceiving emotions in others is a crucial part of social interactions. We constructed a set of dynamic stimuli to determine the relative contributions of the face and body to the accurate perception of basic emotions. We also manipulated the length of these dynamic stimuli in order to explore how much information is needed to identify emotions. The findings suggest that even a short exposure time of 250 milliseconds provided enough information to correctly identify an emotion above the chance level. Furthermore, we found that recognition patterns from the face alone and the body alone differed as a function of emotion. These findings highlight the role of the body in emotion perception and suggest an advantage for angry bodies, which, in contrast to all other emotions, were comparable to the recognition rates from the face and may be advantageous for perceiving imminent threat from a distance.}, annote = {doi: 10.1080/02699931.2015.1035229}, author = {Martinez, Laura and Falvello, Virginia B and Aviezer, Hillel and Todorov, Alexander}, doi = {10.1080/02699931.2015.1035229}, issn = {0269-9931}, journal = {Cognition and Emotion}, month = {jul}, number = {5}, pages = {939--952}, publisher = {Routledge}, title = {{Contributions of facial expressions and body language to the rapid perception of dynamic emotions}}, url = {https://doi.org/10.1080/02699931.2015.1035229}, volume = {30}, year = {2016} } @incollection{Martino2009, author = {Martino, B. 
and Cantiello, P.}, booktitle = {Intelligent Distributed Computing III}, isbn = {3642032133}, title = {{Automatic ontology extraction with text clustering}}, year = {2009} } @article{Mathews2011, author = {Mathews, Freya}, doi = {10.1177/1086026611425689}, journal = {Organization {\&} Environment}, number = {4}, pages = {364--387}, title = {{Towards a Deeper Philosophy of Biomimicry}}, volume = {24}, year = {2011} } @article{Matthias2004, author = {Matthias, Andreas}, journal = {Ethics and Information Technology}, number = {3}, publisher = {Springer}, title = {{The responsibility gap: Ascribing responsibility for the actions of learning automata}}, volume = {6}, year = {2004} } @incollection{McNeill2010, address = {Evanston}, author = {McNeill, William}, booktitle = {Consequences of Hermeneutics}, editor = {Malpas, Jeff and Zabala, Santiago}, pages = {98--120}, publisher = {Northwestern University Press}, title = {{The Hermeneutics of Everydayness}}, year = {2010} } @Article{Medina2012, author = {Medina, J.}, title = {{Relating attribute reduction in formal, object-oriented and property-oriented concept lattices}}, doi = {10.1016/j.camwa.2012.03.087}, issn = {0898-1221}, number = {6}, pages = {1992--2002}, url = {http://www.sciencedirect.com/science/article/pii/S0898122112002921}, volume = {64}, abstract = {Attribute reduction is an important step in reducing computational complexity in order to extract information from relational systems. Three of these systems are the formal, object-oriented and property oriented concept lattices. Attribute reduction in the last two concept lattices has recently been studied. The relation with the first concept lattice is very important since two important, independent tools to extract information from databases–the formal concept analysis and rough set theory–will be related. This paper studies attribute reduction in these three frameworks.
The main results are that the classification of each attribute into absolutely necessary, relatively necessary and absolutely unnecessary attributes is independent of the framework considered and that an attribute reduct in one of these relational systems is also an attribute reduct in the others.}, journal = {Computers {\&} Mathematics with Applications}, keywords = {Attribute reduction, Formal concept analysis, Galois connection, Property-oriented and object-oriented concept latt}, year = {2012}, } @article{Melo2013, author = {Melo, Cassio and Le-Grand, B{\'{e}}n{\'{e}}dicte and Aufaure, Marie-Aude}, journal = {International Journal of Intelligent Information Technologies (IJIIT)}, number = {4}, pages = {16--34}, publisher = {IGI Global}, title = {{Browsing large concept lattices through tree extraction and reduction methods}}, volume = {9}, year = {2013} } @misc{Melville2009, author = {Melville, Herman}, isbn = {978-0-06-192102-5}, publisher = {HarperCollins Publishers}, title = {{Bartleby, The Scrivener: A Story of Wall-Street}}, year = {2009} } @Online{MicrosoftAIPrinciples, author = {Microsoft}, title = {{Microsoft AI principles}}, url = {https://www.microsoft.com/en-us/ai/responsible-ai?activetab=pivot1:primaryr6}, urldate = {2021-03-11}, groups = {Ethical AI}, } @article{Milem1997, author = {Milem, Bruce}, issn = {0031-8256}, journal = {Philosophy Today TA -}, pages = {180--185}, title = {{The Impossible Has Already Occurred Derrida and Negative Theology TT -}}, volume = {41}, year = {1997} } @incollection{Miller2017, annote = {I did not add a publishing city because it's not clear.}, author = {Miller, J. 
Hillis}, booktitle = {Going Postcard: The Letter(s) of Jacques Derrida}, editor = {{Van Gerven Oei}, V.W.J.}, pages = {11--41}, publisher = {Punctum Books}, title = {{Glossing the Gloss of “Envois” in The Post Card}}, url = {https://punctumbooks.com/titles/going-postcard-the-letters-of-jacques-derrida/}, year = {2017} } @article{Miller2006, author = {Miller, J. Hillis}, journal = {MLN}, number = {4}, pages = {893--910}, publisher = {Johns Hopkins University Press}, title = {{Derrida's Destinerrance}}, url = {http://www.jstor.org/stable/4490747}, volume = {121}, year = {2006} } @incollection{Mitchell2004a, address = {New York}, author = {Mitchell, Katharyne}, booktitle = {The End of Tradition}, editor = {AlSayyad, Nezar}, pages = {45--62}, publisher = {Routledge}, title = {{The tradition of the end: Global capitalism and the contemporary spaces of apocalypse}}, year = {2004} } @Misc{Mitchell2018, author = {Mitchell, Kevin J.}, title = {{Does Neuroscience Leave Room for Free Will?}}, doi = {10.1016/j.tins.2018.05.008}, abstract = {A reductively mechanistic approach to neuroscience suggests that low-level physical laws determine our actions and that mental states are epiphenomena. In this scheme there seems to be little room for free will or genuine agency. 
I argue here that physical indeterminacy provides room for the information entailed in patterns of neuronal firing – the mental content of beliefs, goals, and intentions – to have real causal power in decision-making.}, booktitle = {Trends in Neurosciences}, issn = {1878108X}, keywords = {agency, compatibilism, determinism, emergence, meaning, reductionism}, year = {2018}, } @book{Moran2012, address = {Cambridge}, author = {Moran, Dermot}, publisher = {Cambridge University Press}, title = {{Husserl's Crisis of the European Sciences and Transcendental Phenomenology: An Introduction}}, year = {2012} } @book{Morozov2011, address = {New York}, author = {Morozov, Evgeny}, publisher = {PublicAffairs}, title = {{The Net Delusion: The Dark Side of Internet Freedom}}, year = {2011} } @incollection{Morris1996, address = {Oxford}, author = {Morris, Paul}, booktitle = {Detraditionalization: Critical reflections on authority and identity}, editor = {Heelas, Paul and Lash, Scott and Morris, Paul}, pages = {222--249}, publisher = {Blackwell Publishers}, title = {{Community beyond tradition}}, year = {1996} } @article{Morton2012, author = {Morton, Timothy}, doi = {10.1111/issj.12014}, issn = {1468-2451}, journal = {International Social Science Journal}, number = {207-208}, pages = {39--51}, title = {{From modernity to the Anthropocene: ecology and art in the age of asymmetry}}, url = {http://dx.doi.org/10.1111/issj.12014}, volume = {63}, year = {2012} } @book{Moyal-Sharrock2004, address = {Basingstoke}, author = {Moyal-Sharrock, Dani{\`{e}}le}, publisher = {Palgrave Macmillan}, title = {{Understanding Wittgenstein's On Certainty}}, year = {2004} } @inproceedings{Mueller2008, author = {Mueller, Shane T and Minnery, Brandon S}, booktitle = {AAAI Fall Symposium: Biologically Inspired Cognitive Architectures}, title = {{Adapting the Turing Test for Embodied Neurocognitive Evaluation of Biologically-Inspired Cognitive Agents}}, year = {2008} } @book{Myers, address = {London :}, author 
= {Myers, William}, isbn = {9780500516270 (gebonden) 0500516278 (gebonden)}, language = {Engels}, publisher = {Thames {\&} Hudson}, title = {{Bio design : nature, science, creativity}} } @incollection{Nancy1990, address = {Albany}, author = {Nancy, Jean-Luc}, booktitle = {Transforming the Hermeneutic Context: From Nietzsche to Nancy}, editor = {Ormiston, Gayle L. and Schrift, Alan D.}, pages = {211--261}, publisher = {State University of New York Press}, title = {{Sharing Voices}}, year = {1990} } @book{Nancy1982, address = {Paris}, author = {Nancy, Jean-Luc}, publisher = {{\'{E}}ditions Galil{\'{e}}e}, title = {{Le partage des voix}}, year = {1982} } @incollection{Nancy1993, address = {Stanford}, author = {Nancy, Jean-Luc}, booktitle = {The Birth to Presence}, editor = {Hamacher, Werner and Wellbery, David E.}, pages = {36--48}, publisher = {Stanford University Press}, title = {{Abandoned Being}}, year = {1993} } @article{Nancy1993a, author = {Nancy, Jean-Luc and Kamuf, Peggy}, issn = {02648334, 17500176}, journal = {Paragraph}, number = {2}, pages = {108--110}, title = {{'You ask me what it means today . . .' 
An epigraph for "Paragraph"}}, volume = {16}, year = {1993} } @article{Nealon1993, author = {Nealon, Jeffrey T}, issn = {0190-3659}, journal = {boundary 2}, number = {1}, pages = {221--241}, publisher = {JSTOR}, title = {{Thinking$\backslash$Writing the Postmodern: Representation, End, Ground, Sending}}, volume = {20}, year = {1993} } @misc{Newcomb2012, author = {Newcomb, Doug}, title = {{You won't need a driver's license by 2040}}, url = {https://edition.cnn.com/2012/09/18/tech/innovation/ieee-2040-cars}, urldate = {2019-10-31}, year = {2012} } @incollection{Nordberg-Schulz1991, address = {Nijmegen}, author = {Nordberg-Schulz, Christian}, booktitle = {Wonen: Architectuur in het denken van Martin Heidegger}, editor = {{De Visscher}, Jacques and {De Saeger}, Raf}, pages = {9--27}, publisher = {SUN}, title = {{Architectuur als vergaring en verlijfelijking}}, year = {1991} } @article{Nyholm2018, abstract = {Abstract Self-driving cars hold out the promise of being much safer than regular cars. Yet they cannot be 100{\%} safe. Accordingly, they need to be programmed for how to deal with crash scenarios. Should cars be programmed to always prioritize their owners, to minimize harm, or to respond to crashes on the basis of some other type of principle? The article first discusses whether everyone should have the same ?ethics settings.? Next, the oft-made analogy with the trolley problem is examined. Then follows an assessment of recent empirical work on lay-people's attitudes about crash algorithms relevant to the ethical issue of crash optimization. Finally, the article discusses what traditional ethical theories such as utilitarianism, Kantianism, virtue ethics, and contractualism imply about how cars should handle crash scenarios. 
The aim of the article is to provide an overview of the existing literature on these topics and to assess how far the discussion has gotten so far.}, annote = {doi: 10.1111/phc3.12507}, author = {Nyholm, Sven}, doi = {10.1111/phc3.12507}, issn = {1747-9991}, journal = {Philosophy Compass}, month = {jul}, number = {7}, publisher = {John Wiley {\&} Sons, Ltd (10.1111)}, title = {{The ethics of crashes with self-driving cars: A roadmap, I}}, url = {https://doi.org/10.1111/phc3.12507}, volume = {13}, year = {2018} } @article{Nyholm2016a, abstract = {Self-driving cars hold out the promise of being safer than manually driven cars. Yet they cannot be a 100 {\%} safe. Collisions are sometimes unavoidable. So self-driving cars need to be programmed for how they should respond to scenarios where collisions are highly likely or unavoidable. The accident-scenarios self-driving cars might face have recently been likened to the key examples and dilemmas associated with the trolley problem. In this article, we critically examine this tempting analogy. We identify three important ways in which the ethics of accident-algorithms for self-driving cars and the philosophy of the trolley problem differ from each other. These concern: (i) the basic decision-making situation faced by those who decide how self-driving cars should be programmed to deal with accidents; (ii) moral and legal responsibility; and (iii) decision-making in the face of risks and uncertainty. 
In discussing these three areas of disanalogy, we isolate and identify a number of basic issues and complexities that arise within the ethics of the programming of self-driving cars.}, author = {Nyholm, Sven and Smids, Jilles}, doi = {10.1007/s10677-016-9745-2}, issn = {1572-8447}, journal = {Ethical Theory and Moral Practice}, number = {5}, pages = {1275--1289}, title = {{The Ethics of Accident-Algorithms for Self-Driving Cars: an Applied Trolley Problem?}}, url = {https://doi.org/10.1007/s10677-016-9745-2}, volume = {19}, year = {2016} } @book{Oger2005, address = {Kapellen}, author = {Oger, Erik}, publisher = {Uitgeverij Pelckmans}, title = {{Derrida. Een inleiding}}, year = {2005} } @book{Olesen2013, address = {New York}, author = {Olesen, S{\o}ren Gosvig}, publisher = {Palgrave Macmillan}, title = {{Transcendental History}}, translator = {Possen, David D.}, year = {2013} } @online{OpenAI2019, author = {OpenAI}, title = {{Better Language Models and Their Implications}}, url = {https://openai.com/blog/better-language-models}, urldate = {2019-12-15}, year = {2019} } @online{Paez2019, annote = {(accessed: 2019-03-21)}, author = {Paez, Danny}, title = {{'This Person Does Not Exist' Creator Reveals His Site's Creepy Origin Story}}, url = {https://www.inverse.com/article/53414-this-person-does-not-exist-creator-interview}, year = {2019} } @incollection{Palmer2010, address = {Evanston}, author = {Palmer, Richard E}, booktitle = {Consequences of Hermeneutics}, editor = {Malpas, Jeff and Zabala, Santiago}, pages = {121--131}, publisher = {Northwestern University Press}, title = {{Two Contrasting Heideggerian Elements in Gadamer's Philosophical Hermeneutics}}, year = {2010} } @book{Palmer1969, address = {Evanston}, author = {Palmer, Richard E}, publisher = {Northwestern University Press}, title = {{Hermeneutics.
Interpretation Theory in Schleiermacher, Dilthey, Heidegger, and Gadamer}}, year = {1969} } @Article{Paukkeri2012, author = {Paukkeri, Mari-Sanna and Garc{\'{i}}a-Plaza, Alberto P{\'{e}}rez and Fresno, V{\'{i}}ctor and Unanue, Raquel Mart{\'{i}}nez and Honkela, Timo}, title = {{Learning a taxonomy from a set of text documents}}, doi = {https://doi.org/10.1016/j.asoc.2011.11.009}, issn = {1568-4946}, number = {3}, pages = {1138--1148}, url = {http://www.sciencedirect.com/science/article/pii/S1568494611004340}, volume = {12}, abstract = {We present a methodology for learning a taxonomy from a set of text documents that each describes one concept. The taxonomy is obtained by clustering the concept definition documents with a hierarchical approach to the Self-Organizing Map. In this study, we compare three different feature extraction approaches with varying degree of language independence. The feature extraction schemes include fuzzy logic-based feature weighting and selection, statistical keyphrase extraction, and the traditional tf-idf weighting scheme. The experiments are conducted for English, Finnish, and Spanish. The results show that while the rule-based fuzzy logic systems have an advantage in automatic taxonomy learning, taxonomies can also be constructed with tolerable results using statistical methods without domain- or style-specific knowledge.}, journal = {Applied Soft Computing}, keywords = {Document clustering, Fuzzy logic, Keyphrase extraction, Knowledge representation, Multilinguality, Self-Organizing Map, Taxonomy learning}, year = {2012}, } @book{Pawlyn2011, address = {London}, author = {Pawlyn, Michael}, publisher = {Riba Publishing}, title = {{Biomimicry in Architecture}}, year = {2011} } @article{scikit-learn, author = {Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. 
and Perrot, M. and Duchesnay, E.}, journal = {Journal of Machine Learning Research}, pages = {2825--2830}, title = {{Scikit-learn: Machine Learning in {P}ython}}, volume = {12}, year = {2011} } @article{Peters2011, author = {Peters, Terri}, doi = {10.1002/ad.1318}, issn = {1554-2769}, journal = {Architectural Design}, keywords = {Biomimicry Guild, Biology to Design and Challenge}, number = {6}, pages = {44--47}, title = {{Nature as Measure: The Biomimicry Guild}}, url = {http://dx.doi.org/10.1002/ad.1318}, volume = {81}, year = {2011} } @Online{Pichai2018, author = {Pichai, Sundar}, title = {{AI at Google: our principles}}, url = {https://www.blog.google/technology/ai/ai-principles/}, urldate = {2021-03-11}, booktitle = {The Keyword}, groups = {Ethical AI}, year = {2018}, } @incollection{Pierson2019, address = {Amsterdam}, author = {Pierson, Jo and {Van Zeeland}, Ine}, booktitle = {The Handbook of Privacy Studies}, chapter = {9}, doi = {10.2307/j.ctvcmxpmp.20}, editor = {{De Groot}, Aviva and {Van der Sloot}, Bart}, pages = {355--382}, publisher = {Amsterdam University Press}, title = {{Privacy from a Media Studies Perspective}}, year = {2019} } @misc{Plato2010, address = {Amsterdam}, author = {Plato}, booktitle = {Platoon Verzameld Werk Deel 1}, publisher = {Stichting Ars Floreat}, title = {{Symposion}}, year = {2010} } @misc{Plato2011, address = {Amsterdam}, author = {Plato}, booktitle = {Platoon Verzameld Werk - deel 4}, publisher = {Stichting Ars Floreat}, title = {{Ioon}}, year = {2011} } @incollection{Poston2014, address = {Basingstoke}, author = {Poston, Ted}, booktitle = {Reason and Explanation: a defense of explanatory coherentism}, pages = {149--181}, publisher = {Palgrave Macmillan}, title = {{Bayesian Explanationism}}, year = {2014} } @book{Preece2015, address = {Chichester}, author = {Preece, Jenny and Sharp, Helen and Rogers, Yvonne}, edition = {4th}, publisher = {John Wiley {\&} Sons Ltd}, title = {{Interaction Design: beyond human-computer
interaction}}, year = {2015} } @incollection{Protevi2003, address = {London}, author = {Protevi, John}, booktitle = {Between Deleuze {\&} Derrida}, editor = {Patton, Paul and Protevi, John}, pages = {183--194}, publisher = {Continuum}, title = {{Love}}, year = {2003} } @article{Prozorov2010, author = {Prozorov, Sergei}, journal = {Philosophy and Social Criticism}, number = {9}, pages = {1053--1073}, title = {{Why Giorgio Agamben is an optimist}}, volume = {36}, year = {2010} } @incollection{Psillos2004, address = {Dodrecht}, author = {Psillos, Stathis}, booktitle = {Induction and Deduction in the Sciences}, doi = {10.1007/978-1-4020-2196-1_6}, editor = {Stadler, F.}, pages = {83--91}, publisher = {Springer}, title = {{Inference to the Best Explanation and Bayesianism}}, year = {2004} } @misc{Ricoeur2007, address = {Evanston}, author = {Ricoeur, Paul}, booktitle = {Husserl: An Analysis of His Phenomenology}, editor = {Embree, Lester E}, pages = {143--174}, publisher = {Northwestern University Press}, title = {{Husserl and the Sense of History}}, year = {2007} } @misc{Ricoeur1978, address = {London}, author = {Ricoeur, Paul}, editor = {McLaughlin, K and Costello, J}, isbn = {9780710093295}, publisher = {Routledge}, title = {{The Rule of Metaphor: Multi-disciplinary Studies of the Creation of Meaning in Language}}, url = {https://books.google.nl/books?id=05YOAAAAQAAJ}, year = {1978} } @incollection{Risser2010, address = {Evanston}, author = {Risser, James}, booktitle = {Consequences of Hermeneutics}, editor = {Malpas, Jeff and Zabala, Santiago}, pages = {5--24}, publisher = {Northwestern University Press}, title = {{Gadamer's Hidden Doctrine: The Simplicity and Humility of Philosophy}}, year = {2010} } @article{Roberts2012, author = {Roberts, Ben}, journal = {New Formations}, number = {77}, pages = {8--20}, title = {{Technics, Individuation and Tertiary Memory: Bernard Stiegler's Challenge to Media Theory}}, volume = {77}, year = {2012} } @misc{Roberts2019, author = 
{Roberts, Jeff John}, booktitle = {Fortune}, title = {{Airport and Payment Facial Recognition Systems Fooled by Masks and Photos, Raising Security Concerns}}, url = {https://fortune.com/2019/12/12/airport-bank-facial-recognition-systems-fooled/}, urldate = {2020-07-01}, year = {2019} } @article{Roff2016, author = {Roff, Heather and Moyes, Richard}, journal = {Briefing paper for delegates at the Convention on Certain Conventional Weapons (CCW) Meeting of Experts on Lethal Autonomous Weapons Systems (LAWS)}, title = {{Meaningful Human Control, Artificial Intelligence and Autonomous Weapons}}, url = {http://www.article36.org/wp-content/uploads/2016/04/MHC-AI-and-AWS-FINAL.pdf}, year = {2016} } @incollection{Rorty1993, address = {Cambridge}, author = {Rorty, Richard}, booktitle = {The Cambridge Companion to Heidegger}, editor = {Guignon, Charles}, pages = {337--357}, publisher = {Cambridge University Press}, title = {{Wittgenstein, Heidegger, and the reification of language}}, year = {1993} } @book{Rowland2014, address = {New York}, author = {Rowland, Antony}, publisher = {Routledge}, title = {{Poetry as Testimony: Witnessing and Memory in Twentieth-century Poems}}, year = {2014} } @incollection{Roy2004, address = {New York}, author = {Roy, Ananya}, booktitle = {The End of Tradition?}, editor = {AlSayyad, Nezar}, pages = {62--86}, publisher = {Routledge}, title = {{Nostalgias of the modern}}, year = {2004} } @article{Russo2018, abstract = {Technologies have always been bearers of profound changes in science, society, and any other aspect of life. The latest technological revolution—the digital revolution—is no exception in this respect. This paper presents the revolution brought about by digital technologies through the lenses of a specific approach: the philosophy of information. It is argued that the adoption of an informational approach helps avoiding utopian or dystopian approaches to (digital) technology, both expressions of technological determinism.
Such an approach provides a conceptual framework able to address the ethical challenges that digital technologies pose, without getting stuck in the dichotomous thinking of technological determinism, and to bring together ethics, ontology, and epistemology into a coherent account.}, author = {Russo, Federica}, doi = {10.1007/s13347-018-0326-2}, journal = {Philosophy {\&} Technology}, number = {4}, pages = {655--667}, title = {{Digital Technologies, Ethical Questions, and the Need of an Informational Framework}}, volume = {31}, year = {2018} } @article{Ryan2019, abstract = {Self-driving vehicles (SDVs) offer great potential to improve efficiency on roads, reduce traffic accidents, increase productivity, and minimise our environmental impact in the process. However, they have also seen resistance from different groups claiming that they are unsafe, pose a risk of being hacked, will threaten jobs, and increase environmental pollution from increased driving as a result of their convenience. In order to reap the benefits of SDVs, while avoiding some of the many pitfalls, it is important to effectively determine what challenges we will face in the future and what steps need to be taken now to avoid them. The approach taken in this paper is the construction of a likely future (the year 2025), through the process of a policy scenario methodology, if we continue certain trajectories over the coming years. The purpose of this is to articulate issues we currently face and the construction of a foresight analysis of how these may develop in the next 6 years. It will highlight many of the key facilitators and inhibitors behind this change and the societal impacts caused as a result. This paper will synthesise the wide range of ethical, legal, social and economic impacts that may result from SDV use and implementation by 2025, such as issues of autonomy, privacy, liability, security, data protection, and safety. 
It will conclude with providing steps that we need to take to avoid these pitfalls, while ensuring we reap the benefits that SDVs bring.}, author = {Ryan, Mark}, doi = {10.1007/s11948-019-00130-2}, issn = {1471-5546}, journal = {Science and Engineering Ethics}, title = {{The Future of Transportation: Ethical, Legal, Social and Economic Impacts of Self-driving Vehicles in the Year 2025}}, url = {https://doi.org/10.1007/s11948-019-00130-2}, year = {2019} } @article{Sadowski2019, abstract = {The collection and circulation of data is now a central element of increasingly more sectors of contemporary capitalism. This article analyses data as a form of capital that is distinct from, but has its roots in, economic capital. Data collection is driven by the perpetual cycle of capital accumulation, which in turn drives capital to construct and rely upon a universe in which everything is made of data. The imperative to capture all data, from all sources, by any means possible influences many key decisions about business models, political governance, and technological development. This article argues that many common practices of data accumulation should actually be understood in terms of data extraction, wherein data is taken with little regard for consent and compensation. 
By understanding data as a form of capital, we can better analyse the meaning, practices, and implications of datafication as a political economic regime.}, annote = {doi: 10.1177/2053951718820549}, author = {Sadowski, Jathan}, doi = {10.1177/2053951718820549}, issn = {2053-9517}, journal = {Big Data {\&} Society}, month = {jan}, number = {1}, pages = {2053951718820549}, publisher = {SAGE Publications Ltd}, title = {{When data is capital: Datafication, accumulation, and extraction}}, url = {https://doi.org/10.1177/2053951718820549}, volume = {6}, year = {2019} } @misc{SAE2018, author = {SAE}, title = {{Taxonomy and Definitions for Terms Related to Driving Automation Systems for On-Road Motor Vehicles}}, url = {https://www.sae.org/standards/content/j3016{\_}201806/}, urldate = {2019-10-30}, year = {2018} } @incollection{Salmon2001, address = {Dordrecht}, author = {Salmon, Wesley C.}, booktitle = {Explanation: Theoretical Approaches and Applications}, editor = {Hon, Giora and Rakover, Sam S.}, pages = {61--92}, publisher = {Springer}, title = {{Explanation and Confirmation: A Bayesian Critique of Inference to the Best Explanation}}, year = {2001} } @article{Salzberg1997, author = {Salzberg, Steven L}, issn = {1384-5810}, journal = {Data mining and knowledge discovery}, number = {3}, pages = {317--328}, publisher = {Springer}, title = {{On comparing classifiers: Pitfalls to avoid and a recommended approach}}, volume = {1}, year = {1997} } @article{Sankowski1978, author = {Sankowski, Edward}, journal = {Mind}, number = {346}, pages = {256--261}, title = {{Wittgenstein on Self-Knowledge}}, volume = {87}, year = {1978} } @article{SantonideSio2017, abstract = {How should autonomous vehicles (aka self-driving cars) be programmed to behave in the event of an unavoidable accident in which the only choice open is one between causing different damages or losses to different objects or persons? 
This paper addresses this ethical question starting from the normative principles elaborated in the law to regulate difficult choices in other emergency scenarios. In particular, the paper offers a rational reconstruction of some major principles and norms embedded in the Anglo-American jurisprudence and case law on the “doctrine of necessity”; and assesses which, if any, of these principles and norms can be utilized to find reasonable guidelines for solving the ethical issue of the regulation of the programming of autonomous vehicles in emergency situations. The paper covers the following topics: the distinction between “justification” and “excuse”, the legal prohibition of intentional killing outside self-defence, the incommensurability of goods, and the legal constrains to the use of lethal force set by normative positions: obligations, responsibility, rights, and authority. For each of these principles and constrains the possible application to the programming of autonomous vehicles is discussed. 
Based on the analysis, some practical suggestions are offered.}, author = {{Santoni de Sio}, Filippo}, doi = {10.1007/s10677-017-9780-7}, issn = {1572-8447}, journal = {Ethical Theory and Moral Practice}, number = {2}, pages = {411--429}, title = {{Killing by Autonomous Vehicles and the Legal Doctrine of Necessity}}, url = {https://doi.org/10.1007/s10677-017-9780-7}, volume = {20}, year = {2017} } @techreport{SantonideSio2016, abstract = {In the context of the knowledge agenda automated driving (knowledgeagenda.connekt.nl/engels/), Rijkswaterstaat commissioned TU Delft to write a white paper on ethical issues in automated driving to provide a basis for discussion and some recommendations on how to take into account this subject when deploying automated vehicles. In this paper I present, discuss, and offer some recommendations on some major ethical issues presented by the introduction on the public road of automated driving systems (ADS), aka self-driving cars. The recommended methodology is that of Responsible Innovation and Value-Sensitive Design. The concept of “meaningful human control” is introduced and proposed as a basis for a policy approach which prevents morally unacceptable risks for human safety, and anticipates issues of moral and legal responsibility for accidents. The importance of the individual rights to safety, access to mobility and privacy is highlighted too.}, author = {{Santoni de Sio}, Filippo}, institution = {TU Delft Ethics {\&} Philosophy of Technology}, title = {{Ethics and Self-driving Cars: A White Paper on Responsible Innovation in Automated Driving Systems}}, url = {https://repository.tudelft.nl/islandora/object/uuid:851eb5fb-0271-47df-9ab4-b9edb75b58e1?collection=research}, year = {2016} } @misc{SantonideSio2018, abstract = {Debates on lethal autonomous weapon systems have proliferated in the past 5 years. 
Ethical concerns have been voiced about a possible raise in the number of wrongs and crimes in military operations and about the creation of a “responsibility gap” for harms caused by these systems. To address these concerns, the principle of “meaningful human control” has been introduced in the legal–political debate; according to this principle, humans not computers and their algorithms should ultimately remain in control of, and thus morally responsible for, relevant decisions about (lethal) military operations. However, policy-makers and technical designers lack a detailed theory of what “meaningful human control” exactly means. In this paper, we lay the foundation of a philosophical account of meaningful human control, based on the concept of “guidance control” as elaborated in the philosophical debate on free will and moral responsibility. Following the ideals of “Responsible Innovation” and “Value-sensitive Design,” our account of meaningful human control is cast in the form of design requirements. We identify two general necessary conditions to be satisfied for an autonomous system to remain under meaningful human control: first, a “tracking” condition, according to which the system should be able to respond to both the relevant moral reasons of the humans designing and deploying the system and the relevant facts in the environment in which the system operates; second, a “tracing” condition, according to which the system should be designed in such a way as to grant the possibility to always trace back the outcome of its operations to at least one human along the chain of design and operation. 
As we think that meaningful human control can be one of the central notions in ethics of robotics and AI, in the last part of the paper, we start exploring the implications of our account for the design and use of non-military autonomous systems, for instance, self-driving cars.}, author = {{Santoni de Sio}, Filippo and {Van den Hoven}, Jeroen}, journal = {Frontiers in Robotics and AI}, issn = {2296-9144}, pages = {15}, title = {{Meaningful Human Control over Autonomous Systems: A Philosophical Account}}, url = {https://www.frontiersin.org/article/10.3389/frobt.2018.00015}, volume = {5}, year = {2018} } @InProceedings{Sari2016, author = {Sari, Yunita and Stevenson, Mark}, booktitle = {CLEF}, title = {{Exploring Word Embeddings and Character N-Grams for Author Clustering}}, groups = {NLP}, year = {2016}, } @incollection{Sayed2008, abstract = {Ontology learning from text is considered as an appealing and challeging alternative to address the shortcomings of the hand-crafted ontologies. In this paper, we present OLea, a new framework for ontology learning from text. The proposal is a hybrid approach combining the pattern-based and the distributionnal approaches. It addresses key issues in the area of ontology learning: context-dependency, low recall of the pattern-based approach, low precision of the distributionnal approach, and finally ontology evolution. Experiments performed at each stage of the learning process show the advantages and drawbacks of the proposal.}, author = {Sayed, Ahmad El and Hacid, Hakim}, booktitle = {COMPSTAT 2008}, doi = {10.1007/978-3-7908-2084-3_21}, pages = {255--266}, title = {{A Hybrid Approach for Taxonomy Learning from Text}}, year = {2008} } @article{Schneier2020, abstract = {They're mouthpieces for foreign actors, domestic political groups, even the candidates themselves. 
And soon you won't be able to tell they're bots.}, author = {Schneier, Bruce}, journal = {The Atlantic}, title = {{Bots Are Destroying Political Discourse As We Know It}}, url = {https://www.theatlantic.com/technology/archive/2020/01/future-politics-bots-drowning-out-humans/604489/}, year = {2020} } @article{Shariff2017, author = {Shariff, Azim and Bonnefon, Jean-Fran{\c{c}}ois and Rahwan, Iyad}, doi = {10.1038/s41562-017-0202-6}, journal = {Nature Human Behaviour}, month = {sep}, title = {{Psychological roadblocks to the adoption of self-driving vehicles}}, volume = {1}, year = {2017} } @Article{Sharon2017b, author = {Sharon, Tamar}, title = {{Self-Tracking for Health and the Quantified Self: Re-Articulating Autonomy, Solidarity, and Authenticity in an Age of Personalized Healthcare}}, doi = {10.1007/s13347-016-0215-5}, issn = {22105441}, abstract = {Self-tracking devices point to a future in which individuals will be more involved in the management of their health and will generate data that will benefit clinical decision making and research. They have thus attracted enthusiasm from medical and public health professionals as key players in the move toward participatory and personalized healthcare. Critics, however, have begun to articulate a number of broader societal and ethical concerns regarding self-tracking, foregrounding their disciplining, and disempowering effects. This paper has two aims: first, to analyze some of the key promises and concerns that inform this polarized debate. I argue that far from being solely about health outcomes, this debate is very much about fundamental values that are at stake in the move toward personalized healthcare, namely, the values of autonomy, solidarity, and authenticity. The second aim is to provide a framework within which an alternative approach to self-tracking for health can be developed. 
I suggest that a practice-based approach, which studies how values are enacted in specific practices, can open the way for a new set of theoretical questions. In the last part of the paper, I sketch out how this can work by describing various enactments of autonomy, solidarity, and authenticity among self-trackers in the Quantified Self community. These examples show that shifting attention to practices can render visible alternative and sometimes unexpected enactments of values. Insofar as these may challenge both the promises and concerns in the debate on self-tracking for health, they can lay the groundwork for new conceptual interventions in future research.}, journal = {Philosophy and Technology}, keywords = {Digital health, Personalized healthcare, Philosophy of technology, Practice-based approach, Quantified self, Self-tracking, Surveillance}, year = {2017}, } @Misc{Sharon2016a, author = {Sharon, Tamar}, title = {{The googlization of health research: from disruptive innovation to disruptive ethics}}, doi = {10.2217/pme-2016-0057}, abstract = {Consumer-oriented mobile technologies offer new ways of capturing multidimensional health data, and are increasingly seen as facilitators of medical research. This has opened the way for large consumer tech companies, like Apple, Google, Amazon and Facebook, to enter the space of health research, offering new methods for collecting, storing and analyzing health data. While these developments are often portrayed as 'disrupting' research in beneficial ways, they also raise many ethical issues. These can be organized into three clusters: questions concerning the quality of research; privacy/informed consent; and new power asymmetries based on access to data and control over technological infrastructures. 
I argue that this last cluster, insofar as it may affect future research agendas, deserves more critical attention.}, booktitle = {Personalized Medicine}, issn = {1744828X}, keywords = {23andMe, Baseline study, Big data, Data-intensive medicine, Ethical issues, Google genomics, Health apps, Privacy, Researchkit}, year = {2016}, } @article{Sheehan2001, abstract = {The Beitr{\"{a}}ge zur Philosophie mandates a paradigm shift in Heidegger scholarship. In the face of (1) widespread disarray in the current model, the new paradigm (2) abandons “Sein” as a name for die Sache selbst, (3) understands Welt/Lichtung/Da as that which “gives” being, (4) interprets Dasein as apriori openedness rather than as “being-there,” (5) understands the Kehre as the interface of Geworfenheit and Entwurf, not as a shift in Heidegger's thinking, (6) interprets Ereignis as the opening of the Da rather than as “appropriation,” and (7) understands human finitude as what gives all forms of being and all epochs in the history of being. 
The conclusion alludes to the function of Mitdasein (“co-openness”) as die Sache selbst.}, author = {Sheehan, Thomas}, doi = {10.1023/A:1017568025461}, journal = {Continental Philosophy Review}, number = {2}, pages = {183--202}, title = {{A paradigm shift in Heidegger research}}, volume = {34}, year = {2001} } @incollection{Sheehan1985, address = {Albany, NY}, author = {Sheehan, Thomas}, booktitle = {Hermeneutics and Deconstruction}, pages = {201--218}, publisher = {State University of New York Press}, title = {{Derrida and Heidegger}}, year = {1985} } @incollection{Smith2003, address = {London}, author = {Smith, Daniel W}, booktitle = {Between Deleuze {\&} Derrida}, editor = {Patton, Paul and Protevi, John}, pages = {46--66}, publisher = {Continuum}, title = {{Deleuze and Derrida, Immanence and Transcendence}}, year = {2003} } @incollection{Smith2012, address = {Edinburgh}, author = {Smith, Daniel W}, booktitle = {Essays on Deleuze}, pages = {27--42}, publisher = {Edinburgh University Press}, title = {{The Doctrine of Univocity: Deleuze's Ontology of Immanence}}, year = {2012} } @incollection{Smith2012a, address = {Edinburgh}, author = {Smith, Daniel W}, booktitle = {Essays on Deleuze}, pages = {271--286}, publisher = {Edinburgh University Press}, title = {{Deleuze and Derrida, Immanence and Transcendence: Two Directions in Recent French Thought}}, year = {2012} } @book{Sneller1998, address = {Kampen}, author = {Sneller, Rico}, publisher = {Kok Agora}, title = {{Het Woord is schrift geworden. 
Derrida en de negatieve theologie}}, year = {1998} } @Article{Soraker2012, author = {S{\o}raker, Johnny Hartz}, title = {{Virtual Worlds and Their Challenge to Philosophy: Understanding the “Intravirtual” and the “Extravirtual”}}, doi = {10.1111/j.1467-9973.2012.01755.x}, number = {4}, pages = {499--512}, volume = {43}, abstract = {Abstract The Web, in particular real-time interactions in three-dimensional virtual environments (virtual worlds), comes with a set of unique characteristics that leave our traditional frameworks inapplicable. The present article illustrates this by arguing that the notion of ?technology relations,? as put forward by Ihde and Verbeek, becomes inapplicable when it comes to the Internet, and this inapplicability shows why these phenomena require new philosophical frameworks. Against this background, and more constructively, the article proposes a fundamental distinction between ?intravirtual? and ?extravirtual? consequences?a distinction that allows us to understand and conceptualize real-time interactions online more accurately. By relating this distinction to Searle's notion of ?condition of satisfaction,? the article also shows its implications for judging real-time, online interactions in virtual worlds as irrational and/or immoral. 
The ultimate purpose is to illustrate how new philosophical concepts and frameworks can allow us to better account for the unique characteristics of the Internet.}, annote = {doi: 10.1111/j.1467-9973.2012.01755.x}, journal = {Metaphilosophy}, keywords = {Searle, Web3D, philosophy of computing, technology relations, virtual worlds}, publisher = {John Wiley {\&} Sons, Ltd}, year = {2012}, } @article{Spanakis2012, author = {Spanakis, Gerasimos and Siolas, Georgios and Stafylopatis, Andreas}, issn = {0010-4620}, journal = {The Computer Journal}, number = {3}, pages = {299--312}, publisher = {OUP}, title = {{Exploiting Wikipedia knowledge for conceptual hierarchical clustering of documents}}, volume = {55}, year = {2012} } @article{Jones1972, abstract = {The exhaustivity of document descriptions and the specificity of index terms are usually regarded as independent. It is suggested that specificity should be interpreted statistically, as a function of term use rather than of term meaning. The effects on retrieval of variations in term specificity are examined, experiments with three test collections showing in particular that frequently?occurring terms are required for good overall performance. It is argued that terms should be weighted according to collection frequency, so that matches on less frequent, more specific, terms are of greater value than matches on frequent terms. 
Results for the test collections show that considerable improvements in performance are obtained with this very simple procedure.}, annote = {doi: 10.1108/eb026526}, author = {{Sparck Jones}, Karen}, doi = {10.1108/eb026526}, issn = {0022-0418}, journal = {Journal of Documentation}, month = {jan}, number = {1}, pages = {11--21}, publisher = {Emerald}, title = {{A statistical interpretation of term specificity and its application in retrieval}}, url = {https://doi.org/10.1108/eb026526}, volume = {28}, year = {1972} } @article{Spasic2005, author = {Spasic, Irena and Ananiadou, Sophia and McNaught, John and Kumar, Anand}, issn = {1477-4054}, journal = {Briefings in bioinformatics}, number = {3}, pages = {239--251}, publisher = {Henry Stewart Publications}, title = {{Text mining and ontologies in biomedicine: making sense of raw text}}, volume = {6}, year = {2005} } @book{Spiegelberg1994, address = {Dordrecht}, author = {Spiegelberg, Herbert}, publisher = {Kluwer Academic Publishers}, title = {{The Phenomenological Movement}}, year = {1994} } @article{Spivak1984, author = {Spivak, Gayatri Chakravorty}, doi = {10.2307/465049}, journal = {Diacritics}, number = {4}, pages = {19--36}, publisher = {Johns Hopkins University Press}, title = {{Love Me, Love My Ombre, Elle}}, volume = {14}, year = {1984} } @inproceedings{Staab2011, author = {Staab, Steffen}, booktitle = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)}, doi = {10.1007/978-3-642-23291-6_3}, isbn = {9783642232909}, issn = {03029743}, pages = {11--16}, title = {{Ontologies and similarity}}, volume = {6880 LNAI}, year = {2011} } @misc{Staltz2017, abstract = {Before the year 2014, there were many people using Google, Facebook, and Amazon. Today, there are still many people using services from those three tech giants (respectively, GOOG, FB, AMZN). 
Not much has changed, and quite literally the user interface and features on those sites has remained mostly untouched. However, the underlying dynamics of power on the Web have drastically changed, and those three companies are at the center of a fundamental transformation of the Web.}, author = {Staltz, Andr{\'{e}}}, title = {{The Web began dying in 2014, here's how}}, url = {https://staltz.com/the-web-began-dying-in-2014-heres-how.html}, urldate = {2020-01-04}, year = {2017} } @inproceedings{Stamatatos2016b, author = {Stamatatos, E and Tschuggnall, Michael and Verhoeven, B and Daelemans, W and Specht, G and Stein, Benno and Potthast, Martin}, booktitle = {CLEF}, title = {{Clustering by Authorship Within and Across Documents}}, year = {2016} } @article{Stamatatos2009, abstract = {Abstract Authorship attribution supported by statistical or computational methods has a long history starting from the 19th century and is marked by the seminal study of Mosteller and Wallace (1964) on the authorship of the disputed ?Federalist Papers.? During the last decade, this scientific field has been developed substantially, taking advantage of research advances in areas such as machine learning, information retrieval, and natural language processing. The plethora of available electronic texts (e.g., e-mail messages, online forum messages, blogs, source code, etc.) indicates a wide variety of applications of this technology, provided it is able to handle short and noisy text from multiple candidate authors. In this article, a survey of recent advances of the automated approaches to attributing authorship is presented, examining their characteristics for both text representation and text classification. The focus of this survey is on computational requirements and settings rather than on linguistic or literary issues. 
We also discuss evaluation methodologies and criteria for authorship attribution studies and list open questions that will attract future work in this area.}, annote = {doi: 10.1002/asi.21001}, author = {Stamatatos, Efstathios}, doi = {10.1002/asi.21001}, issn = {1532-2882}, journal = {Journal of the American Society for Information Science and Technology}, month = {mar}, number = {3}, pages = {538--556}, publisher = {John Wiley {\&} Sons, Ltd}, title = {{A survey of modern authorship attribution methods}}, url = {https://doi.org/10.1002/asi.21001}, volume = {60}, year = {2009} } @article{Stevenson2009, author = {Stevenson, Frank}, journal = {Concentric}, number = {1}, pages = {77--108}, title = {{Stretching Language to its Limit: Deleuze and the Problem of Poiesis}}, volume = {35}, year = {2009} } @incollection{Stiegler2014, address = {Chichester}, author = {Stiegler, Bernard}, booktitle = {Philosophical Engineering: Toward a Philosophy of the Web}, chapter = {13}, doi = {10.1002/9781118700143.ch13}, editor = {Halpin, Harry and Monnin, Alexandre}, pages = {187--198}, publisher = {Wiley Blackwell}, title = {{Afterword: Web Philosophy}}, year = {2014} } @incollection{Stiegler2013, abstract = {Has today's digital society succeeded in becoming mature? If not, how might a new Enlightenment philosophy and practice for the digital age be constructed that could hope to address this situation? Such a philosophy must take into account the irreducibly ambivalent, ‘pharmacological' character of all technics and therefore all grammatisation and tertiary retention, and would thus be a philosophy not only of lights but of shadows. Grammatisation is the process whereby fluxes or flows are made discrete; tertiary retention is the result of the spatialisation in which grammatisation consists, a process that began thirty thousand years ago. 
The relation between minds is co-ordinated via transindividuation, and transindividuation occurs according to conditions that are overdetermined by the characteristics of grammatisation. Whereas for several thousand years this resulted in the constitution of ‘reading brains', today the conditions of knowledge and transindividuation result in a passage to the ‘digital brain'. For this reason, the attempt to understand the material or hyper-material condition of knowledge must be placed at the heart of a new discipline of ‘digital studies'. The pharmacological question raised by the passage from the reading to the digital brain is that of knowing what of the former must be preserved in the latter, and how this could be achieved. This means developing a ‘general organology' through which the social, neurological and technical organs, and the way these condition the materialisation of thought, can be understood. Integral to such an organology must be consideration of the way in which neurological automatisms are exploited by technological automatisms, an exploitation that is destructive of what Plato called thinking for oneself. 
The task of philosophical engineering today should be to prevent this short-circuit of the psychosomatic and social organological layers, a task that implies the need for a thoroughgoing reinvention of social and educational organisations.}, author = {Stiegler, Bernard}, booktitle = {Digital Enlightenment Yearbook 2013}, doi = {10.3233/978-1-61499-295-0-29}, pages = {29--39}, title = {{Die Aufkl{\"{a}}rung in the Age of Philosophical Engineering}}, year = {2013} } @InProceedings{Strok2010, author = {Strok, Fedor and Neznanov, Alexey}, booktitle = {Proceedings of the 2010 Annual Research Conference of the South African Institute of Computer Scientists and Information Technologists}, title = {{Comparing and Analyzing the Computational Complexity of FCA Algorithms}}, doi = {10.1145/1899503.1899557}, isbn = {978-1-60558-950-3}, pages = {417--420}, publisher = {ACM}, series = {SAICSIT '10}, url = {http://doi.acm.org/10.1145/1899503.1899557}, address = {New York, NY, USA}, keywords = {algorithm, computational complexity, formal concept analysis, implementation}, year = {2010}, } @book{Stroll1994, address = {New York}, author = {Stroll, Avrum}, publisher = {Oxford University Press}, title = {{Moore and Wittgenstein on Certainty}}, year = {1994} } @article{K.2018, author = {Sumangali, K. and {Aswani Kumar}, Ch.}, doi = {10.1007/s12652-018-0831-2}, issn = {1868-5137}, journal = {Journal of Ambient Intelligence and Humanized Computing}, title = {{Concept Lattice Simplification in Formal Concept Analysis Using Attribute Clustering}}, year = {2018} } @book{Syafrullah2001, author = {Syafrullah, Mohammad and Salim, Naomie}, title = {{A Framework for Ontology Learning from Textual Data}}, year = {2001} } @article{Thatcher2016, abstract = {In recent years, much has been written on ``big data'' in both the popular and academic press. After the hubristic declaration of the ``end of theory'' 
more nuanced arguments have emerged, suggesting that increasingly pervasive data collection and quantification may have significant implications for the social sciences, even if the social, scientific, political, and economic agendas behind big data are less new than they are often portrayed. Compared to the boosterish tone of much of its press, academic critiques of big data have been relatively muted, often focusing on the continued importance of more traditional forms of domain knowledge and expertise. Indeed, many academic responses to big data enthusiastically celebrate the availability of new data sources and the potential for new insights and perspectives they may enable. Undermining many of these critiques is a lack of attention to the role of technology in society, particularly with respect to the labor process, the continued extension of labor relations into previously private times and places, and the commoditization of more and more aspects of everyday life. In this article, we parse a variety of big data definitions to argue that it is only when individual datums by the million, billion, or more are linked together algorithmically that ?big data? emerges as a commodity. Such decisions do not occur in a vacuum but as part of an asymmetric power relationship in which individuals are dispossessed of the data they generate in their day-to-day lives. We argue that the asymmetry of this data capture process is a means of capitalist ?accumulation by dispossession? that colonizes and commodifies everyday life in ways previously impossible. Situating the promises of ?big data? within the utopian imaginaries of digital frontierism, we suggest processes of data colonialism are actually unfolding behind these utopic promises. 
Amid private corporate and academic excitement over new forms of data analysis and visualization, situating big data as a form of capitalist expropriation and dispossession stresses the urgent need for critical, theoretical understandings of data and society.}, annote = {doi: 10.1177/0263775816633195}, author = {Thatcher, Jim and O'Sullivan, David and Mahmoudi, Dillon}, doi = {10.1177/0263775816633195}, issn = {0263-7758}, journal = {Environment and Planning D: Society and Space}, month = {mar}, number = {6}, pages = {990--1006}, publisher = {SAGE Publications Ltd STM}, title = {{Data colonialism through accumulation by dispossession: New metaphors for daily data}}, url = {https://doi.org/10.1177/0263775816633195}, volume = {34}, year = {2016} } @Misc{TheMendeleySupportTeam2011, author = {{The Mendeley Support Team}}, title = {{Getting Started with Mendeley}}, url = {http://www.mendeley.com}, abstract = {A quick introduction to Mendeley. Learn how Mendeley creates your personal digital library, how to organize and annotate documents, how to collaborate and share with colleagues, and how to generate citations and bibliographies.}, address = {London}, booktitle = {Mendeley Desktop}, keywords = {Mendeley, how-to, user manual}, pages = {1--16}, publisher = {Mendeley Ltd.}, year = {2011}, } @Misc{Thellman2017, author = {Thellman, Sam and Silvervarg, Annika and Ziemke, Tom}, title = {{Folk-psychological interpretation of human vs. Humanoid robot behavior: Exploring the intentional stance toward robots.}}, doi = {10.3389/fpsyg.2017.01962}, abstract = {People rely on shared folk-psychological theories when judging behavior. These theories guide people's social interactions and therefore need to be taken into consideration in the design of robots and other autonomous systems expected to interact socially with people. 
It is, however, not yet clear to what degree the mechanisms that underlie people's judgments of robot behavior overlap or differ from the case of human or animal behavior. To explore this issue, participants (N = 90) were exposed to images and verbal descriptions of eight different behaviors exhibited either by a person or a humanoid robot. Participants were asked to rate the intentionality, controllability and desirability of the behaviors, and to judge the plausibility of seven different types of explanations derived from a recently proposed psychological model of lay causal explanation of human behavior. Results indicate: substantially similar judgments of human and robot behavior, both in terms of (1a) ascriptions of intentionality/controllability/desirability and in terms of (1b) plausibility judgments of behavior explanations; (2a) high level of agreement in judgments of robot behavior – (2b) slightly lower but still largely similar to agreement over human behaviors; (3) systematic differences in judgments concerning the plausibility of goals and dispositions as explanations of human vs. humanoid behavior. Taken together, these results suggest that people's intentional stance toward the robot was in this case very similar to their stance toward the human. 
(PsycINFO Database Record (c) 2018 APA, all rights reserved)}, booktitle = {Frontiers in Psychology}, isbn = {1664-1078(Electronic)}, keywords = {*Folk Psychology, *Human Computer Interaction, *Robotics, Social Interaction}, publisher = {Frontiers Media S.A.}, volume = {8}, year = {2017}, } @incollection{Thompson1996a, address = {Oxford}, author = {Thompson, John B}, booktitle = {Detraditionalization: Critical reflections on authority and identity}, editor = {Heelas, Paul and Lash, Scott and Morris, Paul}, pages = {89--108}, publisher = {Blackwell Publishers}, title = {{Tradition and self in a mediated world}}, year = {1996} } @book{Tonner2010, address = {London}, author = {Tonner, Philip}, publisher = {Continuum}, title = {{Heidegger, Metaphysics and the Univocity of Being}}, year = {2010} } @article{Tonner2007, author = {Tonner, Philip}, journal = {Pli}, pages = {129--146}, title = {{Duns Scotus' Concept of the Univocity of Being}}, volume = {18}, year = {2007} } @misc{Toxboe, author = {Toxboe, Anders}, title = {{UI Patterns}}, url = {http://ui-patterns.com/}, volume = {2017} } @article{Trakakis2012a, abstract = {Abstract Questions of style are often deemed of marginal importance in philosophy, as well as in metaphilosophical debates concerning the analytic/Continental divide. I take issue with this common tendency by showing how style ? suitably conceived not merely as a way of writing, but as a form of expression intimately linked to a form of life ? occupies a central role in philosophy. After providing an analysis of the concept of style, I take a fresh look at the analytic/Continental division by examining the various stylistic differences between philosophers on each side. Despite these differences, I argue, both sides of the divide suffer from a common stylistic deficiency, and if this deficiency were rectified the gulf separating the two traditions may not appear as insurmountable as it presently does. 
To show this, I draw principally from the philosophy of religion, a field that has recently experienced a renewal in both the analytic and Continental traditions.}, annote = {https://doi.org/10.1111/j.1747-9991.2012.00526.x}, author = {Trakakis, N N}, doi = {https://doi.org/10.1111/j.1747-9991.2012.00526.x}, issn = {1747-9991}, journal = {Philosophy Compass}, month = {dec}, number = {12}, pages = {919--942}, publisher = {John Wiley {\&} Sons, Ltd}, title = {{Doing Philosophy in Style: A New Look at the Analytic/Continental Divide}}, volume = {7}, year = {2012} } @incollection{Tripathi2016, abstract = {Philosophers of technologies respond to the “given fact” that we live in a “technological culture” by sketching a “praxis philosophy” of technologies, where technologies are inherently neutral and culturally multi-stable. The easiest way to understand the non-neutrality of a technology is that we try to consider how experience is mediated by the technologies we use. Material hermeneutics deals with the art of embodied interpretation of material culture and technologies. 
In my chapter, I will demonstrate that a newer approach of hermeneutics, digital hermeneutics, applies to the concrete praxis of technologies such as internet technology and cyberspace.}, address = {Wiesbaden}, author = {Tripathi, Arun}, booktitle = {Information Cultures in the Digital Age: A Festschrift in Honor of Rafael Capurro}, doi = {10.1007/978-3-658-14681-8_8}, editor = {Kelly, Matthew and Bielby, Jared}, pages = {143--157}, publisher = {Springer Fachmedien}, title = {{The Significance of Digital Hermeneutics for the Philosophy of Technology}}, year = {2016} } @inproceedings{Tripathi2017, author = {Tripathi, Samarth and Acharya, Shrinivas and Sharma, Ranti Dev and Mittal, Sudhanshu and Bhattacharya, Samit}, booktitle = {AAAI}, title = {{Using Deep and Convolutional Neural Networks for Accurate Emotion Classification on DEAP Dataset}}, year = {2017} } @article{Truyens2014, author = {Truyens, Maarten and {Van Eecke}, Patrick}, doi = {10.1016/j.clsr.2014.01.009 LK - https://ru.on.worldcat.org/oclc/5575637023}, issn = {0267-3649}, journal = {Computer Law {\&} Security Review TA - TT -}, number = {2}, pages = {153--170}, title = {{Legal aspects of text mining}}, volume = {30}, year = {2014} } @Article{Tu2015, author = {Tu, Ding and Chen, Ling and Chen, Gencai}, title = {{Automatic multi-way domain concept hierarchy construction from customer reviews}}, doi = {https://doi.org/10.1016/j.neucom.2014.06.038}, issn = {0925-2312}, pages = {472--484}, url = {http://www.sciencedirect.com/science/article/pii/S0925231214008108}, volume = {147}, abstract = {A concept hierarchy is important for many applications to manage and analyze text corpora. In the literature, most previous hierarchy construction works are under the assumption that the semantic relations in the concept hierarchy can be extracted from a text corpus, which is not fully satisfied for short and informal texts, e.g. tweets and customer reviews. 
And many works utilize hierarchical clustering methods to get the final concept hierarchy, in which the resulting binary-tree form concept hierarchy cannot fit the demand in many applications. In this paper, we propose a general process for building a concept hierarchy from customer reviews with an appropriate depth. The process can be divided into three steps. First, all highly ranked topic words are extracted as concept words using a topic model. And a word sense disambiguation task is performed to derive the possible semantics of the words. Then, the distances between these words are computed by combining their contexts and relations in the WordNet. Finally, all words are organized using a modified multi-way hierarchical clustering method. In addition, a new concept hierarchy evaluation model is presented. Our approach is compared to approaches using hierarchical clustering methods on the Amazon Customer Review data set, and the results show that our approach can get higher similarity scores with the reference concept hierarchy.}, journal = {Neurocomputing}, keywords = {Hierarchical clustering, Taxonomy comparison, Taxonomy construction, WordNet}, year = {2015}, } @article{Turing1950, author = {Turing, A M}, journal = {Mind}, number = {236}, pages = {433--460}, title = {{Computing Machinery and Intelligence}}, volume = {59}, year = {1950} } @article{Turner2019, author = {Turner, Fred}, journal = {Harpers Magazine January 2019 Issue}, title = {{Machine Politics: The rise of the internet and a new age of authoritarianism}}, url = {https://harpers.org/archive/2019/01/machine-politics-facebook-political-polarization/}, year = {2019} } @misc{Turner2010, author = {Turner, J S and Soar, R C}, booktitle = {Industrialised, Integrated, Intelligent sustainable Construction - I3CON Handbook 2}, editor = {Wallis, Ian and Bilan, Lesya and Smith, Mike and {Samad Kazi}, Abdul}, pages = {233--248}, publisher = {I3CON/BSRIA}, title = {{Beyond Biomimicry: What Termites Can Tell Us 
About Realizing the Living Building}}, year = {2010} } @article{Ulmer1981, author = {Ulmer, Gregory L}, doi = {10.2307/464513}, journal = {Diacritics}, number = {3}, pages = {39--56}, publisher = {Johns Hopkins University Press}, title = {{The Post-Age}}, volume = {11}, year = {1981} } @incollection{Valgentini2010, address = {Evanston}, author = {Valgentini, Robert T}, booktitle = {Consequences of Hermeneutics}, editor = {Malpas, Jeff and Zabala, Santiago}, pages = {66--81}, publisher = {Northwestern University Press}, title = {{The Tradition of Tradition in Philosophical Hermeneutics}}, year = {2010} } @incollection{VandenBosch2007, author = {{Van den Bosch}, A. and Busser, B. and Canisius, S. and Daelemans, W.}, booktitle = {Computational Linguistics in the Netherlands: Selected Papers from the Seventeenth CLIN Meeting}, month = {jan}, pages = {99--114}, title = {{An efficient memory-based morphosyntactic tagger and parser for Dutch}}, year = {2007} } @InBook{VandenHoven2013, author = {{Van den Hoven}, Jeroen}, booktitle = {Responsible Innovation}, title = {{Value Sensitive Design and Responsible Innovation}}, doi = {doi:10.1002/9781118551424.ch4}, isbn = {9781118551424}, pages = {75--83}, series = {Wiley Online Books}, abstract = {Summary The Netherlands has learned interesting lessons about ethics and innovation in the first decade of the twenty-first century. A real innovative design for an electronic patient record system or a truly smart electricity meter, would have anticipated or pre-empted moral concerns and accommodated them into its design, reconciling efficiency, privacy, sustainability, and safety. Innovation can take the shape of design solutions to situations of moral overload. Responsible innovation aims at changing the world in such a way that the pursuit of one horn of the dilemma is no longer necessarily at the expense of grabbing the other. It aims at grabbing the bull by both horns. 
Responsible innovation should, therefore, be distinguished from mere innovation or the adding of mere new functionality. Responsible innovation is the endeavor of attempting to add morally relevant functionality which allows us to do more good than before.}, annote = {doi:10.1002/9781118551424.ch4}, keywords = {moral overload, responsible innovation, value sensitive design}, month = {4}, year = {2013}, } @article{VanderHeiden2015b, author = {{Van der Heiden}, Gert-Jan}, doi = {10.5840/kilikya20152317}, journal = {Cilicia Journal of Philosophy}, number = {3}, pages = {51--67}, title = {{Technology and Formation}}, volume = {2}, year = {2015} } @phdthesis{VanderHeiden2008a, author = {{Van der Heiden}, Gert-Jan}, publisher = {Radboud University}, title = {{Disclosure and Displacement. Truth and Language in the Work of Heidegger, Ricoeur, and Derrida}}, year = {2008} } @book{VanderHeiden2010, address = {Pittsburgh}, author = {{Van der Heiden}, Gert-Jan}, publisher = {Duquesne University Press}, title = {{The truth (and untruth) of language: Heidegger, Ricoeur, and Derrida on Disclosure {\&} Displacement}}, year = {2010} } @book{VanderHeiden2014, address = {Pittsburgh}, author = {{Van der Heiden}, Gert-Jan}, publisher = {Duquesne University Press}, title = {{Ontology after Ontotheology. 
Plurality, Event, and Contingency in Contemporary Philosophy}}, year = {2014} } @article{VanderHeiden2015a, author = {{Van der Heiden}, Gert-Jan}, doi = {http://dx.doi.org/10.1080/00071773.2015.1053320}, journal = {Journal of the British Society for Phenomenology}, pages = {1--17}, title = {{The Letter and the Witness: Agamben, Heidegger, and Derrida}}, year = {2015} } @article{VanderHeiden2015, author = {{Van der Heiden}, Gert-Jan}, doi = {10.1177/0191453714566482}, journal = {Philosophy and Social Criticism}, number = {9}, pages = {929--944}, title = {{The abandonment of hermeneutics and the potentialization of the past: Nancy and Agamben on the loss of tradition}}, volume = {41}, year = {2015} } @book{VanderHeiden2012, address = {Nijmegen}, author = {van der Heiden, Gert-Jan}, publisher = {Uitgeverij Vantilt}, title = {{De stem van de doden. Hermeneutiek als spreken namens de ander}}, year = {2012} } @misc{VanderSloot, author = {{Van der Sloot}, K. and {Van den Bosch}, A. and {Van Gompel}, M.}, title = {{Frog: An advanced Natural Language Processing suite for Dutch}}, url = {https://languagemachines.github.io/frog/}, urldate = {2019-06-08} } @article{VanderWalt2005, author = {{Van der Walt}, Johan}, journal = {Law and Critique}, pages = {277--299}, title = {{Interrupting the myth of the partage: Reflections of sovereignty and sacrifice in the work of Nancy, Agamben and Derrida}}, volume = {16}, year = {2005} } @article{article, author = {{Van Dijk}, Jan A G M}, doi = {10.3233/978-1-61499-137-3-49}, journal = {Innovation and the Public Sector}, pages = {49--62}, title = {{Digital democracy: Vision and reality}}, volume = {19}, year = {2012} } @book{VanGervenOei2017, editor = {{Van Gerven Oei}, V.W.J.}, publisher = {Punctum Books}, title = {{Going Postcard: The Letter(s) of Jacques Derrida}}, url = {https://punctumbooks.com/titles/going-postcard-the-letters-of-jacques-derrida/}, year = {2017} } @misc{VanGompelLaMachine, author = {{Van Gompel}, M.}, title = 
{{LaMachine}}, url = {https://proycon.github.io/LaMachine/}, urldate = {2019-06-10} } @misc{VanWelie, author = {{Van Welie}, Martijn}, title = {{Patterns in Interaction Design}}, url = {http://www.welie.com/}, volume = {2017} } @incollection{Vanhoozer2006, address = {Bloomington}, author = {Vanhoozer, Kevin J}, booktitle = {Hermeneutics at the Crossroads}, editor = {Vanhoozer, Kevin J and Smith, James K A and Benson, Bruce Ellis}, pages = {3--34}, publisher = {Indiana University Press}, title = {{Discourse on Matter: Hermeneutics and the "Miracle" of Understanding}}, year = {2006} } @book{Vedder2003, address = {Budel}, author = {Vedder, Ben}, publisher = {Damon}, title = {{Wandelen met woorden. Een weg van de filosofische hermeneutiek naar de hermeneutische filosofie en terug}}, year = {2003} } @Article{Verbeek2006, author = {Verbeek, Peter-Paul}, title = {{Materializing Morality: Design Ethics and Technological Mediation}}, doi = {10.1177/0162243905285847}, number = {3}, pages = {361--380}, volume = {31}, abstract = {During the past decade, the {\^{a}}€{\oe}script{\^{a}}€ concept, indicating how technologies prescribe human actions, has acquired a central place in STS. Until now, the concept has mainly functioned in descriptive settings. This article will deploy it in a normative setting. When technologies coshape human actions, they give material answers to the ethical question of how to act. This implies that engineers are doing {\^{a}}€{\oe}ethics by other means{\^{a}}€: they materialize morality. The article will explore the implications of this insight for engineering ethics. It first augments the script concept by developing the notion of technological mediation. After this, it investigates how the concept of mediation could be made fruitful for design ethics. 
It discusses how the ambition to design behaviorinfluencing technologies raises moral questions itself and elaborates two methods for anticipating technological mediation in the design process: performing mediation analyses and using an augmented version of constructive technology assessment.}, groups = {Ethical AI}, journal = {Science, Technology, {\&} Human Values}, year = {2006}, } @Article{Verbeek2015a, author = {Verbeek, Peter-Paul}, title = {{Beyond Interaction: A Short Introduction to Mediation Theory}}, doi = {10.1145/2751314}, issn = {1072-5520}, number = {3}, pages = {26--31}, url = {https://doi-org.ru.idm.oclc.org/10.1145/2751314}, volume = {22}, address = {New York, NY, USA}, groups = {Ethical AI}, journal = {Interactions}, month = {apr}, publisher = {Association for Computing Machinery}, year = {2015}, } @Article{Verbeek2008, author = {Verbeek, Peter-Paul}, title = {{Cyborg intentionality: Rethinking the phenomenology of human–technology relations}}, doi = {10.1007/s11097-008-9099-x}, number = {3}, pages = {387--395}, volume = {7}, abstract = {This article investigates the types of intentionality involved in human–technology relations. It aims to augment Don Ihde's analysis of the relations between human beings and technological artifacts, by analyzing a number of concrete examples at the limits of Ihde's analysis. The article distinguishes and analyzes three types of “cyborg intentionality,” which all involve specific blends of the human and the technological. 
Technologically mediated intentionality occurs when human intentionality takes place “through” technological artifacts; hybrid intentionality occurs when the technological actually merges with the human; and composite intentionality is the addition of human intentionality and the intentionality of technological artifacts.}, groups = {Ethical AI}, journal = {Phenomenology and the Cognitive Sciences}, year = {2008}, } @misc{VerbeekBlog, annote = {(accessed: 2019-03-23)}, author = {Verbeek, Peter-Paul}, title = {{Mediation Theory}}, url = {https://ppverbeek.wordpress.com/mediation-theory/}, urldate = {2019-10-25}, year = {2019} } @InCollection{Verbeek2015, author = {Verbeek, Peter-Paul}, booktitle = {The Onlife Manifesto: Being Human in a Hyperconnected Era}, title = {{Designing the public sphere: Information technologies and the politics of mediation}}, doi = {10.1007/978-3-319-04093-6_21}, abstract = {After a few decades of living with Information and Communication Technologies, we have got so much used to their presence in our daily lives, that we hardly realize that the societal and cultural revolution they are causing has only just begun. While most discussions still focus on privacy issues and on the impact of social media on interpersonal relations, a whole new generation of ICTs is currently entering the world, with potentially revolutionary impacts that require careful analysis and evaluation. Many everyday objects are currently being equipped with forms of ‘ubiquitous computing' or ‘ambient intelligence'. At the same time, ‘augmented reality' technologies are rapidly gaining influence. ICTs will result in smart environments, and new social relations. Rather than merely assessing and criticizing these developments ‘from the outside', we must to learn to accompany them critically ‘from within'. 
The public sphere requires ‘technologies of the self': the capability to understand technological mediations, to take them into account in technological design, and to shape our existence in interaction with them. The real choice is not between accepting of rejecting new ICTs, but between critical engagement and powerless opposition.}, groups = {Ethical AI}, keywords = {Ethics of technology, Google Glass, Information technology, Philosophy of technology, Technological mediation}, year = {2015}, } @TechReport{ECP2020, author = {Verbeek, Peter-Paul and Tijink, Dani{\"{e}}l}, institution = {ECP | Platform voor de InformatieSamenleving}, title = {{Guidance ethics approach: An ethical dialogue about technology with perspective on actions}}, pages = {1--64}, url = {https://ecp.nl/wp-content/uploads/2020/11/Guidance-ethics-approach.pdf}, groups = {Ethical AI}, year = {2020}, } @article{Verbeek2017, author = {Verbeek, Peter-Paul and Zink, Verena and Engelmann, Severin}, journal = {Fatum 6}, title = {{Technology Mediation: How Technology Organizes Human-World Relations. Interview with Peter-Paul Verbeek}}, url = {https://www.fatum-magazin.de/ausgaben/synthese/internationale-perspektiven/technology-mediation.html}, year = {2017} } @incollection{Verhoeven1991, address = {Nijmegen}, author = {Verhoeven, Cornelis}, booktitle = {Wonen: Architectuur in het denken van Martin Heidegger}, editor = {{De Visscher}, Jacques and {De Saeger}, Raf}, pages = {97--116}, publisher = {SUN}, title = {{Wonen en thuis zijn. 
Aantekeningen bij 'Bouwen Wonen Denken' van Martin Heidegger}}, year = {1991} } @article{Volstad2012, author = {Volstad, Nina Louise and Boks, Casper}, doi = {10.1002/sd.1535}, issn = {1099-1719}, journal = {Sustainable Development}, keywords = {analogies biomimicry designer's toolkit industrial}, number = {3}, pages = {189--199}, title = {{On the use of Biomimicry as a Useful Tool for the Industrial Designer}}, url = {http://dx.doi.org/10.1002/sd.1535}, volume = {20}, year = {2012} } @incollection{Wachenfeld2016, author = {Wachenfeld, Walther and Winner, Hermann and Gerdes, J Christian and Lenz, Barbara and Maurer, Markus and Beiker, Sven and Faedrich, Eva and Winkle, Thomas}, booktitle = {Autonomous Driving}, doi = {10.1007/978-3-662-48847-8}, editor = {Maurer, Markus and Gerdes, J Christian and Lenz, Barbara and Winner, Hermann}, pages = {9--38}, publisher = {Springer-Verlag Berlin Heidelberg}, title = {{Use Cases for Autonomous Driving}}, year = {2016} } @Article{Wachter2017, author = {Wachter, Sandra and Mittelstadt, Brent and Floridi, Luciano}, title = {{Why a Right to Explanation of Automated Decision-Making Does Not Exist in the General Data Protection Regulation}}, number = {2}, pages = {76--99}, volume = {7}, groups = {Ethical AI}, journal = {International Data Privacy Law}, year = {2017}, } @online{Wang2019, annote = {(accessed: 2019-03-21)}, author = {Wang, Philip}, title = {{Thispersondoesnotexist}}, url = {https://thispersondoesnotexist.com/}, urldate = {2019-12-15}, year = {2019} } @book{Watkin2014, address = {London}, author = {Watkin, William}, publisher = {Rowman {\&} Littlefield International}, title = {{Agamben and Indifference: A Critical Overview}}, year = {2014} } @Article{Waytz2014, author = {Waytz, Adam and Heafner, Joy and Epley, Nicholas}, title = {{The mind in the machine: Anthropomorphism increases trust in an autonomous vehicle}}, doi = {https://doi.org/10.1016/j.jesp.2014.01.005}, issn = {0022-1031}, pages = {113--117}, url = 
{http://www.sciencedirect.com/science/article/pii/S0022103114000067}, volume = {52}, abstract = {Sophisticated technology is increasingly replacing human minds to perform complicated tasks in domains ranging from medicine to education to transportation. We investigated an important theoretical determinant of people's willingness to trust such technology to perform competently—the extent to which a nonhuman agent is anthropomorphized with a humanlike mind—in a domain of practical importance, autonomous driving. Participants using a driving simulator drove either a normal car, an autonomous vehicle able to control steering and speed, or a comparable autonomous vehicle augmented with additional anthropomorphic features—name, gender, and voice. Behavioral, physiological, and self-report measures revealed that participants trusted that the vehicle would perform more competently as it acquired more anthropomorphic features. Technology appears better able to perform its intended design when it seems to have a humanlike mind. These results suggest meaningful consequences of humanizing technology, and also offer insights into the inverse process of objectifying humans.}, journal = {Journal of Experimental Social Psychology}, keywords = {Anthropomorphism, Dehumanization, Human–computer interaction, Mind perception, Moral responsibility, Trust}, year = {2014}, } @book{Weinsheimer1985, address = {New Haven}, author = {Weinsheimer, Joel C}, publisher = {Yale University Press}, title = {{Gadamer's Hermeneutics}}, year = {1985} } @article{Weisberg2009, abstract = {Inference to the Best Explanation (IBE) and Bayesianism are our two most prominent theories of scientific inference. Are they compatible? Van Fraassen famously argued that they are not, concluding that IBE must be wrong since Bayesianism is right. 
Writers since then, from both the Bayesian and explanationist camps, have usually considered van Fraassen's argument to be misguided, and have plumped for the view that Bayesianism and IBE are actually compatible. I argue that van Fraassen's argument is actually not so misguided, and that it causes more trouble for compatibilists than is typically thought. Bayesianism in its dominant, subjectivist form, can only be made compatible with IBE if IBE is made subservient to conditionalization in a way that robs IBE of much of its substance and interest. If Bayesianism and IBE are to be fit together, I argue, a strongly objective Bayesianism is the preferred option. I go on to sketch this objectivist, IBE-based Bayesianism, and offer some preliminary suggestions for its development.}, author = {Weisberg, Jonathan}, doi = {10.1007/s11229-008-9305-y}, issn = {1573-0964}, journal = {Synthese}, number = {1}, pages = {125--143}, title = {{Locating IBE in the Bayesian framework}}, url = {https://doi.org/10.1007/s11229-008-9305-y}, volume = {167}, year = {2009} } @article{Whyte2009, author = {Whyte, Jessica}, doi = {10.1007/s10978-009-9059-9}, journal = {Law Critique}, pages = {309--324}, title = {{'I Would Prefer Not To': Giorgio Agamben, Bartleby and the Potentiality of the Law}}, volume = {20}, year = {2009} } @misc{Wiewiorowski2019, author = {Wiewi{\'{o}}rowski, Wojciech}, booktitle = {European Data Protection Supervisor Blog}, title = {{Facial recognition: A solution in search of a problem?}}, url = {https://edps.europa.eu/press-publications/press-news/blog/facial-recognition-solution-search-problem{\_}en}, urldate = {2020-06-30}, year = {2019} } @inproceedings{Wille2009, annote = {How to integrate this? Lecture Notes in Computer Science, vol 5548. Springer, Berlin, Heidelberg}, author = {Wille, R.}, booktitle = {International Conference on Formal Concept Analysis}, editor = {Ferr{\'{e}}, S. 
and Rudolph, S.}, pages = {314--339}, publisher = {Springer}, title = {{Restructuring lattice theory: an approach based on hierarchies of concepts}}, year = {2009} } @incollection{Williams2005, address = {Basingstoke}, author = {Williams, Michael}, booktitle = {Readings of Wittgenstein's On Certainty}, editor = {Moyal-Sharrock, Dani{\`{e}}le and Brenner, William H}, pages = {47--58}, publisher = {Palgrave Macmillan}, title = {{Why Wittgenstein Isn't a Foundationalist}}, year = {2005} } @article{Wills1984, author = {Wills, David}, doi = {10.2307/3684813}, journal = {SubStance}, number = {43}, pages = {19--38}, title = {{Post/Card/Match/Book/"Envois"/Derrida}}, volume = {13}, year = {1984} } @article{Wiltse2014, author = {Wiltse, Heather}, journal = {Techn{\'{e}}: Research in Philosophy and Technology}, number = {3}, title = {{Unpacking Digital Material Mediation}}, volume = {18}, year = {2014} } @misc{Wittgenstein2009, author = {Wittgenstein, Ludwig}, edition = {4th}, editor = {Hacker, P M S and Schulte, Joachim}, publisher = {Wiley-Blackwell}, title = {{Philosophical Investigations}}, year = {2009} } @book{Wittgenstein1972, address = {Oxford}, author = {Wittgenstein, Ludwig}, editor = {{van Wright Denis}, G H A4 - Paul and Anscombe, G E M}, publisher = {Basil Blackwell}, title = {{On Certainty}}, year = {1972} } @article{Wong2012, author = {Wong, Wilson and Liu, Wei and Bennamoun, Mohammed}, issn = {0360-0300}, journal = {ACM Computing Surveys (CSUR)}, number = {4}, pages = {20}, publisher = {ACM}, title = {{Ontology learning from text: A look back and into the future}}, volume = {44}, year = {2012} } @article{Worall2011, author = {Worall, Mark}, doi = {http://dx.doi.org/10.1080/17508975.2011.582316}, journal = {Intelligent Buildings International}, number = {2}, pages = {87--95}, title = {{Homeostasis in nature: Nest building termites and intelligent buildings}}, volume = {3}, year = {2011} } @book{Young2013, address = {London}, author = {Young, Eugene B and 
Genosko, Gary and Watson, Janell}, publisher = {Bloomsbury}, title = {{The Deleuze and Guattari Dictionary}}, year = {2013} } @incollection{Zabala2010, address = {Evanston}, author = {Zabala, Santiago}, booktitle = {Consequences of Hermeneutics}, editor = {Malpas, Jeff and Zabala, Santiago}, pages = {161--176}, publisher = {Northwestern University Press}, title = {{Being is Conversation: Remains, Weak Thought, and Hermeneutics}}, year = {2010} } @article{Zwier2016, author = {Zwier, Jochem and Blok, Vincent and Lemmens, Pieter}, doi = {10.1007/s13347-016-0221-7}, journal = {Philosophy {\&} Technology}, title = {{Phenomenology and the Empirical Turn: a Phenomenological Analysis of Postphenomenology}}, year = {2016} } @InCollection{Heidegger2000, author = {Heidegger, Martin}, booktitle = {Gesamtausgabe. 1. Abteilung: Ver{\"{o}}ffentlichte Schriften 1910-1976. Band 7: Vortr{\"{a}}ge und Aufs{\"{a}}tze}, title = {{Moira}}, pages = {235--262}, publisher = {Vittorio Klosterman}, volume = {7}, address = {Frankfurt am Rein}, year = {2000}, } @Article{Mittelstadt2016a, author = {Mittelstadt, Brent Daniel and Allo, Patrick and Taddeo, Mariarosaria and Wachter, Sandra and Floridi, Luciano}, title = {{The ethics of algorithms: Mapping the debate}}, doi = {10.1177/2053951716679679}, issn = {20539517}, number = {2}, pages = {1--21}, volume = {3}, abstract = {In information societies, operations, decisions and choices previously left to humans are increasingly delegated to algorithms, which may advise, if not decide, about how data should be interpreted and what actions should be taken as a result. More and more often, algorithms mediate social processes, business transactions, governmental decisions, and how we perceive, understand, and interact among ourselves and with the environment. Gaps between the design and operation of algorithms and our understanding of their ethical implications can have severe consequences affecting individuals as well as groups and whole societies. 
This paper makes three contributions to clarify the ethical importance of algorithmic mediation. It provides a prescriptive map to organise the debate. It reviews the current discussion of ethical aspects of algorithms. And it assesses the available literature in order to identify areas requiring further work to develop the ethics of algorithms.}, groups = {Ethical AI}, journal = {Big Data and Society}, keywords = {Algorithms, Big Data, automation, data analytics, data mining, ethics, machine learning}, year = {2016}, } @InCollection{Morador2016a, author = {Morador, Fernando Flores}, booktitle = {Information Cultures in the Digital Age: A Festschrift in Honor of Rafael Capurro}, title = {{Understanding the pulse of existence: An examination of capurro's angeletics}}, doi = {10.1007/978-3-658-14681-8_14}, editor = {Kelly, Matthew and Bielby, Jared}, isbn = {9783658146818}, publisher = {Springer Fachmedien}, abstract = {The contribution of Rafael Capurro to the clarification of the uses of the concept of information has a conceptual as well a historical dimension; in this last sense, there are few philosophers of information that can follow the history of the concept of information in the works of the Classics with the accuracy and erudition of Capurro We find that his contribution embraces the following fields: a) a clear differentiation between subjective and objective information and as a consequence of that b) a clear differentiation between information as order in the universe, different from matter and idea But the contribution he made has not only been analytical and historical, it has also been creative, opening up for us an understanding of the phenomena of "information" through the differentiation between the act of interpretation of a message (hermeneutics) and the proper act of communication as a specific kind of intentional act Capurro has opened up a new discipline named "angeletics" As we understand the Capurrian framework, the key idea is to 
comprehend how "a message produces changes in both the emissary and in the receptor" According to Capurro, a message has two dimensions; one is information, and the other intentionality It is the aim of our short article to take a closer look at Rafael Capurro's contribution to the understanding of these two sides of the message.}, address = {Wiesbaden}, year = {2016}, } @Online{AlgorithmWatch2019, author = {{AlgorithmWatch}}, date = {2019-04-09}, title = {Launch of our ‘AI Ethics Guidelines Global Inventory’}, url = {https://algorithmwatch.org/en/launch-of-our-ai-ethics-guidelines-global-inventory/}, urldate = {2021-08-04}, groups = {Ethical AI}, } @Online{AlgorithmWatch, author = {{AlgorithmWatch}}, title = {AI Ethics Guidelines Global Inventory}, url = {https://inventory.algorithmwatch.org/}, urldate = {2021-03-16}, groups = {Ethical AI}, } @InProceedings{Bietti2020, author = {Bietti, Elettra}, booktitle = {Proceedings of the 2020 Conference on Fairness, Accountability, and Transparency}, date = {2020-01}, title = {From Ethics Washing to Ethics Bashing: A View on Tech Ethics from within Moral Philosophy}, doi = {10.1145/3351095.3372860}, isbn = {9781450369367}, location = {Barcelona, Spain}, pages = {210–219}, publisher = {Association for Computing Machinery}, series = {FAT* '20}, url = {https://doi.org/10.1145/3351095.3372860}, abstract = {The word 'ethics' is under siege in technology policy circles. Weaponized in support of deregulation, self-regulation or handsoff governance, "ethics" is increasingly identified with technology companies' self-regulatory efforts and with shallow appearances of ethical behavior. So-called "ethics washing" by tech companies is on the rise, prompting criticism and scrutiny from scholars and the tech community at large. In parallel to the growth of ethics washing, its condemnation has led to a tendency to engage in "ethics bashing." 
This consists in the trivialization of ethics and moral philosophy now understood as discrete tools or pre-formed social structures such as ethics boards, self-governance schemes or stakeholder groups.The misunderstandings underlying ethics bashing are at least threefold: (a) philosophy and "ethics" are seen as a communications strategy and as a form of instrumentalized cover-up or fa\c{c}ade for unethical behavior, (b) philosophy is understood in opposition and as alternative to political representation and social organizing and (c) the role and importance of moral philosophy is downplayed and portrayed as mere "ivory tower" intellectualization of complex problems that need to be dealt with in practice.This paper argues that the rhetoric of ethics and morality should not be reductively instrumentalized, either by the industry in the form of "ethics washing," or by scholars and policy-makers in the form of "ethics bashing." Grappling with the role of philosophy and ethics requires moving beyond both tendencies and seeing ethics as a mode of inquiry that facilitates the evaluation of competing tech policy strategies. In other words, we must resist narrow reductivism of moral philosophy as instrumentalized performance and renew our faith in its intrinsic moral value as a mode of knowledgeseeking and inquiry. Far from mandating a self-regulatory scheme or a given governance structure, moral philosophy in fact facilitates the questioning and reconsideration of any given practice, situating it within a complex web of legal, political and economic institutions. Moral philosophy indeed can shed new light on human practices by adding needed perspective, explaining the relationship between technology and other worthy goals, situating technology within the human, the social, the political. 
It has become urgent to start considering technology ethics also from within and not only from outside of ethics.}, address = {New York, NY, USA}, keywords = {self-regulation, technology law, AI, regulation, technology ethics, ethics, moral philosophy}, numpages = {10}, } @WWW{NS-Graffiti, author = {NS}, title = {Graffiti-preventie met AI}, url = {https://werkenbijns.nl/projecten/graffiti-preventie-met-ai/}, urldate = {2021-03-17}, } @Misc{vilone2020, author = {Giulia Vilone and Luca Longo}, date = {2020}, title = {Explainable Artificial Intelligence: a Systematic Review}, eprint = {2006.00093}, eprintclass = {cs.AI}, eprinttype = {arXiv}, groups = {Ethical AI}, } @InProceedings{Mittelstadt2019, author = {Mittelstadt, Brent and Russell, Chris and Wachter, Sandra}, booktitle = {Proceedings of the Conference on Fairness, Accountability, and Transparency}, date = {2019}, title = {Explaining Explanations in AI}, doi = {10.1145/3287560.3287574}, isbn = {9781450361255}, location = {Atlanta, GA, USA}, pages = {279–288}, publisher = {Association for Computing Machinery}, series = {FAT* '19}, url = {https://doi.org/10.1145/3287560.3287574}, abstract = {Recent work on interpretability in machine learning and AI has focused on the building of simplified models that approximate the true criteria used to make decisions. These models are a useful pedagogical device for teaching trained professionals how to predict what decisions will be made by the complex system, and most importantly how the system might break. However, when considering any such model it's important to remember Box's maxim that "All models are wrong but some are useful." We focus on the distinction between these models and explanations in philosophy and sociology. These models can be understood as a "do it yourself kit" for explanations, allowing a practitioner to directly answer "what if questions" or generate contrastive explanations without external assistance. 
Although a valuable ability, giving these models as explanations appears more difficult than necessary, and other forms of explanation may not have the same trade-offs. We contrast the different schools of thought on what makes an explanation, and suggest that machine learning might benefit from viewing the problem more broadly.}, address = {New York, NY, USA}, groups = {Ethical AI, XAI}, keywords = {Interpretability, Philosophy of Science, Explanations, Accountability}, numpages = {10}, } @Article{BarredoArrieta2020, author = {Alejandro {Barredo Arrieta} and Natalia Díaz-Rodríguez and Javier {Del Ser} and Adrien Bennetot and Siham Tabik and Alberto Barbado and Salvador Garcia and Sergio Gil-Lopez and Daniel Molina and Richard Benjamins and Raja Chatila and Francisco Herrera}, date = {2020}, journaltitle = {Information Fusion}, title = {Explainable Artificial Intelligence ({XAI}): Concepts, taxonomies, opportunities and challenges toward responsible {AI}}, doi = {10.1016/j.inffus.2019.12.012}, issn = {1566-2535}, pages = {82--115}, url = {https://www.sciencedirect.com/science/article/pii/S1566253519308103}, volume = {58}, abstract = {In the last few years, Artificial Intelligence (AI) has achieved a notable momentum that, if harnessed appropriately, may deliver the best of expectations over many application sectors across the field. For this to occur shortly in Machine Learning, the entire community stands in front of the barrier of explainability, an inherent problem of the latest techniques brought by sub-symbolism (e.g. ensembles or Deep Neural Networks) that were not present in the last hype of AI (namely, expert systems and rule based models). Paradigms underlying this problem fall within the so-called eXplainable AI (XAI) field, which is widely acknowledged as a crucial feature for the practical deployment of AI models. 
The overview presented in this article examines the existing literature and contributions already done in the field of XAI, including a prospect toward what is yet to be reached. For this purpose we summarize previous efforts made to define explainability in Machine Learning, establishing a novel definition of explainable Machine Learning that covers such prior conceptual propositions with a major focus on the audience for which the explainability is sought. Departing from this definition, we propose and discuss about a taxonomy of recent contributions related to the explainability of different Machine Learning models, including those aimed at explaining Deep Learning methods for which a second dedicated taxonomy is built and examined in detail. This critical literature analysis serves as the motivating background for a series of challenges faced by XAI, such as the interesting crossroads of data fusion and explainability. Our prospects lead toward the concept of Responsible Artificial Intelligence, namely, a methodology for the large-scale implementation of AI methods in real organizations with fairness, model explainability and accountability at its core. 
Our ultimate goal is to provide newcomers to the field of XAI with a thorough taxonomy that can serve as reference material in order to stimulate future research advances, but also to encourage experts and professionals from other disciplines to embrace the benefits of AI in their activity sectors, without any prior bias for its lack of interpretability.}, groups = {Ethical AI, XAI, IRIS23}, keywords = {Explainable Artificial Intelligence, Machine Learning, Deep Learning, Data Fusion, Interpretability, Comprehensibility, Transparency, Privacy, Fairness, Accountability, Responsible Artificial Intelligence}, } @TechReport{Leslie2019, author = {Leslie, David}, date = {2019-06}, title = {{Understanding artificial intelligence ethics and safety: A guide for the responsible design and implementation of AI systems in the public sector}}, doi = {10.5281/zenodo.3240529}, url = {https://doi.org/10.5281/zenodo.3240529}, groups = {Ethical AI}, publisher = {Zenodo}, } @InProceedings{Raji2020, author = {Raji, Inioluwa Deborah and Smart, Andrew and White, Rebecca N. and Mitchell, Margaret and Gebru, Timnit and Hutchinson, Ben and Smith-Loud, Jamila and Theron, Daniel and Barnes, Parker}, booktitle = {Proceedings of the 2020 Conference on Fairness, Accountability, and Transparency}, date = {2020}, title = {Closing the AI Accountability Gap: Defining an End-to-End Framework for Internal Algorithmic Auditing}, doi = {10.1145/3351095.3372873}, isbn = {9781450369367}, location = {Barcelona, Spain}, pages = {33–44}, publisher = {Association for Computing Machinery}, series = {FAT* '20}, url = {https://doi.org/10.1145/3351095.3372873}, abstract = {Rising concern for the societal implications of artificial intelligence systems has inspired a wave of academic and journalistic literature in which deployed systems are audited for harm by investigators from outside the organizations deploying the algorithms. 
However, it remains challenging for practitioners to identify the harmful repercussions of their own systems prior to deployment, and, once deployed, emergent issues can become difficult or impossible to trace back to their source.In this paper, we introduce a framework for algorithmic auditing that supports artificial intelligence system development end-to-end, to be applied throughout the internal organization development life-cycle. Each stage of the audit yields a set of documents that together form an overall audit report, drawing on an organization's values or principles to assess the fit of decisions made throughout the process. The proposed auditing framework is intended to contribute to closing the accountability gap in the development and deployment of large-scale artificial intelligence systems by embedding a robust process to ensure audit integrity.}, address = {New York, NY, USA}, groups = {Ethical AI}, keywords = {accountability, algorithmic audits, responsible innovation, machine learning}, numpages = {12}, } @Article{Canca2020, author = {Canca, Cansu}, date = {2020-11}, journaltitle = {Commun. 
ACM}, title = {Operationalizing AI Ethics Principles}, doi = {10.1145/3430368}, issn = {0001-0782}, number = {12}, pages = {18--21}, url = {https://doi.org/10.1145/3430368}, volume = {63}, abstract = {A better ethics analysis guide for developers.}, groups = {Ethical AI}, issue_date = {December 2020}, location = {New York, NY, USA}, numpages = {4}, publisher = {Association for Computing Machinery}, } @Article{nori2019, author = {Nori, Harsha and Jenkins, Samuel and Koch, Paul and Caruana, Rich}, date = {2019}, journaltitle = {CoRR}, title = {InterpretML: A Unified Framework for Machine Learning Interpretability}, eprint = {1909.09223}, eprintclass = {cs.LG}, eprinttype = {arXiv}, volume = {abs/1909.09223}, groups = {Ethical AI}, keywords = {explainability, interpretability}, } @Article{Ribeiro2016, author = {Marco Tulio Ribeiro and Sameer Singh and Carlos Guestrin}, date = {2016}, journaltitle = {CoRR}, title = {"Why Should I Trust You?": Explaining the Predictions of Any Classifier}, eprint = {1602.04938}, eprintclass = {cs.LG}, eprinttype = {arXiv}, volume = {abs/1602.04938}, groups = {Ethical AI}, keywords = {explainability, interpretability}, } @Article{Sundararajan2017, author = {Mukund Sundararajan and Ankur Taly and Qiqi Yan}, date = {2017}, journaltitle = {CoRR}, title = {Axiomatic Attribution for Deep Networks}, eprint = {1703.01365}, eprinttype = {arXiv}, url = {http://arxiv.org/abs/1703.01365}, volume = {abs/1703.01365}, bibsource = {dblp computer science bibliography, https://dblp.org}, biburl = {https://dblp.org/rec/journals/corr/SundararajanTY17.bib}, groups = {Ethical AI}, keywords = {explainability, interpretability}, timestamp = {Mon, 13 Aug 2018 16:48:32 +0200}, } @Article{DBLP:journals/corr/SmilkovTKVW17, author = {Daniel Smilkov and Nikhil Thorat and Been Kim and Fernanda B. 
Vi{\'{e}}gas and Martin Wattenberg}, date = {2017}, journaltitle = {CoRR}, title = {SmoothGrad: removing noise by adding noise}, eprint = {1706.03825}, eprinttype = {arXiv}, url = {http://arxiv.org/abs/1706.03825}, volume = {abs/1706.03825}, bibsource = {dblp computer science bibliography, https://dblp.org}, biburl = {https://dblp.org/rec/journals/corr/SmilkovTKVW17.bib}, groups = {Ethical AI}, keywords = {explainability, interpretability}, timestamp = {Mon, 13 Aug 2018 16:48:36 +0200}, } @Article{Mothilal2019, author = {Ramaravind Kommiya Mothilal and Amit Sharma and Chenhao Tan}, date = {2019}, journaltitle = {CoRR}, title = {Explaining Machine Learning Classifiers through Diverse Counterfactual Explanations}, eprint = {1905.07697}, eprintclass = {cs.LG}, eprinttype = {arXiv}, url = {http://arxiv.org/abs/1905.07697}, volume = {abs/1905.07697}, bibsource = {dblp computer science bibliography, https://dblp.org}, biburl = {https://dblp.org/rec/journals/corr/abs-1905-07697.bib}, groups = {Ethical AI, XAI}, keywords = {explainability, interpretability}, timestamp = {Tue, 28 May 2019 12:48:08 +0200}, } @Article{Lundberg2020, author = {Lundberg, Scott M. and Erion, Gabriel and Chen, Hugh and DeGrave, Alex and Prutkin, Jordan M. and Nair, Bala and Katz, Ronit and Himmelfarb, Jonathan and Bansal, Nisha and Lee, Su-In}, date = {2020}, journaltitle = {Nature Machine Intelligence}, title = {From local explanations to global understanding with explainable AI for trees}, doi = {10.1038/s42256-019-0138-9}, issn = {2522-5839}, number = {1}, pages = {56--67}, url = {https://doi.org/10.1038/s42256-019-0138-9}, volume = {2}, abstract = {Tree-based machine learning models such as random forests, decision trees and gradient boosted trees are popular nonlinear predictive models, yet comparatively little attention has been paid to explaining their predictions. Here we improve the interpretability of tree-based models through three main contributions. 
(1) A polynomial time algorithm to compute optimal explanations based on game theory. (2) A new type of explanation that directly measures local feature interaction effects. (3) A new set of tools for understanding global model structure based on combining many local explanations of each prediction. We apply these tools to three medical machine learning problems and show how combining many high-quality local explanations allows us to represent global structure while retaining local faithfulness to the original model. These tools enable us to (1) identify high-magnitude but low-frequency nonlinear mortality risk factors in the US population, (2) highlight distinct population subgroups with shared risk characteristics, (3) identify nonlinear interaction effects among risk factors for chronic kidney disease and (4) monitor a machine learning model deployed in a hospital by identifying which features are degrading the model’s performance over time. Given the popularity of tree-based machine learning models, these improvements to their interpretability have implications across a broad set of domains.}, groups = {Ethical AI, XAI}, keywords = {interpretability, explainability}, refid = {Lundberg2020}, } @Article{Morley2020, author = {Morley, Jessica and Floridi, Luciano and Kinsey, Libby and Elhalal, Anat}, date = {2020}, journaltitle = {Science and Engineering Ethics}, title = {From What to How: An Initial Review of Publicly Available AI Ethics Tools, Methods and Research to Translate Principles into Practices}, doi = {10.1007/s11948-019-00165-5}, issn = {1471-5546}, number = {4}, pages = {2141--2168}, url = {https://doi.org/10.1007/s11948-019-00165-5}, volume = {26}, abstract = {The debate about the ethical implications of Artificial Intelligence dates from the 1960s (Samuel in Science, 132(3429):741-742, 1960. https://doi.org/10.1126/science.132.3429.741; Wiener in Cybernetics: or control and communication in the animal and the machine, MIT Press, New York, 1961). 
However, in recent years symbolic AI has been complemented and sometimes replaced by (Deep) Neural Networks and Machine Learning (ML) techniques. This has vastly increased its potential utility and impact on society, with the consequence that the ethical debate has gone mainstream. Such a debate has primarily focused on principles--the ‘what’ of AI ethics (beneficence, non-maleficence, autonomy, justice and explicability)--rather than on practices, the ‘how.’ Awareness of the potential issues is increasing at a fast rate, but the AI community’s ability to take action to mitigate the associated risks is still at its infancy. Our intention in presenting this research is to contribute to closing the gap between principles and practices by constructing a typology that may help practically-minded developers apply ethics at each stage of the Machine Learning development pipeline, and to signal to researchers where further work is needed. The focus is exclusively on Machine Learning, but it is hoped that the results of this research may be easily applicable to other branches of AI. The article outlines the research method for creating this typology, the initial findings, and provides a summary of future research needs.}, groups = {Ethical AI}, refid = {Morley2020}, } @Article{Floridi2019, author = {Floridi, Luciano}, date = {2019}, journaltitle = {Philosophy {\&} Technology}, title = {Translating Principles into Practices of Digital Ethics: Five Risks of Being Unethical}, doi = {10.1007/s13347-019-00354-x}, issn = {2210-5441}, number = {2}, pages = {185--193}, url = {https://doi.org/10.1007/s13347-019-00354-x}, volume = {32}, groups = {Ethical AI}, refid = {Floridi2019}, } @Article{Saltz2019, author = {Saltz, Jeffrey S. 
and Dewar, Neil}, date = {2019}, journaltitle = {Ethics and Information Technology}, title = {Data science ethical considerations: a systematic literature review and proposed project framework}, doi = {10.1007/s10676-019-09502-5}, issn = {1572-8439}, number = {3}, pages = {197--208}, url = {https://doi.org/10.1007/s10676-019-09502-5}, volume = {21}, abstract = {Data science, and the related field of big data, is an emerging discipline involving the analysis of data to solve problems and develop insights. This rapidly growing domain promises many benefits to both consumers and businesses. However, the use of big data analytics can also introduce many ethical concerns, stemming from, for example, the possible loss of privacy or the harming of a sub-category of the population via a classification algorithm. To help address these potential ethical challenges, this paper maps and describes the main ethical themes that were identified via systematic literature review. It then identifies a possible structure to integrate these themes within a data science project, thus helping to provide some structure in the on-going debate with respect to the possible ethical situations that can arise when using data science analytics.}, groups = {Ethical AI}, refid = {Saltz2019}, } @Article{Jacobs2018, author = {Jacobs, Naomi and Huldtgren, Alina}, date = {2018}, journaltitle = {Ethics and Information Technology}, title = {Why value sensitive design needs ethical commitments}, doi = {10.1007/s10676-018-9467-3}, issn = {1572-8439}, url = {https://doi.org/10.1007/s10676-018-9467-3}, abstract = {Currently, value sensitive design (VSD) does not commit to a particular ethical theory. Critiques contend that without such an explicit commitment, VSD lacks a methodology for distinguishing genuine moral values from mere stakeholders-preferences and runs the risk of attending to a set of values that is unprincipled or unbounded. 
We argue that VSD practitioners need to complement it with an ethical theory. We argue in favour of a mid-level ethical theory to fulfil this role.}, groups = {Ethical AI}, refid = {Jacobs2018}, } @Article{Lipton2016, author = {Zachary Chase Lipton}, date = {2016}, journaltitle = {CoRR}, title = {The Mythos of Model Interpretability}, eprint = {1606.03490}, eprinttype = {arXiv}, url = {http://arxiv.org/abs/1606.03490}, volume = {abs/1606.03490}, bibsource = {dblp computer science bibliography, https://dblp.org}, biburl = {https://dblp.org/rec/journals/corr/Lipton16a.bib}, groups = {Ethical AI, XAI}, timestamp = {Mon, 13 Aug 2018 16:48:59 +0200}, } @Article{Sokol2019, author = {Kacper Sokol and Peter A. Flach}, date = {2019}, journaltitle = {CoRR}, title = {Explainability Fact Sheets: {A} Framework for Systematic Assessment of Explainable Approaches}, eprint = {1912.05100}, eprinttype = {arXiv}, url = {http://arxiv.org/abs/1912.05100}, volume = {abs/1912.05100}, bibsource = {dblp computer science bibliography, https://dblp.org}, biburl = {https://dblp.org/rec/journals/corr/abs-1912-05100.bib}, groups = {Ethical AI}, timestamp = {Thu, 02 Jan 2020 18:08:18 +0100}, } @Article{Gebru2018, author = {Timnit Gebru and Jamie Morgenstern and Briana Vecchione and Jennifer Wortman Vaughan and Hanna M. 
Wallach and Hal Daum{\'{e}} III and Kate Crawford}, date = {2018}, journaltitle = {CoRR}, title = {Datasheets for Datasets}, eprint = {1803.09010}, eprinttype = {arXiv}, url = {http://arxiv.org/abs/1803.09010}, volume = {abs/1803.09010}, bibsource = {dblp computer science bibliography, https://dblp.org}, biburl = {https://dblp.org/rec/journals/corr/abs-1803-09010.bib}, groups = {Ethical AI}, timestamp = {Mon, 20 Aug 2018 15:16:09 +0200}, } @Article{Mitchell2018a, author = {Margaret Mitchell and Simone Wu and Andrew Zaldivar and Parker Barnes and Lucy Vasserman and Ben Hutchinson and Elena Spitzer and Inioluwa Deborah Raji and Timnit Gebru}, date = {2018}, journaltitle = {CoRR}, title = {Model Cards for Model Reporting}, eprint = {1810.03993}, eprinttype = {arXiv}, url = {http://arxiv.org/abs/1810.03993}, bibsource = {dblp computer science bibliography, https://dblp.org}, biburl = {https://dblp.org/rec/journals/corr/abs-1810-03993.bib}, groups = {Ethical AI}, timestamp = {Tue, 30 Oct 2018 20:39:56 +0100}, } @Misc{Agarwal2018, author = {Alekh Agarwal and Alina Beygelzimer and Miroslav Dudík and John Langford and Hanna Wallach}, date = {2018}, title = {A Reductions Approach to Fair Classification}, eprint = {1803.02453}, eprintclass = {cs.LG}, eprinttype = {arXiv}, groups = {Ethical AI}, } @InProceedings{Agarwal2019, author = {Agarwal, Alekh and Dudik, Miroslav and Wu, Zhiwei Steven}, booktitle = {Proceedings of the 36th International Conference on Machine Learning}, date = {2019}, title = {Fair Regression: Quantitative Definitions and Reduction-Based Algorithms}, editor = {Kamalika Chaudhuri and Ruslan Salakhutdinov}, pages = {120--129}, publisher = {PMLR}, series = {Proceedings of Machine Learning Research}, url = {http://proceedings.mlr.press/v97/agarwal19d.html}, volume = {97}, abstract = {In this paper, we study the prediction of a real-valued target, such as a risk score or recidivism rate, while guaranteeing a quantitative notion of fairness with respect to a 
protected attribute such as gender or race. We call this class of problems fair regression. We propose general schemes for fair regression under two notions of fairness: (1) statistical parity, which asks that the prediction be statistically independent of the protected attribute, and (2) bounded group loss, which asks that the prediction error restricted to any protected group remain below some pre-determined level. While we only study these two notions of fairness, our schemes are applicable to arbitrary Lipschitz-continuous losses, and so they encompass least-squares regression, logistic regression, quantile regression, and many other tasks. Our schemes only require access to standard risk minimization algorithms (such as standard classification or least-squares regression) while providing theoretical guarantees on the optimality and fairness of the obtained solutions. In addition to analyzing theoretical properties of our schemes, we empirically demonstrate their ability to uncover fairness–accuracy frontiers on several standard datasets.}, groups = {Ethical AI}, pdf = {http://proceedings.mlr.press/v97/agarwal19d/agarwal19d.pdf}, } @InProceedings{Hardt2016, author = {Hardt, Moritz and Price, Eric and Price, Eric and Srebro, Nati}, booktitle = {Advances in Neural Information Processing Systems}, date = {2016}, title = {Equality of Opportunity in Supervised Learning}, editor = {D. Lee and M. Sugiyama and U. Luxburg and I. Guyon and R. 
Garnett}, publisher = {Curran Associates, Inc.}, url = {https://proceedings.neurips.cc/paper/2016/file/9d2682367c3935defcb1f9e247a97c0d-Paper.pdf}, volume = {29}, } @Article{Saleiro2018, author = {Pedro Saleiro and Benedict Kuester and Abby Stevens and Ari Anisfeld and Loren Hinkson and Jesse London and Rayid Ghani}, date = {2018}, journaltitle = {CoRR}, title = {Aequitas: {A} Bias and Fairness Audit Toolkit}, eprint = {1811.05577}, eprinttype = {arXiv}, url = {http://arxiv.org/abs/1811.05577}, volume = {abs/1811.05577}, bibsource = {dblp computer science bibliography, https://dblp.org}, biburl = {https://dblp.org/rec/journals/corr/abs-1811-05577.bib}, groups = {Ethical AI}, timestamp = {Sat, 24 Nov 2018 17:52:00 +0100}, } @InProceedings{Bolukbasi2016, author = {Bolukbasi, Tolga and Chang, Kai-Wei and Zou, James and Saligrama, Venkatesh and Kalai, Adam}, booktitle = {Proceedings of the 30th International Conference on Neural Information Processing Systems}, date = {2016}, title = {Man is to Computer Programmer as Woman is to Homemaker? Debiasing Word Embeddings}, isbn = {9781510838819}, location = {Barcelona, Spain}, pages = {4356–4364}, publisher = {Curran Associates Inc.}, series = {NIPS'16}, abstract = {The blind application of machine learning runs the risk of amplifying biases present in data. Such a danger is facing us with word embedding, a popular framework to represent text data as vectors which has been used in many machine learning and natural language processing tasks. We show that even word embeddings trained on Google News articles exhibit female/male gender stereotypes to a disturbing extent. This raises concerns because their widespread use, as we describe, often tends to amplify these biases. Geometrically, gender bias is first shown to be captured by a direction in the word embedding. Second, gender neutral words are shown to be linearly separable from gender definition words in the word embedding. 
Using these properties, we provide a methodology for modifying an embedding to remove gender stereotypes, such as the association between the words receptionist and female, while maintaining desired associations such as between the words queen and female. Using crowd-worker evaluation as well as standard benchmarks, we empirically demonstrate that our algorithms significantly reduce gender bias in embeddings while preserving the its useful properties such as the ability to cluster related concepts and to solve analogy tasks. The resulting embeddings can be used in applications without amplifying gender bias.}, address = {Red Hook, NY, USA}, groups = {Ethical AI, NLP}, numpages = {9}, } @InProceedings{Kusner2017, author = {Kusner, Matt J and Loftus, Joshua and Russell, Chris and Silva, Ricardo}, booktitle = {Advances in Neural Information Processing Systems}, date = {2017}, title = {Counterfactual Fairness}, editor = {I. Guyon and U. V. Luxburg and S. Bengio and H. Wallach and R. Fergus and S. Vishwanathan and R. Garnett}, publisher = {Curran Associates, Inc.}, url = {https://proceedings.neurips.cc/paper/2017/file/a486cd07e4ac3d270571622f4f316ec5-Paper.pdf}, volume = {30}, groups = {Ethical AI}, } @InProceedings{Alshammari2017, author = {Alshammari, Majed and Simpson, Andrew}, booktitle = {Privacy Technologies and Policy}, date = {2017}, title = {Towards a Principled Approach for Engineering Privacy by Design}, editor = {Schweighofer, Erich and Leitold, Herbert and Mitrakas, Andreas and Rannenberg, Kai}, isbn = {978-3-319-67280-9}, location = {Cham}, number = {10518}, pages = {161--177}, publisher = {Springer International Publishing}, series = {Lecture Notes in Computer Science}, abstract = {Privacy by Design has emerged as a proactive approach for embedding privacy into the early stages of the design of information and communication technologies, but it is no `silver bullet'. 
Challenges involved in engineering Privacy by Design include a lack of holistic and systematic methodologies that address the complexity and variability of privacy issues and support the translation of its principles into engineering activities. A consequence is that its principles are given at a high level of abstraction without accompanying tools and guidelines to address these challenges. We analyse three privacy requirements engineering methods from which we derive a set of criteria that aid in identifying data-processing activities that may lead to privacy violations and harms and also aid in specifying appropriate design decisions. We also present principles for engineering Privacy by Design that can be developed upon these criteria. Based on these, we outline some preliminary thoughts on the form of a principled framework that addresses the plurality and contextuality of privacy issues and supports the translation of the principles of Privacy by Design into engineering activities.}, groups = {Ethical AI}, } @Article{Arnold2019, author = {Arnold, M. and Bellamy, R. K. E. and Hind, M. and Houde, S. and Mehta, S. and Mojsilović, A. and Nair, R. and Ramamurthy, K. Natesan and Olteanu, A. and Piorkowski, D. and Reimer, D. and Richards, J. and Tsay, J. and Varshney, K. 
R.}, date = {2019}, journaltitle = {IBM Journal of Research and Development}, title = {FactSheets: Increasing trust in AI services through supplier's declarations of conformity}, doi = {10.1147/JRD.2019.2942288}, number = {4/5}, pages = {6:1--6:13}, volume = {63}, groups = {Ethical AI}, } @Article{Bach2015, author = {Bach, Sebastian and Binder, Alexander and Montavon, Grégoire and Klauschen, Frederick and Müller, Klaus-Robert and Samek, Wojciech}, date = {2015}, journaltitle = {PLOS ONE}, title = {On Pixel-Wise Explanations for Non-Linear Classifier Decisions by Layer-Wise Relevance Propagation}, doi = {10.1371/journal.pone.0130140}, number = {7}, pages = {1--46}, url = {https://doi.org/10.1371/journal.pone.0130140}, volume = {10}, abstract = {Understanding and interpreting classification decisions of automated image classification systems is of high value in many applications, as it allows to verify the reasoning of the system and provides additional information to the human expert. Although machine learning methods are solving very successfully a plethora of tasks, they have in most cases the disadvantage of acting as a black box, not providing any information about what made them arrive at a particular decision. This work proposes a general solution to the problem of understanding classification decisions by pixel-wise decomposition of nonlinear classifiers. We introduce a methodology that allows to visualize the contributions of single pixels to predictions for kernel-based classifiers over Bag of Words features and for multilayered neural networks. These pixel contributions can be visualized as heatmaps and are provided to a human expert who can intuitively not only verify the validity of the classification decision, but also focus further analysis on regions of potential interest. 
We evaluate our method for classifiers trained on PASCAL VOC 2009 images, synthetic image data containing geometric shapes, the MNIST handwritten digits data set and for the pre-trained ImageNet model available as part of the Caffe open source package.}, publisher = {Public Library of Science}, } @Article{Cavoukian2010, author = {Cavoukian, Ann and Taylor, Scott and Abrams, Martin E.}, date = {2010}, journaltitle = {Identity in the Information Society}, title = {Privacy by Design: essential for organizational accountability and strong business practices}, doi = {10.1007/s12394-010-0053-z}, issn = {1876-0678}, number = {2}, pages = {405--413}, url = {https://doi.org/10.1007/s12394-010-0053-z}, volume = {3}, abstract = {An accountability-based privacy governance model is one where organizations are charged with societal objectives, such as using personal information in a manner that maintains individual autonomy and which protects individuals from social, financial and physical harms, while leaving the actual mechanisms for achieving those objectives to the organization. This paper discusses the essential elements of accountability identified by the Galway Accountability Project, with scholarship from the Centre for Information Policy Leadership at Hunton & Williams LLP. Conceptual Privacy by Design principles are offered as criteria for building privacy and accountability into organizational information management practices. The authors then provide an example of an organizational control process that uses the principles to implement the essential elements. Initially developed in the ‘90s to advance privacy-enhancing information and communication technologies, Dr. 
Ann Cavoukian has since expanded the application of Privacy by Design principles to include business processes.}, groups = {Ethical AI}, refid = {Cavoukian2010}, } @InProceedings{Datta2016, author = {Datta, Anupam and Sen, Shayak and Zick, Yair}, booktitle = {2016 IEEE Symposium on Security and Privacy (SP)}, date = {2016}, title = {Algorithmic Transparency via Quantitative Input Influence: Theory and Experiments with Learning Systems}, doi = {10.1109/SP.2016.42}, pages = {598-617}, } @Misc{Holland2018, author = {Sarah Holland and Ahmed Hosny and Sarah Newman and Joshua Joseph and Kasia Chmielinski}, date = {2018}, title = {The Dataset Nutrition Label: A Framework To Drive Higher Data Quality Standards}, eprint = {1805.03677}, eprintclass = {cs.DB}, eprinttype = {arXiv}, groups = {Ethical AI}, } @Article{Bender2018, author = {Bender, Emily M. and Friedman, Batya}, date = {2018}, journaltitle = {Transactions of the Association for Computational Linguistics}, title = {Data Statements for Natural Language Processing: Toward Mitigating System Bias and Enabling Better Science}, doi = {10.1162/tacl_a_00041}, pages = {587--604}, url = {https://www.aclweb.org/anthology/Q18-1041}, volume = {6}, abstract = {In this paper, we propose data statements as a design solution and professional practice for natural language processing technologists, in both research and development. Through the adoption and widespread use of data statements, the field can begin to address critical scientific and ethical issues that result from the use of data from certain populations in the development of technology for other populations. We present a form that data statements can take and explore the implications of adopting them as part of regular practice. 
We argue that data statements will help alleviate issues related to exclusion and bias in language technology, lead to better precision in claims about how natural language processing research can generalize and thus better engineering results, protect companies from public embarrassment, and ultimately lead to language technology that meets its users in their own preferred linguistic style and furthermore does not misrepresent them to others.}, groups = {Ethical AI}, } @Article{Shrikumar2017, author = {Avanti Shrikumar and Peyton Greenside and Anshul Kundaje}, date = {2017}, journaltitle = {CoRR}, title = {Learning Important Features Through Propagating Activation Differences}, eprint = {1704.02685}, eprintclass = {cs.CV}, eprinttype = {arXiv}, url = {http://arxiv.org/abs/1704.02685}, volume = {abs/1704.02685}, bibsource = {dblp computer science bibliography, https://dblp.org}, biburl = {https://dblp.org/rec/journals/corr/ShrikumarGK17.bib}, groups = {Ethical AI}, timestamp = {Mon, 13 Aug 2018 16:48:09 +0200}, } @Misc{Springenberg2015, author = {Jost Tobias Springenberg and Alexey Dosovitskiy and Thomas Brox and Martin Riedmiller}, date = {2015}, title = {Striving for Simplicity: The All Convolutional Net}, eprint = {1412.6806}, eprintclass = {cs.LG}, eprinttype = {arXiv}, } @Misc{Simonyan2014, author = {Karen Simonyan and Andrea Vedaldi and Andrew Zisserman}, date = {2014}, title = {Deep Inside Convolutional Networks: Visualising Image Classification Models and Saliency Maps}, eprint = {1312.6034}, eprintclass = {cs.CV}, eprinttype = {arXiv}, groups = {Ethical AI}, } @Article{Castro2009, author = {Javier Castro and Daniel Gómez and Juan Tejada}, date = {2009}, journaltitle = {Computers {\&} Operations Research}, title = {Polynomial calculation of the Shapley value based on sampling}, doi = {https://doi.org/10.1016/j.cor.2008.04.004}, issn = {0305-0548}, note = {Selected papers presented at the Tenth International Symposium on Locational Decisions (ISOLDE X)}, 
number = {5}, pages = {1726-1730}, url = {https://www.sciencedirect.com/science/article/pii/S0305054808000804}, volume = {36}, abstract = {In this paper we develop a polynomial method based on sampling theory that can be used to estimate the Shapley value (or any semivalue) for cooperative games. Besides analyzing the complexity problem, we examine some desirable statistical properties of the proposed approach and provide some computational results.}, groups = {Ethical AI, XAI}, keywords = {Game theory, Shapley value, Sampling algorithm}, } @Misc{Zeiler2013, author = {Zeiler, Matthew D. and Fergus, Rob}, date = {2013}, title = {Visualizing and Understanding Convolutional Networks}, eprint = {1311.2901}, eprintclass = {cs.CV}, eprinttype = {arXiv}, groups = {Ethical AI}, } @Article{Rojat2021, author = {Thomas Rojat and Rapha{\"{e}}l Puget and David Filliat and Javier Del Ser and Rodolphe Gelin and Natalia D{\'{\i}}az Rodr{\'{\i}}guez}, date = {2021}, journaltitle = {CoRR}, title = {Explainable Artificial Intelligence {(XAI)} on TimeSeries Data: {A} Survey}, eprint = {2104.00950}, eprinttype = {arXiv}, url = {https://arxiv.org/abs/2104.00950}, volume = {abs/2104.00950}, bibsource = {dblp computer science bibliography, https://dblp.org}, biburl = {https://dblp.org/rec/journals/corr/abs-2104-00950.bib}, groups = {Ethical AI}, timestamp = {Mon, 12 Apr 2021 16:14:56 +0200}, } @Misc{Dhurandhar2018, author = {Amit Dhurandhar and Pin-Yu Chen and Ronny Luss and Chun-Chen Tu and Paishun Ting and Karthikeyan Shanmugam and Payel Das}, date = {2018}, title = {Explanations based on the Missing: Towards Contrastive Explanations with Pertinent Negatives}, eprint = {1802.07623}, eprintclass = {cs.AI}, eprinttype = {arXiv}, groups = {Ethical AI, XAI}, } @Software{VanLooveren2019, author = {Van Looveren, Arnaud and Vacanti, Giovanni and Klaise, Janis and Coca, Alexandru and Cobb, Oliver}, date = {2021-06-07}, title = {Alibi Detect: Algorithms for outlier, adversarial and drift 
detection}, url = {https://github.com/SeldonIO/alibi-detect}, version = {0.7.0}, groups = {Ethical AI}, } @Misc{McMahan2019, author = {H. Brendan McMahan and Galen Andrew and Ulfar Erlingsson and Steve Chien and Ilya Mironov and Nicolas Papernot and Peter Kairouz}, date = {2019}, title = {A General Approach to Adding Differential Privacy to Iterative Training Procedures}, eprint = {1812.06210}, eprintclass = {cs.LG}, eprinttype = {arXiv}, groups = {Ethical AI}, } @Misc{Zafar2017, author = {Muhammad Bilal Zafar and Isabel Valera and Manuel Gomez Rodriguez and Krishna P. Gummadi}, date = {2017}, title = {Fairness Constraints: Mechanisms for Fair Classification}, eprint = {1507.05259}, eprintclass = {stat.ML}, eprinttype = {arXiv}, groups = {Ethical AI}, } @InProceedings{Zafar2017a, author = {Zafar, Muhammad Bilal and Valera, Isabel and Gomez Rodriguez, Manuel and Gummadi, Krishna P.}, date = {2017-04}, booktitle = {Proceedings of the 26th International Conference on World Wide Web}, title = {Fairness Beyond Disparate Treatment {\&} Disparate Impact}, doi = {10.1145/3038912.3052660}, url = {http://dx.doi.org/10.1145/3038912.3052660}, groups = {Ethical AI}, isbn = {9781450349130}, publisher = {International World Wide Web Conferences Steering Committee}, } @Misc{Zafar2017b, author = {Muhammad Bilal Zafar and Isabel Valera and Manuel Gomez Rodriguez and Krishna P. Gummadi and Adrian Weller}, date = {2017}, title = {From Parity to Preference-based Notions of Fairness in Classification}, eprint = {1707.00010}, eprintclass = {stat.ML}, eprinttype = {arXiv}, groups = {Ethical AI}, } @Misc{Arya2019, author = {Vijay Arya and Rachel K. E. Bellamy and Pin-Yu Chen and Amit Dhurandhar and Michael Hind and Samuel C. Hoffman and Stephanie Houde and Q. Vera Liao and Ronny Luss and Aleksandra Mojsilović and Sami Mourad and Pablo Pedemonte and Ramya Raghavendra and John Richards and Prasanna Sattigeri and Karthikeyan Shanmugam and Moninder Singh and Kush R. 
Varshney and Dennis Wei and Yunfeng Zhang}, date = {2019}, title = {One Explanation Does Not Fit All: A Toolkit and Taxonomy of AI Explainability Techniques}, eprint = {1909.03012}, eprintclass = {cs.AI}, eprinttype = {arXiv}, groups = {Ethical AI}, } @Online{SAP2018, author = {Machmeier, Corinna}, date = {2018-09-18}, title = {SAP’s Guiding Principles for Artificial Intelligence}, url = {https://news.sap.com/2018/09/sap-guiding-principles-for-artificial-intelligence/}, urldate = {2021-08-04}, abstract = {Recognizing the significant impact of AI on people, our customers, and wider society, SAP designed these guiding principles to steer the development and deployment of our AI software to help the world run better and improve people’s lives. For us, these guidelines are a commitment to move beyond what is legally required and to begin a deep and continuous engagement with the wider ethical and socioeconomic challenges of AI. We look forward to expanding our conversations with customers, partners, employees, legislative bodies, and civil society; and to making our guiding principles an evolving reflection on these discussions and the ever-changing technological landscape}, groups = {Ethical AI}, } @Online{AlgorithmWatch2020, author = {{AlgorithmWatch}}, date = {2020-04-28}, title = {In the realm of paper tigers – exploring the failings of AI ethics guidelines}, url = {https://algorithmwatch.org/en/ai-ethics-guidelines-inventory-upgrade-2020/}, urldate = {2021-08-04}, groups = {Ethical AI}, } @Online{DEDA, author = {{Utrecht Data School}}, title = {De Ethische Data Assistent (DEDA)}, url = {https://dataschool.nl/deda/}, urldate = {2021-08-05}, groups = {Ethical AI}, } @Book{Molnar2019, author = {Christoph Molnar}, date = {2019}, title = {Interpretable Machine Learning}, note = {\url{https://christophm.github.io/interpretable-ml-book/}}, subtitle = {A Guide for Making Black Box Models Explainable}, } @TechReport{HLEG-AI2019-definition, author = {{High-Level Expert 
Group on Artificial Intelligence}}, date = {2019-04-08}, title = {A Definition of AI: Main Capabilities and Disciplines}, url = {https://digital-strategy.ec.europa.eu/en/library/definition-artificial-intelligence-main-capabilities-and-scientific-disciplines}, groups = {Ethical AI}, } @InProceedings{Westermann2019, author = {Westermann, Hannes and Walker, Vern R. and Ashley, Kevin D. and Benyekhlef, Karim}, booktitle = {Proceedings of the Seventeenth International Conference on Artificial Intelligence and Law}, date = {2019}, title = {Using Factors to Predict and Analyze Landlord-Tenant Decisions to Increase Access to Justice}, doi = {10.1145/3322640.3326732}, isbn = {9781450367547}, location = {Montreal, QC, Canada}, pages = {133–142}, publisher = {Association for Computing Machinery}, series = {ICAIL '19}, url = {https://doi.org/10.1145/3322640.3326732}, abstract = {This paper reports results from the JusticeBot Project, in which we analyzed two datasets drawn from 1 million written decisions from the R\'{e}gie du logement du Qu\'{e}bec. Using an empirical methodology, we identified 44 factors that occur in disputes where the tenant seeks a remedy due to problems with the rented apartment, such as the existence of bedbugs, high noise levels or problems with insulation. In the first dataset, we used these factors to tag 149 cases. We found a correlation between how many factors are found in a case and how likely the judge is to award rent reduction to a tenant; the amount of reduction was also higher in cases with more factors. For the second dataset (39 cases with bedbugs, drawn from the first dataset), we developed in-depth factors and used them to tag the cases. We found a number of plausible correlations, such as the average damage award being higher in cases with infestations of high intensity. Finally, in predicting the decision of the judge using the factors present in a case, the results were similar to the baselines or slightly above. 
We discuss the possible reasons for this, and why the approach shows promise in providing useful information to lay people and lawyers.}, address = {New York, NY, USA}, groups = {AI and Law, IRIS23}, keywords = {Factors, Machine Learning, Case Prediction, Access to Justice, Chatbot}, numpages = {10}, } @InProceedings{Savelka2021, author = {Savelka, Jaromir and Westermann, Hannes and Benyekhlef, Karim and Alexander, Charlotte S. and Grant, Jayla C. and Amariles, David Restrepo and Hamdani, Rajaa El and Mee\`{u}s, S\'{e}bastien and Troussel, Aurore and Araszkiewicz, Micha\l{} and Ashley, Kevin D. and Ashley, Alexandra and Branting, Karl and Falduti, Mattia and Grabmair, Matthias and Hara\v{s}ta, Jakub and Novotn\'{a}, Tereza and Tippett, Elizabeth and Johnson, Shiwanni}, booktitle = {Proceedings of the Eighteenth International Conference on Artificial Intelligence and Law}, date = {2021}, title = {Lex Rosetta: Transfer of Predictive Models across Languages, Jurisdictions, and Legal Domains}, doi = {10.1145/3462757.3466149}, isbn = {9781450385268}, location = {S\~{a}o Paulo, Brazil}, pages = {129–138}, publisher = {Association for Computing Machinery}, series = {ICAIL '21}, url = {https://doi.org/10.1145/3462757.3466149}, abstract = {In this paper, we examine the use of multi-lingual sentence embeddings to transfer predictive models for functional segmentation of adjudicatory decisions across jurisdictions, legal systems (common and civil law), languages, and domains (i.e. contexts). Mechanisms for utilizing linguistic resources outside of their original context have significant potential benefits in AI & Law because differences between legal systems, languages, or traditions often block wider adoption of research outcomes. We analyze the use of Language-Agnostic Sentence Representations in sequence labeling models using Gated Recurrent Units (GRUs) that are transferable across languages. 
To investigate transfer between different contexts we developed an annotation scheme for functional segmentation of adjudicatory decisions. We found that models generalize beyond the contexts on which they were trained (e.g., a model trained on administrative decisions from the US can be applied to criminal law decisions from Italy). Further, we found that training the models on multiple contexts increases robustness and improves overall performance when evaluating on previously unseen contexts. Finally, we found that pooling the training data from all the contexts enhances the models' in-context performance.}, address = {New York, NY, USA}, groups = {AI and Law, NLP}, keywords = {domain adaptation, annotation, transfer learning, document segmentation, multi-lingual sentence embeddings, adjudicatory decisions}, numpages = {10}, } @InBook{Prakken1997, author = {Prakken, H. and Sartor, G.}, booktitle = {Logical Models of Legal Argumentation}, date = {1997}, title = {A Dialectical Model of Assessing Conflicting Arguments in Legal Reasoning}, doi = {10.1007/978-94-011-5668-4_6}, editor = {Prakken, Henry and Sartor, Giovanni}, isbn = {978-94-011-5668-4}, location = {Dordrecht}, pages = {175--211}, publisher = {Springer Netherlands}, url = {https://doi.org/10.1007/978-94-011-5668-4_6}, abstract = {Inspired by legal reasoning, this paper presents a formal framework for assessing conflicting arguments. Its use is illustrated with applications to realistic legal examples, and the potential for implementation is discussed. The framework has the form of a logical system for defeasible argumentation. Its language, which is of a logic-programming-like nature, has both weak and explicit negation, and conflicts between arguments are decided with the help of priorities on the rules. An important feature of the system is that these priorities are not fixed, but are themselves defeasibly derived as conclusions within the system. 
Thus debates on the choice between conflicting arguments can also be modelled.}, groups = {AI and Law, Argument Mining}, } @Article{Atkinson2007, author = {Katie Atkinson and Trevor Bench-Capon}, date = {2007}, journaltitle = {Artificial Intelligence}, title = {Practical reasoning as presumptive argumentation using action based alternating transition systems}, doi = {10.1016/j.artint.2007.04.009}, issn = {0004-3702}, note = {Argumentation in Artificial Intelligence}, number = {10}, pages = {855--874}, url = {https://www.sciencedirect.com/science/article/pii/S0004370207000689}, volume = {171}, abstract = {In this paper we describe an approach to practical reasoning, reasoning about what it is best for a particular agent to do in a given situation, based on presumptive justifications of action through the instantiation of an argument scheme, which is then subject to examination through a series of critical questions. We identify three particular aspects of practical reasoning which distinguish it from theoretical reasoning. We next provide an argument scheme and an associated set of critical questions which is able to capture these features. In order that both the argument scheme and the critical questions can be given precise interpretations we use the semantic structure of an Action-Based Alternating Transition System as the basis for their definition. We then work through a detailed example to show how this approach to practical reasoning can be applied to a problem solving situation, and briefly describe some other previous applications of the general approach. In a second example we relate our account to the social laws paradigm for co-ordinating multi-agent systems. 
The contribution of the paper is to provide firm foundations for an approach to practical reasoning based on presumptive argument in terms of a well-known model for representing the effects of actions of a group of agents.}, groups = {AI and Law}, keywords = {Argumentation, Practical reasoning, Value based argumentation, Argument schemes, AATS}, } @InBook{Wyner2010, author = {Wyner, Adam and Mochales-Palau, Raquel and Moens, Marie-Francine and Milward, David}, booktitle = {Semantic Processing of Legal Texts: Where the Language of Law Meets the Law of Language}, date = {2010}, title = {Approaches to Text Mining Arguments from Legal Cases}, doi = {10.1007/978-3-642-12837-0_4}, editor = {Francesconi, Enrico and Montemagni, Simonetta and Peters, Wim and Tiscornia, Daniela}, isbn = {978-3-642-12837-0}, location = {Berlin, Heidelberg}, pages = {60--79}, publisher = {Springer Berlin Heidelberg}, url = {https://doi.org/10.1007/978-3-642-12837-0_4}, abstract = {This paper describes recent approaches using text-mining to automatically profile and extract arguments from legal cases. We outline some of the background context and motivations. We then turn to consider issues related to the construction and composition of corpora of legal cases. We show how a Context-Free Grammar can be used to extract arguments, and how ontologies and Natural Language Processing can identify complex information such as case factors and participant roles. Together the results bring us closer to automatic identification of legal arguments.}, groups = {AI and Law, Argument Mining, IRIS23}, } @TechReport{MeyerVitali2019, author = {Meyer-Vitali, A. P. and Bakker, R. M. and van Bekkum, M. A. and de Boer, M. and Burghouts, G. and van Diggelen, J. and Dijk, J. and Grappiolo, C. and de Greeff, J. and Huizing, A. 
and Raaijmakers, S.}, date = {2019}, institution = {TNO}, title = {Hybrid AI: White Paper}, type = {resreport}, url = {https://repository.tno.nl//islandora/object/uuid:510e0747-30f4-4af4-9447-9330b7bd4b47}, } @Article{Stepin2021, author = {Stepin, Ilia and Alonso, Jose M. and Catala, Alejandro and Pereira-Fariña, Martín}, date = {2021}, journaltitle = {IEEE Access}, title = {A Survey of Contrastive and Counterfactual Explanation Generation Methods for Explainable Artificial Intelligence}, doi = {10.1109/ACCESS.2021.3051315}, issn = {2169-3536}, pages = {11974-12001}, volume = {9}, abstract = {A number of algorithms in the field of artificial intelligence offer poorly interpretable decisions. To disclose the reasoning behind such algorithms, their output can be explained by means of so-called evidence-based (or factual) explanations. Alternatively, contrastive and counterfactual explanations justify why the output of the algorithms is not any different and how it could be changed, respectively. It is of crucial importance to bridge the gap between theoretical approaches to contrastive and counterfactual explanation and the corresponding computational frameworks. In this work we conduct a systematic literature review which provides readers with a thorough and reproducible analysis of the interdisciplinary research field under study. We first examine theoretical foundations of contrastive and counterfactual accounts of explanation. Then, we report the state-of-the-art computational frameworks for contrastive and counterfactual explanation generation. In addition, we analyze how grounded such frameworks are on the insights from the inspected theoretical approaches. As a result, we highlight a variety of properties of the approaches under study and reveal a number of shortcomings thereof. 
Moreover, we define a taxonomy regarding both theoretical and practical approaches to contrastive and counterfactual explanation.}, groups = {Ethical AI}, } @Article{Falakmasir2017, author = {Falakmasir, Mohammad H. and Ashley, Kevin D.}, date = {2017}, journaltitle = {Legal Knowledge and Information Systems}, title = {Utilizing Vector Space Models for Identifying Legal Factors from Text}, doi = {10.3233/978-1-61499-838-9-183}, pages = {183--192}, volume = {302}, groups = {AI and Law, IRIS23}, } @InProceedings{Panagis2016, author = {Panagis, Yannis and Christensen, Martin Lolle and Šadl, Urška}, booktitle = {Legal Knowledge and Information Systems}, date = {2016}, title = {On Top of Topics: Leveraging Topic Modeling to Study the Dynamic Case-Law of International Courts}, doi = {10.3233/978-1-61499-726-9-161}, editor = {Bex, Floris and Villata, Serena}, pages = {161--166}, publisher = {IOS Press}, url = {https://ebooks.iospress.nl/publication/45751}, volume = {294}, groups = {AI and Law}, } @InProceedings{Craandijk2020, author = {Craandijk, Dennis and Bex, Floris}, booktitle = {Proceedings of the Twenty-Ninth International Joint Conference on Artificial Intelligence, {IJCAI-20}}, date = {2020-07}, title = {Deep Learning for Abstract Argumentation Semantics}, doi = {10.24963/ijcai.2020/231}, editor = {Bessiere, Christian}, pages = {1667--1673}, publisher = {International Joint Conferences on Artificial Intelligence Organization}, groups = {AI and Law}, } @Article{Nanda2019, author = {Nanda, Rohan and Siragusa, Giovanni and Di Caro, Luigi and Boella, Guido and Grossio, Lorenzo and Gerbaudo, Marco and Costamagna, Francesco}, date = {2019}, journaltitle = {Artificial Intelligence and Law}, title = {Unsupervised and supervised text similarity systems for automated identification of national implementing measures of European directives}, doi = {10.1007/s10506-018-9236-y}, issn = {1572-8382}, number = {2}, pages = {199--225}, url = {https://doi.org/10.1007/s10506-018-9236-y}, volume = {27}, 
abstract = {The automated identification of national implementations (NIMs) of European directives by text similarity techniques has shown promising preliminary results. Previous works have proposed and utilized unsupervised lexical and semantic similarity techniques based on vector space models, latent semantic analysis and topic models. However, these techniques were evaluated on a small multilingual corpus of directives and NIMs. In this paper, we utilize word and paragraph embedding models learned by shallow neural networks from a multilingual legal corpus of European directives and national legislation (from Ireland, Luxembourg and Italy) to develop unsupervised semantic similarity systems to identify transpositions. We evaluate these models and compare their results with the previous unsupervised methods on a multilingual test corpus of 43 Directives and their corresponding NIMs. We also develop supervised machine learning models to identify transpositions and compare their performance with different feature sets.}, groups = {AI and Law, NLP}, refid = {Nanda2019}, } @InProceedings{AlAbdulkarim2016, author = {Al-Abdulkarim, Latifa and Atkinson, Katie and Bench-Capon, Trevor}, booktitle = {Legal Knowledge and Information Systems}, date = {2016}, title = {Statement Types in Legal Argument}, doi = {10.3233/978-1-61499-726-9-3}, editor = {Bex, Floris and Villata, Serena}, pages = {3-12}, publisher = {IOS Press}, url = {https://ebooks.iospress.nl/publication/45751}, volume = {294}, groups = {AI and Law, NLP, IRIS23}, } @Article{Medvedeva2020, author = {Medvedeva, Masha and Vols, Michel and Wieling, Martijn}, date = {2020}, journaltitle = {Artificial Intelligence and Law}, title = {Using machine learning to predict decisions of the European Court of Human Rights}, doi = {10.1007/s10506-019-09255-y}, issn = {1572-8382}, number = {2}, pages = {237--266}, url = {https://doi.org/10.1007/s10506-019-09255-y}, volume = {28}, abstract = {When courts started publishing 
judgements, big data analysis (i.e. large-scale statistical analysis of case law and machine learning) within the legal domain became possible. By taking data from the European Court of Human Rights as an example, we investigate how natural language processing tools can be used to analyse texts of the court proceedings in order to automatically predict (future) judicial decisions. With an average accuracy of 75% in predicting the violation of 9 articles of the European Convention on Human Rights our (relatively simple) approach highlights the potential of machine learning approaches in the legal domain. We show, however, that predicting decisions for future cases based on the cases from the past negatively impacts performance (average accuracy range from 58 to 68%). Furthermore, we demonstrate that we can achieve a relatively high classification performance (average accuracy of 65%) when predicting outcomes based only on the surnames of the judges that try the case.}, groups = {AI and Law}, refid = {Medvedeva2020}, } @Article{Aubaid2018, author = {Aubaid, Asmaa and Mishra, Alok}, date = {2018-11}, title = {Text Classification Using Word Embedding in Rule-Based Methodologies: A Systematic Mapping}, doi = {10.18421/TEM74-31}, pages = {902-914}, volume = {7}, groups = {AI and Law, NLP}, } @InProceedings{Ferre2018, author = {Ferr{\'e}, Arnaud and Del{\'e}ger, Louise and Zweigenbaum, Pierre and N{\'e}dellec, Claire}, booktitle = {Proceedings of the Eleventh International Conference on Language Resources and Evaluation ({LREC} 2018)}, date = {2018-05}, title = {Combining rule-based and embedding-based approaches to normalize textual entities with an ontology}, location = {Miyazaki, Japan}, publisher = {European Language Resources Association (ELRA)}, url = {https://aclanthology.org/L18-1543}, groups = {AI and Law, NLP}, } @InProceedings{Jang2019, author = {Kyoungrok Jang and Sung-Hyon Myaeng and Hee-Cheol Seo and Joohee Park}, date = {2019}, title = {Selection and 
Interpretation of Embedding Subspace for Query Classification}, url = {https://ears2019.github.io/Jang-EARS2019.pdf}, groups = {AI and Law, NLP}, } @InProceedings{Mandal2017, author = {Mandal, Arpan and Chaki, Raktim and Saha, Sarbajit and Ghosh, Kripabandhu and Pal, Arindam and Ghosh, Saptarshi}, booktitle = {Proceedings of the 10th Annual ACM India Compute Conference}, date = {2017}, title = {Measuring Similarity among Legal Court Case Documents}, doi = {10.1145/3140107.3140119}, isbn = {9781450353236}, location = {Bhopal, India}, pages = {1–9}, publisher = {Association for Computing Machinery}, series = {Compute '17}, url = {https://doi.org/10.1145/3140107.3140119}, abstract = {Computing the similarity between two legal documents is an important challenge in the Legal Information Retrieval domain. Efficient calculation of this similarity has useful applications in various tasks such as identifying relevant prior cases for a given case document. Prior works have proposed network-based and text-based methods for measuring similarity between legal documents. However, there are certain limitations in the prior methods. Network-based measures are not always meaningfully applicable since legal citation networks are usually very sparse. On the other hand, only primitive text-based similarity measures, such as TF-IDF based approaches, have been tried till date. In this work, we focus on improving text-based methodologies for computing the similarity between two legal documents. In addition to TF-IDF based measures, we use advanced similarity measures (such as topic modeling) and neural network models (such as word embeddings and document embeddings). We perform extensive experiments on a large dataset of Indian Supreme Court cases, and compare among various methodologies for measuring the textual similarity of legal documents. Our experiments show that embedding based approaches perform better than other approaches. 
We also demonstrate that the proposed embedding-based methodologies significantly outperforms a baseline hybrid methodology involving both network-based and text-based similarity.}, address = {New York, NY, USA}, groups = {AI and Law, NLP}, keywords = {Doc2vec, Court Cases, Legal Information Retrieval, Topic Modeling, Legal Document Similarity, Word2vec, Word Embeddings}, numpages = {9}, } @InProceedings{Daniels1997, author = {Daniels, Jody J. and Rissland, Edwina L.}, booktitle = {Proceedings of the 6th International Conference on Artificial Intelligence and Law}, date = {1997}, title = {Finding Legally Relevant Passages in Case Opinions}, doi = {10.1145/261618.261627}, isbn = {0897919246}, location = {Melbourne, Australia}, pages = {39–46}, publisher = {Association for Computing Machinery}, series = {ICAIL '97}, url = {https://doi.org/10.1145/261618.261627}, address = {New York, NY, USA}, groups = {AI and Law}, numpages = {8}, } @InProceedings{Brueninghaus2005, author = {Br\"{u}ninghaus, Stefanie and Ashley, Kevin D.}, booktitle = {Proceedings of the 10th International Conference on Artificial Intelligence and Law}, date = {2005}, title = {Generating Legal Arguments and Predictions from Case Texts}, doi = {10.1145/1165485.1165497}, isbn = {1595930817}, location = {Bologna, Italy}, pages = {65–74}, publisher = {Association for Computing Machinery}, series = {ICAIL '05}, url = {https://doi.org/10.1145/1165485.1165497}, abstract = {In this paper, we present methods for automatically finding abstract, legally relevant concepts in case texts and demonstrate how they can be used to make predictions of case outcomes, given the texts as inputs.In a set of experiments to test these methods, we focus on the open question of how best to represent legal text for finding abstract concepts. 
We compare different ways of representing legal case texts in order to test whether adding domain knowledge and some linguistic information can improve performance.We found that replacing individual names by roles in the case texts led to better indexing, and that adding certain syntactic and semantic information, in the form of Propositional Patterns that capture a sense of "who did what", led to better prediction. Our experiments also showed that of three learning algorithms, Nearest Neighbor worked best in learning how to identify indexing concepts in texts.In these experiments, we introduced a prototype system that can reason with text cases; it analyzes a case, predicts its outcome considering other cases in the database, and explains the prediction, all starting with a textual description of the case's facts as input.}, address = {New York, NY, USA}, groups = {AI and Law}, numpages = {10}, } @PhdThesis{VanOpijnen2014, author = {Van Opijnen, Marc}, date = {2014-02-20}, institution = {Leibniz Center for Law}, title = {Op en in het web: Hoe de toegankelijkheid van rechterlijke uitspraken kan worden verbeterd}, url = {https://dare.uva.nl/search?identifier=1a7c2ac9-2226-4088-a4bf-a7254153e950}, abstract = {Dankzij internet kan de rechterlijke macht tegenwoordig zelf belangwekkende uitspraken publiceren. Dit proefschrift behandelt enkele van de hiermee samenhangende vraagstukken. Allereerst het juridisch kader: bestaat er een plicht tot publicatie, en strekt die zich uit tot alle uitspraken? Welke eisen worden gesteld aan selectiecriteria, anonimisering, hergebruik en de toegankelijkheid van jurisprudentiedatabanken? Vervolgens wordt onderzocht in hoeverre de Nederlandse praktijk, zoals die zich vooral op Rechtspraak.nl heeft ontwikkeld, aan dit juridisch kader voldoet. Publieke uitsprakendatabanken bevatten inmiddels honderdduizenden uitspraken, maar de zoekmogelijkheden zijn bij deze groei achtergebleven. 
Om hierin verbetering te brengen worden drie onderwerpen nader uitgediept. Allereerst de noodzaak van unieke en persistente identificatie en citatie van rechterlijke uitspraken. Als oplossing voor de zich hierbij voordoende moeilijkheden is in deze studie de European Case Law Identifier (ECLI) ontwikkeld. Het tweede thema ziet op de behoefte van de juridische informatiezoeker om bronnen te vinden die gerelateerd zijn aan bepaalde jurisprudentie of regelgeving. Om de hiermee bestaande problemen op te lossen is software ontwikkeld waarmee in tekstbestanden voorkomende jurisprudentiecitaties en wetsverwijzingen worden herkend en omgezet in gestructureerde links. Met behulp hiervan kunnen rechterlijke uitspraken in hun juridische context worden doorzocht en beschouwd. Met behulp van deze aldus gecreëerde linked data is ten slotte een statistisch model ontwikkeld - het Model for Automated Rating of Case law (MARC) - waarmee uitspraken kunnen worden ingedeeld in een vijftal juridische-relevantieklassen. Met deze ‘MARC-indicator’ - die weerspiegelt hoe belangrijk ‘de juridische crowd’ een uitspraak vindt - kunnen zoekresultaten beter worden gefilterd en beoordeeld.}, groups = {AI and Law, IRIS23}, } @Article{Atkinson2017, author = {Atkinson, Katie and Baroni, Pietro and Giacomin, Massimiliano and Hunter, Anthony and Prakken, Henry and Reed, Chris and Simari, Guillermo and Thimm, Matthias and Villata, Serena}, date = {2017-10}, journaltitle = {AI Magazine}, title = {Towards Artificial Argumentation}, doi = {10.1609/aimag.v38i3.2704}, number = {3}, pages = {25-36}, url = {https://ojs.aaai.org/index.php/aimagazine/article/view/2704}, volume = {38}, abstractnote = {The field of computational models of argument is emerging as an important aspect of artificial intelligence research. 
The reason for this is based on the recognition that if we are to develop robust intelligent systems, then it is imperative that they can handle incomplete and inconsistent information in a way that somehow emulates the way humans tackle such a complex task. And one of the key ways that humans do this is to use argumentation either internally, by evaluating arguments and counterarguments‚ or externally, by for instance entering into a discussion or debate where arguments are exchanged. As we report in this review, recent developments in the field are leading to technology for artificial argumentation, in the legal, medical, and e-government domains, and interesting tools for argument mining, for debating technologies, and for argumentation solvers are emerging.}, groups = {AI and Law}, } @Article{Prakken2015, author = {Henry Prakken and Giovanni Sartor}, date = {2015}, journaltitle = {Artificial Intelligence}, title = {Law and logic: A review from an argumentation perspective}, doi = {10.1016/j.artint.2015.06.005}, issn = {0004-3702}, pages = {214--245}, url = {https://www.sciencedirect.com/science/article/pii/S0004370215000910}, volume = {227}, abstract = {This article reviews legal applications of logic, with a particularly marked concern for logical models of legal argument. We argue that the law is a rich test bed and important application field for logic-based AI research. First applications of logic to the representation of legal regulations are reviewed, where the main emphasis is on representation and where the legal conclusions follow from that representation as a matter of deduction. This includes the representation of deontic concepts, normative positions, legal ontologies, time and change. Then legal applications of logic are reviewed where legal rules are not just applied but are the object of reasoning and discourse.
This includes arguing about applying statutory rules in unforeseen circumstances, interpretative reasoning in light of the facts of a case, and evidential reasoning to establish the facts of a case. This part of the review has special emphasis on argumentation-based approaches. This also holds for the final part, which discusses formal models of legal procedure and of multi-agent interaction in legal proceedings. The review concludes with identifying some of the main open research problems. The review shows that modern legal applications of logic confirm the recent trend of widening the scope of logic from deduction to information flow, argumentation and interaction.}, keywords = {Legal logic, Legal reasoning, Argumentation, AI & law}, } @Article{Mochales2011, author = {Mochales, Raquel and Moens, Marie-Francine}, date = {2011}, journaltitle = {Artificial Intelligence and Law}, title = {Argumentation mining}, doi = {10.1007/s10506-010-9104-x}, issn = {1572-8382}, number = {1}, pages = {1--22}, url = {https://doi.org/10.1007/s10506-010-9104-x}, volume = {19}, abstract = {Argumentation mining aims to automatically detect, classify and structure argumentation in text. Therefore, argumentation mining is an important part of a complete argumentation analyisis, i.e. understanding the content of serial arguments, their linguistic structure, the relationship between the preceding and following arguments, recognizing the underlying conceptual beliefs, and understanding within the comprehensive coherence of the specific topic. We present different methods to aid argumentation mining, starting with plain argumentation detection and moving forward to a more structural analysis of the detected argumentation. Different state-of-the-art techniques on machine learning and context free grammars are applied to solve the challenges of argumentation mining. 
We also highlight fundamental questions found during our research and analyse different issues for future research on argumentation mining.}, groups = {AI and Law, Argument Mining}, refid = {Mochales2011}, } @Article{Lippi2016, author = {Lippi, Marco and Torroni, Paolo}, date = {2016-03}, journaltitle = {ACM Trans. Internet Technol.}, title = {Argumentation Mining: State of the Art and Emerging Trends}, doi = {10.1145/2850417}, issn = {1533-5399}, number = {2}, url = {https://doi.org/10.1145/2850417}, volume = {16}, abstract = {Argumentation mining aims at automatically extracting structured arguments from unstructured textual documents. It has recently become a hot topic also due to its potential in processing information originating from the Web, and in particular from social media, in innovative ways. Recent advances in machine learning methods promise to enable breakthrough applications to social and economic sciences, policy making, and information technology: something that only a few years ago was unthinkable. 
In this survey article, we introduce argumentation models and methods, review existing systems and applications, and discuss challenges and perspectives of this exciting new research area.}, articleno = {10}, groups = {AI and Law, Argument Mining}, issue_date = {April 2016}, keywords = {computational linguistics, social media, machine learning, artificial intelligence, Argumentation mining, knowledge representation}, location = {New York, NY, USA}, numpages = {25}, publisher = {Association for Computing Machinery}, } @Article{Knott1994, author = {Knott, Alistair and Dale, Robert}, date = {1994}, journaltitle = {Discourse Processes}, title = {Using linguistic phenomena to motivate a set of coherence relations}, doi = {10.1080/01638539409544883}, number = {1}, pages = {35-62}, url = {https://doi.org/10.1080/01638539409544883}, volume = {18}, abstract = {The notion that a text is coherent in virtue of the “relations” that hold between the elements of that text has become fairly common currency, both in the study of discourse coherence and in the field of text generation. The set of relations proposed in Rhetorical Structure Theory (Mann \& Thompson, 1987) has had particular influence in both of these fields. But the widespread adoption of “relational” terminology belies a certain amount of confusion about the relational constructs themselves: No two theorists use exactly the same set of relations, and often there seems no motivation for introducing a new relation beyond considerations of descriptive adequacy or engineering expedience. To alleviate this confusion, it is useful to think of relations not just as constructs with descriptive or operational utility, but as constructs with psychological reality, modelling real cognitive processes in readers and writers. This conception of coherence relations suggests a methodology for delineating a set of relations to work with. 
Evidence that a relation is actually used by speakers of a language can be obtained by looking at the language itself—in particular by looking at the range of cue phrases the language provides for signalling relations. It is to be expected that simple methods will have evolved for signalling the relations we find most useful. This article presents a bottom‐up methodology for determining a set of relations on the basis of the cue phrases that can be used to mark them in text. This methodology has the advantage of starting from concrete linguistic data, rather than from controversial assumptions about notions like “intention” and “semantics.”}, groups = {Argument Mining}, publisher = {Routledge}, } @Article{Moens2018, author = {Moens, Marie-Francine}, date = {2018}, journaltitle = {Argument & Computation}, title = {Argumentation mining: How can a machine acquire common sense and world knowledge?}, doi = {10.3233/AAC-170025}, issn = {1946-2174}, number = {1}, pages = {1--14}, volume = {9}, abstract = {Argumentation mining is an advanced form of human language understanding by the machine. This is a challenging task for a machine. When sufficient explicit discourse markers are present in the language utterances, the argumentation can be interpreted by the machine with an acceptable degree of accuracy. However, in many real settings, the mining task is difficult due to the lack or ambiguity of the discourse markers, and the fact that a substantial amount of knowledge needed for the correct recognition of the argumentation, its composing elements and their relationships is not explicitly present in the text, but makes up the background knowledge that humans possess when interpreting language. In this article1 1 The article is the written version of a keynote lecture given at COMMA 2016, the 6th International Conference on Computational Models of Argument, on September 13, 2016 at Potsdam University, Germany. 
we focus on how the machine can automatically acquire the needed common sense and world knowledge. As very few research has been done in this respect, many of the ideas proposed in this article are tentative, but start being researched. The article is the written version of a keynote lecture given at COMMA 2016, the 6th International Conference on Computational Models of Argument, on September 13, 2016 at Potsdam University, Germany. We give an overview of the latest methods for human language understanding that map language to a formal knowledge representation that facilitates other tasks (for instance, a representation that is used to visualize the argumentation or that is easily shared in a decision or argumentation support system). Most current systems are trained on texts that are manually annotated. Then we go deeper into the new field of representation learning that nowadays is very much studied in computational linguistics. This field investigates methods for representing language as statistical concepts or as vectors, allowing straightforward methods of compositionality. The methods often use deep learning and its underlying neural network technologies to learn concepts from large text collections in an unsupervised way (i.e., without the need for manual annotations). We show how these methods can help the argumentation mining process, but also demonstrate that these methods need further research to automatically acquire the necessary background knowledge and more specifically common sense and world knowledge. 
We propose a number of ways to improve the learning of common sense and world knowledge by exploiting textual and visual data, and touch upon how we can integrate the learned knowledge in the argumentation mining process.}, groups = {Argument Mining}, keywords = {Natural language understanding, representation learning, argumentative text processing}, publisher = {IOS Press}, } @InProceedings{Lippi2015, author = {Lippi, Marco and Torroni, Paolo}, booktitle = {Proceedings of the 24th International Conference on Artificial Intelligence}, date = {2015}, title = {Context-Independent Claim Detection for Argument Mining}, isbn = {9781577357384}, location = {Buenos Aires, Argentina}, pages = {185–191}, publisher = {AAAI Press}, series = {IJCAI'15}, abstract = {Argumentation mining aims to automatically identify structured argument data from unstructured natural language text. This challenging, multi-faceted task is recently gaining a growing attention, especially due to its many potential applications. One particularly important aspect of argumentation mining is claim identification. Most of the current approaches are engineered to address specific domains. However, argumentative sentences are often characterized by common rhetorical structures, independently of the domain. We thus propose a method that exploits structured parsing information to detect claims without resorting to contextual information, and yet achieve a performance comparable to that of state-of-the-art methods that heavily rely on the context.}, groups = {Argument Mining}, numpages = {7}, } @InProceedings{Ashley2013a, author = {Ashley, Kevin D. 
and Walker, Vern R.}, booktitle = {Proceedings of the Fourteenth International Conference on Artificial Intelligence and Law}, date = {2013}, title = {Toward Constructing Evidence-Based Legal Arguments Using Legal Decision Documents and Machine Learning}, doi = {10.1145/2514601.2514622}, isbn = {9781450320801}, location = {Rome, Italy}, pages = {176–180}, publisher = {Association for Computing Machinery}, series = {ICAIL '13}, url = {https://doi.org/10.1145/2514601.2514622}, abstract = {This paper explores how to extract argumentation-relevant information automatically from a corpus of legal decision documents, and how to build new arguments using that information. For decision texts, we use the Vaccine/Injury Project (V/IP) Corpus, which contains default-logic annotations of argument structure. We supplement this with presuppositional annotations about entities, events, and relations that play important roles in argumentation, and about the level of confidence that arguments would be successful. We then propose how to integrate these semantic-pragmatic annotations with syntactic and domain-general semantic annotations, such as those generated in the DeepQA architecture, and outline how to apply machine learning and scoring techniques similar to those used in the IBM Watson system for playing the Jeopardy! question-answer game. We replace this game-playing goal, however, with the goal of learning to construct legal arguments.}, address = {New York, NY, USA}, groups = {Argument Mining}, keywords = {IBM Watson, text annotation, default-logic framework, DeepQA, legal argumentation, presuppositional annotation}, numpages = {5}, } @InProceedings{Ashley2013, author = {Ashley, Kevin D. 
and Walker, Vern R.}, booktitle = {Proceedings of the Fourteenth International Conference on Artificial Intelligence and Law}, date = {2013}, title = {Toward Constructing Evidence-Based Legal Arguments Using Legal Decision Documents and Machine Learning}, doi = {10.1145/2514601.2514622}, isbn = {9781450320801}, location = {Rome, Italy}, pages = {176–180}, publisher = {Association for Computing Machinery}, series = {ICAIL '13}, url = {https://doi.org/10.1145/2514601.2514622}, abstract = {This paper explores how to extract argumentation-relevant information automatically from a corpus of legal decision documents, and how to build new arguments using that information. For decision texts, we use the Vaccine/Injury Project (V/IP) Corpus, which contains default-logic annotations of argument structure. We supplement this with presuppositional annotations about entities, events, and relations that play important roles in argumentation, and about the level of confidence that arguments would be successful. We then propose how to integrate these semantic-pragmatic annotations with syntactic and domain-general semantic annotations, such as those generated in the DeepQA architecture, and outline how to apply machine learning and scoring techniques similar to those used in the IBM Watson system for playing the Jeopardy! question-answer game. 
We replace this game-playing goal, however, with the goal of learning to construct legal arguments.}, address = {New York, NY, USA}, groups = {AI and Law}, keywords = {IBM Watson, text annotation, default-logic framework, DeepQA, legal argumentation, presuppositional annotation}, numpages = {5}, } @Article{Shulayeva2017, author = {Shulayeva, Olga and Siddharthan, Advaith and Wyner, Adam}, date = {2017}, journaltitle = {Artificial Intelligence and Law}, title = {Recognizing cited facts and principles in legal judgements}, doi = {10.1007/s10506-017-9197-6}, issn = {1572-8382}, number = {1}, pages = {107--126}, url = {https://doi.org/10.1007/s10506-017-9197-6}, volume = {25}, abstract = {In common law jurisdictions, legal professionals cite facts and legal principles from precedent cases to support their arguments before the court for their intended outcome in a current case. This practice stems from the doctrine of stare decisis, where cases that have similar facts should receive similar decisions with respect to the principles. It is essential for legal professionals to identify such facts and principles in precedent cases, though this is a highly time intensive task. In this paper, we present studies that demonstrate that human annotators can achieve reasonable agreement on which sentences in legal judgements contain cited facts and principles (respectively, $$\kappa =0.65$$and $$\kappa =0.95$$for inter- and intra-annotator agreement). 
We further demonstrate that it is feasible to automatically annotate sentences containing such legal facts and principles in a supervised machine learning framework based on linguistic features, reporting per category precision and recall figures of between 0.79 and 0.89 for classifying sentences in legal judgements as cited facts, principles or neither using a Bayesian classifier, with an overall $$\kappa$$ of 0.72 with the human-annotated gold standard.}, groups = {Argument Mining, AI and Law, IRIS23}, refid = {Shulayeva2017}, } @InProceedings{Sardianos2015, author = {Sardianos, Christos and Katakis, Ioannis Manousos and Petasis, Georgios and Karkaletsis, Vangelis}, booktitle = {Proceedings of the 2nd Workshop on Argumentation Mining}, date = {2015-06}, title = {Argument Extraction from News}, doi = {10.3115/v1/W15-0508}, location = {Denver, CO}, pages = {56--66}, publisher = {Association for Computational Linguistics}, url = {https://aclanthology.org/W15-0508}, groups = {Argument Mining}, } @Article{vanderWaa2021, author = {Jasper {van der Waa} and Elisabeth Nieuwburg and Anita Cremers and Mark Neerincx}, date = {2021}, journaltitle = {Artificial Intelligence}, title = {Evaluating XAI: A comparison of rule-based and example-based explanations}, doi = {10.1016/j.artint.2020.103404}, issn = {0004-3702}, pages = {103404}, url = {https://www.sciencedirect.com/science/article/pii/S0004370220301533}, volume = {291}, abstract = {Current developments in Artificial Intelligence (AI) led to a resurgence of Explainable AI (XAI). New methods are being researched to obtain information from AI systems in order to generate explanations for their output. However, there is an overall lack of valid and reliable evaluations of the effects on users' experience of, and behavior in response to explanations. New XAI methods are often based on an intuitive notion what an effective explanation should be.
Rule- and example-based contrastive explanations are two exemplary explanation styles. In this study we evaluate the effects of these two explanation styles on system understanding, persuasive power and task performance in the context of decision support in diabetes self-management. Furthermore, we provide three sets of recommendations based on our experience designing this evaluation to help improve future evaluations. Our results show that rule-based explanations have a small positive effect on system understanding, whereas both rule- and example-based explanations seem to persuade users in following the advice even when incorrect. Neither explanation improves task performance compared to no explanation. This can be explained by the fact that both explanation styles only provide details relevant for a single decision, not the underlying rational or causality. These results show the importance of user evaluations in assessing the current assumptions and intuitions on effective explanations.}, groups = {Ethical AI}, keywords = {Explainable Artificial Intelligence (XAI), User evaluations, Contrastive explanations, Artificial Intelligence (AI), Machine learning, Decision support systems}, } @Article{Goodchild2010, author = {Philip Goodchild}, date = {2010}, journaltitle = {SubStance}, title = {Philosophy as a Way of Life: Deleuze on Thinking and Money}, issn = {00492426, 15272095}, number = {1}, pages = {24--37}, url = {http://www.jstor.org/stable/40801057}, volume = {39}, publisher = {University of Wisconsin Press}, } @Article{Miller2019, author = {Tim Miller}, date = {2019}, journaltitle = {Artificial Intelligence}, title = {Explanation in artificial intelligence: Insights from the social sciences}, doi = {10.1016/j.artint.2018.07.007}, issn = {0004-3702}, pages = {1--38}, url = {https://www.sciencedirect.com/science/article/pii/S0004370218305988}, volume = {267}, abstract = {There has been a recent resurgence in the area of explainable artificial
intelligence as researchers and practitioners seek to provide more transparency to their algorithms. Much of this research is focused on explicitly explaining decisions or actions to a human observer, and it should not be controversial to say that looking at how humans explain to each other can serve as a useful starting point for explanation in artificial intelligence. However, it is fair to say that most work in explainable artificial intelligence uses only the researchers' intuition of what constitutes a ‘good’ explanation. There exist vast and valuable bodies of research in philosophy, psychology, and cognitive science of how people define, generate, select, evaluate, and present explanations, which argues that people employ certain cognitive biases and social expectations to the explanation process. This paper argues that the field of explainable artificial intelligence can build on this existing research, and reviews relevant papers from philosophy, cognitive psychology/science, and social psychology, which study these topics. It draws out some important findings, and discusses ways that these can be infused with work on explainable artificial intelligence.}, groups = {Ethical AI, XAI}, keywords = {Explanation, Explainability, Interpretability, Explainable AI, Transparency}, } @Misc{Keane2020, author = {Mark T. 
Keane and Barry Smyth}, date = {2020}, title = {Good Counterfactuals and Where to Find Them: A Case-Based Technique for Generating Counterfactuals for Explainable AI (XAI)}, eprint = {2005.13997}, eprintclass = {cs.AI}, eprinttype = {arXiv}, groups = {XAI}, } @Article{Soermo2005, author = {Sørmo, Frode and Cassens, Jörg and Aamodt, Agnar}, date = {2005}, journaltitle = {Artificial Intelligence Review}, title = {Explanation in Case-Based Reasoning-Perspectives and Goals}, doi = {10.1007/s10462-005-4607-7}, issn = {1573-7462}, number = {2}, pages = {109--143}, url = {https://doi.org/10.1007/s10462-005-4607-7}, volume = {24}, abstract = {We present an overview of different theories of explanation from the philosophy and cognitive science communities. Based on these theories, as well as models of explanation from the knowledge-based systems area, we present a framework for explanation in case-based reasoning (CBR) based on explanation goals. We propose ways that the goals of the user and system designer should be taken into account when deciding what is a good explanation for a given CBR system. Some general types of goals relevant to many CBR systems are identified, and used to survey existing methods of explanation in CBR. 
Finally, we identify some future challenges.}, refid = {Sørmo2005}, } @Article{Leake2005, author = {Leake, David and McSherry, David}, date = {2005}, journaltitle = {Artificial Intelligence Review}, title = {Introduction to the Special Issue on Explanation in Case-Based Reasoning}, doi = {10.1007/s10462-005-4606-8}, issn = {1573-7462}, number = {2}, pages = {103--108}, url = {https://doi.org/10.1007/s10462-005-4606-8}, volume = {24}, refid = {Leake2005}, } @Article{Belle2020, author = {Vaishak Belle and Ioannis Papantonis}, date = {2020}, journaltitle = {CoRR}, title = {Principles and Practice of Explainable Machine Learning}, eprint = {2009.11698}, eprinttype = {arXiv}, url = {https://arxiv.org/abs/2009.11698}, volume = {abs/2009.11698}, bibsource = {dblp computer science bibliography, https://dblp.org}, biburl = {https://dblp.org/rec/journals/corr/abs-2009-11698.bib}, groups = {XAI, Ethical AI}, timestamp = {Wed, 30 Sep 2020 16:16:22 +0200}, } @InProceedings{Jin2019, author = {Weina Jin and Sheelagh Carpendale and G.
Hamarneh and Diane Gromala}, date = {2019}, title = {Bridging AI Developers and End Users: an End-User-Centred Explainable AI Taxonomy and Visual Vocabularies}, groups = {XAI, Ethical AI}, } @Article{Jin2021, author = {Weina Jin and Jianyu Fan and Diane Gromala and Philippe Pasquier and Ghassan Hamarneh}, date = {2021}, journaltitle = {CoRR}, title = {{EUCA:} {A} Practical Prototyping Framework towards End-User-Centered Explainable Artificial Intelligence}, eprint = {2102.02437}, eprinttype = {arXiv}, url = {https://arxiv.org/abs/2102.02437}, volume = {abs/2102.02437}, bibsource = {dblp computer science bibliography, https://dblp.org}, biburl = {https://dblp.org/rec/journals/corr/abs-2102-02437.bib}, groups = {XAI, Ethical AI}, timestamp = {Tue, 09 Feb 2021 13:35:56 +0100}, } @Collection{Baroni2018, date = {2018-01-28}, editor = {Baroni, Pietro and Gabbay, Dov and Giacomin, Massimiliano and van der Torre, Leendert}, title = {Handbook of Formal Argumentation}, publisher = {College Publications}, } @InCollection{Prakken2018, author = {Prakken, Henry}, booktitle = {Handbook of Formal Argumentation}, date = {2018-01-28}, title = {Historical overview of formal argumentation.}, editor = {Baroni, Pietro and Gabbay, Dov and Giacomin, Massimiliano and van der Torre, Leendert}, pages = {73-141}, publisher = {College Publications}, } @InProceedings{Levy2014, author = {Levy, Ran and Bilu, Yonatan and Hershcovich, Daniel and Aharoni, Ehud and Slonim, Noam}, booktitle = {Proceedings of {COLING} 2014, the 25th International Conference on Computational Linguistics: Technical Papers}, date = {2014-08}, title = {Context Dependent Claim Detection}, location = {Dublin, Ireland}, pages = {1489--1500}, publisher = {Dublin City University and Association for Computational Linguistics}, url = {https://aclanthology.org/C14-1141}, groups = {Argument Mining}, } @Article{Adadi2018, author = {Adadi, Amina and Berrada, Mohammed}, date = {2018}, journaltitle = {IEEE Access}, title = {Peeking 
Inside the Black-Box: A Survey on Explainable Artificial Intelligence (XAI)}, doi = {10.1109/ACCESS.2018.2870052}, pages = {52138-52160}, volume = {6}, groups = {XAI, Ethical AI}, } @Article{Guidotti2018, author = {Guidotti, Riccardo and Monreale, Anna and Ruggieri, Salvatore and Turini, Franco and Giannotti, Fosca and Pedreschi, Dino}, date = {2018-08}, journaltitle = {ACM Comput. Surv.}, title = {A Survey of Methods for Explaining Black Box Models}, doi = {10.1145/3236009}, issn = {0360-0300}, number = {5}, url = {https://doi.org/10.1145/3236009}, volume = {51}, abstract = {In recent years, many accurate decision support systems have been constructed as black boxes, that is as systems that hide their internal logic to the user. This lack of explanation constitutes both a practical and an ethical issue. The literature reports many approaches aimed at overcoming this crucial weakness, sometimes at the cost of sacrificing accuracy for interpretability. The applications in which black box decision systems can be used are various, and each approach is typically developed to provide a solution for a specific problem and, as a consequence, it explicitly or implicitly delineates its own definition of interpretability and explanation. The aim of this article is to provide a classification of the main problems addressed in the literature with respect to the notion of explanation and the type of black box system. Given a problem definition, a black box type, and a desired explanation, this survey should help the researcher to find the proposals more useful for his own work. 
The proposed classification of approaches to open black box models should also be useful for putting the many research open questions in perspective.}, articleno = {93}, groups = {XAI, Ethical AI}, issue_date = {January 2019}, keywords = {explanations, interpretability, transparent models, Open the black box}, location = {New York, NY, USA}, numpages = {42}, publisher = {Association for Computing Machinery}, } @Article{Wilkinson2016, author = {Wilkinson, Mark D. and Dumontier, Michel and Aalbersberg, IJsbrand Jan and Appleton, Gabrielle and Axton, Myles and Baak, Arie and Blomberg, Niklas and Boiten, Jan-Willem and da Silva Santos, Luiz Bonino and Bourne, Philip E. and Bouwman, Jildau and Brookes, Anthony J. and Clark, Tim and Crosas, Mercè and Dillo, Ingrid and Dumon, Olivier and Edmunds, Scott and Evelo, Chris T. and Finkers, Richard and Gonzalez-Beltran, Alejandra and Gray, Alasdair J. G. and Groth, Paul and Goble, Carole and Grethe, Jeffrey S. and Heringa, Jaap and ’t Hoen, Peter A. C. and Hooft, Rob and Kuhn, Tobias and Kok, Ruben and Kok, Joost and Lusher, Scott J. and Martone, Maryann E. and Mons, Albert and Packer, Abel L. and Persson, Bengt and Rocca-Serra, Philippe and Roos, Marco and van Schaik, Rene and Sansone, Susanna-Assunta and Schultes, Erik and Sengstag, Thierry and Slater, Ted and Strawn, George and Swertz, Morris A. and Thompson, Mark and van der Lei, Johan and van Mulligen, Erik and Velterop, Jan and Waagmeester, Andra and Wittenburg, Peter and Wolstencroft, Katherine and Zhao, Jun and Mons, Barend}, date = {2016}, journaltitle = {Scientific Data}, title = {The FAIR Guiding Principles for scientific data management and stewardship}, doi = {10.1038/sdata.2016.18}, issn = {2052-4463}, number = {1}, pages = {160018}, url = {https://doi.org/10.1038/sdata.2016.18}, volume = {3}, abstract = {There is an urgent need to improve the infrastructure supporting the reuse of scholarly data. 
A diverse set of stakeholders--representing academia, industry, funding agencies, and scholarly publishers--have come together to design and jointly endorse a concise and measureable set of principles that we refer to as the FAIR Data Principles. The intent is that these may act as a guideline for those wishing to enhance the reusability of their data holdings. Distinct from peer initiatives that focus on the human scholar, the FAIR Principles put specific emphasis on enhancing the ability of machines to automatically find and use the data, in addition to supporting its reuse by individuals. This Comment is the first formal publication of the FAIR Principles, and includes the rationale behind them, and some exemplar implementations in the community.}, refid = {Wilkinson2016}, } @InProceedings{Adhikari2019, author = {Adhikari, Ajaya and Tax, David M. J. and Satta, Riccardo and Faeth, Matthias}, booktitle = {2019 IEEE International Conference on Fuzzy Systems (FUZZ-IEEE)}, date = {2019}, title = {LEAFAGE: Example-based and Feature importance-based Explanations for Black-box ML models}, doi = {10.1109/FUZZ-IEEE.2019.8858846}, pages = {1-7}, } @WWW{EuropeanCommission2021, author = {{European Commission}}, date = {2021-04-21}, title = {Proposal for a regulation of the European Parliament and of the Council Laying down harmonised rules on Artificial Intelligence (Artificial Intelligence Act) and amending certain union legislative acts}, url = {https://eur-lex.europa.eu/legal-content/EN/TXT/?uri=CELEX:52021PC0206}, groups = {Ethical AI}, } @TechReport{Rechtspraak2019, date = {2019-03-20}, institution = {IVO Rechtspraak}, title = {Open Data van de Rechtspraak}, pagetotal = {29}, subtitle = {Technische Specificatie}, type = {techreport}, url = {https://www.rechtspraak.nl/SiteCollectionDocuments/Technische-documentatie-Open-Data-van-de-Rechtspraak.pdf}, version = {1.15}, groups = {AI and Law}, } @Article{Aamodt1994, author = {Aamodt, Agnar and Plaza, Enric}, date = {1994}, 
journaltitle = {AI Communications}, title = {Case-Based Reasoning: Foundational Issues, Methodological Variations, and System Approaches}, doi = {10.3233/AIC-1994-7104}, number = {1}, pages = {39--59}, volume = {7}, abstract = {Case-based reasoning is a recent approach to problem solving and learning that has got a lot of attention over the last few years. Originating in the US, the basic idea and underlying theories have spread to other continents, and we are now within a period of highly active research in case-based reasoning in Europe as well. This paper gives an overview of the foundational issues related to case-based reasoning, describes some of the leading methodological approaches within the field, and exemplifies the current state through pointers to some systems. Initially, a general framework is defined, to which the subsequent descriptions and discussions will refer. The framework is influenced by recent methodologies for knowledge level descriptions of intelligent systems. The methods for case retrieval, reuse, solution testing, and learning are summarized, and their actual realization is discussed in the light of a few example systems that represent different CBR approaches. We also discuss the role of case-based methods as one type of reasoning and learning method within an integrated system architecture.}, groups = {Argument Mining, Case-based reasoning}, publisher = {IOS Press}, } @Article{Levy2015, author = {Levy, Omer and Goldberg, Yoav and Dagan, Ido}, date = {2015}, journaltitle = {Transactions of the Association for Computational Linguistics}, title = {Improving Distributional Similarity with Lessons Learned from Word Embeddings}, doi = {10.1162/tacl_a_00134}, pages = {211--225}, url = {https://aclanthology.org/Q15-1016}, volume = {3}, abstract = {Recent trends suggest that neural-network-inspired word embedding models outperform traditional count-based distributional models on word similarity and analogy detection tasks. 
We reveal that much of the performance gains of word embeddings are due to certain system design choices and hyperparameter optimizations, rather than the embedding algorithms themselves. Furthermore, we show that these modifications can be transferred to traditional distributional models, yielding similar gains. In contrast to prior reports, we observe mostly local or insignificant performance differences between the methods, with no global advantage to any single approach over the others.}, groups = {NLP}, location = {Cambridge, MA}, publisher = {MIT Press}, } @Article{Wilson2015, author = {Benjamin J. Wilson and Adriaan M. J. Schakel}, date = {2015}, journaltitle = {CoRR}, title = {Controlled Experiments for Word Embeddings}, eprint = {1510.02675}, eprintclass = {cs.CL}, eprinttype = {arXiv}, url = {https://arxiv.org/abs/1510.02675}, volume = {abs/1510.02675}, abstract = {An experimental approach to studying the properties of word embeddings is proposed. Controlled experiments, achieved through modifications of the training corpus, permit the demonstration of direct relations between word properties and word vector direction and length. The approach is demonstrated using the word2vec CBOW model with experiments that independently vary word frequency and word co-occurrence noise. The experiments reveal that word vector length depends more or less linearly on both word frequency and the level of noise in the co-occurrence distribution of the word. The coefficients of linearity depend upon the word. 
The special point in feature space, defined by the (artificial) word with pure noise in its co-occurrence distribution, is found to be small but non-zero.}, groups = {NLP}, } @Article{Mehta2021, author = {Mehta, Vivek and Bawa, Seema and Singh, Jasmeet}, date = {2021-09}, journaltitle = {Complex \& Intelligent Systems}, title = {WEClustering: word embeddings based text clustering technique for large datasets}, doi = {10.1007/s40747-021-00512-9}, issn = {2199-4536}, number = {34777978}, pages = {1--14}, url = {https://www.ncbi.nlm.nih.gov/pmc/articles/PMC8421191/}, abstract = {A massive amount of textual data now exists in digital repositories in the form of research articles, news articles, reviews, Wikipedia articles, and books, etc. Text clustering is a fundamental data mining technique to perform categorization, topic extraction, and information retrieval. Textual datasets, especially which contain a large number of documents are sparse and have high dimensionality. Hence, traditional clustering techniques such as K-means, Agglomerative clustering, and DBSCAN cannot perform well. In this paper, a clustering technique especially suitable to large text datasets is proposed that overcome these limitations. The proposed technique is based on word embeddings derived from a recent deep learning model named "Bidirectional Encoders Representations using Transformers". The proposed technique is named as WEClustering. The proposed technique deals with the problem of high dimensionality in an effective manner, hence, more accurate clusters are formed. The technique is validated on several datasets of varying sizes and its performance is compared with other widely used and state of the art clustering techniques. 
The experimental comparison shows that the proposed clustering technique gives a significant improvement over other techniques as measured by metrics such Purity and Adjusted Rand Index.}, comment = {34777978[pmid] PMC8421191[pmcid]}, database = {PubMed}, groups = {NLP}, keywords = {BERT, Big data, Document clustering, Pattern recognition, Semantic clustering, Text mining}, publisher = {Springer International Publishing}, } @InProceedings{Tiddi2015, author = {Ilaria Tiddi and Mathieu D'Aquin and Enrico Motta}, booktitle = {Proceedings of the 8th International Conference on Knowledge Capture, K-CAP 2015}, date = {2015-10-07}, title = {An ontology design pattern to define explanations}, doi = {10.1145/2815833.2815844}, language = {English}, note = {8th International Conference on Knowledge Capture, K-CAP 2015 ; Conference date: 07-10-2015 Through 10-10-2015}, publisher = {Association for Computing Machinery, Inc}, series = {Proceedings of the 8th International Conference on Knowledge Capture, K-CAP 2015}, abstract = {In this paper, we propose an ontology design pattern for the concept of {"}explanation{"}. The motivation behind this work comes from our research, which focuses on automatically identifying explanations for data patterns. If we want to produce explanations from data agnostically from the appli- cation domain, we first need a formal definition of what an explanation is, i.e. which are its components, their roles or their interactions. We analysed and surveyed works from the disciplines grouped under the name of Cognitive Sciences, with the aim of identifying differences and commonalities in the way their researchers intend the concept of explanation. We then produced not only an ontology design pattern to model it, but also the instantiations of this in each of the analysed disciplines. 
Besides those contributions, the paper presents how the proposed ontology design pattern can be used to analyse the validity of the explanations produced by our, and other, frameworks.}, day = {7}, groups = {XAI}, keywords = {Explanation, Knowledge Discovery, Ontology Design Pattern}, } @InProceedings{Chari2020, author = {Chari, Shruthi and Seneviratne, Oshani and Gruen, Daniel M. and Foreman, Morgan A. and Das, Amar K. and McGuinness, Deborah L.}, booktitle = {The Semantic Web -- ISWC 2020}, date = {2020}, title = {Explanation Ontology: A Model of Explanations for User-Centered AI}, editor = {Pan, Jeff Z. and Tamma, Valentina and d'Amato, Claudia and Janowicz, Krzysztof and Fu, Bo and Polleres, Axel and Seneviratne, Oshani and Kagal, Lalana}, isbn = {978-3-030-62466-8}, location = {Cham}, pages = {228--243}, publisher = {Springer International Publishing}, abstract = {Explainability has been a goal for Artificial Intelligence (AI) systems since their conception, with the need for explainability growing as more complex AI models are increasingly used in critical, high-stakes settings such as healthcare. Explanations have often added to an AI system in a non-principled, post-hoc manner. With greater adoption of these systems and emphasis on user-centric explainability, there is a need for a structured representation that treats explainability as a primary consideration, mapping end user needs to specific explanation types and the system's AI capabilities. We design an explanation ontology to model both the role of explanations, accounting for the system and user attributes in the process, and the range of different literature-derived explanation types. We indicate how the ontology can support user requirements for explanations in the domain of healthcare. 
We evaluate our ontology with a set of competency questions geared towards a system designer who might use our ontology to decide which explanation types to include, given a combination of users' needs and a system's capabilities, both in system design settings and in real-time operations. Through the use of this ontology, system designers will be able to make informed choices on which explanations AI systems can and should provide.}, groups = {XAI}, } @InProceedings{Le2014, author = {Le, Quoc and Mikolov, Tomas}, booktitle = {Proceedings of the 31st International Conference on Machine Learning}, date = {2014}, title = {Distributed Representations of Sentences and Documents}, editor = {Xing, Eric P. and Jebara, Tony}, location = {Bejing, China}, number = {2}, pages = {1188--1196}, publisher = {PMLR}, series = {Proceedings of Machine Learning Research}, url = {https://proceedings.mlr.press/v32/le14.html}, volume = {32}, abstract = {Many machine learning algorithms require the input to be represented as a fixed length feature vector. When it comes to texts, one of the most common representations is bag-of-words. Despite their popularity, bag-of-words models have two major weaknesses: they lose the ordering of the words and they also ignore semantics of the words. For example, "powerful," "strong" and "Paris" are equally distant. In this paper, we propose an unsupervised algorithm that learns vector representations of sentences and text documents. This algorithm represents each document by a dense vector which is trained to predict words in the document. Its construction gives our algorithm the potential to overcome the weaknesses of bag-of-words models. Empirical results show that our technique outperforms bag-of-words models as well as other techniques for text representations. 
Finally, we achieve new state-of-the-art results on several text classification and sentiment analysis tasks.}, pdf = {http://proceedings.mlr.press/v32/le14.pdf}, } @InProceedings{Landthaler2016, author = {Landthaler, Jörg and Waltl, Bernhard and Holl, Patrick and Matthes, Florian}, booktitle = {Legal Knowledge and Information Systems}, date = {2016}, title = {Extending Full Text Search for Legal Document Collections Using Word Embeddings}, doi = {10.3233/978-1-61499-726-9-73}, editor = {Bex, Floris and Villata, Serena}, pages = {73--83}, publisher = {IOS Press}, url = {https://ebooks.iospress.nl/publication/45751}, volume = {294}, groups = {AI and Law, NLP}, } @Article{Martens2014, author = {Martens, David and Provost, Foster}, date = {2014-03}, journaltitle = {MIS Quarterly}, title = {Explaining Data-Driven Document Classifications}, doi = {10.25300/MISQ/2014/38.1.04}, issn = {0276-7783}, number = {1}, pages = {73--100}, url = {https://doi.org/10.25300/MISQ/2014/38.1.04}, volume = {38}, abstract = {Many document classification applications require human understanding of the reasons for data-driven classification decisions by managers, client-facing employees, and the technical team. Predictive models treat documents as data to be classified, and document data are characterized by very high dimensionality, often with tens of thousands to millions of variables (words). Unfortunately, due to the high dimensionality, understanding the decisions made by document classifiers is very difficult. This paper begins by extending the most relevant prior theoretical model of explanations for intelligent systems to account for some missing elements. The main theoretical contribution is the definition of a new sort of explanation as a minimal set of words (terms, generally), such that removing all words within this set from the document changes the predicted class from the class of interest. 
We present an algorithm to find such explanations, as well as a framework to assess such an algorithm's performance. We demonstrate the value of the new approach with a case study from a real-world document classification task: classifying web pages as containing objectionable content, with the goal of allowing advertisers to choose not to have their ads appear on those pages. A second empirical demonstration on news-story topic classification shows the explanations to be concise and document-specific, and to be capable of providing understanding of the exact reasons for the classification decisions, of the workings of the classification models, and of the business application itself. We also illustrate how explaining the classifications of documents can help to improve data quality and model performance.}, groups = {XAI}, issue_date = {March 2014}, keywords = {document classification, instance level explanation, comprehensibility, text mining}, location = {USA}, numpages = {28}, publisher = {Society for Information Management and The Management Information Systems Research Center}, } @Article{Ramon2020, author = {Ramon, Yanou and Martens, David and Provost, Foster and Evgeniou, Theodoros}, date = {2020}, journaltitle = {Advances in Data Analysis and Classification}, title = {A comparison of instance-level counterfactual explanation algorithms for behavioral and textual data: SEDC, LIME-C and SHAP-C}, doi = {10.1007/s11634-020-00418-3}, issn = {1862-5355}, number = {4}, pages = {801--819}, url = {https://doi.org/10.1007/s11634-020-00418-3}, volume = {14}, abstract = {Predictive systems based on high-dimensional behavioral and textual data have serious comprehensibility and transparency issues: linear models require investigating thousands of coefficients, while the opaqueness of nonlinear models makes things worse. Counterfactual explanations are becoming increasingly popular for generating insight into model predictions. 
This study aligns the recently proposed linear interpretable model-agnostic explainer and Shapley additive explanations with the notion of counterfactual explanations, and empirically compares the effectiveness and efficiency of these novel algorithms against a model-agnostic heuristic search algorithm for finding evidence counterfactuals using 13 behavioral and textual data sets. We show that different search methods have different strengths, and importantly, that there is much room for future research.}, groups = {XAI}, refid = {Ramon2020}, } @InProceedings{8622073, author = {Chhatwal, Rishi and Gronvall, Peter and Huber-Fliflet, Nathaniel and Keeling, Robert and Zhang, Jianping and Zhao, Haozhen}, booktitle = {2018 IEEE International Conference on Big Data (Big Data)}, date = {2018}, title = {Explainable Text Classification in Legal Document Review: A Case Study of Explainable Predictive Coding}, doi = {10.1109/BigData.2018.8622073}, pages = {1905--1911}, groups = {XAI, AI and Law}, } @Article{Madaan2021, author = {Madaan, Nishtha and Padhi, Inkit and Panwar, Naveen and Saha, Diptikalyan}, date = {2021-05}, journaltitle = {Proceedings of the AAAI Conference on Artificial Intelligence}, title = {Generate Your Counterfactuals: Towards Controlled Counterfactual Generation for Text}, number = {15}, pages = {13516--13524}, url = {https://ojs.aaai.org/index.php/AAAI/article/view/17594}, volume = {35}, abstract = {Machine Learning has seen tremendous growth recently, which has led to a larger adaptation of ML systems for educational assessments, credit risk, healthcare, employment, criminal justice, to name a few. The trustworthiness of ML and NLP systems is a crucial aspect and requires a guarantee that the decisions they make are fair and robust. Aligned with this, we propose a novel framework GYC, to generate a set of exhaustive counterfactual text, which are crucial for testing these ML systems. 
Our main contributions include a) We introduce GYC, a framework to generate counterfactual samples such that the generation is plausible, diverse, goal-oriented, and effective, b) We generate counterfactual samples, that can direct the generation towards a corresponding \texttt{condition} such as named-entity tag, semantic role label, or sentiment. Our experimental results on various domains show that GYC generates counterfactual text samples exhibiting the above four properties. GYC generates counterfactuals that can act as test cases to evaluate a model and any text debiasing algorithm.}, } @Article{deVries2019, author = {{de Vries}, Wietse and {van Cranenburgh}, Andreas and Bisazza, Arianna and Caselli, Tommaso and {van Noord}, Gertjan and Nissim, Malvina}, date = {2019}, journaltitle = {CoRR}, title = {BERTje: A Dutch BERT Model}, eprint = {1912.09582}, eprintclass = {cs.CL}, eprinttype = {arXiv}, url = {http://arxiv.org/abs/1912.09582}, volume = {abs/1912.09582}, } @InProceedings{Steging2021, author = {Steging, Cor and Renooij, Silja and Verheij, Bart}, booktitle = {Legal Knowledge and Information Systems}, date = {2021}, title = {Rationale Discovery and Explainable AI}, doi = {10.3233/FAIA210341}, editor = {Schweighofer, Erich}, isbn = {978-1-64368-252-5}, pages = {225--234}, publisher = {IOS Press}, series = {Frontiers in Artificial Intelligence and Applications}, volume = {346}, groups = {AI and Law, XAI}, } @Article{Vlek2014, author = {Vlek, Charlotte S. and Prakken, Henry and Renooij, Silja and Verheij, Bart}, date = {2014}, journaltitle = {Artificial Intelligence and Law}, title = {Building Bayesian networks for legal evidence with narratives: a case study evaluation}, doi = {10.1007/s10506-014-9161-7}, issn = {1572-8382}, number = {4}, pages = {375--421}, url = {https://doi.org/10.1007/s10506-014-9161-7}, volume = {22}, abstract = {In a criminal trial, evidence is used to draw conclusions about what happened concerning a supposed crime. 
Traditionally, the three main approaches to modeling reasoning with evidence are argumentative, narrative and probabilistic approaches. Integrating these three approaches could arguably enhance the communication between an expert and a judge or jury. In previous work, techniques were proposed to represent narratives in a Bayesian network and to use narratives as a basis for systematizing the construction of a Bayesian network for a legal case. In this paper, these techniques are combined to form a design method for constructing a Bayesian network based on narratives. This design method is evaluated by means of an extensive case study concerning the notorious Dutch case of the Anjum murders.}, groups = {AI and Law}, refid = {Vlek2014}, } @InProceedings{Gorski2021, author = {G{\'o}rski, \L{}ukasz and Ramakrishna, Shashishekar}, booktitle = {Proceedings of the Eighteenth International Conference on Artificial Intelligence and Law}, date = {2021}, title = {Explainable Artificial Intelligence, Lawyer's Perspective}, isbn = {9781450385268}, location = {New York, NY, USA}, pages = {60--68}, publisher = {Association for Computing Machinery}, url = {https://doi.org/10.1145/3462757.3466145}, abstract = {Explainable artificial intelligence (XAI) is a research direction that was already put under scrutiny, in particular in the AI\&Law community. Whilst there were notable developments in the area of (general, not necessarily legal) XAI, user experience studies regarding such methods, as well as more general studies pertaining to the concept of explainability among the users are still lagging behind. This paper firstly, assesses the performance of different explainability methods (Grad-CAM, LIME, SHAP), in explaining the predictions for a legal text classification problem; those explanations were then judged by legal professionals according to their accuracy. 
Secondly, the same respondents were asked to give their opinion on the desired qualities of (explainable) artificial intelligence (AI) legal decision system and to present their general understanding of the term XAI. This part was treated as a pilot study for a more pronounced one regarding the lawyer's position on AI, and XAI in particular.}, groups = {XAI, AI and Law}, numpages = {9}, } @InProceedings{deGreeff2021, author = {de Greeff, Joachim and de Boer, Maaike H. T. and Hillerström, Fieke H. J. and Bomhof, Freek and Jorritsma, Wiard and Neerincx, Mark}, booktitle = {Proceedings of the AAAI 2021 Spring Symposium on Combining Machine Learning and Knowledge Engineering (AAAI-MAKE 2021)}, date = {2021-03-22}, title = {The FATE System: FAir, Transparent and Explainable Decision Making}, eventdate = {2021-03-22/2021-03-24}, url = {http://resolver.tudelft.nl/uuid:56112570-f78b-429f-a52f-656283162e16}, } @Misc{Teufel1999, author = {Simone Teufel and Marc Moens}, date = {1999}, title = {Discourse-Level Argumentation in Scientific Articles: Human and Automatic Annotation}, groups = {Argument Mining}, } @PhdThesis{Teufel1999a, author = {Teufel, Simone}, date = {1999}, institution = {University of Edinburgh}, title = {Argumentative Zoning: Information Extraction from Scientific Text}, pagetotal = {352}, url = {https://www.cl.cam.ac.uk/~sht25/thesis/t.pdf}, groups = {Argument Mining}, } @Book{Toulmin2003, author = {Toulmin, Stephen E.}, date = {2003}, title = {The Uses of Argument}, doi = {10.1017/CBO9780511840005}, edition = {2}, publisher = {Cambridge University Press}, groups = {Argument Mining}, place = {Cambridge}, } @Article{Gorski2020, author = {Lukasz G{\'{o}}rski and Shashishekar Ramakrishna and Jedrzej M. 
Nowosielski}, date = {2020}, journaltitle = {CoRR}, title = {Towards Grad-CAM Based Explainability in a Legal Text Processing Pipeline}, eprint = {2012.09603}, eprintclass = {cs.HC}, eprinttype = {arXiv}, url = {https://arxiv.org/abs/2012.09603}, volume = {abs/2012.09603}, bibsource = {dblp computer science bibliography, https://dblp.org}, biburl = {https://dblp.org/rec/journals/corr/abs-2012-09603.bib}, groups = {Ethical AI, AI and Law, XAI}, timestamp = {Sun, 03 Jan 2021 18:46:06 +0100}, } @InProceedings{Binsbergen2020, author = {van Binsbergen, L. Thomas and Liu, Lu-Chi and van Doesburg, Robert and van Engers, Tom}, booktitle = {Proceedings of the 19th ACM SIGPLAN International Conference on Generative Programming: Concepts and Experiences}, date = {2020}, title = {eFLINT: A Domain-Specific Language for Executable Norm Specifications}, doi = {10.1145/3425898.3426958}, isbn = {9781450381741}, location = {Virtual, USA}, pages = {124–136}, publisher = {Association for Computing Machinery}, series = {GPCE 2020}, url = {https://doi.org/10.1145/3425898.3426958}, abstract = {Software systems that share potentially sensitive data are subjected to laws, regulations, policies and/or contracts. The monitoring, control and enforcement processes applied to these systems are currently to a large extent manual, which we rather automate by embedding the processes as dedicated and adaptable software services in order to improve efficiency and effectiveness. This approach requires such regulatory services to be closely aligned with a formal description of the relevant norms. This paper presents eFLINT, a domain-specific language developed for formalizing norms. The theoretical foundations of the language are found in transition systems and in Hohfeld's framework of legal fundamental conceptions. The language can be used to formalize norms from a large variety of sources. 
The resulting specifications are executable and support several forms of reasoning such as automatic case assessment, manual exploration and simulation. Moreover, the specifications can be used to develop regulatory services for several types of monitoring, control and enforcement. The language is evaluated through a case study formalizing articles 6(1)(a) and 16 of the General Data Protection Regulation (GDPR). A prototype implementation of eFLINT is discussed and is available online.}, address = {New York, NY, USA}, keywords = {executable specifications, domain-specific language, normative modeling, GDPR, policy enforcement}, numpages = {13}, } @Article{Miller2021, author = {Miller, Tim}, date = {2021}, journaltitle = {The Knowledge Engineering Review}, title = {Contrastive explanation: a structural-model approach}, doi = {10.1017/S0269888921000102}, pages = {e14}, volume = {36}, publisher = {Cambridge University Press}, } @InProceedings{Mothilal2020, author = {Mothilal, Ramaravind K. and Sharma, Amit and Tan, Chenhao}, booktitle = {Proceedings of the 2020 Conference on Fairness, Accountability, and Transparency}, date = {2020}, title = {Explaining Machine Learning Classifiers through Diverse Counterfactual Explanations}, doi = {10.1145/3351095.3372850}, isbn = {9781450369367}, location = {Barcelona, Spain}, pages = {607–617}, publisher = {Association for Computing Machinery}, series = {FAT* '20}, url = {https://doi.org/10.1145/3351095.3372850}, abstract = {Post-hoc explanations of machine learning models are crucial for people to understand and act on algorithmic predictions. An intriguing class of explanations is through counterfactuals, hypothetical examples that show people how to obtain a different prediction. We posit that effective counterfactual explanations should satisfy two properties: feasibility of the counterfactual actions given user context and constraints, and diversity among the counterfactuals presented. 
To this end, we propose a framework for generating and evaluating a diverse set of counterfactual explanations based on determinantal point processes. To evaluate the actionability of counterfactuals, we provide metrics that enable comparison of counterfactual-based methods to other local explanation methods. We further address necessary tradeoffs and point to causal implications in optimizing for counterfactuals. Our experiments on four real-world datasets show that our framework can generate a set of counterfactuals that are diverse and well approximate local decision boundaries, outperforming prior approaches to generating diverse counterfactuals. We provide an implementation of the framework at https://github.com/microsoft/DiCE.}, address = {New York, NY, USA}, numpages = {11}, } @InProceedings{Adhikari2022, author = {Adhikari, Ajaya and Wenink, Edwin and van der Waa, Jasper and Bouter, Cornelis and Tolios, Ioannis and Raaijmakers, Stephan}, booktitle = {Proceedings of the 15th International Conference on PErvasive Technologies Related to Assistive Environments}, date = {2022}, title = {Towards FAIR Explainable AI: A Standardized Ontology for Mapping XAI Solutions to Use Cases, Explanations, and AI Systems}, doi = {10.1145/3529190.3535693}, isbn = {9781450396318}, location = {Corfu, Greece}, pages = {562–568}, publisher = {Association for Computing Machinery}, series = {PETRA '22}, abstract = {Several useful taxonomies have been published that survey the eXplainable AI (XAI) research field. However, these taxonomies typically do not show the relation between XAI solutions and several use case aspects, such as the explanation goal or the task context. 
In order to better connect the field of XAI research with concrete use cases and user needs, we designed the ASCENT (Ai System use Case Explanation oNTology) framework, which is a new ontology and corresponding metadata standard with three complementary modules for different aspects of an XAI solution: one for aspects of AI systems, another for use case aspects, and yet another for explanation properties. The descriptions of XAI solutions in this framework include whether the XAI solution has a positive, negative, inconclusive or unresearched relation with use case elements. Descriptions in ASCENT thus emphasize the (user) evaluation of XAI solutions in order to support finding validated practices for application in industry, as well as being helpful for identifying research gaps. Describing XAI solutions according to the proposed common metadata standard is an important step towards the FAIR (Findable, Accessible, Interoperable, Reusable) usage of XAI solutions.}, address = {New York, NY, USA}, keywords = {FAIR, XAI ontology, user-centered, ASCENT}, numpages = {7}, } @Article{Wenink2020, author = {Wenink, Edwin}, date = {2020}, journaltitle = {Turning Magazine}, title = {Tech Giants will battle over your health data}, pages = {12--13}, volume = {2: AI \& Health}, } @Article{Wenink, author = {Wenink, Edwin}, date = {2013}, journaltitle = {Splijtstof}, title = {Deconstructie in werking}, editor = {Leijssenaar, Bas}, number = {2}, pages = {49--58}, url = {https://adoc.pub/splijtstof-jaargang-42-nummer-2-kerstnummer-2013.html}, volume = {42}, } @InProceedings{Schwartzenberg2020, author = {Carel Schwartzenberg and Tom M. 
van Engers and Yuan Li},
  booktitle = {Proceedings from BNAIC/BeneLearn 2020},
  date      = {2020},
  title     = {The fidelity of global surrogates in interpretable Machine Learning},
  editor    = {Cao, Lu and Kosters, Walter and Lijffijt, Jefrey},
  pages     = {269--283},
  groups    = {XAI, IRIS23},
}

@InProceedings{Medvedeva2021,
  author          = {Medvedeva, Masha and Dam, Thijmen and Wieling, Martijn and Vols, Michel},
  booktitle       = {Legal Knowledge and Information Systems},
  date            = {2021},
  title           = {Automatically identifying eviction cases and outcomes within case law of {Dutch} Courts of First Instance},
  doi             = {10.3233/FAIA210312},
  editor          = {Schweighofer, Erich},
  eventtitleaddon = {JURIX 2021: The Thirty-fourth Annual Conference},
  isbn            = {978-1-64368-252-5},
  language        = {English},
  pages           = {13--22},
  publisher       = {IOS Press},
  series          = {Frontiers in Artificial Intelligence and Applications},
  abstract        = {In this paper we attempt to identify eviction judgements within all case law published by Dutch courts in order to automate data collection, previously conducted manually. To do so we performed two experiments. The first focused on identifying judgements related to eviction, while the second focused on identifying the outcome of the cases in the judgements (eviction vs. dismissal of the landlord{\textquoteright}s claim). In the process of conducting the experiments for this study, we have created a manually annotated dataset with eviction-related judgements and their outcomes.},
  groups          = {AI and Law, IRIS23},
}

@Article{Rudin2019,
  author       = {Rudin, Cynthia},
  date         = {2019},
  journaltitle = {Nature Machine Intelligence},
  title        = {Stop explaining black box machine learning models for high stakes decisions and use interpretable models instead},
  doi          = {10.1038/s42256-019-0048-x},
  issn         = {2522-5839},
  number       = {5},
  pages        = {206--215},
  url          = {https://doi.org/10.1038/s42256-019-0048-x},
  volume       = {1},
  abstract     = {Black box machine learning models are currently being used for high-stakes decision making throughout society, causing problems in healthcare, criminal justice and other domains. Some people hope that creating methods for explaining these black box models will alleviate some of the problems, but trying to explain black box models, rather than creating models that are interpretable in the first place, is likely to perpetuate bad practice and can potentially cause great harm to society. The way forward is to design models that are inherently interpretable. This Perspective clarifies the chasm between explaining black boxes and using inherently interpretable models, outlines several key reasons why explainable black boxes should be avoided in high-stakes decisions, identifies challenges to interpretable machine learning, and provides several example applications where interpretable models could potentially replace black box models in criminal justice, healthcare and computer vision.},
  groups       = {IRIS23},
  refid        = {Rudin2019},
}

@Article{Leek2015,
  author       = {Leek, Jeffery T. and Peng, Roger D.},
  date         = {2015},
  journaltitle = {Science},
  title        = {What is the question?},
  doi          = {10.1126/science.aaa6146},
  number       = {6228},
  pages        = {1314--1315},
  url          = {https://www.science.org/doi/abs/10.1126/science.aaa6146},
  volume       = {347},
  abstract     = {Mistaking the type of question being considered is the most common error in data analysis Over the past 2 years, increased focus on statistical analysis brought on by the era of big data has pushed the issue of reproducibility out of the pages of academic journals and into the popular consciousness (1). Just weeks ago, a paper about the relationship between tissue-specific cancer incidence and stem cell divisions (2) was widely misreported because of misunderstandings about the primary statistical argument in the paper (3). Public pressure has contributed to the massive recent adoption of reproducible research tools, with corresponding improvements in reproducibility. But an analysis can be fully reproducible and still be wrong. Even the most spectacularly irreproducible analyses—like those underlying the ongoing lawsuits (4) over failed genomic signatures for chemotherapy assignment (5)—are ultimately reproducible (6). Once an analysis is reproducible, the key question we want to answer is, “Is this data analysis correct?” We have found that the most frequent failure in data analysis is mistaking the type of question being considered.},
}

@Article{Donoho2017,
  author       = {Donoho, David},
  date         = {2017},
  journaltitle = {Journal of Computational and Graphical Statistics},
  title        = {50 Years of Data Science},
  doi          = {10.1080/10618600.2017.1384734},
  number       = {4},
  pages        = {745--766},
  url          = {https://doi.org/10.1080/10618600.2017.1384734},
  volume       = {26},
  abstract     = {More than 50 years ago, John Tukey called for a reformation of academic statistics. In “The Future of Data Analysis,” he pointed to the existence of an as-yet unrecognized science, whose subject of interest was learning from data, or “data analysis.” Ten to 20 years ago, John Chambers, Jeff Wu, Bill Cleveland, and Leo Breiman independently once again urged academic statistics to expand its boundaries beyond the classical domain of theoretical statistics; Chambers called for more emphasis on data preparation and presentation rather than statistical modeling; and Breiman called for emphasis on prediction rather than inference. Cleveland and Wu even suggested the catchy name “data science” for this envisioned field. A recent and growing phenomenon has been the emergence of “data science” programs at major universities, including UC Berkeley, NYU, MIT, and most prominently, the University of Michigan, which in September 2015 announced a \$100M “Data Science Initiative” that aims to hire 35 new faculty. Teaching in these new programs has significant overlap in curricular subject matter with traditional statistics courses; yet many academic statisticians perceive the new programs as “cultural appropriation.” This article reviews some ingredients of the current “data science moment,” including recent commentary about data science in the popular media, and about how/whether data science is really different from statistics. The now-contemplated field of data science amounts to a superset of the fields of statistics and machine learning, which adds some technology for “scaling up” to “big data.” This chosen superset is motivated by commercial rather than intellectual developments. Choosing in this way is likely to miss out on the really important intellectual event of the next 50 years. Because all of science itself will soon become data that can be mined, the imminent revolution in data science is not about mere “scaling up,” but instead the emergence of scientific studies of data analysis science-wide. In the future, we will be able to predict how a proposal to change data analysis workflows would impact the validity of data analysis across all of science, even predicting the impacts field-by-field. Drawing on work by Tukey, Cleveland, Chambers, and Breiman, I present a vision of data science based on the activities of people who are “learning from data,” and I describe an academic field dedicated to improving that activity in an evidence-based manner. This new field is a better academic enlargement of statistics and machine learning than today’s data science initiatives, while being able to accommodate the same short-term goals. Based on a presentation at the Tukey Centennial Workshop, Princeton, NJ, September 18, 2015.},
  publisher    = {Taylor \& Francis},
}

@InProceedings{Wenink2023,
  author     = {Wenink, Edwin and Kwisthout, Johan and van Engers, Tom},
  booktitle  = {Legal Informatics as Science of Legal Methods. Proceedings of the 26th International Legal Informatics Symposium},
  date       = {2023},
  title      = {Punishment Extraction from {Dutch} criminal cases in Courts of First Instance},
  editor     = {Schweighofer, Erich and Zanol, Jakob and Eder, Stefan},
  eventtitle = {IRIS 2023},
  isbn       = {978-3-98595-714-9},
  location   = {Bern, Switzerland},
  pages      = {233--241},
  publisher  = {Weblaw},
  venue      = {Salzburg, Austria},
  abstract   = {This paper explores the merits of a pattern- and rule-based approach for the automated extraction of punishments from Dutch criminal cases in courts of first instance. Automated extraction of case outcomes leverages the increasing amount of information becoming available through digital technologies and aids the creation of big data sets for work in legal informatics and AI & Law. This work addresses domain-specific challenges, in particular that Dutch criminal case decisions may impose a single combined sentence for multiple facts or impose multiple sentences in the same decision. Manual evaluation of the developed method shows that the use of interpretable methods is a viable approach in the legal domain.},
  comment    = {Location uitgever is Bern; locatie van conferentie was Salzburg, Austria. Welke neem ik?},
}

@Comment{jabref-meta: databaseType:biblatex;}

@Comment{jabref-meta: grouping: 0 AllEntriesGroup:; 1 StaticGroup:Ethical AI\;0\;0\;0x8a8a8aff\;\;Literature related to both AI ethics as well as technical solutions for implementing ethical AI\;; 1 StaticGroup:AI and Law\;0\;0\;0x8a8a8aff\;\;\;; 1 StaticGroup:Argument Mining\;0\;0\;0x8a8a8aff\;\;\;; 1 StaticGroup:XAI\;0\;0\;0x8a8a8aff\;\;Explainable AI\;; 1 StaticGroup:Case-based reasoning\;0\;1\;0x8a8a8aff\;\;\;; 1 StaticGroup:NLP\;0\;0\;0x8a8a8aff\;\;Natural Language Processing & Distributional Semantics\;; 1 KeywordGroup:My Publications\;0\;author\;Wenink, Edwin\;0\;0\;1\;0x8a8a8aff\;\;\;; 2 StaticGroup:IRIS23\;0\;1\;0x8a8a8aff\;\;References used in IRIS23 paper\;; }