@article{Hevner2007, abstract = {As a commentary to Juhani Iivari's insightful essay, I briefly analyze design science research as an embodiment of three closely related cycles of activities. The Relevance Cycle inputs requirements from the contextual environment into the research and introduces the research artifacts into environmental field testing. The Rigor Cycle provides grounding theories and methods along with domain experience and expertise from the foundations knowledge base into the research and adds the new knowledge generated by the research to the growing knowledge base. The central Design Cycle supports a tighter loop of research activity for the construction and evaluation of design artifacts and processes. The recognition of these three cycles in a research project clearly positions and differentiates design science from other research paradigms. The commentary concludes with a claim to the pragmatic nature of design science research.}, author = {Hevner, Alan R.}, issn = {0905-0167}, journal = {Scandinavian Journal of Information Systems}, keywords = {design cycle,design science,relevance cycle,rigor cycle}, number = {2}, pages = {87--92}, title = {{A Three Cycle View of Design Science Research}}, volume = {19}, year = {2007} } @article{Hevner2004, abstract = {Two paradigms characterize much of the research in the Information Systems discipline: behavioral science and design science. The behavioral science paradigm seeks to develop and verify theories that explain or predict human or organizational behavior. The design-science paradigm seeks to extend the boundaries of human and organizational capabilities by creating new and innovative artifacts. Both paradigms are foundational to the IS discipline, positioned as it is at the confluence of people, organizations, and technology. Our objective is to describe the performance of design-science research in Information Systems via a concise conceptual framework and clear guidelines for understanding, executing, and evaluating the research. In the design-science paradigm, knowledge and understanding of a problem domain and its solution are achieved in the building and application of the designed artifact. Three recent exemplars in the research literature are used to demonstrate the application of these guidelines. We conclude with an analysis of the challenges of performing high-quality design-science research in the context of the broader IS community.}, author = {Hevner, Alan R. and March, Salvatore T. and Park, Jinsoo and Ram, Sudha}, doi = {10.2307/25148625}, issn = {0276-7783}, journal = {MIS Quarterly}, keywords = {Information Systems research methodologies,business environment,creativity,design artifact,design science,experimental methods,search strategies,technology infrastructure}, number = {1}, pages = {75--105}, title = {{Design Science in Information Systems Research}}, volume = {28}, year = {2004} } @book{Johannesson2014, abstract = {This book is an introductory text on design science, intended to support both graduate students and researchers in structuring, undertaking and presenting design science work.
It builds on established design science methods as well as recent work on presenting design science studies and ethical principles for design science, and also offers novel instruments for visualizing the results, both in the form of process diagrams and through a canvas format. This work focuses on design science as applied to information systems and technology, but it also includes examples from, and perspectives of, other fields of human practice.}, address = {Cham}, author = {Johannesson, Paul and Perjons, Erik}, doi = {10.1007/978-3-319-10632-8}, isbn = {978-3-319-10631-1}, pages = {197}, publisher = {Springer International Publishing}, title = {{An Introduction to Design Science}}, year = {2014} } @article{Peffers2007, author = {Peffers, Ken and Tuunanen, Tuure and Rothenberger, Marcus A. and Chatterjee, Samir}, doi = {10.2753/MIS0742-1222240302}, issn = {0742-1222}, journal = {Journal of Management Information Systems}, month = {dec}, number = {3}, pages = {45--77}, title = {{A Design Science Research Methodology for Information Systems Research}}, volume = {24}, year = {2007} } @incollection{Sonnenberg2012, abstract = {The central outcome of design science research (DSR) is prescriptive knowledge in the form of IT artifacts and recommendations. However, prescriptive knowledge is considered to have no truth value in itself. Given this assumption, the validity of DSR outcomes can only be assessed by means of descriptive knowledge to be obtained at the conclusion of a DSR process. This is reflected in the build-evaluate pattern of current DSR methodologies. Recognizing the emergent nature of IT artifacts, this build-evaluate pattern, however, poses unfavorable implications regarding the achievement of rigor within a DSR project. While it is vital in DSR to prove the usefulness of an artifact, a rigorous DSR process also requires justifying and validating the artifact design itself even before it has been put into use. This paper proposes three principles for evaluating DSR artifacts which not only address the evaluation of an artifact's usefulness but also the evaluation of design decisions made to build an artifact.
In particular, it is argued that by following these principles the prescriptive knowledge produced in DSR can be considered to have a truth-like value.}, author = {Sonnenberg, Christian and vom Brocke, Jan}, booktitle = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)}, doi = {10.1007/978-3-642-29863-9_28}, isbn = {978-3-642-29862-2}, issn = {0302-9743}, keywords = {Design science research,design theory,epistemology,evaluation}, pages = {381--397}, title = {{Evaluations in the Science of the Artificial – Reconsidering the Build-Evaluate Pattern in Design Science Research}}, volume = {7286}, year = {2012} } @article{Venable2016, abstract = {Evaluation is a central and essential activity in conducting rigorous Design Science Research (DSR), yet there is surprisingly little guidance about designing the DSR evaluation activity beyond suggesting possible methods that could be used for evaluation. This paper extends the notable exception of the existing framework of Pries-Heje et al [11] to address this problem. The paper proposes an extended DSR evaluation framework together with a DSR evaluation design method that can guide DSR researchers in choosing an appropriate strategy for evaluation of the design artifacts and design theories that form the output from DSR. The extended DSR evaluation framework asks the DSR researcher to consider (as input to the choice of the DSR evaluation strategy) contextual factors of goals, conditions, and constraints on the DSR evaluation, e.g. the type and level of desired rigor, the type of artifact, the need to support formative development of the designed artifacts, the properties of the artifact to be evaluated, and the constraints on resources available, such as time, labor, facilities, expertise, and access to research subjects. The framework and method support matching these in the first instance to one or more DSR evaluation strategies, including the choice of ex ante (prior to artifact construction) versus ex post evaluation (after artifact construction) and naturalistic (e.g., field setting) versus artificial evaluation (e.g., laboratory setting). Based on the recommended evaluation strategy(ies), guidance is provided concerning what methodologies might be appropriate within the chosen strategy(ies).}, author = {Venable, John and Pries-Heje, Jan and Baskerville, Richard}, doi = {10.1057/ejis.2014.36}, issn = {0960-085X}, journal = {European Journal of Information Systems}, keywords = {design science research,evaluation method,evaluation strategy,information,research methodology,systems evaluation}, month = {jan}, number = {1}, pages = {77--89}, title = {{FEDS: a Framework for Evaluation in Design Science Research}}, volume = {25}, year = {2016} } @book{Wieringa2014, abstract = {Design scientists have to balance the demands of methodological rigor that they share with purely curiosity-driven scientists, with the demands of practical utility that they share with utility-driven engineers. Balancing these conflicting demands can be ...
}, address = {Berlin, Heidelberg}, author = {Wieringa, Roel J.}, doi = {10.1007/978-3-662-43839-8}, isbn = {978-3-662-43838-1}, keywords = {Design Science}, pages = {493}, publisher = {Springer Berlin Heidelberg}, title = {{Design Science Methodology for Information Systems and Software Engineering}}, year = {2014} } @article{Falessi2018, abstract = {[Context] Controlled experiments are an important empirical method to generate and validate theories. Many software engineering experiments are conducted with students. It is often claimed that the use of students as participants in experiments comes at the cost of low external validity while using professionals does not. [Objective] We believe a deeper understanding is needed on the external validity of software engineering experiments conducted with students or with professionals. We aim to gain insight about the pros and cons of using students and professionals in experiments. [Method] We performed an unconventional focus group approach and a follow-up survey. First, during a session at ISERN 2014, 65 empirical researchers, including the seven authors, argued and discussed the use of students in experiments with an open mind. Afterwards, we revisited the topic and elicited experts' opinions to foster discussions. Then we derived 14 statements and asked the ISERN attendees, excluding the authors, to provide their level of agreement with the statements. Finally, we analyzed the researchers' opinions and used the findings to further discuss the statements. [Results] Our survey results showed that, in general, the respondents disagreed with us about the drawbacks of professionals. We, on the contrary, strongly believe that no population (students, professionals, or others) can be deemed better than another in absolute terms. [Conclusion] Using students as participants remains a valid simplification of reality needed in laboratory contexts. It is an effective way to advance software engineering theories and technologies but, like any other aspect of study settings, should be carefully considered during the design, execution, interpretation, and reporting of an experiment. The key is to understand which developer population portion is being represented by the participants in an experiment. Thus, a proposal for describing experimental participants is put forward.}, author = {Falessi, Davide and Juristo, Natalia and Wohlin, Claes and Turhan, Burak and M{\"{u}}nch, J{\"{u}}rgen and Jedlitschka, Andreas and Oivo, Markku}, doi = {10.1007/s10664-017-9523-3}, issn = {1382-3256}, journal = {Empirical Software Engineering}, keywords = {Experimentation,Generalization,Participants in experiments,Subjects of experiments,Threats to validity}, month = {feb}, number = {1}, pages = {452--489}, title = {{Empirical software engineering experts on the use of students and professionals in experiments}}, volume = {23}, year = {2018} } @article{Host2000, abstract = {In many studies in software engineering students are used instead of professional software developers, although the objective is to draw conclusions valid for professional software developers. This paper presents a study where the difference between the two groups is evaluated.
People from the two groups have individually carried out a non-trivial software engineering judgement task involving the assessment of how ten different factors affect the lead-time of software development projects. It is found that the differences are only minor, and it is concluded that software engineering students may be used instead of professional software developers under certain conditions. These conditions are identified and described based on generally accepted criteria for validity evaluation of empirical studies.}, author = {H{\"{o}}st, Martin and Regnell, Bj{\"{o}}rn and Wohlin, Claes}, doi = {10.1023/A:1026586415054}, issn = {1382-3256}, journal = {Empirical Software Engineering}, number = {3}, pages = {201--214}, title = {{Using students as subjects - a comparative study of students and professionals in lead-time impact assessment}}, volume = {5}, year = {2000} } @incollection{Jedlitschka2008, address = {London}, author = {Jedlitschka, Andreas and Ciolkowski, Marcus and Pfahl, Dietmar}, booktitle = {Guide to Advanced Empirical Software Engineering}, doi = {10.1007/978-1-84800-044-5_8}, pages = {201--228}, publisher = {Springer London}, title = {{Reporting Experiments in Software Engineering}}, year = {2008} } @inproceedings{Salman2015, abstract = {Background: Most of the experiments in software engineering (SE) employ students as subjects. This raises concerns about the realism of the results acquired through students and the adaptability of the results to the software industry. Aim: We compare students and professionals to understand how well students represent professionals as experimental subjects in SE research. Method: The comparison was made in the context of two test-driven development experiments conducted with students in an academic setting and with professionals in a software organization. We measured the code quality of several tasks implemented by both subject groups and checked whether students and professionals perform similarly in terms of code quality metrics. Results: Except for minor differences, neither of the subject groups is better than the other. Professionals produce larger, yet less complex, methods when they use their traditional development approach, whereas both subject groups perform similarly when they apply a new approach for the first time. Conclusion: Given a carefully scoped experiment on a development approach that is new to both students and professionals, similar performances are observed. Further investigation is necessary to analyze the effects of subject demographics and level of experience on the results of SE experiments.}, author = {Salman, Iflaah and Misirli, Ayse Tosun and Juristo, Natalia}, booktitle = {2015 IEEE/ACM 37th IEEE International Conference on Software Engineering}, doi = {10.1109/ICSE.2015.82}, isbn = {978-1-4799-1934-5}, issn = {0270-5257}, keywords = {Code quality,Empirical study,Experimentation,Test-driven development}, month = {may}, pages = {666--676}, publisher = {IEEE}, title = {{Are Students Representatives of Professionals in Software Engineering Experiments?}}, volume = {1}, year = {2015} } @book{Wohlin2012, abstract = {Empirical software engineering research can be organized in several ways, including experiments, case studies, and surveys. Experiments sample over the variables, trying to represent all possible cases; case studies sample from the variables, representing only the typical case(s). Every case study or experiment should have a hypothesis to express the desired result.
The experimental design is especially important because it identifies key variables and their relationships. The design uses balancing, blocking, and local control to help minimize error. Analysis techniques depend on the design, the distribution of the data, and the type of investigation being carried out. Different techniques allow us to look at variable interaction and to look at combinations of effects. Using a technique similar to a board game, we can determine when we have enough evidence to demonstrate clear relationships among variables.}, address = {Berlin, Heidelberg}, author = {Wohlin, Claes and Runeson, Per and H{\"{o}}st, Martin and Ohlsson, Magnus C. and Regnell, Bj{\"{o}}rn and Wessl{\'{e}}n, Anders}, doi = {10.1007/978-3-642-29044-2}, isbn = {978-3-642-29043-5}, pages = {1--236}, publisher = {Springer Berlin Heidelberg}, title = {{Experimentation in Software Engineering}}, year = {2012} } @incollection{Easterbrook2008, abstract = {Selecting a research method for empirical software engineering research is problematic because the benefits and challenges to using each method are not yet well catalogued. Therefore, this chapter describes a number of empirical methods available. It examines the goals of each and analyzes the types of questions each best addresses. Theoretical stances behind the methods, practical considerations in the application of the methods and data collection are also briefly reviewed. Taken together, this information provides a suitable basis for both understanding and selecting from the variety of methods applicable to empirical software engineering.}, address = {London}, author = {Easterbrook, Steve and Singer, Janice and Storey, Margaret-Anne and Damian, Daniela}, booktitle = {Guide to Advanced Empirical Software Engineering}, doi = {10.1007/978-1-84800-044-5_11}, keywords = {empirical,survey,theory}, pages = {285--311}, publisher = {Springer London}, title = {{Selecting Empirical Methods for Software Engineering Research}}, year = {2008} } @book{Felderer2020, address = {Cham}, doi = {10.1007/978-3-030-32489-6}, editor = {Felderer, Michael and Travassos, Guilherme Horta}, isbn = {978-3-030-32488-9}, publisher = {Springer International Publishing}, title = {{Contemporary Empirical Methods in Software Engineering}}, year = {2020} } @inproceedings{Kitchenham2004a, abstract = {Our objective is to describe how software engineering might benefit from an evidence-based approach and to identify the potential difficulties associated with the approach. We compared the organisation and technical infrastructure supporting evidence-based medicine (EBM) with the situation in software engineering. We considered the impact that factors peculiar to software engineering (i.e. the skill factor and the lifecycle factor) would have on our ability to practice evidence-based software engineering (EBSE). EBSE promises a number of benefits by encouraging integration of research results with a view to supporting the needs of many different stakeholder groups. However, we do not currently have the infrastructure needed for widespread adoption of EBSE. The skill factor means software engineering experiments are vulnerable to subject and experimenter bias. The lifecycle factor means it is difficult to determine how technologies will behave once deployed.
Software engineering would benefit from adopting what it can of the evidence approach provided that it deals with the specific problems that arise from the nature of software engineering.}, author = {Kitchenham, Barbara A. and Dyb{\aa}, Tore and J{\o}rgensen, Magne}, booktitle = {Proceedings of the 26th International Conference on Software Engineering}, doi = {10.1109/ICSE.2004.1317449}, isbn = {0-7695-2163-0}, issn = {0270-5257}, pages = {273--281}, publisher = {IEEE Computer Society}, title = {{Evidence-based software engineering}}, year = {2004} } @article{Runeson2009, abstract = {Case study is a suitable research methodology for software engineering research since it studies contemporary phenomena in its natural context. However, the understanding of what constitutes a case study varies, and hence the quality of the resulting studies. This paper aims at providing an introduction to case study methodology and guidelines for researchers conducting case studies and readers studying reports of such studies. The content is based on the authors' own experience from conducting and reading case studies. The terminology and guidelines are compiled from different methodology handbooks in other research domains, in particular social science and information systems, and adapted to the needs in software engineering. We present recommended practices for software engineering case studies as well as empirically derived and evaluated checklists for researchers and readers of case study research.}, author = {Runeson, Per and H{\"{o}}st, Martin}, doi = {10.1007/s10664-008-9102-8}, issn = {1382-3256}, journal = {Empirical Software Engineering}, keywords = {Case study,Checklists,Guidelines,Research methodology}, month = {apr}, number = {2}, pages = {131--164}, title = {{Guidelines for conducting and reporting case study research in software engineering}}, volume = {14}, year = {2009} } @book{Shull2008, abstract = {Empirical studies have become an integral element of software engineering research and practice. This unique text/reference includes chapters from some of the top international empirical software engineering researchers and focuses on the practical knowledge necessary for conducting, reporting and using empirical methods in software engineering. Part 1, Research Methods and Techniques, examines the proper use of various strategies for collecting and analysing data, and the uses for which those strategies are most appropriate. Part 2, Practical Foundations, provides a discussion of several important global issues that need to be considered from the very beginning of research planning. Finally, Knowledge Creation offers insight on using a set of disparate studies to provide useful decision support.
Topics and features: offers information across a range of techniques, methods, and qualitative and quantitative issues, providing a toolkit for the reader that is applicable across the diversity of software development contexts; presents reference material with concrete software engineering examples; provides guidance on how to design, conduct, analyse, interpret and report empirical studies, taking into account the common difficulties and challenges encountered in the field; arms researchers with the information necessary to avoid fundamental risks; tackles appropriate techniques for addressing disparate studies, ensuring the relevance of empirical software engineering and showing its practical impact; describes methods that are less often used in the field, providing less conventional but still rigorous and useful ways of collecting data; and supplies detailed information on topics (such as surveys) that often contain methodological errors. This broad-ranging, practical guide will prove an invaluable and useful reference for practising software engineers and researchers. In addition, it will be suitable for graduate students studying empirical methods in software development.}, address = {London}, doi = {10.1007/978-1-84800-044-5}, editor = {Shull, Forrest and Singer, Janice and Sj{\o}berg, Dag I. K.}, isbn = {978-1-84800-043-8}, pages = {1--388}, publisher = {Springer London}, title = {{Guide to Advanced Empirical Software Engineering}}, year = {2008} } @incollection{Singer2008, address = {London}, author = {Singer, Janice and Sim, Susan E. and Lethbridge, Timothy C.}, booktitle = {Guide to Advanced Empirical Software Engineering}, doi = {10.1007/978-1-84800-044-5_1}, pages = {9--34}, publisher = {Springer London}, title = {{Software Engineering Data Collection for Field Studies}}, year = {2008} } @article{Stol2018, abstract = {A variety of research methods and techniques are available to SE researchers, and while several overviews exist, there is consistency neither in the research methods covered nor in the terminology used. Furthermore, research is sometimes critically reviewed for characteristics inherent to the methods. We adopt a taxonomy from the social sciences, termed here the ABC framework for SE research, which offers a holistic view of eight archetypal research strategies. ABC refers to the research goal that strives for generalizability over Actors (A) and precise measurement of their Behavior (B), in a realistic Context (C).
The ABC framework uses two dimensions widely considered to be key in research design: the level of obtrusiveness of the research and the generalizability of research findings. We discuss metaphors for each strategy and their inherent limitations and potential strengths. We illustrate these research strategies in two key SE domains, global software engineering and requirements engineering, and apply the framework to a sample of 75 articles. Finally, we discuss six ways in which the framework can advance SE research.}, author = {Stol, Klaas-Jan and Fitzgerald, Brian}, doi = {10.1145/3241743}, issn = {1049-331X}, journal = {ACM Transactions on Software Engineering and Methodology}, keywords = {Research methodology,Research strategy}, month = {oct}, number = {3}, pages = {1--51}, title = {{The ABC of Software Engineering Research}}, volume = {27}, year = {2018} } @incollection{Wieringa2014a, abstract = {This book provides guidelines for practicing design science in the fields of information systems and software engineering research. A design process usually iterates over two activities: first designing an artifact that improves something for stakeholders and subsequently empirically investigating the performance of that artifact in its context. This “validation in context” is a key feature of the book - since an artifact is designed for a context, it should also be validated in this context. The book is divided into five parts. Part I discusses the fundamental nature of design science and its artifacts, as well as related design research questions and goals. Part II deals with the design cycle, i.e. the creation, design and validation of artifacts based on requirements and stakeholder goals. To elaborate this further, Part III presents the role of conceptual frameworks and theories in design science. Part IV continues with the empirical cycle to investigate artifacts in context, and presents the different elements of research problem analysis, research setup and data analysis. Finally, Part V deals with the practical application of the empirical cycle by presenting in detail various research methods, including observational case studies, case-based and sample-based experiments and technical action research. These main sections are complemented by two generic checklists, one for the design cycle and one for the empirical cycle. The book is written for students as well as academic and industrial researchers in software engineering or information systems. It provides guidelines on how to effectively structure research goals, how to analyze research problems concerning design goals and knowledge questions, how to validate artifact designs and how to empirically investigate artifacts in context – and finally how to present the results of the design cycle as a whole.}, address = {Berlin, Heidelberg}, author = {Wieringa, Roel J.}, booktitle = {Design Science Methodology for Information Systems and Software Engineering}, doi = {10.1007/978-3-662-43839-8_2}, isbn = {978-3-662-43839-8}, pages = {13--23}, publisher = {Springer Berlin Heidelberg}, title = {{Research Goals and Research Questions}}, year = {2014} } @article{Wohlin2015a, abstract = {Several factors make empirical research in software engineering particularly challenging as it requires studying not only technology but its stakeholders' activities while drawing concepts and theories from social science.
Researchers, in general, agree that selecting a research design in empirical software engineering research is challenging, because the implications of using individual research methods are not well recorded. The main objective of this article is to make researchers aware and support them in their research design, by providing a foundation of knowledge about empirical software engineering research decisions, in order to ensure that researchers make well-founded and informed decisions about their research designs. This article provides a decision-making structure containing a number of decision points, each one of them representing a specific aspect of empirical software engineering research. The article provides an introduction to each decision point and its constituents, as well as to the relationships between the different parts in the decision-making structure. The intention is that the structure should act as a starting point for the research design before going into the details of the research design chosen. The article provides an in-depth discussion of decision points in relation to the research design when conducting empirical research.}, author = {Wohlin, Claes and Aurum, Ayb{\"{u}}ke}, doi = {10.1007/s10664-014-9319-7}, issn = {1382-3256}, journal = {Empirical Software Engineering}, keywords = {Empirical software engineering research,Research design,Research methods,Selecting research method}, month = {dec}, number = {6}, pages = {1427--1455}, title = {{Towards a decision-making structure for selecting a research design in empirical software engineering}}, volume = {20}, year = {2015} } @incollection{Wohlin2003, author = {Wohlin, Claes and H{\"{o}}st, Martin and Henningsson, Kennet}, booktitle = {Empirical Methods and Studies in Software Engineering: Experiences from ESERNET}, doi = {10.1007/978-3-540-45143-3_2}, isbn = {3-540-40672-7}, pages = {7--23}, publisher = {Springer Berlin Heidelberg}, title = {{Empirical Research Methods in Software Engineering}}, volume = {2765}, year = {2003} } @article{Garousi2019, abstract = {Context: A Multivocal Literature Review (MLR) is a form of a Systematic Literature Review (SLR) which includes the grey literature (e.g., blog posts, videos and white papers) in addition to the published (formal) literature (e.g., journal and conference papers). MLRs are useful for both researchers and practitioners since they provide summaries of both the state of the art and the state of the practice in a given area. MLRs are popular in other fields and have recently started to appear in software engineering (SE). As more MLR studies are conducted and reported, it is important to have a set of guidelines to ensure high quality of MLR processes and their results. Objective: There are several guidelines to conduct SLR studies in SE. However, several phases of MLRs differ from those of traditional SLRs, for instance with respect to the search process and source quality assessment. Therefore, SLR guidelines are only partially useful for conducting MLR studies. Our goal in this paper is to present guidelines on how to conduct MLR studies in SE. Method: To develop the MLR guidelines, we benefit from several inputs: (1) existing SLR guidelines in SE, (2) a literature survey of MLR guidelines and experience papers in other fields, and (3) our own experiences in conducting several MLRs in SE. We took the popular SLR guidelines of Kitchenham and Charters as the baseline and extended/adopted them to conduct MLR studies in SE.
All derived guidelines are discussed in the context of an already-published MLR in SE as the running example. Results: The resulting guidelines cover all phases of conducting and reporting MLRs in SE, from the planning phase, through conducting the review, to the final reporting of the review. In particular, we believe that incorporating and adopting a vast set of experience-based recommendations from MLR guidelines and experience papers in other fields has enabled us to propose a set of guidelines with solid foundations. Conclusion: Having been developed on the basis of several types of experience and evidence, the provided MLR guidelines will support researchers in effectively and efficiently conducting new MLRs in any area of SE. The authors recommend that researchers utilize these guidelines in their MLR studies and then share their lessons learned and experiences.}, author = {Garousi, Vahid and Felderer, Michael and M{\"{a}}ntyl{\"{a}}, Mika V.}, doi = {10.1016/j.infsof.2018.09.006}, issn = {0950-5849}, journal = {Information and Software Technology}, keywords = {Evidence-based software engineering,Grey literature,Guidelines,Literature study,Multivocal literature review,Systematic literature review,Systematic mapping study}, month = {feb}, pages = {101--121}, publisher = {Elsevier B.V.}, title = {{Guidelines for including grey literature and conducting multivocal literature reviews in software engineering}}, volume = {106}, year = {2019} } @inproceedings{Garousi2016, abstract = {Systematic Literature Reviews (SLR) may not provide insight into the "state of the practice" in SE, as they do not typically include the "grey" (non-published) literature. A Multivocal Literature Review (MLR) is a form of a SLR which includes grey literature in addition to the published (formal) literature. Only a few MLRs have been published in SE so far. We aim at raising the awareness for MLRs in SE by addressing two research questions (RQs): (1) What types of knowledge are missed when a SLR does not include the multivocal literature in a SE field? and (2) What do we, as a community, gain when we include the multivocal literature and conduct MLRs? To answer these RQs, we sample a few example SLRs and MLRs and identify the missing and the gained knowledge due to excluding or including the grey literature. We find that (1) grey literature can give substantial benefits in certain areas of SE, and that (2) the inclusion of grey literature brings forward certain challenges as evidence in them is often experience and opinion based. Given these conflicting viewpoints, the authors are planning to prepare systematic guidelines for performing MLRs in SE.}, address = {New York, New York, USA}, author = {Garousi, Vahid and Felderer, Michael and M{\"{a}}ntyl{\"{a}}, Mika V.}, booktitle = {Proceedings of the 20th International Conference on Evaluation and Assessment in Software Engineering - EASE '16}, doi = {10.1145/2915970.2916008}, isbn = {9781450336918}, keywords = {Empirical software engineering,Grey literature,MLR,Multivocal Literature Reviews,Research methodology,SLR,Systematic literature reviews}, pages = {1--6}, publisher = {ACM Press}, title = {{The need for multivocal literature reviews in software engineering}}, year = {2016} } @incollection{Garousi2020, abstract = {Researchers generally place the most trust in peer-reviewed, published information, such as journals and conference papers.
By contrast, software engineering (SE) practitioners typically do not have the time, access or expertise to review and benefit from such publications. As a result, practitioners are more likely to turn to other sources of information that they trust, e.g., trade magazines, online blog-posts, survey results or technical reports, collectively referred to as Grey Literature (GL). Furthermore, practitioners also share their ideas and experiences as GL, which can serve as a valuable data source for research. While GL itself is not a new topic in SE, using, benefitting and synthesizing knowledge from the GL in SE is a contemporary topic in empirical SE research and we are seeing that researchers are increasingly benefitting from the knowledge available within GL. The goal of this chapter is to provide an overview of GL in SE, together with insights on how SE researchers can effectively use and benefit from the knowledge and evidence available in the vast amount of GL.}, address = {Cham}, author = {Garousi, Vahid and Felderer, Michael and M{\"{a}}ntyl{\"{a}}, Mika V. and Rainer, Austen}, booktitle = {Contemporary Empirical Methods in Software Engineering}, doi = {10.1007/978-3-030-32489-6_14}, isbn = {9783030324896}, pages = {385--413}, publisher = {Springer International Publishing}, title = {{Benefitting from the Grey Literature in Software Engineering Research}}, year = {2020} } @inproceedings{Neto2019, abstract = {Background: In recent years, studies involving Grey Literature (GL) have been growing and attracting the attention of researchers in software engineering (SE). One of the sources of GL refers to content produced by professionals based on their practical experiences. Recent research in SE states that GL can complement areas of research that are not yet clearly defined in the scientific literature. In this context, the Multivocal Literature Review (MLR), a form of Systematic Literature Review (SLR) with the inclusion of GL, emerges. Goal: Provide preliminary work about the current research involving MLR studies. First, we investigate the motivation of the researchers to include GL in review studies; and second, we examine how GL was included in the studies. Method: A tertiary study was conducted to search MLR studies published between 2009 and April 2019. Results: The main motivations for including GL in review studies are: lack of academic research on the topic, emerging research on this topic, and complementary evidence in the GL. Internet articles and white papers were the main sources of GL data used. Conclusions: The conduct of MLR studies is still in its early stages; we have identified only 12 secondary studies. The MLR studies were conducted using guidelines for performing SLRs. We consider this a threat to the validity of these studies, since guidelines for conducting SLR studies do not provide recommendations for quality analysis and synthesis of primary studies, including GL.}, author = {Neto, Geraldo Torres G. and Santos, Wylliams B. and Endo, Patricia Takako and Fagundes, Roberta A. A.},
booktitle = {2019 ACM/IEEE International Symposium on Empirical Software Engineering and Measurement (ESEM)}, doi = {10.1109/ESEM.2019.8870142}, isbn = {978-1-7281-2968-6}, month = {sep}, pages = {1--6}, publisher = {IEEE}, title = {{Multivocal literature reviews in software engineering: Preliminary findings from a tertiary study}}, year = {2019} } @article{Rainer2019, abstract = {Background: Software engineering research has a growing interest in grey literature (GL). Aim: To improve the identification of relevant and rigorous GL. Method: We develop and demonstrate heuristics to find more relevant and rigorous GL. The heuristics generate stratified samples of search and post-search datasets using a formally structured set of search keywords. Conclusion: The heuristics require further evaluation. We are developing a tool to implement the heuristics.}, author = {Rainer, Austen and Williams, Ashley}, doi = {10.1016/j.infsof.2018.10.007}, issn = {0950-5849}, journal = {Information and Software Technology}, keywords = {Grey literature review,Quality criteria,Reasoning,Search engines}, month = {feb}, pages = {231--233}, publisher = {Elsevier B.V.}, title = {{Heuristics for improving the rigour and relevance of grey literature searches for software engineering research}}, volume = {106}, year = {2019} } @article{Dyer2015, abstract = {In today's software-centric world, ultra-large-scale software repositories, such as SourceForge, GitHub, and Google Code, are the new library of Alexandria. They contain an enormous corpus of software and related information. Scientists and engineers alike are interested in analyzing this wealth of information. However, systematic extraction and analysis of relevant data from these repositories for testing hypotheses is hard, and best left for mining software repository (MSR) experts! Specifically, mining source code yields significant insights into software development artifacts and processes. Unfortunately, mining source code at a large scale remains a difficult task. Previous approaches had to either limit the scope of the projects studied, limit the scope of the mining task to be more coarse grained, or sacrifice studying the history of the code. In this article we address mining source code: (a) at a very large scale; (b) at a fine-grained level of detail; and (c) with full history information. To address these challenges, we present domain-specific language features for source-code mining in our language and infrastructure called Boa. The goal of Boa is to ease testing MSR-related hypotheses. Our evaluation demonstrates that Boa substantially reduces programming efforts, thus lowering the barrier to entry. We also show drastic improvements in scalability.}, author = {Dyer, Robert and Nguyen, Hoan Anh and Rajan, Hridesh and Nguyen, Tien N.}, doi = {10.1145/2803171}, issn = {1049-331X}, journal = {ACM Transactions on Software Engineering and Methodology}, keywords = {Boa,Domain-specific language,Ease of use,Lower barrier to entry,Mining software repositories,Scalable}, month = {dec}, number = {1}, pages = {1--34}, title = {{Boa: Ultra-large-scale software repository and source-code mining}}, volume = {25}, year = {2015} } @inproceedings{Hassan2008, abstract = {Source control repositories, bug repositories, archived communications, deployment logs, and code repositories are examples of software repositories that are commonly available for most software projects.
The Mining Software Repositories (MSR) field analyzes and cross-links the rich data available in these repositories to uncover interesting and actionable information about software systems. By transforming these repositories from static record-keeping ones into active repositories, we can guide decision processes in modern software projects. For example, data in source control repositories, traditionally used to archive code, could be linked with data in bug repositories to help practitioners propagate complex changes and to warn them about risky code based on prior changes and bugs. In this paper, we present a brief history of the MSR field and discuss several recent achievements and results of using MSR techniques to support software research and practice. We then discuss the various opportunities and challenges that lie in the road ahead for this important and emerging field.}, author = {Hassan, Ahmed E.}, booktitle = {2008 Frontiers of Software Maintenance}, doi = {10.1109/FOSM.2008.4659248}, isbn = {978-1-4244-2654-6}, month = {sep}, pages = {48--57}, publisher = {IEEE}, title = {{The road ahead for Mining Software Repositories}}, year = {2008} } @inproceedings{Hassan2010, abstract = {Mining software engineering data has emerged as a successful research direction over the past decade. In this position paper, we advocate Software Intelligence (SI) as the future of mining software engineering data, within modern software engineering research, practice, and education. We coin the name SI as an inspiration from the Business Intelligence (BI) field, which offers concepts and techniques to improve business decision making by using fact-based support systems. Similarly, SI offers software practitioners (not just developers) up-to-date and pertinent information to support their daily decision-making processes. SI should support decision-making processes throughout the lifetime of a software system, not just during its development phase. The vision of SI has yet to become a reality that would enable software engineering research to have a strong impact on modern software practice. Nevertheless, recent advances in the Mining Software Repositories (MSR) field show great promise and provide strong support for realizing SI in the near future. This position paper summarizes the state of practice and research of SI, and lays out future research directions for mining software engineering data to enable SI.}, address = {New York, New York, USA}, author = {Hassan, Ahmed E. and Xie, Tao}, booktitle = {Proceedings of the FSE/SDP workshop on Future of software engineering research - FoSER '10}, doi = {10.1145/1882362.1882397}, isbn = {9781450304276}, keywords = {Mining software engineering data,Mining software repositories,Software intelligence}, pages = {161}, publisher = {ACM Press}, title = {{Software Intelligence: The Future of Mining Software Engineering Data}}, year = {2010} } @inproceedings{Kalliamvakou2014, abstract = {With over 10 million git repositories, GitHub is becoming one of the most important sources of software artifacts on the Internet. Researchers are starting to mine the information stored in GitHub's event logs, trying to understand how its users employ the site to collaborate on software. However, so far there have been no studies describing the quality and properties of the data available from GitHub.
We document the results of an empirical study aimed at understanding the characteristics of the repositories in GitHub and how users take advantage of GitHub's main features, namely commits, pull requests, and issues. Our results indicate that, while GitHub is a rich source of data on software development, mining GitHub for research purposes should take various potential perils into consideration. We show, for example, that the majority of the projects are personal and inactive; that GitHub is also being used for free storage and as a Web hosting service; and that almost 40{\%} of all pull requests do not appear as merged, even though they were. We provide a set of recommendations for software engineering researchers on how to approach the data in GitHub.}, address = {New York, New York, USA}, author = {Kalliamvakou, Eirini and Gousios, Georgios and Blincoe, Kelly and Singer, Leif and German, Daniel M. and Damian, Daniela}, booktitle = {Proceedings of the 11th Working Conference on Mining Software Repositories - MSR 2014}, doi = {10.1145/2597073.2597074}, isbn = {9781450328630}, keywords = {Bias,Code reviews,Git,GitHub,Mining software repositories}, pages = {92--101}, publisher = {ACM Press}, title = {{The promises and perils of mining GitHub}}, year = {2014} } @inproceedings{Poncin2011, abstract = {Software developers' activities are in general recorded in software repositories such as version control systems, bug trackers and mail archives. While abundant information is usually present in such repositories, successful information extraction is often challenged by the necessity to simultaneously analyze different repositories and to combine the information obtained. We propose to apply process mining techniques, originally developed for business process analysis, to address this challenge. However, in order for process mining to become applicable, different software repositories should be combined, and "related" software development events should be matched: e.g., mails sent about a file, modifications of the file and bug reports that can be traced back to it. The combination and matching of events has been implemented in FRASR (FRamework for Analyzing Software Repositories), augmenting the process mining framework ProM. FRASR has been successfully applied in a series of case studies addressing such aspects of the development process as roles of different developers and the way bug reports are handled.}, author = {Poncin, Wouter and Serebrenik, Alexander and van den Brand, Mark}, booktitle = {2011 15th European Conference on Software Maintenance and Reengineering}, doi = {10.1109/CSMR.2011.5}, isbn = {978-1-61284-259-2}, issn = {1534-5351}, keywords = {Process mining,Software repositories}, month = {mar}, pages = {5--14}, publisher = {IEEE}, title = {{Process Mining Software Repositories}}, year = {2011} } @inproceedings{Spadini2018, abstract = {Software repositories contain historical and valuable information about the overall development of software systems. Mining software repositories (MSR) is nowadays considered one of the most interesting growing fields within software engineering. MSR focuses on extracting and analyzing data available in software repositories to uncover interesting, useful, and actionable information about the system. Even though MSR plays an important role in software engineering research, few tools have been created and made public to support developers in extracting information from Git repositories.
In this paper, we present PyDriller, a Python framework that eases the process of mining Git. We compare our tool against the state-of-the-art Python framework GitPython, demonstrating that PyDriller can achieve the same results with, on average, 50{\%} less LOC and significantly lower complexity.}, address = {New York, NY, USA}, author = {Spadini, Davide and Aniche, Maur{\'{i}}cio and Bacchelli, Alberto}, booktitle = {Proceedings of the 2018 26th ACM Joint Meeting on European Software Engineering Conference and Symposium on the Foundations of Software Engineering}, doi = {10.1145/3236024.3264598}, isbn = {9781450355735}, keywords = {Git,GitPython,Mining Software Repositories,Python}, month = {oct}, pages = {908--911}, publisher = {ACM}, title = {{PyDriller: Python framework for mining software repositories}}, year = {2018} } @book{Creswell2018, address = {Los Angeles, CA, USA}, author = {Creswell, John Ward and Creswell, John David}, edition = {5th}, isbn = {9781506386706}, publisher = {SAGE Publications, Inc}, title = {{Research Design: Qualitative, Quantitative, and Mixed Methods Approaches}}, year = {2018} } @book{Feyerabend1993, address = {London, UK}, author = {Feyerabend, Paul}, edition = {3rd}, publisher = {Verso}, title = {{Against Method: Outline of an Anarchistic Theory of Knowledge}}, year = {1993} } @book{Godfrey-Smith2003, address = {Chicago, IL, USA}, author = {Godfrey-Smith, Peter}, doi = {10.7208/chicago/9780226300610.001.0001}, isbn = {978-0-226-30063-4}, publisher = {University of Chicago Press}, title = {{Theory and Reality: An Introduction to the Philosophy of Science}}, year = {2003} } @book{Kuhn1970, address = {Chicago, IL, USA}, author = {Kuhn, Thomas S.}, edition = {2nd}, isbn = {0-226-45803-2}, publisher = {University of Chicago Press}, title = {{The Structure of Scientific Revolutions}}, year = {1970} } @book{Okasha2016, author = {Okasha, Samir}, doi = {10.1093/actrade/9780198745587.001.0001}, isbn = {9780198745587}, month = {jul}, publisher = {Oxford University Press}, title = {{Philosophy of Science: A Very Short Introduction}}, year = {2016} } @article{Rehman2016, author = {Rehman, Adil Abdul and Alharthi, Khalid}, journal = {International Journal of Educational Investigations}, number = {8}, pages = {51--59}, title = {{An Introduction to Research Paradigms}}, volume = {3}, year = {2016} } @article{Adolph2011, abstract = {Grounded Theory is a research method that generates theory from data and is useful for understanding how people resolve problems that are of concern to them. Although the method looks deceptively simple in concept, implementing Grounded Theory research can often be confusing in practice. Furthermore, despite many papers in the social science disciplines and nursing describing the use of Grounded Theory, there are very few examples and relevant guides for the software engineering researcher. This paper describes our experience using classical (i.e., Glaserian) Grounded Theory in a software engineering context and attempts to interpret the canons of classical Grounded Theory in a manner that is relevant to software engineers. We provide a model to help software engineering researchers interpret the often fuzzy definitions found in Grounded Theory texts and share our experience and lessons learned during our research. We summarize these lessons learned in a set of fifteen guidelines.
}, author = {Adolph, Steve and Hall, Wendy and Kruchten, Philippe}, doi = {10.1007/s10664-010-9152-6}, issn = {1382-3256}, journal = {Empirical Software Engineering}, keywords = {Empirical software engineering research,Grounded theory,Qualitative research,Theory generation}, month = {aug}, number = {4}, pages = {487--513}, title = {{Using grounded theory to study the experience of software development}}, volume = {16}, year = {2011} } @article{Boren2000, abstract = {Thinking-aloud protocols may be the most widely used method in usability testing, but the descriptions of this practice in the usability literature and the work habits of practitioners do not conform to the theoretical basis most often cited for it: Ericsson and Simon's seminal work Protocol Analysis: Verbal Reports as Data. After reviewing Ericsson and Simon's theoretical basis for thinking aloud, we review the ways in which actual usability practice diverges from this model. We then explore the concept of speech genre as an alternative theoretical framework. We first consider uses of this new framework that are consistent with Simon and Ericsson's goal of eliciting a verbal report that is as undirected, undisturbed, and constant as possible. We then go on to consider how the proposed new approach might handle problems that arise in usability testing that appear to require interventions not supported in the older model.}, author = {Boren, T. and Ramey, Judith}, doi = {10.1109/47.867942}, issn = {0361-1434}, journal = {IEEE Transactions on Professional Communication}, number = {3}, pages = {261--278}, title = {{Thinking aloud: reconciling theory and practice}}, volume = {43}, year = {2000} } @inproceedings{Cruzes2011, abstract = {Thematic analysis is an approach that is often used for identifying, analyzing, and reporting patterns (themes) within data in primary qualitative research. 'Thematic synthesis' draws on the principles of thematic analysis and identifies the recurring themes or issues from multiple studies, interprets and explains these themes, and draws conclusions in systematic reviews. This paper conceptualizes the thematic synthesis approach in software engineering as a scientific inquiry involving five steps that parallel those of primary research. The process and outcome associated with each step are described and illustrated with examples from systematic reviews in software engineering.}, author = {Cruzes, Daniela S. and Dyb{\aa}, Tore}, booktitle = {2011 International Symposium on Empirical Software Engineering and Measurement}, doi = {10.1109/ESEM.2011.36}, isbn = {978-1-4577-2203-5}, keywords = {Evidence-based and empirical software engineering,Research synthesis,Secondary research,Systematic review}, month = {sep}, pages = {275--284}, publisher = {IEEE}, title = {{Recommended Steps for Thematic Synthesis in Software Engineering}}, year = {2011} } @incollection{Figueiredo2007, abstract = {This chapter presents an example of action research in the healthcare industry. A synopsis of the project is provided, followed by a discussion of why action research was successful in this project and in this industry. Further discussion focuses on the overall use of action research to increase interactions between academic researchers and business practitioners.
The authors propose the increased use of action research as a synergistic tool to improve the relevance of academic research while increasing rigor in the healthcare industry.}, address = {Boston, MA}, author = {Figueiredo, Ant{\'{o}}nio Dias and Cunha, Paulo Rupino}, booktitle = {Information Systems Action Research: An Applied View of Emerging Concepts and Methods}, doi = {10.1007/978-0-387-36060-7}, editor = {Kock, Ned}, isbn = {978-0-387-36059-1}, pages = {61--96}, publisher = {Springer US}, series = {Integrated Series in Information Systems}, title = {{Information Systems Action Research}}, volume = {13}, year = {2007} } @inproceedings{Hove2005, abstract = {Many phenomena related to software development are qualitative in nature. Relevant measures of such phenomena are often collected using semi-structured interviews. Such interviews involve high costs, and the quality of the collected data is related to how the interviews are conducted. Careful planning and conducting of the interviews are therefore necessary, and experiences from interview studies in software engineering should consequently be collected and analyzed to provide advice to other researchers. We have brought together experiences from 12 software engineering studies, in which a total of 280 interviews were conducted. Four areas were particularly challenging when planning and conducting these interviews: estimating the necessary effort, ensuring that the interviewer had the needed skills, ensuring good interaction between interviewer and interviewees, and using the appropriate tools and project artifacts. The paper gives advice on how to handle these areas and suggests what information about the interviews should be included when reporting studies where interviews have been used in data collection. Knowledge from other disciplines is included. By sharing experience, knowledge about the accomplishments of software engineering interviews is increased and hence, measures of high quality can be achieved.}, author = {Hove, Siw Elisabeth and Anda, Bente}, booktitle = {11th IEEE International Software Metrics Symposium (METRICS'05)}, doi = {10.1109/METRICS.2005.24}, isbn = {0-7695-2371-4}, issn = {1530-1435}, pages = {23--23}, publisher = {IEEE}, title = {{Experiences from Conducting Semi-structured Interviews in Empirical Software Engineering Research}}, year = {2005} } @techreport{Mayring2014, address = {Klagenfurt}, author = {Mayring, Philipp}, keywords = {content analysis,empirical social research,qualitative method,quantitative method,research approach,text analysis}, title = {{Qualitative content analysis: theoretical foundation, basic procedures and software solution}}, year = {2014} } @incollection{Seaman2008,
address = {London}, author = {Seaman, Carolyn B.}, booktitle = {Guide to Advanced Empirical Software Engineering}, doi = {10.1007/978-1-84800-044-5_2}, editor = {Shull, Forrest and Singer, Janice and Sj{\o}berg, Dag I. K.}, pages = {35--62}, publisher = {Springer London}, title = {{Qualitative Methods}}, year = {2008} } @inproceedings{ShaochunXu2005, abstract = {This paper proposes a dialog-based protocol for the study of the cognitive activities during software development and evolution. The dialog-based protocol, derived from the idea of pair programming, is a significant alternative to the common think-aloud protocol, because it lessens the Hawthorne and placebo effects. Using screen-capturing and voice recording instead of videotaping further reduces the Hawthorne effect. The self-directed learning theory provides an encoding scheme and can be used in analyzing the data. A case study illustrates this new approach. {\textcopyright} 2005 IEEE.}, author = {Xu, Shaochun and Rajlich, V{\'{a}}clav}, booktitle = {2005 International Symposium on Empirical Software Engineering}, doi = {10.1109/ISESE.2005.1541848}, isbn = {0-7803-9507-7}, pages = {383--392}, publisher = {IEEE}, title = {{Dialog-based protocol: an empirical research method for cognitive activities in software engineering}}, year = {2005} } @article{Sharp2016, abstract = {Ethnography is a qualitative research method used to study people and cultures. It is largely adopted in disciplines outside software engineering, including different areas of computer science. Ethnography can provide an in-depth understanding of the socio-technological realities surrounding everyday software development practice, i.e., it can help to uncover not only what practitioners do, but also why they do it. Despite its potential, ethnography has not been widely adopted by empirical software engineering researchers, and receives little attention in the related literature. The main goal of this paper is to explain how empirical software engineering researchers would benefit from adopting ethnography. This is achieved by explicating four roles that ethnography can play in furthering the goals of empirical software engineering: to strengthen investigations into the social and human aspects of software engineering; to inform the design of software engineering tools; to improve method and process development; and to inform research programmes. This article introduces ethnography, explains its origin, context, strengths and weaknesses, and presents a set of dimensions that position ethnography as a useful and usable approach to empirical software engineering research. Throughout the paper, relevant examples of ethnographic studies of software practice are used to illustrate the points being made.}, author = {Sharp, Helen and Dittrich, Yvonne and de Souza, Cleidson R. B.},
doi = {10.1109/TSE.2016.2519887}, issn = {0098-5589}, journal = {IEEE Transactions on Software Engineering}, keywords = {Design tools and techniques,computer-supported collaborative work,human factors in software design,software engineering process}, month = {aug}, number = {8}, pages = {786--804}, publisher = {IEEE}, title = {{The Role of Ethnographic Studies in Empirical Software Engineering}}, volume = {42}, year = {2016} } @inproceedings{Stol2016, abstract = {Grounded Theory (GT) has proved an extremely useful research approach in several fields including medical sociology, nursing, education and management theory. However, GT is a complex method based on an inductive paradigm that is fundamentally different from the traditional hypothetico-deductive research model. As there are at least three variants of GT, some ostensibly GT research suffers from method slurring, where researchers adopt an arbitrary subset of GT practices that are not recognizable as GT. In this paper, we describe the variants of GT and identify the core set of GT practices. We then analyze the use of grounded theory in software engineering. We carefully and systematically selected 98 articles that mention GT, of which 52 explicitly claim to use GT, with the other 46 using GT techniques only. Only 16 articles provide detailed accounts of their research procedures. We offer guidelines to improve the quality of both conducting and reporting GT studies. The latter is an important extension since current GT guidelines in software engineering do not cover the reporting process, despite good reporting being necessary for evaluating a study and informing subsequent research.}, address = {New York, New York, USA}, author = {Stol, Klaas-Jan and Ralph, Paul and Fitzgerald, Brian}, booktitle = {Proceedings of the 38th International Conference on Software Engineering - ICSE '16}, doi = {10.1145/2884781.2884833}, isbn = {9781450339001}, keywords = {Grounded theory,Guidelines,Review,Software engineering}, pages = {120--131}, publisher = {ACM Press}, title = {{Grounded Theory in Software Engineering Research: A Critical Review and Guidelines}}, year = {2016} } @article{Ampatzoglou2019, abstract = {Context: Secondary studies are vulnerable to threats to validity. Although mitigating these threats is crucial for the credibility of these studies, we currently lack a systematic approach to identify, categorize and mitigate threats to validity for secondary studies. Objective: In this paper, we review the corpus of secondary studies, with the aim to identify: (a) the trend of reporting threats to validity, (b) the most common threats to validity and corresponding mitigation actions, and (c) possible categories in which threats to validity can be classified. Method: To achieve this goal we employ the tertiary study research method that is used for synthesizing knowledge from existing secondary studies. In particular, we collected data from more than 100 studies, published until December 2016 in top quality software engineering venues (both journals and conferences). Results: Our results suggest that in recent years, secondary studies are more likely to report their threats to validity. However, the presentation of such threats is rather ad hoc, e.g., the same threat may be presented with a different name, or under a different category. To alleviate this problem, we propose a classification schema for reporting threats to validity and possible mitigation actions.
Both the classification of threats and the associated mitigation actions have been validated by an empirical study, i.e., Delphi rounds with experts. Conclusion: Based on the proposed schema, we provide a checklist, which authors of secondary studies can use for identifying and categorizing threats to validity and corresponding mitigation actions, while readers of secondary studies can use the checklist for assessing the validity of the reported results.}, author = {Ampatzoglou, Apostolos and Bibi, Stamatia and Avgeriou, Paris and Verbeek, Marijn and Chatzigeorgiou, Alexander}, doi = {10.1016/j.infsof.2018.10.006}, issn = {09505849}, journal = {Information and Software Technology}, keywords = {Empirical software engineering,Literature Review,Secondary studies,Threats to Validity}, month = {feb}, pages = {201--230}, publisher = {Elsevier B.V.}, title = {{Identifying, categorizing and mitigating threats to validity in software engineering secondary studies}}, volume = {106}, year = {2019} } @article{Budgen2018, abstract = {Context: Many of the systematic reviews published in software engineering are related to research or methodological issues and hence are unlikely to be of direct benefit to practitioners or teachers. Those that are relevant to practice and teaching need to be presented in a form that makes their findings usable with minimum interpretation. Objective: We have examined a sample of the many systematic reviews that have been published over a period of six years, in order to assess how well these are reported and identify useful lessons about how this might be done. Method: We undertook a tertiary study, performing a systematic review of systematic reviews. Our study found 178 systematic reviews published in a set of major software engineering journals over the period 2010–2015. Of these, 37 provided recommendations or conclusions of relevance to education and/or practice and we used the DARE criteria as well as other attributes related to the systematic review process to analyse how well they were reported. Results: We have derived a set of 12 'lessons' that could help authors with reporting the outcomes of a systematic review in software engineering. We also provide an associated checklist for use by journal and conference referees. Conclusion: There are several areas where better reporting is needed, including quality assessment, synthesis, and the procedures followed by the reviewers. Researchers, practitioners, teachers and journal referees would all benefit from better reporting of systematic reviews, both for clarity and also for establishing the provenance of any findings.}, author = {Budgen, David and Brereton, Pearl and Drummond, Sarah and Williams, Nikki}, doi = {10.1016/j.infsof.2017.10.017}, issn = {09505849}, journal = {Information and Software Technology}, keywords = {Provenance of findings,Reporting quality,Systematic review}, month = {mar}, pages = {62--74}, publisher = {Elsevier B.V.}, title = {{Reporting systematic reviews: Some lessons from a tertiary study}}, volume = {95}, year = {2018} } @incollection{Cartaxo2020, abstract = {Integrating research evidence into practice is one of the main goals of Evidence-Based Software Engineering (EBSE). Secondary studies, one of the main EBSE products, are intended to summarize the best research evidence and make it easily consumable by practitioners. However, recent studies show that some secondary studies lack connections with software engineering practice.
In this chapter, we present the concept of Rapid Reviews, which are lightweight secondary studies focused on delivering evidence to practitioners in a timely manner. Rapid Reviews support practitioners in their decision-making and should be conducted bound to a practical problem, embedded in a practical context. Thus, Rapid Reviews can be easily integrated in a knowledge/technology transfer initiative. After describing the basic concepts, we present the results and experiences of conducting two Rapid Reviews. We also provide guidelines to help researchers and practitioners who want to conduct Rapid Reviews, and we finally discuss topics that may concern the research community about the feasibility of Rapid Reviews as an Evidence-Based method. In conclusion, we believe Rapid Reviews might interest researchers and practitioners working in the intersection between software engineering research and practice.}, address = {Cham}, archivePrefix = {arXiv}, arxivId = {2003.10006}, author = {Cartaxo, Bruno and Pinto, Gustavo and Soares, Sergio}, booktitle = {Contemporary Empirical Methods in Software Engineering}, doi = {10.1007/978-3-030-32489-6_13}, eprint = {2003.10006}, isbn = {9783030324896}, pages = {357--384}, publisher = {Springer International Publishing}, title = {{Rapid Reviews in Software Engineering}}, year = {2020} } @incollection{Felizardo2020, address = {Cham}, author = {Felizardo, Katia R. and Carver, Jeffrey C.}, booktitle = {Contemporary Empirical Methods in Software Engineering}, doi = {10.1007/978-3-030-32489-6_12}, isbn = {9783030324896}, pages = {327--355}, publisher = {Springer International Publishing}, title = {{Automating Systematic Literature Review}}, year = {2020} } @article{Ivarsson2011, abstract = {One of the main goals of an applied research field such as software engineering is the transfer and widespread use of research results in industry. To impact industry, researchers developing technologies in academia need to provide tangible evidence of the advantages of using them. This can be done through step-wise validation, enabling researchers to gradually test and evaluate technologies to finally try them in real settings with real users and applications. The evidence obtained, together with detailed information on how the validation was conducted, offers rich decision support material for industry practitioners seeking to adopt new technologies and researchers looking for an empirical basis on which to build new or refined technologies. This paper presents a model for evaluating the rigor and industrial relevance of technology evaluations in software engineering. The model is applied and validated in a comprehensive systematic literature review of evaluations of requirements engineering technologies published in software engineering journals. The aim is to show the applicability of the model and to characterize how evaluations are carried out and reported to evaluate the state of research. The review shows that the model can be applied to characterize evaluations in requirements engineering. The findings from applying the model also show that the majority of technology evaluations in requirements engineering lack both industrial relevance and rigor. In addition, the research field does not show any improvements in terms of industrial relevance over time.
{\textcopyright} Springer Science+Business Media, LLC 2010.}, author = {Ivarsson, Martin and Gorschek, Tony}, doi = {10.1007/s10664-010-9146-4}, journal = {Empirical Software Engineering}, keywords = {Requirements engineering,Systematic review,Technology evaluation}, number = {3}, pages = {365--395}, title = {{A method for evaluating rigor and industrial relevance of technology evaluations}}, volume = {16}, year = {2011} } @techreport{Kitchenham2007, abstract = {The objective of this report is to propose comprehensive guidelines for systematic literature reviews appropriate for software engineering researchers, including PhD students. A systematic literature review is a means of evaluating and interpreting all available research relevant to a particular research question, topic area, or phenomenon of interest. Systematic reviews aim to present a fair evaluation of a research topic by using a trustworthy, rigorous, and auditable methodology. The guidelines presented in this report were derived from three existing guidelines used by medical researchers, two books produced by researchers with social science backgrounds and discussions with researchers from other disciplines who are involved in evidence-based practice. The guidelines have been adapted to reflect the specific problems of software engineering research. The guidelines cover three phases of a systematic literature review: planning the review, conducting the review and reporting the review. They provide a relatively high level description. They do not consider the impact of the research questions on the review procedures, nor do they specify in detail the mechanisms needed to perform meta-analysis.}, address = {Keele, UK}, author = {Kitchenham, Barbara and Charters, Stuart}, institution = {School of Computer Science and Mathematics, Keele University}, number = {EBSE-2007-01}, pages = {65}, title = {{Guidelines for performing Systematic Literature Reviews in Software Engineering}}, year = {2007} } @article{Kitchenham2009, abstract = {Background: In 2004 the concept of evidence-based software engineering (EBSE) was introduced at the ICSE04 conference. Aims: This study assesses the impact of systematic literature reviews (SLRs) which are the recommended EBSE method for aggregating evidence. Method: We used the standard systematic literature review method employing a manual search of 10 journals and 4 conference proceedings. Results: Of 20 relevant studies, eight addressed research trends rather than technique evaluation. Seven SLRs addressed cost estimation. The quality of SLRs was fair with only three scoring less than 2 out of 4. Conclusions: Currently, the topic areas covered by SLRs are limited. European researchers, particularly those at the Simula Laboratory, appear to be the leading exponents of systematic literature reviews. The series of cost estimation SLRs demonstrate the potential value of EBSE for synthesising evidence and making it available to practitioners. {\textcopyright} 2008 Elsevier B.V. All rights reserved.}, author = {Kitchenham, Barbara and Brereton, O. Pearl
and Budgen, David and Turner, Mark and Bailey, John and Linkman, Stephen}, doi = {10.1016/j.infsof.2008.09.009}, issn = {09505849}, journal = {Information and Software Technology}, keywords = {Cost estimation,Evidence-based software engineering,Systematic literature review,Systematic review quality,Tertiary study}, month = {jan}, number = {1}, pages = {7--15}, title = {{Systematic literature reviews in software engineering – A systematic literature review}}, volume = {51}, year = {2009} } @inproceedings{Petersen2008, abstract = {BACKGROUND: A software engineering systematic map is a defined method to build a classification scheme and structure a software engineering field of interest. The analysis of results focuses on frequencies of publications for categories within the scheme. Thereby, the coverage of the research field can be determined. Different facets of the scheme can also be combined to answer more specific research questions. OBJECTIVE: We describe how to conduct a systematic mapping study in software engineering and provide guidelines. We also compare systematic maps and systematic reviews to clarify how to choose between them. This comparison leads to a set of guidelines for systematic maps. METHOD: We have defined a systematic mapping process and applied it to complete a systematic mapping study. Furthermore, we compare systematic maps with systematic reviews by systematically analyzing existing systematic reviews. RESULTS: We describe a process for software engineering systematic mapping studies and compare it to systematic reviews. Based on this, guidelines for conducting systematic maps are defined. CONCLUSIONS: Systematic maps and reviews are different in terms of goals, breadth, validity issues and implications. Thus, they should be used complementarily and require different methods (e.g., for analysis).}, author = {Petersen, Kai and Feldt, Robert and Mujtaba, Shahid and Mattsson, Michael}, booktitle = {Proceedings of the 12th International Conference on Evaluation and Assessment in Software Engineering (EASE'08)}, keywords = {evidence based software engineering,systematic mapping studies,systematic reviews}, pages = {68--77}, title = {{Systematic mapping studies in software engineering}}, year = {2008} } @article{Petersen2015, abstract = {Context: Systematic mapping studies are used to structure a research area, while systematic reviews are focused on gathering and synthesizing evidence. The most recent guidelines for systematic mapping are from 2008. Since that time, many suggestions have been made of how to improve systematic literature reviews (SLRs). There is a need to evaluate how researchers conduct the process of systematic mapping and identify how the guidelines should be updated based on the lessons learned from the existing systematic maps and SLR guidelines. Objective: To identify how the systematic mapping process is conducted (including search, study selection, analysis and presentation of data, etc.); to identify improvement potentials in conducting the systematic mapping process and updating the guidelines accordingly. Method: We conducted a systematic mapping study of systematic maps, considering some practices of systematic review guidelines as well (in particular in relation to defining the search and to conduct a quality assessment). Results: In a large number of studies multiple guidelines are used and combined, which leads to different ways of conducting mapping studies.
The reason for combining guidelines was that they differed in the recommendations given. Conclusion: The most frequently followed guidelines are not sufficient alone. Hence, there was a need to provide an update of how to conduct systematic mapping studies. New guidelines have been proposed consolidating existing findings.}, author = {Petersen, Kai and Vakkalanka, Sairam and Kuzniarz, Ludwik}, doi = {10.1016/j.infsof.2015.03.007}, issn = {09505849}, journal = {Information and Software Technology}, keywords = {Guidelines,Software engineering,Systematic mapping studies}, month = {aug}, pages = {1--18}, title = {{Guidelines for conducting systematic mapping studies in software engineering: An update}}, volume = {64}, year = {2015} } @article{Schryen2015, abstract = {The literature review is an established research genre in many academic disciplines, including the IS discipline. Although many scholars agree that systematic literature reviews should be rigorous, few instructional texts for compiling a solid literature review, at least with regard to the IS discipline, exist. In response to this shortage, in this tutorial, I provide practical guidance for both students and researchers in the IS community who want to methodologically conduct qualitative literature reviews. The tutorial differs from other instructional texts in two regards. First, in contrast to most textbooks, I cover not only searching and synthesizing the literature but also the challenging tasks of framing the literature review, interpreting research findings, and proposing research paths. Second, I draw on other texts that provide guidelines for writing literature reviews in the IS discipline but use many examples of published literature reviews. I use an integrated example of a literature review, which guides the reader through the overall process of compiling a literature review.}, author = {Schryen, Guido}, doi = {10.17705/1CAIS.03712}, issn = {15293181}, journal = {Communications of the Association for Information Systems}, keywords = {Literature review,Literature synthesis,Methodology,Research agenda,Research gaps,Tutorial}, pages = {286--325}, title = {{Writing Qualitative IS Literature Reviews—Guidelines for Synthesis, Interpretation, and Guidance of Research}}, volume = {37}, year = {2015} } @inproceedings{Wohlin2014, abstract = {Background: Systematic literature studies have become common in software engineering, and hence it is important to understand how to conduct them efficiently and reliably. Objective: This paper presents guidelines for conducting literature reviews using a snowballing approach, and they are illustrated and evaluated by replicating a published systematic literature review. Method: The guidelines are based on the experience from conducting several systematic literature reviews and experimenting with different approaches. Results: The guidelines for using snowballing as a way to search for relevant literature were successfully applied to a systematic literature review. Conclusions: It is concluded that using snowballing, as a first search strategy, may very well be a good alternative to the use of database searches.
{\textcopyright} 2014 ACM.}, address = {New York, New York, USA}, author = {Wohlin, Claes}, booktitle = {Proceedings of the 18th International Conference on Evaluation and Assessment in Software Engineering - EASE '14}, doi = {10.1145/2601248.2601268}, isbn = {9781450324762}, keywords = {Replication,Snowball search,Snowballing,Systematic literature review,Systematic mapping studies}, pages = {1--10}, publisher = {ACM Press}, title = {{Guidelines for snowballing in systematic literature studies and a replication in software engineering}}, year = {2014} } @incollection{Wohlin2013, abstract = {The dependence on quality software in all areas of life is what makes software engineering a key discipline for today's society. Thus, over the last few decades it has been increasingly recognized that it is particularly important to demonstrate the value of software engineering methods in real-world environments, a task which is the focus of empirical software engineering. One of the leading protagonists of this discipline worldwide is Prof. Dr. Dr. h.c. Dieter Rombach, who dedicated his entire career to empirical software engineering. For his many important contributions to the field he has received numerous awards and recognitions, including the U.S. National Science Foundation's Presidential Young Investigator Award and the Cross of the Order of Merit of the Federal Republic of Germany. He is a Fellow of both the ACM and the IEEE Computer Society. This book, published in honor of his 60th birthday, is dedicated to Dieter Rombach and his contributions to software engineering in general, as well as to empirical software engineering in particular. This book presents invited contributions from a number of the most internationally renowned software engineering researchers like Victor Basili, Barry Boehm, Manfred Broy, Carlo Ghezzi, Michael Jackson, Leon Osterweil, and, of course, Dieter Rombach himself. Several key experts from the Fraunhofer IESE, the institute founded and led by Dieter Rombach, also contributed to the book. The contributions summarize some of the most important trends in software engineering today and outline a vision for the future of the field. The book is structured into three main parts. The first part focuses on the classical foundations of software engineering, such as notations, architecture, and processes, while the second addresses empirical software engineering in particular as the core field of Dieter Rombach's contributions. Finally, the third part discusses a broad vision for the future of software engineering.}, address = {Berlin, Heidelberg}, author = {Wohlin, Claes}, booktitle = {Perspectives on the Future of Software Engineering}, doi = {10.1007/978-3-642-37395-4_10}, isbn = {9783642373954}, pages = {145--157}, publisher = {Springer Berlin Heidelberg}, title = {{An Evidence Profile for Software Engineering Research and Practice}}, year = {2013} } @inproceedings{Zhou2016a, abstract = {Context: The assessment of Threats to Validity (TTVs) is critical to secure the quality of empirical studies in Software Engineering (SE). In the recent decade, the Systematic Literature Review (SLR) has become an increasingly important empirical research method in SE, as it is able to provide the strongest evidence. One of the mechanisms for ensuring the level of scientific value in the findings of an SLR is to rigorously assess its validity. Hence, it is necessary to understand the status quo and issues of TTVs of SLRs in SE.
Objective: This study aims to investigate the state of the practice of TTVs of the SLRs published in SE, and further to support SE researchers in improving the assessment of and strategies against TTVs in order to increase the quality of SLRs in SE. Method: We conducted a tertiary study by reviewing the SLRs in SE that report the assessment of TTVs. Results: We identified 316 SLRs published from 2004 to the first half of 2015, in which TTVs are discussed. The issues associated with TTVs were also summarized and categorized. Conclusion: The common TTVs related to SLR research, such as internal validity and reliability, were thoroughly discussed in most SLRs. The threats to construct validity and external validity drew less attention. Moreover, few strategies and tactics are reported to cope with the various TTVs.}, address = {Hamilton, New Zealand}, author = {Zhou, Xin and Jin, Yuqin and Zhang, He and Li, Shanshan and Huang, Xin}, booktitle = {2016 23rd Asia-Pacific Software Engineering Conference (APSEC)}, doi = {10.1109/APSEC.2016.031}, isbn = {978-1-5090-5575-3}, keywords = {Evidence-Based Software Engineering,Systematic (Literature) Review,Threats to Validity}, pages = {153--160}, publisher = {IEEE}, title = {{A Map of Threats to Validity of Systematic Literature Reviews in Software Engineering}}, year = {2016} } @incollection{Ciolkowski2003, abstract = {A survey is an empirical research strategy for the collection of information from heterogeneous sources. In this way, survey results often exhibit a high degree of external validity. It is complementary to other empirical research strategies such as controlled experiments, which usually have their strengths in the high internal validity of the findings. While there is a growing number of (quasi-)controlled experiments reported in the software engineering literature, few results of large scale surveys have been reported there. Hence, there is still a lack of knowledge on how to use surveys in a systematic manner for software engineering empirical research. This chapter introduces a process for preparing, conducting, and analyzing a software engineering survey. The focus of the work is on questionnaire-based surveys rather than literature surveys. The survey process is driven by practical experiences from two large-scale efforts in the review and inspection area. There are two main results from this work. First, the process itself allows researchers in empirical software engineering to follow a systematic, disciplined approach. Second, the experiences from applying the process help avoid common pitfalls that endanger both the research process and its results. We report on two (descriptive) surveys on software reviews that applied the survey process, and we present our experiences, as well as models for survey effort and duration factors derived from these experiences.
{\textcopyright} Springer-Verlag Berlin Heidelberg 2003.}, author = {Ciolkowski, Marcus and Laitenberger, Oliver and Vegas, Sira and Biffl, Stefan}, booktitle = {Empirical Methods and Studies in Software Engineering: Experiences from ESERNET}, doi = {10.1007/978-3-540-45143-3_7}, isbn = {978-3-540-45143-3}, pages = {104--128}, publisher = {Springer Berlin Heidelberg}, series = {Lecture Notes in Computer Science}, title = {{Practical Experiences in the Design and Conduct of Surveys in Empirical Software Engineering}}, volume = {2765}, year = {2003} } @techreport{Kasunic2005, abstract = {A survey can characterize the knowledge, attitudes, and behaviors of a large group of people through the study of a subset of them. However, to protect the validity of conclusions drawn from a survey, certain procedures must be followed throughout the process of designing, developing, and distributing the survey questionnaire. Surveys are used extensively by software and systems engineering organizations to provide insight into complex issues, assist with problem solving, and support effective decision making. This document presents a seven-stage, end-to-end process for conducting a survey.}, address = {Pittsburgh, PA}, author = {Kasunic, Mark}, institution = {Carnegie Mellon University, Software Engineering Institute}, number = {CMU/SEI-2005-HB-004}, pages = {143}, title = {{Designing an Effective Survey}}, year = {2005} } @incollection{Kitchenham2008, address = {London}, author = {Kitchenham, Barbara A. and Pfleeger, Shari L.}, booktitle = {Guide to Advanced Empirical Software Engineering}, doi = {10.1007/978-1-84800-044-5_3}, pages = {63--92}, publisher = {Springer London}, title = {{Personal Opinion Surveys}}, year = {2008} } @article{Molleri2020, abstract = {Context: Over the past decade Software Engineering research has seen a steady increase in survey-based studies, and there are several guidelines providing support for those willing to carry out surveys. The need for auditing survey research has been raised in the literature. Checklists have been used both to conduct and to assess different types of empirical studies, such as experiments and case studies. Objective: To operationalize the assessment of survey studies by means of a checklist. To fulfill this goal, we aim to derive a checklist from standards for survey research and further evaluate the appropriateness of the checklist in the context of software engineering research. Method: We systematically aggregated knowledge from 12 methodological studies supporting survey-based research in software engineering. We identified the key stages of the survey process and its recommended practices through thematic analysis and vote counting. We evaluated the checklist by applying it to existing surveys and analyzed the results. Thereafter, we gathered the feedback of experts (the surveys' authors) on our analysis and used the feedback to improve the survey checklist. Results: The evaluation provided insights regarding limitations of the checklist in relation to its understanding and objectivity. In particular, 19 of the 38 checklist items were improved according to the feedback received from experts. Conclusion: The proposed checklist is appropriate for auditing survey reports as well as a support tool to guide ongoing research with regard to the survey design process.
A discussion on how to use the checklist and what its implications are for research practice is also provided.}, author = {Moll{\'{e}}ri, Jefferson Seide and Petersen, Kai and Mendes, Emilia}, doi = {10.1016/j.infsof.2019.106240}, issn = {09505849}, journal = {Information and Software Technology}, keywords = {Assessment,Checklist,Methodology,Survey}, month = {mar}, pages = {106240}, title = {{An empirically evaluated checklist for surveys in software engineering}}, volume = {119}, year = {2020} } @incollection{Wagner2020, abstract = {While being an important and often used research method, survey research has been less often discussed on a methodological level in empirical software engineering than other types of research. This chapter compiles a set of important and challenging issues in survey research based on experiences with several large-scale international surveys. The chapter covers theory building, sampling, invitation and follow-up, statistical as well as qualitative analysis of survey data and the usage of psychometrics in software engineering surveys.}, address = {Cham}, author = {Wagner, Stefan and Mendez, Daniel and Felderer, Michael and Graziotin, Daniel and Kalinowski, Marcos}, booktitle = {Contemporary Empirical Methods in Software Engineering}, doi = {10.1007/978-3-030-32489-6_4}, isbn = {9783030324896}, pages = {93--125}, publisher = {Springer International Publishing}, title = {{Challenges in Survey Research}}, year = {2020} } @article{Gregor2006, abstract = {The aim of this research essay is to examine the structural nature of theory in Information Systems. Despite the importance of theory, questions relating to its form and structure are neglected in comparison with questions relating to epistemology. The essay addresses issues of causality, explanation, prediction, and generalization that underlie an understanding of theory. A taxonomy is proposed that classifies information systems theories with respect to the manner in which four central goals are addressed: analysis, explanation, prediction, and prescription. Five interrelated types of theory are distinguished: (1) theory for analyzing, (2) theory for explaining, (3) theory for predicting, (4) theory for explaining and predicting, and (5) theory for design and action. Examples illustrate the nature of each theory type. The applicability of the taxonomy is demonstrated by classifying a sample of journal articles. The paper contributes by showing that multiple views of theory exist and by exposing the assumptions underlying different viewpoints. In addition, it is suggested that the type of theory under development can influence the choice of an epistemological approach. Support is given for the legitimacy and value of each theory type. 
The building of integrated bodies of theory that encompass all theory types is advocated.}, author = {Gregor, Shirley}, doi = {10.2307/25148742}, issn = {02767783}, journal = {MIS Quarterly}, keywords = {Causality,Design science,Design theory,Explanation,Generalization,Information systems discipline,Interpretivist theory,Philosophy of science,Philosophy of social sciences,Prediction,Theory,Theory structure,Theory taxonomy}, number = {3}, pages = {611--642}, title = {{The Nature of Theory in Information Systems}}, volume = {30}, year = {2006} } @article{Johnson2012, author = {Johnson, Pontus and Ekstedt, Mathias and Jacobson, Ivar}, doi = {10.1109/MS.2012.127}, issn = {0740-7459}, journal = {IEEE Software}, keywords = {engineering,explanation,prediction,science,software engineering theory,theory}, month = {sep}, number = {5}, pages = {96}, publisher = {IEEE}, title = {{Where's the Theory for Software Engineering?}}, volume = {29}, year = {2012} } @article{Langley1999, abstract = {In this article I describe and compare a number of alternative generic strategies for the analysis of process data, looking at the consequences of these strategies for emerging theories. I evaluate the strengths and weaknesses of the strategies in terms of their capacity to generate theory that is accurate, parsimonious, general, and useful and suggest that method and theory are inextricably intertwined, that multiple strategies are often advisable, and that no analysis strategy will produce theory without an uncodifiable creative leap, however small. Finally, I argue that there is room in the organizational research literature for more openness within the academic community toward a variety of forms of coupling between theory and data.}, author = {Langley, Ann}, doi = {10.2307/259349}, issn = {03637425}, journal = {The Academy of Management Review}, month = {oct}, number = {4}, pages = {691--710}, title = {{Strategies for Theorizing from Process Data}}, volume = {24}, year = {1999} } @inproceedings{Ralph2015, abstract = {A process theory is an explanation of how an entity changes and develops. While software engineering is fundamentally concerned with how software artifacts change and develop, little research explicitly builds and empirically evaluates software engineering process theories. This lack of theory obstructs scientific consensus by focusing the academic community on methods. Methods inevitably oversimplify and over-rationalize reality, obfuscating crucial phenomena including uncertainty, problem framing and illusory requirements. Better process theories are therefore needed to ground software engineering in empirical reality. However, poor understanding of process theory issues impedes research and publication. This paper therefore attempts to clarify the nature and types of process theories, explore their development and provide specific guidance for their empirical evaluation.}, author = {Ralph, Paul}, booktitle = {2015 IEEE/ACM 37th IEEE International Conference on Software Engineering}, doi = {10.1109/ICSE.2015.25}, isbn = {978-1-4799-1934-5}, issn = {02705257}, keywords = {Case study,Field study,Process theory,Questionnaire,Research methodology}, month = {may}, pages = {20--31}, publisher = {IEEE}, title = {{Developing and Evaluating Software Engineering Process Theories}}, volume = {1}, year = {2015} } @inproceedings{Stol2013, abstract = {There has been a growing interest in the role of theory within Software Engineering (SE) research.
For several decades, researchers within the SE research community have argued that, to become a real engineering science, SE needs to develop stronger theoretical foundations. A few authors have proposed guidelines for constructing theories, building on insights from other disciplines. However, so far, much SE research is not guided by explicit theory, nor does it produce explicit theory. In this paper we argue that SE research does, in fact, show traces of theory, which we call theory fragments. We have adapted an analytical framework from the social sciences, named the Validity Network Schema (VNS), that we use to illustrate the role of theorizing in SE research. We illustrate the use of this framework by dissecting three well-known research papers, each of which has had significant impact on its respective subdiscipline. We conclude this paper by outlining a number of implications for future SE research, and show how, by increasing awareness and training, the development of SE theories can be improved. {\textcopyright} 2013 IEEE.}, author = {Stol, Klaas-Jan and Fitzgerald, Brian}, booktitle = {2013 2nd SEMAT Workshop on a General Theory of Software Engineering (GTSE)}, doi = {10.1109/GTSE.2013.6613863}, isbn = {978-1-4673-6273-3}, keywords = {Software engineering research,empirical research,middle-range theory,theory building,theory fragment}, month = {may}, pages = {5--14}, publisher = {IEEE}, title = {{Uncovering theories in software engineering}}, year = {2013} } @inproceedings{Baumer1996, abstract = {In recent years the development of highly interactive software systems with graphical user interfaces has become increasingly common. The acceptance of such a system depends to a large degree on the quality of its user interface. Prototyping is an excellent means for generating ideas about how a user interface can be designed, and it helps to evaluate the quality of a solution at an early stage. In this paper we present the basic concepts behind user interface prototyping, a classification of tools supporting it, and a case study of nine major industrial projects. Based on our analysis of these projects we present the following conclusions: Prototyping is used more consciously than in recent years. No project applied a traditional life-cycle approach, which is one of the reasons why most of them were successful. Prototypes are increasingly used as a vehicle for developing and demonstrating visions of innovative systems.}, author = {B{\"{a}}umer, Dirk and Bischofberger, Walter and Lichter, Horst and Z{\"{u}}llighoven, Heinz}, booktitle = {Proceedings of IEEE 18th International Conference on Software Engineering}, doi = {10.1109/ICSE.1996.493447}, isbn = {0-8186-7247-1}, pages = {532--541}, publisher = {IEEE Computer Society Press},
title = {{User Interface Prototyping: Concepts, Tools, and Experience}}, year = {1996} } @article{Davis1989, author = {Davis, Fred D.}, doi = {10.2307/249008}, issn = {02767783}, journal = {MIS Quarterly}, month = {sep}, number = {3}, pages = {319--340}, title = {{Perceived Usefulness, Perceived Ease of Use, and User Acceptance of Information Technology}}, volume = {13}, year = {1989} } @inproceedings{Devadiga2017, author = {Devadiga, Nitish M.}, booktitle = {2017 2nd International Conference on Communication and Electronics Systems (ICCES)}, doi = {10.1109/CESYS.2017.8321218}, isbn = {978-1-5090-5013-0}, month = {oct}, pages = {924--930}, publisher = {IEEE}, title = {{Tailoring architecture centric design method with rapid prototyping}}, year = {2017} } @article{Ko2015, abstract = {Empirical studies, often in the form of controlled experiments, have been widely adopted in software engineering research as a way to evaluate the merits of new software engineering tools. However, controlled experiments involving human participants actually using new tools are still rare, and when they are conducted, some have serious validity concerns. Recent research has also shown that many software engineering researchers view this form of tool evaluation as too risky and too difficult to conduct, as it might ultimately lead to inconclusive or negative results. In this paper, we aim both to help researchers minimize the risks of this form of tool evaluation, and to increase their quality, by offering practical methodological guidance on designing and running controlled experiments with developers. Our guidance fills gaps in the empirical literature by explaining, from a practical perspective, options in the recruitment and selection of human participants, informed consent, experimental procedures, demographic measurements, group assignment, training, the selection and design of tasks, the measurement of common outcome variables such as success and time on task, and study debriefing. Throughout, we situate this guidance in the results of a new systematic review of the tool evaluations that were published in over 1,700 software engineering papers published from 2001 to 2011.}, author = {Ko, Andrew J. and LaToza, Thomas D. and Burnett, Margaret M.}, doi = {10.1007/s10664-013-9279-3}, issn = {1382-3256}, journal = {Empirical Software Engineering}, keywords = {Experiments,Human participants,Human subjects,Research methodology,Tools}, month = {feb}, number = {1}, pages = {110--141}, title = {{A practical guide to controlled experiments of software engineering tools with human participants}}, volume = {20}, year = {2015} } @article{Naumann1982, abstract = {Leading MIS executives and academicians have identified systems development as one of the most critical issues of the 1980s. Their concerns include providing user accessibility to stored information, reducing development cost and delay, increasing developer productivity, and increasing MIS's impact on organizational growth, productivity, and profitability. Among the number of proposed alternative approaches to traditional systems development, prototyping is mentioned frequently. Prototyping is routine in hardware development but not software. The authors review published references to prototyping and related concepts, and synthesize a process model for information systems. In this model, resource requirements are enumerated and discussed. The article includes an analysis of the economics of prototyping, and a brief discussion of several examples.
Prototyping for information systems development addresses today's critical issues; it will no doubt raise a new set of research questions for tomorrow.}, author = {Naumann, Justus D. and Jenkins, A. Milton}, doi = {10.2307/248654}, issn = {02767783}, journal = {MIS Quarterly}, keywords = {Economics,Information systems,Methodology,Productivity,Systems analysis,Systems design}, month = {sep}, number = {3}, pages = {29--44}, title = {{Prototyping: The New Paradigm for Systems Development}}, volume = {6}, year = {1982} } @article{Taherdoost2018, abstract = {Recognizing the needs and acceptance of individuals is the first stage of any business, and this understanding helps chart the path of future development; thus academicians are interested in identifying the factors that drive users' acceptance or rejection of technologies. A number of models and frameworks have been developed to explain user adoption of new technologies, and these models introduce factors that can affect user acceptance. In this paper, an overview of theories and models regarding user acceptance of technology is provided. The review emphasizes literature that tries to show how developers and researchers predict the level of acceptance any information technology will attain.}, author = {Taherdoost, Hamed}, doi = {10.1016/j.promfg.2018.03.137}, issn = {23519789}, journal = {Procedia Manufacturing}, keywords = {Acceptance Model,Acceptance Theory,Adoption Model,Adoption Theory,User Acceptance,User Adoption}, pages = {960--967}, publisher = {Elsevier B.V.}, title = {{A review of technology acceptance and adoption models and theories}}, volume = {22}, year = {2018} }