@article{Ampatzoglou2019, abstract = {Context: Secondary studies are vulnerable to threats to validity. Although mitigating these threats is crucial for the credibility of these studies, we currently lack a systematic approach to identify, categorize and mitigate threats to validity for secondary studies. Objective: In this paper, we review the corpus of secondary studies, with the aim of identifying: (a) the trend of reporting threats to validity, (b) the most common threats to validity and corresponding mitigation actions, and (c) possible categories in which threats to validity can be classified. Method: To achieve this goal, we employ the tertiary study research method, which is used for synthesizing knowledge from existing secondary studies. In particular, we collected data from more than 100 studies, published until December 2016 in top-quality software engineering venues (both journals and conferences). Results: Our results suggest that in recent years, secondary studies have become more likely to report their threats to validity. However, the presentation of such threats is rather ad hoc; e.g., the same threat may be presented under a different name, or under a different category. To alleviate this problem, we propose a classification schema for reporting threats to validity and possible mitigation actions. Both the classification of threats and the associated mitigation actions have been validated by an empirical study, i.e., Delphi rounds with experts. Conclusion: Based on the proposed schema, we provide a checklist that authors of secondary studies can use for identifying and categorizing threats to validity and corresponding mitigation actions, while readers of secondary studies can use it for assessing the validity of the reported results.}, author = {Ampatzoglou, Apostolos and Bibi, Stamatia and Avgeriou, Paris and Verbeek, Marijn and Chatzigeorgiou, Alexander}, doi = {10.1016/j.infsof.2018.10.006}, issn = {09505849}, journal = {Information and Software Technology}, keywords = {Empirical software engineering,Literature Review,Secondary studies,Threats to Validity}, month = {feb}, pages = {201--230}, publisher = {Elsevier B.V.}, title = {{Identifying, categorizing and mitigating threats to validity in software engineering secondary studies}}, volume = {106}, year = {2019} } @article{Budgen2018, abstract = {Context: Many of the systematic reviews published in software engineering are related to research or methodological issues and hence are unlikely to be of direct benefit to practitioners or teachers. Those that are relevant to practice and teaching need to be presented in a form that makes their findings usable with minimum interpretation. Objective: We have examined a sample of the many systematic reviews published over a period of six years, in order to assess how well they are reported and to identify useful lessons about how this might be done. Method: We undertook a tertiary study, performing a systematic review of systematic reviews. Our study found 178 systematic reviews published in a set of major software engineering journals over the period 2010–2015. Of these, 37 provided recommendations or conclusions of relevance to education and/or practice, and we used the DARE criteria, as well as other attributes related to the systematic review process, to analyse how well they were reported. Results: We have derived a set of 12 ‘lessons’ that could help authors with reporting the outcomes of a systematic review in software engineering.
We also provide an associated checklist for use by journal and conference referees. Conclusion: There are several areas where better reporting is needed, including quality assessment, synthesis, and the procedures followed by the reviewers. Researchers, practitioners, teachers and journal referees would all benefit from better reporting of systematic reviews, both for clarity and for establishing the provenance of any findings.}, author = {Budgen, David and Brereton, Pearl and Drummond, Sarah and Williams, Nikki}, doi = {10.1016/j.infsof.2017.10.017}, issn = {09505849}, journal = {Information and Software Technology}, keywords = {Provenance of findings,Reporting quality,Systematic review}, month = {mar}, pages = {62--74}, publisher = {Elsevier B.V.}, title = {{Reporting systematic reviews: Some lessons from a tertiary study}}, volume = {95}, year = {2018} } @incollection{Cartaxo2020, abstract = {Integrating research evidence into practice is one of the main goals of Evidence-Based Software Engineering (EBSE). Secondary studies, one of the main EBSE products, are intended to summarize the best research evidence and make it easily consumable by practitioners. However, recent studies show that some secondary studies lack connections with software engineering practice. In this chapter, we present the concept of Rapid Reviews, which are lightweight secondary studies focused on delivering evidence to practitioners in a timely manner. Rapid Reviews support practitioners in their decision-making, and should be conducted bound to a practical problem and inserted into a practical context. Thus, Rapid Reviews can be easily integrated into a knowledge/technology transfer initiative. After describing the basic concepts, we present the results of and experiences from conducting two Rapid Reviews. We also provide guidelines to help researchers and practitioners who want to conduct Rapid Reviews, and we finally discuss topics that may concern the research community regarding the feasibility of Rapid Reviews as an evidence-based method. In conclusion, we believe Rapid Reviews might interest researchers and practitioners working at the intersection between software engineering research and practice.}, address = {Cham}, archivePrefix = {arXiv}, arxivId = {2003.10006}, author = {Cartaxo, Bruno and Pinto, Gustavo and Soares, Sergio}, booktitle = {Contemporary Empirical Methods in Software Engineering}, doi = {10.1007/978-3-030-32489-6_13}, eprint = {2003.10006}, isbn = {9783030324896}, pages = {357--384}, publisher = {Springer International Publishing}, title = {{Rapid Reviews in Software Engineering}}, year = {2020} } @incollection{Felizardo2020, address = {Cham}, author = {Felizardo, Katia R. and Carver, Jeffrey C.}, booktitle = {Contemporary Empirical Methods in Software Engineering}, doi = {10.1007/978-3-030-32489-6_12}, isbn = {9783030324896}, pages = {327--355}, publisher = {Springer International Publishing}, title = {{Automating Systematic Literature Review}}, year = {2020} } @article{Ivarsson2011, abstract = {One of the main goals of an applied research field such as software engineering is the transfer and widespread use of research results in industry. To impact industry, researchers developing technologies in academia need to provide tangible evidence of the advantages of using them. This can be done through step-wise validation, enabling researchers to gradually test and evaluate technologies and finally to try them in real settings with real users and applications.
The evidence obtained, together with detailed information on how the validation was conducted, offers rich decision-support material for industry practitioners seeking to adopt new technologies and for researchers looking for an empirical basis on which to build new or refined technologies. This paper presents a model for evaluating the rigor and industrial relevance of technology evaluations in software engineering. The model is applied and validated in a comprehensive systematic literature review of evaluations of requirements engineering technologies published in software engineering journals. The aim is to show the applicability of the model and to characterize how evaluations are carried out and reported, in order to assess the state of research. The review shows that the model can be applied to characterize evaluations in requirements engineering. The findings from applying the model also show that the majority of technology evaluations in requirements engineering lack both industrial relevance and rigor. In addition, the research field does not show any improvement in terms of industrial relevance over time.}, author = {Ivarsson, Martin and Gorschek, Tony}, doi = {10.1007/s10664-010-9146-4}, journal = {Empirical Software Engineering}, keywords = {Requirements engineering,Systematic review,Technology evaluation}, number = {3}, pages = {365--395}, title = {{A method for evaluating rigor and industrial relevance of technology evaluations}}, volume = {16}, year = {2011} } @techreport{Kitchenham2007, abstract = {The objective of this report is to propose comprehensive guidelines for systematic literature reviews appropriate for software engineering researchers, including PhD students. A systematic literature review is a means of evaluating and interpreting all available research relevant to a particular research question, topic area, or phenomenon of interest. Systematic reviews aim to present a fair evaluation of a research topic by using a trustworthy, rigorous, and auditable methodology. The guidelines presented in this report were derived from three existing guidelines used by medical researchers, two books produced by researchers with social science backgrounds, and discussions with researchers from other disciplines who are involved in evidence-based practice. The guidelines have been adapted to reflect the specific problems of software engineering research. The guidelines cover three phases of a systematic literature review: planning the review, conducting the review and reporting the review. They provide a relatively high-level description. They do not consider the impact of the research questions on the review procedures, nor do they specify in detail the mechanisms needed to perform meta-analysis.}, address = {Keele, UK}, author = {Kitchenham, Barbara and Charters, Stuart}, institution = {School of Computer Science and Mathematics, Keele University}, number = {EBSE-2007-01}, pages = {65}, title = {{Guidelines for performing Systematic Literature Reviews in Software Engineering}}, year = {2007} } @article{Kitchenham2009, abstract = {Background: In 2004 the concept of evidence-based software engineering (EBSE) was introduced at the ICSE04 conference. Aims: This study assesses the impact of systematic literature reviews (SLRs), which are the recommended EBSE method for aggregating evidence.
Method: We used the standard systematic literature review method, employing a manual search of 10 journals and 4 conference proceedings. Results: Of 20 relevant studies, eight addressed research trends rather than technique evaluation. Seven SLRs addressed cost estimation. The quality of the SLRs was fair, with only three scoring less than 2 out of 4. Conclusions: Currently, the topic areas covered by SLRs are limited. European researchers, particularly those at the Simula Laboratory, appear to be the leading exponents of systematic literature reviews. The series of cost estimation SLRs demonstrates the potential value of EBSE for synthesising evidence and making it available to practitioners.}, author = {Kitchenham, Barbara and {Pearl Brereton}, O. and Budgen, David and Turner, Mark and Bailey, John and Linkman, Stephen}, doi = {10.1016/j.infsof.2008.09.009}, issn = {09505849}, journal = {Information and Software Technology}, keywords = {Cost estimation,Evidence-based software engineering,Systematic literature review,Systematic review quality,Tertiary study}, month = {jan}, number = {1}, pages = {7--15}, title = {{Systematic literature reviews in software engineering – A systematic literature review}}, volume = {51}, year = {2009} } @inproceedings{Petersen2008, abstract = {BACKGROUND: A software engineering systematic map is a defined method to build a classification scheme and structure a software engineering field of interest. The analysis of results focuses on frequencies of publications for categories within the scheme. Thereby, the coverage of the research field can be determined. Different facets of the scheme can also be combined to answer more specific research questions. OBJECTIVE: We describe how to conduct a systematic mapping study in software engineering and provide guidelines. We also compare systematic maps and systematic reviews to clarify how to choose between them. This comparison leads to a set of guidelines for systematic maps. METHOD: We have defined a systematic mapping process and applied it to complete a systematic mapping study. Furthermore, we compare systematic maps with systematic reviews by systematically analyzing existing systematic reviews. RESULTS: We describe a process for software engineering systematic mapping studies and compare it to systematic reviews. Based on this, guidelines for conducting systematic maps are defined. CONCLUSIONS: Systematic maps and reviews differ in terms of goals, breadth, validity issues and implications. Thus, they should be used complementarily and require different methods (e.g., for analysis).}, author = {Petersen, Kai and Feldt, Robert and Mujtaba, Shahid and Mattsson, Michael}, booktitle = {EASE'08: Proceedings of the 12th International Conference on Evaluation and Assessment in Software Engineering}, isbn = {0-7695-2555-5}, keywords = {evidence based software engineering,systematic mapping studies,systematic reviews}, pages = {68--77}, title = {{Systematic mapping studies in software engineering}}, year = {2008} } @article{Petersen2015, abstract = {Context: Systematic mapping studies are used to structure a research area, while systematic reviews are focused on gathering and synthesizing evidence. The most recent guidelines for systematic mapping are from 2008. Since that time, many suggestions have been made on how to improve systematic literature reviews (SLRs).
There is a need to evaluate how researchers conduct the systematic mapping process and to identify how the guidelines should be updated, based on the lessons learned from existing systematic maps and the SLR guidelines. Objective: To identify how the systematic mapping process is conducted (including search, study selection, analysis and presentation of data, etc.); to identify improvement potentials in conducting the systematic mapping process; and to update the guidelines accordingly. Method: We conducted a systematic mapping study of systematic maps, considering some practices from systematic review guidelines as well (in particular in relation to defining the search and conducting a quality assessment). Results: In a large number of studies, multiple guidelines are used and combined, which leads to different ways of conducting mapping studies. The reason for combining guidelines was that they differed in the recommendations given. Conclusion: The most frequently followed guidelines are not sufficient on their own. Hence, there was a need to provide an update on how to conduct systematic mapping studies. New guidelines have been proposed, consolidating existing findings.}, author = {Petersen, Kai and Vakkalanka, Sairam and Kuzniarz, Ludwik}, doi = {10.1016/j.infsof.2015.03.007}, issn = {09505849}, journal = {Information and Software Technology}, keywords = {Guidelines,Software engineering,Systematic mapping studies}, month = {aug}, pages = {1--18}, title = {{Guidelines for conducting systematic mapping studies in software engineering: An update}}, volume = {64}, year = {2015} } @article{Schryen2015, abstract = {The literature review is an established research genre in many academic disciplines, including the IS discipline. Although many scholars agree that systematic literature reviews should be rigorous, few instructional texts for compiling a solid literature review, at least with regard to the IS discipline, exist. In response to this shortage, in this tutorial I provide practical guidance for both students and researchers in the IS community who want to conduct qualitative literature reviews methodically. The tutorial differs from other instructional texts in two regards. First, in contrast to most textbooks, I cover not only searching and synthesizing the literature but also the challenging tasks of framing the literature review, interpreting research findings, and proposing research paths. Second, I draw on other texts that provide guidelines for writing literature reviews in the IS discipline, but use many examples of published literature reviews. I use an integrated example of a literature review, which guides the reader through the overall process of compiling a literature review.}, author = {Schryen, Guido}, doi = {10.17705/1CAIS.03712}, issn = {15293181}, journal = {Communications of the Association for Information Systems}, keywords = {Literature review,Literature synthesis,Methodology,Research agenda,Research gaps,Tutorial}, pages = {286--325}, title = {{Writing Qualitative IS Literature Reviews—Guidelines for Synthesis, Interpretation, and Guidance of Research}}, volume = {37}, year = {2015} } @inproceedings{Wohlin2014, abstract = {Background: Systematic literature studies have become common in software engineering, and hence it is important to understand how to conduct them efficiently and reliably.
Objective: This paper presents guidelines for conducting literature reviews using a snowballing approach, which are illustrated and evaluated by replicating a published systematic literature review. Method: The guidelines are based on experience from conducting several systematic literature reviews and experimenting with different approaches. Results: The guidelines for using snowballing as a way to search for relevant literature were successfully applied to a systematic literature review. Conclusions: Using snowballing as a first search strategy may very well be a good alternative to the use of database searches.}, address = {New York, New York, USA}, author = {Wohlin, Claes}, booktitle = {Proceedings of the 18th International Conference on Evaluation and Assessment in Software Engineering - EASE '14}, doi = {10.1145/2601248.2601268}, isbn = {9781450324762}, keywords = {Replication,Snowball search,Snowballing,Systematic literature review,Systematic mapping studies}, pages = {1--10}, publisher = {ACM Press}, title = {{Guidelines for snowballing in systematic literature studies and a replication in software engineering}}, year = {2014} } @incollection{Wohlin2013, abstract = {The dependence on quality software in all areas of life is what makes software engineering a key discipline for today's society. Thus, over the last few decades it has been increasingly recognized that it is particularly important to demonstrate the value of software engineering methods in real-world environments, a task which is the focus of empirical software engineering. One of the leading protagonists of this discipline worldwide is Prof. Dr. Dr. h.c. Dieter Rombach, who has dedicated his entire career to empirical software engineering. For his many important contributions to the field he has received numerous awards and recognitions, including the U.S. National Science Foundation's Presidential Young Investigator Award and the Cross of the Order of Merit of the Federal Republic of Germany. He is a Fellow of both the ACM and the IEEE Computer Society. This book, published in honor of his 60th birthday, is dedicated to Dieter Rombach and his contributions to software engineering in general, as well as to empirical software engineering in particular. It presents invited contributions from a number of the most internationally renowned software engineering researchers, such as Victor Basili, Barry Boehm, Manfred Broy, Carlo Ghezzi, Michael Jackson, Leon Osterweil, and, of course, Dieter Rombach himself. Several key experts from the Fraunhofer IESE, the institute founded and led by Dieter Rombach, also contributed to the book. The contributions summarize some of the most important trends in software engineering today and outline a vision for the future of the field. The book is structured into three main parts. The first part focuses on the classical foundations of software engineering, such as notations, architecture, and processes, while the second addresses empirical software engineering in particular, as the core field of Dieter Rombach's contributions.
Finally, the third part discusses a broad vision for the future of software engineering.}, address = {Berlin, Heidelberg}, author = {Wohlin, Claes}, booktitle = {Perspectives on the Future of Software Engineering}, doi = {10.1007/978-3-642-37395-4_10}, isbn = {9783642373954}, pages = {145--157}, publisher = {Springer Berlin Heidelberg}, title = {{An Evidence Profile for Software Engineering Research and Practice}}, year = {2013} } @inproceedings{Zhou2016a, abstract = {Context: The assessment of Threats to Validity (TTVs) is critical to securing the quality of empirical studies in Software Engineering (SE). In the last decade, the Systematic Literature Review (SLR) has become an increasingly important empirical research method in SE, as it is able to provide the strongest evidence. One of the mechanisms for ensuring the level of scientific value in the findings of an SLR is to rigorously assess its validity. Hence, it is necessary to understand the status quo and the issues surrounding TTVs in SLRs in SE. Objective: This study aims to investigate the state of the practice regarding TTVs in the SLRs published in SE, and to support SE researchers in improving the assessment of, and strategies against, TTVs in order to increase the quality of SLRs in SE. Method: We conducted a tertiary study by reviewing the SLRs in SE that report the assessment of TTVs. Results: We identified 316 SLRs published from 2004 to the first half of 2015 in which TTVs are discussed. The issues associated with TTVs were also summarized and categorized. Conclusion: The common TTVs related to SLR research, such as internal validity and reliability, were thoroughly discussed in most SLRs. The threats to construct validity and external validity drew less attention. Moreover, few strategies and tactics for coping with the various TTVs are reported.}, address = {Hamilton, New Zealand}, author = {Zhou, Xin and Jin, Yuqin and Zhang, He and Li, Shanshan and Huang, Xin}, booktitle = {2016 23rd Asia-Pacific Software Engineering Conference (APSEC)}, doi = {10.1109/APSEC.2016.031}, isbn = {978-1-5090-5575-3}, keywords = {Evidence-Based Software Engineering,Systematic (Literature) Review,Threats to Validity}, pages = {153--160}, publisher = {IEEE}, title = {{A Map of Threats to Validity of Systematic Literature Reviews in Software Engineering}}, year = {2016} }