% Encoding: UTF-8 @InProceedings{Turton2017, author = {Terece L. Turton and Colin Ware and Francesca Samsel and David H. Rogers}, title = {A Crowdsourced Approach to Colormap Assessment}, booktitle = {EuroVis Workshop on Reproducibility, Verification, and Validation in Visualization (EuroRV3)}, year = {2017}, editor = {Kai Lawonn and Noeska Smit and Douglas Cunningham}, publisher = {The Eurographics Association}, abstract = {Despite continual research and discussion on the perceptual effects of color in scientific visualization, psychophysical testing is often limited. In-person lab studies can be expensive and time-consuming while results can be difficult to extrapolate from meticulously controlled laboratory conditions to the real world of the visualization user. We draw on lessons learned from the use of crowdsourced participant pools in the behavioral sciences and information visualization to apply a crowdsourced approach to a classic psychophysical experiment assessing the ability of a colormap to impart metric information. We use an online presentation analogous to the color key task from Ware's 1988 paper, Color Sequences for Univariate Maps, testing colormaps similar to those in the original paper along with contemporary colormap standards and new alternatives in the scientific visualization domain. We explore the issue of potential contamination from color deficient participants and establish that perceptual color research can appropriately leverage a crowdsourced participant pool without significant CVD concerns. 
The updated version of the Ware color key task also provides a method to assess and compare colormaps.}, doi = {10.2312/eurorv3.20171106}, isbn = {978-3-03868-041-3}, } @InProceedings{Wigdor2007, author = {Daniel Wigdor and Chia Shen and Clifton Forlines and Ravin Balakrishnan}, title = {Perception of elementary graphical elements in tabletop and multi-surface environments}, booktitle = {Proceedings of the {SIGCHI} conference on Human factors in computing systems - {CHI} '07}, year = {2007}, publisher = {{ACM} Press}, abstract = {Information shown on a tabletop display can appear distorted when viewed by a seated user. Even worse, the impact of this distortion is different depending on the location of the information on the display. In this paper, we examine how this distortion affects the perception of the basic graphical elements of information visualization shown on displays at various angles. We first examine perception of these elements on a single display, and then compare this to perception across displays, in order to evaluate the effectiveness of various elements for use in a tabletop and multi-display environment. We found that the perception of some graphical elements is more robust to distortion than others. We then develop recommendations for building data visualizations for these environments.}, doi = {10.1145/1240624.1240701}, url = {https://doi.org/10.1145%2F1240624.1240701}, } @InProceedings{Heer2009, author = {Jeffrey Heer and Nicholas Kong and Maneesh Agrawala}, title = {Sizing the horizon}, booktitle = {Proceedings of the 27th international conference on Human factors in computing systems - {CHI} 09}, year = {2009}, publisher = {{ACM} Press}, abstract = {We investigate techniques for visualizing time series data and evaluate their effect in value comparison tasks. 
We compare line charts with horizon graphs - a space-efficient time series visualization technique - across a range of chart sizes, measuring the speed and accuracy of subjects' estimates of value differences between charts. We identify transition points at which reducing the chart height results in significantly differing drops in estimation accuracy across the compared chart types, and we find optimal positions in the speed-accuracy tradeoff curve at which viewers performed quickly without attendant drops in accuracy. Based on these results, we propose approaches for increasing data density that optimize graphical perception.}, doi = {10.1145/1518701.1518897}, url = {https://doi.org/10.1145%2F1518701.1518897}, } @InProceedings{Heer2010, author = {Jeffrey Heer and Michael Bostock}, title = {Crowdsourcing graphical perception}, booktitle = {Proceedings of the 28th international conference on Human factors in computing systems - {CHI} '10}, year = {2010}, publisher = {{ACM} Press}, abstract = {Understanding perception is critical to effective visualization design. With its low cost and scalability, crowdsourcing presents an attractive option for evaluating the large design space of visualizations; however, it first requires validation. In this paper, we assess the viability of Amazon's Mechanical Turk as a platform for graphical perception experiments. We replicate previous studies of spatial encoding and luminance contrast and compare our results. We also conduct new experiments on rectangular area perception (as in treemaps or cartograms) and on chart size and gridline spacing. Our results demonstrate that crowdsourced perception experiments are viable and contribute new insights for visualization design. 
Lastly, we report cost and performance data from our experiments and distill recommendations for the design of crowdsourced studies.}, doi = {10.1145/1753326.1753357}, url = {https://doi.org/10.1145%2F1753326.1753357}, keywords = {crowdsourcing}, } @Article{Kong2010, author = {Nicholas Kong and Jeffrey Heer and Maneesh Agrawala}, title = {Perceptual Guidelines for Creating Rectangular Treemaps}, journal = {{IEEE} Transactions on Visualization and Computer Graphics}, year = {2010}, volume = {16}, number = {6}, pages = {990--998}, month = {nov}, abstract = {Treemaps are space-filling visualizations that make efficient use of limited display space to depict large amounts of hierarchical data. Creating perceptually effective treemaps requires carefully managing a number of design parameters including the aspect ratio and luminance of rectangles. Moreover, treemaps encode values using area, which has been found to be less accurate than judgments of other visual encodings, such as length. We conduct a series of controlled experiments aimed at producing a set of design guidelines for creating effective rectangular treemaps. We find no evidence that luminance affects area judgments, but observe that aspect ratio does have an effect. Specifically, we find that the accuracy of area comparisons suffers when the compared rectangles have extreme aspect ratios or when both are squares. Contrary to common assumptions, the optimal distribution of rectangle aspect ratios within a treemap should include non-squares, but should avoid extreme aspect ratios. We then compare treemaps with hierarchical bar chart displays to identify the data densities at which length-encoded bar charts become less effective than area-encoded treemaps. We report the transition points at which treemaps exhibit judgment accuracy on par with bar charts for both leaf and non-leaf tree nodes. We also find that even at relatively low data densities treemaps result in faster comparisons than bar charts. 
Based on these results, we present a set of guidelines for the effective use of treemaps.}, doi = {10.1109/tvcg.2010.186}, publisher = {Institute of Electrical and Electronics Engineers ({IEEE})}, url = {https://doi.org/10.1109%2Ftvcg.2010.186}, } @Article{Liu2014, author = {Zhicheng Liu and Jeffrey Heer}, title = {The Effects of Interactive Latency on Exploratory Visual Analysis}, journal = {{IEEE} Transactions on Visualization and Computer Graphics}, year = {2014}, volume = {20}, number = {12}, pages = {2122--2131}, month = {dec}, abstract = {To support effective exploration, it is often stated that interactive visualizations should provide rapid response times. However, the effects of interactive latency on the process and outcomes of exploratory visual analysis have not been systematically studied. We present an experiment measuring user behavior and knowledge discovery with interactive visualizations under varying latency conditions. We observe that an additional delay of 500ms incurs significant costs, decreasing user activity and data set coverage. Analyzing verbal data from think-aloud protocols, we find that increased latency reduces the rate at which users make observations, draw generalizations and generate hypotheses. Moreover, we note interaction effects in which initial exposure to higher latencies leads to subsequently reduced performance in a low-latency setting. Overall, increased latency causes users to shift exploration strategy, in turn affecting performance. 
We discuss how these results can inform the design of interactive analysis tools.}, doi = {10.1109/tvcg.2014.2346452}, publisher = {Institute of Electrical and Electronics Engineers ({IEEE})}, url = {https://doi.org/10.1109%2Ftvcg.2014.2346452}, } @Article{Kay2016, author = {Matthew Kay and Jeffrey Heer}, title = {Beyond Weber's Law: A Second Look at Ranking Visualizations of Correlation}, journal = {{IEEE} Transactions on Visualization and Computer Graphics}, year = {2016}, volume = {22}, number = {1}, pages = {469--478}, month = {jan}, abstract = {Models of human perception - including perceptual “laws” - can be valuable tools for deriving visualization design recommendations. However, it is important to assess the explanatory power of such models when using them to inform design. We present a secondary analysis of data previously used to rank the effectiveness of bivariate visualizations for assessing correlation (measured with Pearson's r) according to the well-known Weber-Fechner Law. Beginning with the model of Harrison et al. [1], we present a sequence of refinements including incorporation of individual differences, log transformation, censored regression, and adoption of Bayesian statistics. Our model incorporates all observations dropped from the original analysis, including data near ceilings caused by the data collection process and entire visualizations dropped due to large numbers of observations worse than chance. This model deviates from Weber's Law, but provides improved predictive accuracy and generalization. Using Bayesian credibility intervals, we derive a partial ranking that groups visualizations with similar performance, and we give precise estimates of the difference in performance between these groups. We find that compared to other visualizations, scatterplots are unique in combining low variance between individuals and high precision on both positively- and negatively correlated data. 
We conclude with a discussion of the value of data sharing and replication, and share implications for modeling similar experimental data.}, doi = {10.1109/tvcg.2015.2467671}, keywords = {scatterplots}, publisher = {Institute of Electrical and Electronics Engineers ({IEEE})}, url = {https://doi.org/10.1109%2Ftvcg.2015.2467671}, } @Article{Ziemkiewicz2013, author = {C. Ziemkiewicz and A. Ottley and R. J. Crouser and A. R. Yauilla and S. L. Su and W. Ribarsky and R. Chang}, title = {How Visualization Layout Relates to Locus of Control and Other Personality Factors}, journal = {{IEEE} Transactions on Visualization and Computer Graphics}, year = {2013}, volume = {19}, number = {7}, pages = {1109--1121}, month = {jul}, abstract = {Existing research suggests that individual personality differences are correlated with a user’s speed and accuracy in solving problems with different types of complex visualization systems. We extend this research by isolating factors in personality traits as well as in the visualizations that could have contributed to the observed correlation. We focus on a personality trait known as "locus of control" (LOC), which represents a person’s tendency to see themselves as controlled by or in control of external events. To isolate variables of the visualization design, we control extraneous factors such as color, interaction, and labeling. We conduct a user study with four visualizations that gradually shift from a list metaphor to a containment metaphor and compare the participants’ speed, accuracy, and preference with their locus of control and other personality factors. Our findings demonstrate that there is indeed a correlation between the two: participants with an internal locus of control perform more poorly with visualizations that employ a containment metaphor, while those with an external locus of control perform well with such visualizations. These results provide evidence for the externalization theory of visualization. 
Finally, we propose applications of these findings to adaptive visual analytics and visualization evaluation.}, doi = {10.1109/tvcg.2012.180}, publisher = {Institute of Electrical and Electronics Engineers ({IEEE})}, url = {https://doi.org/10.1109%2Ftvcg.2012.180}, } @InProceedings{Levy1996, author = {Ellen Levy and Jeff Zacks and Barbara Tversky and Diane Schiano}, title = {Gratuitous graphics? Putting preferences in perspective}, booktitle = {Proceedings of the {SIGCHI} conference on Human factors in computing systems common ground - {CHI} '96}, year = {1996}, publisher = {{ACM} Press}, abstract = {Rapid growth in 3-D rendering technologies has deluged us with glitzy graphical representations. In what contexts do people find 3-D graphs of 2-D data both attractive and useful? We examine students' preferences for graphical display formats under several use scenarios. Line graphs were preferred more for conveying trends than details, and more for promoting memorability than for immediate use; bar graphs showed the opposite pattern. 3-D graphs were preferred more for depicting details than trends, more for memorability than immediate use, and more for showing others than oneself. The reverse held for 2-D graphs.}, doi = {10.1145/238386.238400}, keywords = {3D}, url = {https://doi.org/10.1145%2F238386.238400}, } @Article{Spence1990, author = {Ian Spence}, title = {Visual psychophysics of simple graphical elements.}, journal = {Journal of Experimental Psychology: Human Perception and Performance}, year = {1990}, volume = {16}, number = {4}, pages = {683--692}, abstract = {The accuracy with which graphical elements are judged was assessed in a psychophysical task that parallels the real-life use of graphs. The task is a variant of the Metfessel-Comrey constant-sum method, and an associated model based on Stevens's law is proposed. The stimuli were horizontal and vertical lines, bars, pie and disk slices, cylinders, boxes, and table entries (numbers). 
Stevens's law exponents were near unity for numbers and 1-dimensional elements but were also close to 1 for elements possessing 2 or 3 apparent dimensions (Ss accommodate extraneous dimensions that do not carry variation, changing the effective dimensionality of the stimulus). Judgment errors were small, with numbers yielding the best performance; elements such as bars and pie slices were judged almost as accurately; disk elements were judged least accurately, but the magnitude of the errors was not large.}, doi = {10.1037/0096-1523.16.4.683}, publisher = {American Psychological Association ({APA})}, url = {https://doi.org/10.1037%2F0096-1523.16.4.683}, } @Article{Schiano1992, author = {Diane J. Schiano and Barbara Tversky}, title = {Structure and strategy in encoding simplified graphs}, journal = {Memory \& Cognition}, year = {1992}, volume = {20}, number = {1}, pages = {12--20}, abstract = {Tversky and Schiano (1989) found a systematic bias toward the 45° line in memory for the slopes of identical lines when embedded in graphs, but not in maps, suggesting the use of a cognitive reference frame specifically for encoding meaningful graphs. The present experiments explore this issue further using the linear configurations alone as stimuli. Experiments 1 and 2 demonstrate that perception and immediate memory for the slope of a test line within orthogonal “axes” are predictable from purely structural considerations. In Experiments 3 and 4, subjects were instructed to use a diagonal-reference strategy in viewing the stimuli, which were described as “graphs” only in Experiment 3. Results for both studies showed the diagonal bias previously found only for graphs. 
This pattern provides converging evidence for the diagonal as a cognitive reference frame in encoding linear graphs, and demonstrates that even in highly simplified displays, strategic factors can produce encoding biases not predictable solely from stimulus structure alone.}, month = {jan}, doi = {10.3758/bf03208249}, publisher = {Springer Nature}, url = {https://doi.org/10.3758%2Fbf03208249}, } @Article{Zacks1999, author = {Jeff Zacks and Barbara Tversky}, title = {Bars and lines: A study of graphic communication}, journal = {Memory \& Cognition}, year = {1999}, volume = {27}, number = {6}, pages = {1073--1079}, month = {nov}, abstract = {Interpretations of graphs seem to be rooted in principles of cognitive naturalness and information processing rather than arbitrary correspondences. These predict that people should more readily associate bars with discrete comparisons between data points because bars are discrete entities and facilitate point estimates. They should more readily associate lines with trends because lines connect discrete entities and directly represent slope. The predictions were supported in three experiments—two examining comprehension and one production. The correspondence does not seem to depend on explicit knowledge of rules. Instead, it may reflect the influence of the communicative situation as well as the perceptual properties of graphs.}, doi = {10.3758/bf03201236}, publisher = {Springer Nature}, url = {https://doi.org/10.3758%2Fbf03201236}, } @InProceedings{Harrison2013, author = {Lane Harrison and Drew Skau and Steven Franconeri and Aidong Lu and Remco Chang}, title = {Influencing visual judgment through affective priming}, booktitle = {Proceedings of the {SIGCHI} Conference on Human Factors in Computing Systems - {CHI} '13}, year = {2013}, publisher = {{ACM} Press}, abstract = {Recent research suggests that individual personality differences can influence performance with visualizations. 
In addition to stable personality traits, research in psychology has found that temporary changes in affect (emotion) can also significantly impact performance during cognitive tasks. In this paper, we show that affective priming also influences user performance on visual judgment tasks through an experiment that combines affective priming with longstanding graphical perception experiments. Our results suggest that affective priming can influence accuracy in common graphical perception tasks. We discuss possible explanations for these findings, and describe how these findings can be applied to design visualizations that are less (or more) susceptible to error in common visualization contexts.}, doi = {10.1145/2470654.2481410}, url = {https://doi.org/10.1145%2F2470654.2481410}, } @Article{Cleveland1984, author = {W S Cleveland and Robert McGill}, title = {Graphical Perception: Theory, Experimentation, and Application to the Development of Graphical Methods}, journal = {Journal of the American Statistical Association}, year = {1984}, volume = {79}, number = {387}, pages = {531--554}, abstract = {The subject of graphical methods for data analysis and for data presentation needs a scientific foundation. In this article we take a few steps in the direction of establishing such a foundation. Our approach is based on graphical perception—the visual decoding of information encoded on graphs—and it includes both theory and experimentation to test the theory. The theory deals with a small but important piece of the whole process of graphical perception. The first part is an identification of a set of elementary perceptual tasks that are carried out when people extract quantitative information from graphs. The second part is an ordering of the tasks on the basis of how accurately people perform them. Elements of the theory are tested by experimentation in which subjects record their judgments of the quantitative information on graphs. 
The experiments validate these elements but also suggest that the set of elementary tasks should be expanded. The theory provides a guideline for graph construction: Graphs should employ elementary tasks as high in the ordering as possible. This principle is applied to a variety of graphs, including bar charts, divided bar charts, pie charts, and statistical maps with shading. The conclusion is that radical surgery on these popular graphs is needed, and as replacements we offer alternative graphical forms—dot charts, dot charts with grouping, and framed-rectangle charts.}, doi = {10.2307/2288400}, } @InProceedings{Cole2009, author = {Forrester Cole and Kevin Sanik and Doug DeCarlo and Adam Finkelstein and Thomas Funkhouser and Szymon Rusinkiewicz and Manish Singh}, title = {How well do line drawings depict shape?}, booktitle = {{ACM} {SIGGRAPH} 2009 papers on - {SIGGRAPH} '09}, year = {2009}, publisher = {{ACM} Press}, abstract = {This paper investigates the ability of sparse line drawings to depict 3D shape. We perform a study in which people are shown an image of one of twelve 3D objects depicted with one of six styles and asked to orient a gauge to coincide with the surface normal at many positions on the object’s surface. The normal estimates are compared with each other and with ground truth data provided by a registered 3D surface model to analyze accuracy and precision. The paper describes the design decisions made in collecting a large data set (275,000 gauge measurements) and provides analysis to answer questions about how well people interpret shapes from drawings. Our findings suggest that people interpret certain shapes almost as well from a line drawing as from a shaded image, that current computer graphics line drawing techniques can effectively depict shape and even match the effectiveness of artist’s drawings, and that errors in depiction are often localized and can be traced to particular properties of the lines used. 
The data collected for this study will become a publicly available resource for further studies of this type.}, doi = {10.1145/1576246.1531334}, keywords = {3D}, url = {https://doi.org/10.1145%2F1576246.1531334}, } @InProceedings{Haroz2015, author = {Steve Haroz and Robert Kosara and Steven L. Franconeri}, title = {{ISOTYPE} Visualization}, booktitle = {Proceedings of the 33rd Annual {ACM} Conference on Human Factors in Computing Systems - {CHI} '15}, year = {2015}, publisher = {{ACM} Press}, abstract = {Although the infographic and design communities have used simple pictographic representations for decades, it is still unclear whether they can make visualizations more effective. Using simple charts, we tested how pictographic representations impact (1) memory for information just viewed, as well as under the load of additional information, (2) speed of finding information, and (3) engagement and preference in seeking out these visualizations. We find that superfluous images can distract. But we find no user costs -- and some intriguing benefits -- when pictographs are used to represent the data. }, doi = {10.1145/2702123.2702275}, url = {https://doi.org/10.1145%2F2702123.2702275}, } @Article{Hollands1998, author = {J. G. Hollands and Ian Spence}, title = {Judging proportion with graphs: the summation model}, journal = {Applied Cognitive Psychology}, year = {1998}, volume = {12}, number = {2}, pages = {173--190}, month = {apr}, abstract = {People take longer to judge part-to-whole relationships with bar graphs than with pie charts or divided bar graphs. Subjects may perform summation operations to establish the whole with bar graphs, which would be unnecessary for other graph types depicting the whole with a single object. To test this summation model, the number of components forming the whole was varied with bars, divided bars, reference bars, and pies in three experiments. 
Response time increased with the number of components for bar graphs but there was little increase for other graph types in Experiment 1. An accuracy emphasis in Experiment 2 produced generally longer response times, but had little effect on the time per summation. The summation operation was not used when graphs were displayed briefly in Experiment 3, although subjects still took longer with bars. The estimated time for a summation operation is consistent with estimates derived from other research. In general, the bar graph is not effective for proportion judgments, and its disadvantage becomes potentially greater as the number of components increases}, doi = {10.1002/(sici)1099-0720(199804)12:2<173::aid-acp499>3.0.co;2-k}, keywords = {pie-charts}, publisher = {Wiley-Blackwell}, url = {https://doi.org/10.1002%2F%28sici%291099-0720%28199804%2912%3A2%3C173%3A%3Aaid-acp499%3E3.0.co%3B2-k}, } @InProceedings{Hullman2011, author = {Jessica Hullman and Eytan Adar and Priti Shah}, title = {The impact of social information on visual judgments}, booktitle = {Proceedings of the 2011 annual conference on Human factors in computing systems - {CHI} '11}, year = {2011}, publisher = {{ACM} Press}, abstract = {Social visualization systems have emerged to support collective intelligence-driven analysis of a growing influx of open data. As with many other online systems, social signals (e.g., forums, polls) are commonly integrated to drive use. Unfortunately, the same social features that can provide rapid, high-accuracy analysis are coupled with the pitfalls of any social system. Through an experiment involving over 300 subjects, we address how social information signals (social proof) affect quantitative judgments in the context of graphical perception. We identify how unbiased social signals lead to fewer errors over non-social settings and conversely, how biased signals lead to more errors. 
We further reflect on how systematic bias nullifies certain collective intelligence benefits, and we provide evidence of the formation of information cascades. We describe how these findings can be applied to collaborative visualization systems to produce more accurate individual interpretations in social contexts.}, doi = {10.1145/1978942.1979157}, url = {https://doi.org/10.1145%2F1978942.1979157}, } @Article{Simkin1987, author = {David Simkin and Reid Hastie}, title = {An Information-Processing Analysis of Graph Perception}, journal = {Journal of the American Statistical Association}, year = {1987}, volume = {82}, number = {398}, pages = {454--465}, month = {jun}, internal-note = {NOTE(review): abstract removed — the text previously stored here was an accidental verbatim duplicate of the Hullman2011 abstract and did not belong to this 1987 paper; TODO add the correct abstract from the publisher record (doi 10.1080/01621459.1987.10478448)}, doi = {10.1080/01621459.1987.10478448}, publisher = {Informa {UK} Limited}, url = {https://doi.org/10.1080%2F01621459.1987.10478448}, } @Article{Robertson2008, author = {G. Robertson and R. Fernandez and D. Fisher and B. Lee and J. 
Stasko}, title = {Effectiveness of Animation in Trend Visualization}, journal = {{IEEE} Transactions on Visualization and Computer Graphics}, year = {2008}, volume = {14}, number = {6}, pages = {1325--1332}, month = {nov}, abstract = {Animation has been used to show trends in multi-dimensional data. This technique has recently gained new prominence for presentations, most notably with Gapminder Trendalyzer. In Trendalyzer, animation together with interesting data and an engaging presenter helps the audience understand the results of an analysis of the data. It is less clear whether trend animation is effective for analysis. This paper proposes two alternative trend visualizations that use static depictions of trends: one which shows traces of all trends overlaid simultaneously in one display and a second that uses a small multiples display to show the trend traces side-by-side. The paper evaluates the three visualizations for both analysis and presentation. Results indicate that trend animation can be challenging to use even for presentations; while it is the fastest technique for presentation and participants find it enjoyable and exciting, it does lead to many participant errors. 
Animation is the least effective form for analysis; both static depictions of trends are significantly faster than animation, and the small multiples display is more accurate.}, doi = {10.1109/tvcg.2008.125}, keywords = {animation}, publisher = {Institute of Electrical and Electronics Engineers ({IEEE})}, url = {https://doi.org/10.1109%2Ftvcg.2008.125}, } @Article{Harrison2014, author = {Lane Harrison and Fumeng Yang and Steven Franconeri and Remco Chang}, title = {Ranking Visualizations of Correlation Using Weber's Law}, journal = {{IEEE} Transactions on Visualization and Computer Graphics}, year = {2014}, volume = {20}, number = {12}, pages = {1943--1952}, month = {dec}, abstract = {Despite years of research yielding systems and guidelines to aid visualization design, practitioners still face the challenge of identifying the best visualization for a given dataset and task. One promising approach to circumvent this problem is to leverage perceptual laws to quantitatively evaluate the effectiveness of a visualization design. Following previously established methodologies, we conduct a large scale (n=1687) crowdsourced experiment to investigate whether the perception of correlation in nine commonly used visualizations can be modeled using Weber’s law. 
The results of this experiment contribute to our understanding of information visualization by establishing that: 1) for all tested visualizations, the precision of correlation judgment could be modeled by Weber’s law, 2) correlation judgment precision showed striking variation between negatively and positively correlated data, and 3) Weber models provide a concise means to quantify, compare, and rank the perceptual precision afforded by a visualization.}, doi = {10.1109/tvcg.2014.2346979}, keywords = {scatterplots}, publisher = {Institute of Electrical and Electronics Engineers ({IEEE})}, url = {https://doi.org/10.1109%2Ftvcg.2014.2346979}, } @InProceedings{Cawthon2007, author = {Nick Cawthon and Andrew Vande Moere}, title = {The Effect of Aesthetic on the Usability of Data Visualization}, booktitle = {2007 11th International Conference Information Visualization ({IV} '07)}, year = {2007}, month = {jul}, publisher = {{IEEE}}, abstract = {Aesthetic seems currently under represented in most current data visualization evaluation methodologies. This paper investigates the results of an online survey of 285 participants, measuring both perceived aesthetic as well as the efficiency and effectiveness of retrieval tasks across a set of 11 different data visualization techniques. The data visualizations represent an identical hierarchical dataset, which has been normalized in terms of color, typography and layout balance. This study measured parameters such as speed of completion, accuracy rate, task abandonment and latency of erroneous response. Our findings demonstrate a correlation between latency in task abandonment and erroneous response time in relation to visualization's perceived aesthetic. 
These results support the need for an increased recognition for aesthetic in the typical evaluation process of data visualization techniques.}, doi = {10.1109/iv.2007.147}, url = {https://doi.org/10.1109%2Fiv.2007.147}, } @Article{Lewandowsky1989, author = {Stephan Lewandowsky and Ian Spence}, title = {Discriminating Strata in Scatterplots}, journal = {Journal of the American Statistical Association}, year = {1989}, volume = {84}, number = {407}, pages = {682--688}, month = {sep}, abstract = {When multiple groups are shown in a scatterplot each stratum is represented by a different symbol; for example, three strata might be coded using red, green, and yellow circles. Various symbol types were compared by behavioral experiment: Subjects were fastest when strata were coded using different colors and slowest when strata were coded with confusable letters—but there were no differences in accuracy. Accuracy differed only when processing time was restricted, again with different colors and confusable letters representing the two extremes. We conclude that color is the optimal symbol type and show that measuring response latency in addition to accuracy is essential in research on graphical perception.}, doi = {10.1080/01621459.1989.10478821}, keywords = {scatterplots}, publisher = {Informa {UK} Limited}, url = {https://doi.org/10.1080%2F01621459.1989.10478821}, } @Article{Siegrist1996, author = {Michael Siegrist}, title = {The use or misuse of three-dimensional graphs to represent lower-dimensional data}, journal = {Behaviour \& Information Technology}, year = {1996}, volume = {15}, number = {2}, pages = {96--100}, month = {jan}, abstract = {Some statisticians hold strong opinions regarding graphs with a 3-D look. However, in experiments little attention has been paid to the effects of adding decorative depth. The performance of subjects on pie charts and bar charts with and without 3-D was evaluated in the present experiment. 
Subjects were asked to make relative magnitude estimates for different graphs. For pie charts, better performance was observed for 2-D than for 3-D charts. For the bar charts, a more differentiated picture emerged: performance was dependent on the position, height and dimension of the bars. However, 3-D bar charts had the one disadvantage that subjects needed more time to evaluate this type of graph.}, doi = {10.1080/014492996120300}, keywords = {pie-charts, 3D}, publisher = {Informa {UK} Limited}, url = {https://doi.org/10.1080%2F014492996120300}, } @Article{Demiralp2014, author = {Cagatay Demiralp and Michael S. Bernstein and Jeffrey Heer}, title = {Learning Perceptual Kernels for Visualization Design}, journal = {{IEEE} Transactions on Visualization and Computer Graphics}, year = {2014}, volume = {20}, number = {12}, pages = {1933--1942}, month = {dec}, abstract = {Visualization design can benefit from careful consideration of perception, as different assignments of visual encoding variables such as color, shape and size affect how viewers interpret data. In this work, we introduce perceptual kernels: distance matrices derived from aggregate perceptual judgments. Perceptual kernels represent perceptual differences between and within visual variables in a reusable form that is directly applicable to visualization evaluation and automated design. We report results from crowd-sourced experiments to estimate kernels for color, shape, size and combinations thereof. We analyze kernels estimated using five different judgment types-including Likert ratings among pairs, ordinal triplet comparisons, and manual spatial arrangement-and compare them to existing perceptual models. 
We derive recommendations for collecting perceptual similarities, and then demonstrate how the resulting kernels can be applied to automate visualization design decisions.}, doi = {10.1109/tvcg.2014.2346978}, publisher = {Institute of Electrical and Electronics Engineers ({IEEE})}, url = {https://doi.org/10.1109%2Ftvcg.2014.2346978}, } @Article{Cleveland1982, author = {William S. Cleveland and Persi Diaconis and Robert McGill}, title = {Variables on Scatterplots Look More Highly Correlated When the Scales Are Increased}, journal = {Science}, year = {1982}, volume = {216}, number = {4550}, pages = {1138--1141}, month = {jun}, abstract = {Judged association between two variables represented on scatterplots increased when the scales on the horizontal and vertical axes were simultaneously increased so that the size of the point cloud within the frame of the plot decreased. Judged association was very different from the correlation coefficient, r, which is the most widely used measure of association.}, doi = {10.1126/science.216.4550.1138}, keywords = {scatterplots}, publisher = {American Association for the Advancement of Science ({AAAS})}, url = {https://doi.org/10.1126%2Fscience.216.4550.1138}, } @Article{Spence2004, author = {Ian Spence}, title = {The Apparent and Effective Dimensionality of Representations of Objects}, journal = {Human Factors: The Journal of the Human Factors and Ergonomics Society}, year = {2004}, volume = {46}, number = {4}, pages = {738--747}, abstract = {Information displays commonly use 2-D and 3-D objects even though the numbers represented are 1-D. This practice may be problematic because the psychophysical relation between perceived and physical magnitudes is generally nonlinear for areas and volumes. Nonetheless, this research shows that apparent 2-D and 3-D objects can produce linear psychophysical functions if only one dimension shows variation. 
Processing time increases with the number of dimensions in the objects that show variation, not with the apparent dimensionality. Indeed, when only one dimension showed variation, apparent 3-D objects were judged more quickly than were apparent 2-D or 1-D objects. These results present a challenge for computational models of size perception and have implications for the design of information displays. Actual or potential applications of this research include the design and use of statistical graphs and information displays; objects that display variation in more than one dimension should not be used to represent single (1-D) numerical variables if they are to be judged accurately and rapidly.}, doi = {10.1518/hfes.46.4.738.56809}, keywords = {3D}, publisher = {{SAGE} Publications}, url = {https://doi.org/10.1518%2Fhfes.46.4.738.56809}, } @InProceedings{Hale2017, author = {Scott A. Hale and Graham McNeill and Jonathan Bright}, title = {Where'd it go? How geographic and force-directed layouts affect network task performances}, booktitle = {Eurographics Conference on Visualization (EuroVis) 2017 Volume 36 (2017), Number 3 J. Heer, T. Ropinski and J. van Wijk (Guest Editors)}, year = {2017}, abstract = {When visualizing geospatial network data, it is possible to position nodes according to their geographic locations or to po- sition nodes using standard network layout algorithms that ignore geographic location. Such data is increasingly common in interactive displays of Internet-connected sensor data, but network layouts that ignore geographic location data are rarely em- ployed. We conduct a user experiment to compare the effects of geographic and force-directed network layouts on three common network tasks: locating a node, determining the path length between two nodes, and comparing the degree of two nodes. We found a geographic layout was superior for locating a node but inferior for determining the path length between two nodes. 
The two layouts performed similarly when participants compared the degree of two nodes. We also tested a relaxed- or pseudo- geographic layout created with multidimensional scaling and found it performed as well or better than the pure geographic layout on all tasks but remained inferior to the force-directed layout for the path-length task. We suggest interactive displays of geospatial network data allow viewers to switch between geographic and force-directed layouts, although further research is needed to understand the extent to which viewers are able to choose the most appropriate layout for a given task.}, } @InProceedings{Ziemkiewicz2010, author = {Caroline Ziemkiewicz and Robert Kosara}, title = {Laws of Attraction: From Perceived Forces to Conceptual Similarity}, year = {2010}, publisher = {IEEE}, abstract = {Many of the pressing questions in information visualization deal with how exactly a user reads a collection of visual marks as information about relationships between entities. Previous research has suggested that people see parts of a visualization as objects, and may metaphorically interpret apparent physical relationships between these objects as suggestive of data relationships. We explored this hypothesis in detail in a series of user experiments. Inspired by the concept of implied dynamics in psychology, we first studied whether perceived gravity acting on a mark in a scatterplot can lead to errors in a participant’s recall of the mark’s position. The results of this study suggested that such position errors exist, but may be more strongly influenced by attraction between marks. We hypothesized that such apparent attraction may be influenced by elements used to suggest relationship between objects, such as connecting lines, grouping elements, and visual similarity. 
We further studied what visual elements are most likely to cause this attraction effect, and whether the elements that best predicted attraction errors were also those which suggested conceptual relationships most strongly. Our findings show a correlation between attraction errors and intuitions about relatedness, pointing towards a possible mechanism by which the perception of visual marks becomes an interpretation of data relationships.}, } @InProceedings{Ziemkiewicz2010a, author = {Caroline Ziemkiewicz and Robert Kosara}, title = {Implied dynamics in information visualization}, booktitle = {Proceedings of the International Conference on Advanced Visual Interfaces - {AVI} '10}, year = {2010}, publisher = {{ACM} Press}, abstract = {Information visualization is a powerful method for understanding and working with data. However, we still have an incomplete understanding of how people use visualization to think about information. We propose that people use visualization to support comprehension and reasoning by viewing abstract visual representations as physical scenes with a set of implied dynamics between objects. Inferences based on these implied dynamics are metaphorically extended to form inferences about the represented information. This view predicts that even seemingly meaningless properties of a visualization, including such minor design elements as borders, background areas, and the connectedness of parts, may affect how people perceive semantic aspects of data by suggesting different potential dynamics between data points. We present a study that supports this claim and discuss the design implications of this theory of information visualization.}, doi = {10.1145/1842993.1843031}, url = {https://doi.org/10.1145%2F1842993.1843031}, } @InProceedings{Skau2017, author = {Drew Skau and Robert Kosara}, title = {Readability and Precision in Pictorial Bar Charts}, booktitle = {Eurographics Conference on Visualization (EuroVis) 2017 Short Paper B. Kozlíková, T. 
Schreck, and T. Wischgoll (Editors)}, year = {2017}, abstract = {Bar charts embellished with unique artistic styles, or made to look like real objects, are common in information graphics. Embellishments are typically considered detrimental to readability and accuracy, since they add clutter and noise. Previous work has found that some of the shapes used, like rounded tops, triangles, etc., decreased accuracy when judging relative and absolute sizes, while T-shaped bars even showed a slight increase relative to the basic bar chart. In this paper, we report on a study that adds pictorial elements to bar charts of four different shapes tested previously, thus also including the elements of color and texture. We find that pictorial bar charts reduce accuracy, but not beyond the effect already observed for their shape. They also do not significantly increase response time. Embellished bar charts may not be as problematic as commonly assumed.}, } @Article{Skau2016, author = {Drew Skau and Robert Kosara}, title = {Arcs, Angles, or Areas: Individual Data Encodings in Pie and Donut Charts}, journal = {Computer Graphics Forum}, year = {2016}, volume = {35}, number = {3}, pages = {121--130}, month = {jun}, abstract = {Pie and donut charts have been a hotly debated topic in the visualization community for some time now. Even though pie charts have been around for over 200 years, our understanding of the perceptual factors used to read data in them is still limited. Data is encoded in pie and donut charts in three ways: arc length, center angle, and segment area. For our first study, we designed variations of pie charts to test the importance of individual encodings for reading accuracy. In our second study, we varied the inner radius of a donut chart from a filled pie to a thin outline to test the impact of removing the central angle. 
Both studies point to angle being the least important visual cue for both charts, and the donut chart being as accurate as the traditional pie chart.}, doi = {10.1111/cgf.12888}, keywords = {pie-charts}, publisher = {Wiley-Blackwell}, url = {https://doi.org/10.1111%2Fcgf.12888}, } @InProceedings{Kosara2016, author = {Robert Kosara and Drew Skau}, title = {Judgment Error in Pie Chart Variations}, booktitle = {Eurographics Conference on Visualization (EuroVis) 2016 Short Papers}, year = {2016}, abstract = {Pie charts and their variants are prevalent in business settings and many other uses, even if they are not popular with the academic community. In a recent study, we found that contrary to general belief, there is no clear evidence that these charts are read based on the central angle. Instead, area and arc length appear to be at least equally important. In this paper, we build on that study to test several pie chart variations that are popular in information graphics: exploded pie chart, pie with larger slice, elliptical pie, and square pie (in addition to a regular pie chart used as the baseline). We find that even variants that do not distort central angle cause greater error than regular pie charts. Charts that distort the shape show the highest error. 
Many of our predictions based on the previous study’s results are borne out by this study’s findings.}, doi = {10.2312/eurovisshort.20161167}, keywords = {pie-charts}, } @InProceedings{Kosara2016a, author = {Robert Kosara}, title = {An Empire Built On Sand}, booktitle = {Proceedings of the Beyond Time and Errors on Novel Evaluation Methods for Visualization - {BELIV} '16}, year = {2016}, publisher = {{ACM} Press}, abstract = {If we were to design Information Visualization from scratch, we would start with the basics: understand the principles of perception, test how they apply to different data encodings, build up those encodings to see if the principles still apply, etc. Instead, visualization was created from the other end: by building visual displays without an idea of how or if they worked, and then finding the relevant perceptual and other basics here and there. This approach has the problem that we end up with a very patchy understanding of the foundations of our field. More than that, there is a good amount of unproven assumptions, aesthetic judgments, etc. mixed in with the evidence. We often don't even realize how much we rely on the latter, and can't easily identify them because they have been so deeply incorporated into the fabric of our field. In this paper, I attempt to tease apart what we know and what we only think we know, using a few examples. The goal is to point out specific gaps in our knowledge, and to encourage researchers in the field to start questioning the underlying assumptions. Some of them are probably sound and will hold up to scrutiny. But some of them will not. We need to find out which is which and systematically build up a better foundation for our field. 
If we intend to develop ever more and better techniques and systems, we can't keep ignoring the base, or it will all come tumbling down sooner or later.}, doi = {10.1145/2993901.2993909}, url = {https://doi.org/10.1145%2F2993901.2993909}, } @InProceedings{Kosara2010, author = {Robert Kosara and Caroline Ziemkiewicz}, title = {Do Mechanical Turks dream of square pie charts?}, booktitle = {Proceedings of the 3rd {BELIV}'10 Workshop on {BEyond} time and errors: novel {evaLuation} methods for Information Visualization - {BELIV} '10}, year = {2010}, publisher = {{ACM} Press}, abstract = {Online studies are an attractive alternative to the labor-intensive lab study, and promise the possibility of reaching a larger variety and number of people than at a typical university. There are also a number of drawbacks, however, that have made these studies largely impractical so far. Amazon's Mechanical Turk is a web service that facilitates the assignment of small, web-based tasks to a large pool of anonymous workers. We used it to conduct several perception and cognition studies, one of which was identical to a previous study performed in our lab. We report on our experiences and present ways to avoid common problems by taking them into account in the study design, and taking advantage of Mechanical Turk's features.}, doi = {10.1145/2110192.2110202}, keywords = {pie-charts}, url = {https://doi.org/10.1145%2F2110192.2110202}, } @InProceedings{Hullman2017, author = {Jessica Hullman and Robert Kosara and Heidi Lam}, title = {Finding a Clear Path: Structuring Strategies for Visualization Sequences}, booktitle = {Eurographics Conference on Visualization (EuroVis) 2017}, year = {2017}, abstract = {Little is known about how people structure sets of visualizations to support sequential viewing. We contribute findings from several studies examining visualization sequencing and reception. 
In our first study, people made decisions between various possible structures as they ordered a set of related visualizations (consisting of either bar charts or thematic maps) into what they considered the clearest sequence for showing the data. We find that most people structure visualization sequences hierarchically: they create high level groupings based on shared data properties like time period, measure, level of aggregation, and spatial region, then order the views within these groupings. We also observe a tendency for certain types of similarities between views, like a common spatial region or aggregation level, to be seen as more appropriate categories for organizing views in a sequence than others, like a common time period or measure. In a second study, we find that viewers’ perceptions of the quality and intention of different sequences are largely consistent with the perceptions of the users who created them. The understanding of sequence preferences and perceptions that emerges from our studies has implications for the development of visualization authoring tools and sequence recommendations for guided analysis.}, } @Article{Gattis1996, author = {Merideth Gattis and Keith J. Holyoak}, title = {Mapping conceptual to spatial relations in visual reasoning.}, journal = {Journal of Experimental Psychology: Learning, Memory, and Cognition}, year = {1996}, volume = {22}, number = {1}, pages = {231--239}, abstract = {In 3 experiments, the authors investigated the impact of goals and perceptual relations on graph interpretation when people evaluate functional dependencies between continuous variables. Participants made inferences about the relative rate of 2 continuous linear variables (altitude and temperature). The authors varied the assignments of variables to axes, the perceived cause–effect relation between the variables, and the causal status of the variable being queried. 
The most striking finding was that accuracy was greater when the slope-mapping constraint was honored, which requires that the variable being queried be assigned to the vertical axis, so that steeper lines map to faster changes in the queried variable. The authors propose that graphs provide external instantiations of intermediate mental representations, enabling people to move from visuospatial representations to abstractions through the use of natural mappings between perceptual and conceptual relations.}, doi = {10.1037/0278-7393.22.1.231}, publisher = {American Psychological Association ({APA})}, url = {https://doi.org/10.1037%2F0278-7393.22.1.231}, } @Article{Elting1999, author = {L. S. Elting and C. G. Martin and S. B. Cantor and E. B. Rubenstein}, title = {Influence of data display formats on physician investigators' decisions to stop clinical trials: prospective trial with repeated measures}, journal = {{BMJ}}, year = {1999}, volume = {318}, number = {7197}, pages = {1527--1531}, month = {jun}, abstract = {Objective: To examine the effect of the method of data display on physician investigators' decisions to stop hypothetical clinical trials for an unplanned statistical analysis. Design: Prospective, mixed model design with variables between subjects and within subjects (repeated measures). Setting: Comprehensive cancer centre. Participants: 34 physicians, stratified by academic rank, who were conducting clinical trials. Interventions: Participants were shown tables, pie charts, bar graphs, and icon displays containing hypothetical data from a clinical trial and were asked to decide whether to continue the trial or stop for an unplanned statistical analysis. Main outcome measure: Percentage of accurate decisions with each type of display. Results: Accuracy of decisions was affected by the type of data display and positive or negative framing of the data. 
More correct decisions were made with icon displays than with tables, pie charts, and bar graphs (82% v 68%, 56%, and 43%, respectively; P=0.03) and when data were negatively framed rather than positively framed in tables (93% v 47%; P=0.004). Conclusions: Clinical investigators' decisions can be affected by factors unrelated to the actual data. In the design of clinical trials information systems, careful consideration should be given to the method by which data are framed and displayed in order to reduce the impact of these extraneous factors.}, doi = {10.1136/bmj.318.7197.1527}, publisher = {{BMJ}}, url = {https://doi.org/10.1136%2Fbmj.318.7197.1527}, } @Article{Rensink2010, author = {Ronald A. Rensink and Gideon Baldridge}, title = {The Perception of Correlation in Scatterplots}, journal = {Computer Graphics Forum}, year = {2010}, volume = {29}, number = {3}, pages = {1203--1210}, month = {aug}, abstract = {We present a rigorous way to evaluate the visual perception of correlation in scatterplots, based on classical psychophysical methods originally developed for simple properties such as brightness. Although scatterplots are graphically complex, the quantity they convey is relatively simple. As such, it may be possible to assess the perception of correlation in a similar way. Scatterplots were each of 5.0° extent, containing 100 points with a bivariate normal distribution. Means were 0.5 of the range of the points, and standard deviations 0.2 of this range. Precision was determined via an adaptive algorithm to find the just noticeable differences (jnds) in correlation, i.e., the difference between two side-by-side scatterplots that could be discriminated 75% of the time. Accuracy was measured by direct estimation, using reference scatterplots with fixed upper and lower values, with a test scatterplot adjusted so that its correlation appeared to be halfway between these. This process was recursively applied to yield several further estimates. 
Results of the discrimination tests show jnd(r) = k (1/b – r), where r is the Pearson correlation, and parameters 0 < k, b < 1. Integration yields a subjective estimate of correlation g(r) = ln(1 – br) / ln(1 – b). The values of b found via discrimination closely match those found via direct estimation. As such, it appears that the perception of correlation in a scatterplot is completely described by two related performance curves, specified by two easily-measured parameters.}, doi = {10.1111/j.1467-8659.2009.01694.x}, keywords = {scatterplots}, publisher = {Wiley-Blackwell}, url = {https://doi.org/10.1111%2Fj.1467-8659.2009.01694.x}, } @Article{Croxton1927, author = {Frederick E. Croxton and Roy E. Stryker}, title = {Bar Charts versus Circle Diagrams}, journal = {Journal of the American Statistical Association}, year = {1927}, volume = {22}, number = {160}, pages = {473--482}, month = {dec}, doi = {10.1080/01621459.1927.10502976}, keywords = {pie-charts}, publisher = {Informa {UK} Limited}, url = {https://doi.org/10.1080%2F01621459.1927.10502976}, } @Article{Eells1926, author = {Walter Crosby Eells}, title = {The Relative Merits of Circles and Bars for Representing Component Parts}, journal = {Journal of the American Statistical Association}, year = {1926}, volume = {21}, number = {154}, pages = {119--132}, month = {jun}, doi = {10.1080/01621459.1926.10502165}, keywords = {pie-charts}, publisher = {Informa {UK} Limited}, url = {https://doi.org/10.1080%2F01621459.1926.10502165}, } @Article{Croxton1932, author = {Frederick E. Croxton and Harold Stein}, title = {Graphic Comparisons by Bars, Squares, Circles, and Cubes}, journal = {Journal of the American Statistical Association}, year = {1932}, volume = {27}, number = {177}, pages = {54--60}, month = {mar}, doi = {10.1080/01621459.1932.10503227}, keywords = {pie-charts, 3D}, publisher = {Informa {UK} Limited}, url = {https://doi.org/10.1080%2F01621459.1932.10503227}, } @Article{Huhn1927, author = {R. 
von Huhn}, title = {Further Studies in the Graphic Use of Circles and Bars}, journal = {Journal of the American Statistical Association}, year = {1927}, volume = {22}, number = {157}, pages = {31--36}, month = {mar}, doi = {10.1080/01621459.1927.10502938}, keywords = {pie-charts}, publisher = {Informa {UK} Limited}, url = {https://doi.org/10.1080%2F01621459.1927.10502938}, } @Article{Spence1991, author = {Ian Spence and Stephan Lewandowsky}, title = {Displaying proportions and percentages}, journal = {Applied Cognitive Psychology}, year = {1991}, volume = {5}, number = {1}, pages = {61--77}, month = {jan}, abstract = {Pie and bar charts are commonly used to display percentage or proportional data, but professional data analysts have frowned on the use of the pie chart on the grounds that judgements of area are less accurate than judgements of length. Thus the bar chart has been favoured. When the amount of data to be communicated is small, some authorities have advocated the use of properly constructed tables, as another option. The series of experiments reported here suggests that there is little to choose between the pie and the bar chart, with the former enjoying a slight advantage if the required judgement is a complicated one, but that both forms of chart are superior to the table. Thus our results do not support the commonly expressed opinion that pie charts are inferior. 
An analysis of the nature of the task and a review of the psychophysical literature suggest that the traditional prejudice against the pie chart is misguided.}, doi = {10.1002/acp.2350050106}, keywords = {pie-charts}, publisher = {Wiley-Blackwell}, url = {https://doi.org/10.1002%2Facp.2350050106}, } @Article{Flannery1971, author = {James John Flannery}, title = {The relative effectiveness of some common graduated point symbols in the presentation of quantitative data}, journal = {Cartographica: The International Journal for Geographic Information and Geovisualization}, year = {1971}, volume = {8}, number = {2}, pages = {96--109}, month = {dec}, abstract = {Circles with their areas varying in direct proportion to quantities represented are a common form of graduated point symbols. When so used, unfortunately, the average map reader perceives a smaller quantitative difference than intended because circle size differences are usually underestimated. An apparent size scale developed empirically fifteen years ago is claimed to eliminate the problem of consistent underestimation. More recent investigations by psychologists and cartographers support the apparent size scale. Bars communicate quantitative variation effectively when graduated in the traditional manner on a linear basis, but wedges require an apparent size scale and even then are less accurately judged.}, doi = {10.3138/j647-1776-745h-3667}, publisher = {University of Toronto Press Inc. 
({UTPress})}, url = {https://doi.org/10.3138%2Fj647-1776-745h-3667}, } @Article{Lee2016, author = {Sukwon Lee and Sung-Hee Kim and Ya-Hsin Hung and Heidi Lam and Youn-Ah Kang and Ji Soo Yi}, title = {How do People Make Sense of Unfamiliar Visualizations?: A Grounded Model of Novice's Information Visualization Sensemaking}, journal = {{IEEE} Transactions on Visualization and Computer Graphics}, year = {2016}, volume = {22}, number = {1}, pages = {499--508}, month = {jan}, abstract = {In this paper, we would like to investigate how people make sense of unfamiliar information visualizations. In order to achieve the research goal, we conducted a qualitative study by observing 13 participants when they endeavored to make sense of three unfamiliar visualizations (i.e., a parallel-coordinates plot, a chord diagram, and a treemap) that they encountered for the first time. We collected data including audio/video record of think-aloud sessions and semi-structured interview; and analyzed the data using the grounded theory method. The primary result of this study is a grounded model of NOvice's information VIsualization Sensemaking (NOVIS model), which consists of the five major cognitive activities: 1 encountering visualization, 2 constructing a frame, 3 exploring visualization, 4 questioning the frame, and 5 floundering on visualization. We introduce the NOVIS model by explaining the five activities with representative quotes from our participants. We also explore the dynamics in the model. Lastly, we compare with other existing models and share further research directions that arose from our observations.}, doi = {10.1109/tvcg.2015.2467195}, keywords = {parallel-coordinates}, publisher = {Institute of Electrical and Electronics Engineers ({IEEE})}, url = {https://doi.org/10.1109%2Ftvcg.2015.2467195}, } @Article{Carswell1993, author = {C. Melody Carswell and Cathy Emery and Andrea M. 
Lonon}, title = {Stimulus complexity and information integration in the spontaneous interpretations of line graphs}, journal = {Applied Cognitive Psychology}, year = {1993}, volume = {7}, number = {4}, pages = {341--357}, month = {aug}, abstract = {Viewers of a graph will readily interpret its contents, even when given no explicit instructions regarding what information to extract. However, little is known about the strategies that subjects adopt when making such spontaneous interpretations. In the present experiments, subjects studied single-function line graphs for self-determined periods. They provided written interpretations immediately following examination of each graph. The structural complexity of stimulus graphs was varied by eliminating symmetry, and by adding data points, departures from linearity, and trend reversals. Across two experiments, number of trend reversals was the main determinant of comprehension difficulty as measured by study times. An increased number of reversals also resulted in more local, detail-oriented content in interpretations. By contrast, the presence of such emergent features as symmetry and linearity led to increases in the amount of integrative, global content in interpretations, usually at the expense of local detail. Surprisingly, increases in the number of data points led to similar increases in the grain of subjects' interpretations. The last finding may reflect a shift from point-by-point to integrative study strategies necessitated by capacity limitations in working memory.}, doi = {10.1002/acp.2350070407}, publisher = {Wiley-Blackwell}, url = {https://doi.org/10.1002%2Facp.2350070407}, } @Article{Hollands1992, author = {J. G. Hollands and Ian Spence}, title = {Judgments of change and proportion in graphical perception.}, journal = {Human Factors: The Journal of the Human Factors and Ergonomics Society}, year = {1992}, volume = {34}, number = {3}, pages = {313--334}, abstract = {Subjects judged change and proportion when viewing graphs in two experiments. Change was judged more quickly and accurately with line and bar graphs than with pie charts or tiered bar graphs, and this difference was larger when the rate of change was smaller. Without a graduated scale, proportion was judged more quickly and accurately with pie charts and divided bar graphs than with line or bar graphs. Perception is direct when it requires simpler or fewer mental operations; we propose that perception of change is direct with line and bar graphs, whereas perception of proportion is direct with pie charts and divided bar graphs. The results are also consistent with the proximity compatibility principle. Suggestions for improving the design of graphical displays are given.}, keywords = {pie-charts}, } @Article{Shah2011, author = {Shah, Priti and Freedman, Eric G.}, title = {Bar and Line Graph Comprehension: An Interaction of Top-Down and Bottom-Up Processes}, journal = {Topics in Cognitive Science}, year = {2011}, volume = {3}, number = {3}, pages = {560--578}, issn = {1756-8765}, abstract = {This experiment investigated the effect of format (line vs. bar), viewers’ familiarity with variables, and viewers’ graphicacy (graphical literacy) skills on the comprehension of multivariate (three variable) data presented in graphs. Fifty-five undergraduates provided written descriptions of data for a set of 14 line or bar graphs, half of which depicted variables familiar to the population and half of which depicted variables unfamiliar to the population. Participants then took a test of graphicacy skills. As predicted, the format influenced viewers’ interpretations of data. 
Specifically, viewers were more likely to describe x–y interactions when viewing line graphs than when viewing bar graphs, and they were more likely to describe main effects and “z–y” (the variable in the legend) interactions when viewing bar graphs than when viewing line graphs. Familiarity of data presented and individuals’ graphicacy skills interacted with the influence of graph format. Specifically, viewers were most likely to generate inferences only when they had high graphicacy skills, the data were familiar and thus the information inferred was expected, and the format supported those inferences. Implications for multivariate data display are discussed.}, doi = {10.1111/j.1756-8765.2009.01066.x}, publisher = {Blackwell Publishing Ltd}, url = {http://dx.doi.org/10.1111/j.1756-8765.2009.01066.x}, } @Article{Correll2014, author = {Michael Correll and Michael Gleicher}, title = {Error Bars Considered Harmful: Exploring Alternate Encodings for Mean and Error}, journal = {{IEEE} Transactions on Visualization and Computer Graphics}, year = {2014}, volume = {20}, number = {12}, pages = {2142--2151}, month = {dec}, abstract = {When making an inference or comparison with uncertain, noisy, or incomplete data, measurement error and confidence intervals can be as important for judgment as the actual mean values of different groups. These often misunderstood statistical quantities are frequently represented by bar charts with error bars. This paper investigates drawbacks with this standard encoding, and considers a set of alternatives designed to more effectively communicate the implications of mean and error data to a general audience, drawing from lessons learned from the use of visual statistics in the information visualization community. We present a series of crowd-sourced experiments that confirm that the encoding of mean and error significantly changes how viewers make decisions about uncertain data. 
Careful consideration of design tradeoffs in the visual presentation of data results in human reasoning that is more consistently aligned with statistical inferences. We suggest the use of gradient plots (which use transparency to encode uncertainty) and violin plots (which use width) as better alternatives for inferential tasks than bar charts with error bars.}, doi = {10.1109/tvcg.2014.2346298}, publisher = {Institute of Electrical and Electronics Engineers ({IEEE})}, url = {https://doi.org/10.1109%2Ftvcg.2014.2346298}, } @Article{Bobko1979, author = {Philip Bobko and Ronald Karren}, title = {The perception of Pearson product moment correlations from bivariate scatterplots}, journal = {Personnel Psychology}, year = {1979}, volume = {32}, number = {2}, pages = {313--325}, month = {jun}, abstract = {Perceptions about the Pearson product moment correlation, r, from bivariate scatterplots were investigated through the use of a questionnaire. It was found that subjects who are relatively sophisticated in psychometric techniques tend to underestimate the magnitude of r, with most pronounced disparity in the range .2 < |r| < .6. Additionally, estimates of r from specially designed scatterplots indicated that subjects (1) correctly estimated the effects of range restriction, (2) underestimated the effects of attenuating outliers, (3) incorrectly reduced estimates of r when the regression slope was relatively high or low, and (4) often failed to consider the effects of removing the middle third of the data. Several implications of these generally conservative estimations are discussed.}, doi = {10.1111/j.1744-6570.1979.tb02137.x}, keywords = {scatterplots}, publisher = {Wiley-Blackwell}, url = {https://doi.org/10.1111%2Fj.1744-6570.1979.tb02137.x}, } @Article{Doherty2007, author = {Michael E. Doherty and Richard B. Anderson and Andrea M. Angott and Dale S. 
Klopfer}, title = {The perception of scatterplots}, journal = {Perception \& Psychophysics}, year = {2007}, volume = {69}, number = {7}, pages = {1261--1272}, month = {oct}, abstract = {Four experiments investigated the perception of correlations from scatterplots. All graphic properties, other than error variance, that have been shown to affect subjective but not objective correlation (r) were held constant. Participants in Experiment 1 ranked 21 scatterplots according to the magnitude of r. In Experiments 2 and 3, participants made yes/no judgments to indicate whether a scatterplot was high (signal) or low (noise). Values of r for signal and noise scatterplots varied across participants. Differences between correlations for signal and for noise scatterplots were constant in r in Experiment 2, and constant in r^2 in Experiment 3. Standard deviations of the ranks in Experiment 1 and d′ values in Experiments 2 and 3 showed that discriminability increased with the magnitude of r. In Experiment 4, faculty and graduate students in psychology and sociology made point estimates of r for single scatterplots. Estimates were negatively accelerated functions of objective correlation.}, doi = {10.3758/bf03193961}, keywords = {scatterplots}, publisher = {Springer Nature}, url = {https://doi.org/10.3758%2Fbf03193961}, } @Article{Lauer1989, author = {Thomas W. Lauer and Gerald V. Post}, title = {Density in scatterplots and the estimation of correlation}, journal = {Behaviour \& Information Technology}, year = {1989}, volume = {8}, number = {3}, pages = {235--244}, abstract = {The construction of a graphical presentation involves the representation of information by means of visual symbols. The acquisition of information from the resultant graph is a perceptual process that involves the decoding and interpretation of the visual symbols. Hence good design decisions will be based on an understanding of the information acquisition process and in particular graphical perception. 
This study examines the perception of bivariate normal data presented in a scatter diagram, and creates a model that successfully explains how individuals perceive the information contained in scatterplots. Subjects were shown a series of scatter diagrams on the CRT of a microcomputer and were asked to estimate correlation. Several variables were examined that explain estimated correlation including regression slope, dispersion, number of points displayed, and the size of the CRT screen. All of these factors were found to significantly affect subjects' estimates of correlation.}, doi = {10.1080/01449298908914554}, keywords = {scatterplots}, publisher = {Informa {UK} Limited}, url = {https://doi.org/10.1080%2F01449298908914554}, } @Article{Meyer1997, author = {Joachim Meyer and Meirav Taieb and Ittai Flascher}, title = {Correlation estimates as perceptual judgments.}, journal = {Journal of Experimental Psychology: Applied}, year = {1997}, volume = {3}, number = {1}, pages = {3--20}, abstract = { Correlation estimates from scatterplots were studied as an example for an intuitive decision task. Three experiments showed that subjective correlation estimates are based on geometric properties of the displays. People with different levels of statistical training were found to assess correlations from scatterplots in close accordance with the power function rest = 1 – aXb, where X is the mean of the geometrical distances between the points and the regression line or a similar central axis. Changes of the slope of the displayed point cloud and the introduction of outliers affected estimates as predicted from the function. The study demonstrated that intuitive judgments in a complex domain are based on the perception of geometric features of the relevant information. 
By applying these findings, graphic designers can accurately predict how changes in a display will affect viewers' impressions.}, doi = {10.1037/1076-898x.3.1.3}, keywords = {scatterplots}, publisher = {American Psychological Association ({APA})}, url = {https://doi.org/10.1037%2F1076-898x.3.1.3}, } @Article{Pollack1960, author = {Irwin Pollack}, title = {Identification of visual correlational scatterplots.}, journal = {Journal of Experimental Psychology}, year = {1960}, volume = {59}, number = {6}, pages = {351--360}, abstract = {Visual correlation scattergrams were obtained by mixing a common noise source with independent noise sources and displaying the mixtures across the coordinates of an oscilloscope. The task of S was to identify whether the reference correlation or a higher correlation was presented. The results were interpreted in terms of the task of S as a tester of alternative statistical hypotheses under conditions of varying reliability of the display information.}, doi = {10.1037/h0042245}, keywords = {scatterplots}, publisher = {American Psychological Association ({APA})}, url = {https://doi.org/10.1037%2Fh0042245}, } @Article{Strahan1978, author = {R. F. Strahan and C. J. Hansen}, title = {Underestimating Correlation from Scatterplots}, journal = {Applied Psychological Measurement}, year = {1978}, volume = {2}, number = {4}, pages = {543--550}, month = {oct}, abstract = {Eighty subjects estimated the correlation coefficient, r, for each of 13 computer-printed scatter plots. Making judgments were 46 students in a graduate-level statistics course and 34 faculty and graduate students in a department of psychology. The actual correlation values ranged from .010 to .995, with 200 observations in each scatterplot and with the order of scatterplot presentation ran domized. As predicted, subjects underestimated the degree of actual correlation. 
Also as predicted, but with substantial moderation by a method-of-presentation factor, this underestimation was most pronounced in the middle of the correlational range—between the 0 and 1 extremes. Though perception of correlation was shown not to be veridical (i.e., in terms of r), little support was given one alternative view — its being in terms of r^2.}, doi = {10.1177/014662167800200409}, keywords = {scatterplots}, publisher = {{SAGE} Publications}, url = {https://doi.org/10.1177%2F014662167800200409}, } @Article{Li2008, author = {Jing Li and Jean-Bernard Martens and Jarke J van Wijk}, title = {Judging correlation from scatterplots and parallel coordinate plots}, journal = {Information Visualization}, year = {2008}, volume = {9}, number = {1}, pages = {13--30}, month = {may}, abstract = {Scatterplots and parallel coordinate plots (PCPs) can both be used to assess correlation visually. In this paper, we compare these two visualization methods in a controlled user experiment. More specifically, 25 participants were asked to report observed correlation as a function of the sample correlation under varying conditions of visualization method, sample size and observation time. A statistical model is proposed to describe the correlation judgment process. The accuracy and the bias in the judgments in the different conditions are established by interpreting the parameters in this model. A discriminability index is proposed to characterize the performance accuracy in each experi- mental condition. Moreover, a statistical test is applied to derive whether or not the human sensation scale differs from a theoretically optimal (i.e., unbiased) judgment scale. Based on these analyses, we conclude that users can reliably distinguish twice as many different correlation levels when using scatterplots as when using PCPs. We also find that there is a bias towards reporting nega- tive correlations when using PCPs. 
Therefore, we conclude that scatterplots are more effective than parallel plots in supporting visual correlation analysis}, doi = {10.1057/ivs.2008.13}, keywords = {scatterplots, parallel-coordinates}, publisher = {{SAGE} Publications}, url = {https://doi.org/10.1057%2Fivs.2008.13}, } @Article{Tremmel1995, author = {Lothar Tremmel}, title = {The Visual Separability of Plotting Symbols in Scatterplots}, journal = {Journal of Computational and Graphical Statistics}, year = {1995}, volume = {4}, number = {2}, pages = {101--112}, month = {jun}, abstract = { Which symbols should be used to represent different groups of data in the same scatterplot? Hypotheses are derived to predict which symbol pairs should lead to good separability, based on the contrast of the symbols' visual properties or "features." In two experiments, experimental scatterplots were shown to subjects on a computer screen; the dependent variable was the decision time to judge which of the two presented symbols was the more frequent one. Analyses of the within-subject effects yielded the following results: (1) Important feature contrasts are brightness, number of line endings, and cur- vature. (2) Symbols that differ simultaneously in two feature dimensions may be more separable than symbols that differ only in either one. (3) The contrasts between circular symbols and radial line symbols like the plus sign or the asterisk are excellent. Practical applications of these findings are discussed, as well as their contribution to the theory of visual perception.}, doi = {10.1080/10618600.1995.10474669}, keywords = {scatterplots}, publisher = {Informa {UK} Limited}, url = {https://doi.org/10.1080%2F10618600.1995.10474669}, } @Article{Gillan2000, author = {Douglas J. Gillan and Anna Burd Callahan}, title = {A Componential Model of Human Interaction with Graphs: {VI}. 
Cognitive Engineering of Pie Graphs}, journal = {Human Factors: The Journal of the Human Factors and Ergonomics Society}, year = {2000}, volume = {42}, number = {4}, pages = {566--591}, month = {dec}, abstract = {This paper proposes and tests the following three-component model of reading a pie graph to estimate segment size: (a) selecting a mentally represented anchor segment (25%, 50%, or 75%), (b) mentally aligning representations of the anchor and target segments, and (c) mentally adjusting the size of the anchor to match the target. Experiment 1 showed that the size difference between the target and closest anchor and the angular displacement of the target from vertical predicted response times (RTs) and absolute error. Experiment 2 demonstrated that an aligned pie graph, designed to reduce the "align" portion of the process, produced faster RTs and lower error than did a regular pie graph. Experiment 3 showed that a pie graph labeled at the anchor values produced the same response times and absolute error as a regular pie graph but that a pie labeled off the anchor points produced a very different pattern of results. The discussion relates the results to the componential model and describes applications in increasing pie graph usability and developing design guidelines. Actual or potential applications of this research include guidelines for graph design and more usable pie graphs. }, doi = {10.1518/001872000779698024}, keywords = {scatterplots, pie-charts}, publisher = {{SAGE} Publications}, url = {https://doi.org/10.1518%2F001872000779698024}, } @InProceedings{Bateman2010, author = {Scott Bateman and Regan Mandryk and Carl Gutwin and Aaron Genest and David McDine and Christopher Brooks}, title = {Useful Junk? 
The Effects of Visual Embellishment on Comprehension and Memorability of Charts}, booktitle = {ACM Conference on Human Factors in Computing Systems (CHI 2010)}, year = {2010}, pages = {2573-2582}, address = {Atlanta, GA, USA}, note = {Best paper award}, abstract = {Guidelines for designing information charts often state that the presentation should reduce 'chart junk' - visual embellishments that are not essential to understanding the data. In contrast, some popular chart designers wrap the presented data in detailed and elaborate imagery, raising the questions of whether this imagery is really as detrimental to understanding as has been proposed, and whether the visual embellishment may have other benefits. To investigate these issues, we conducted an experiment that compared embellished charts with plain ones, and measured both interpretation accuracy and long-term recall. We found that people's accuracy in describing the embellished charts was no worse than for plain charts, and that their recall after a two-to-three-week gap was significantly better. Although we are cautious about recommending that all charts be produced in this style, our results question some of the premises of the minimalist approach to chart design. }, keywords = {chartjunk}, } @Article{Borgo2012, author = {R. Borgo and A. Abdul-Rahman and F. Mohamed and P. W. Grant and I. Reppa and L. Floridi and Min Chen}, title = {An Empirical Study on Using Visual Embellishments in Visualization}, journal = {{IEEE} Transactions on Visualization and Computer Graphics}, year = {2012}, volume = {18}, number = {12}, pages = {2759--2768}, month = {dec}, abstract = {In written and spoken communications, figures of speech (e.g., metaphors and synecdoche) are often used as an aid to help convey abstract or less tangible concepts. However, the benefits of using rhetorical illustrations or embellishments in visualization have so far been inconclusive. 
In this work, we report an empirical study to evaluate hypotheses that visual embellishments may aid memorization, visual search and concept comprehension. One major departure from related experiments in the literature is that we make use of a dual-task methodology in our experiment. This design offers an abstraction of typical situations where viewers do not have their full attention focused on visualization (e.g., in meetings and lectures). The secondary task introduces “divided attention”, and makes the effects of visual embellishments more observable. In addition, it also serves as additional masking in memory-based trials. The results of this study show that visual embellishments can help participants better remember the information depicted in visualization. On the other hand, visual embellishments can have a negative impact on the speed of visual search. The results show a complex pattern as to the benefits of visual embellishments in helping participants grasp key concepts from visualization.}, doi = {10.1109/tvcg.2012.197}, keywords = {chartjunk}, publisher = {Institute of Electrical and Electronics Engineers ({IEEE})}, url = {https://doi.org/10.1109%2Ftvcg.2012.197}, } @Article{Borkin2015, author = {M. Borkin and Z. Bylinskii and N. Kim and C. Bainbridge and C. Yeh and D. Borkin and H. Pfister and A. Oliva}, title = {Beyond Memorability: Visualization Recognition and Recall}, journal = {IEEE Transactions on Visualization and Computer Graphics}, year = {2015}, volume = {PP}, pages = {1--1}, issn = {1077-2626}, abstract = {In this paper we move beyond memorability and investigate how visualizations are recognized and recalled. For this study we labeled a dataset of 393 visualizations and analyzed the eye movements of 33 participants as well as thousands of participant-generated text descriptions of the visualizations. 
This allowed us to determine what components of a visualization attract people’s attention, and what information is encoded into memory. Our findings quantitatively support many conventional qualitative design guidelines, including that (1) titles and supporting text should convey the message of a visualization, (2) if used appropriately, pictograms do not interfere with understanding and can improve recognition, and (3) redundancy helps effectively communicate the message. Importantly, we show that visualizations memorable “at-a-glance” are also capable of effectively conveying the message of the visualization. Thus, a memorable visualization is often also an effective one.}, doi = {10.1109/TVCG.2015.2467732}, keywords = {chartjunk}, lines = {1--13}, url = {https://doi.org/10.1109%2FTVCG.2015.2467732}, } @Article{Borkin2013, author = {Michelle A. Borkin and Azalea A. Vo and Zoya Bylinskii and Phillip Isola and Shashank Sunkavalli and Aude Oliva and Hanspeter Pfister}, title = {What Makes a Visualization Memorable?}, journal = {{IEEE} Transactions on Visualization and Computer Graphics}, year = {2013}, volume = {19}, number = {12}, pages = {2306--2315}, month = {dec}, abstract = {An ongoing debate in the Visualization community concerns the role that visualization types play in data understanding. In human cognition, understanding and memorability are intertwined. As a first step towards being able to ask questions about impact and effectiveness, here we ask: “What makes a visualization memorable?” We ran the largest scale visualization study to date using 2,070 single-panel visualizations, categorized with visualization type (e.g., bar chart, line graph, etc.), collected from news media sites, government reports, scientific journals, and infographic sources. 
Each visualization was annotated with additional attributes, including ratings for data-ink ratios and visual densities. Using Amazon’s Mechanical Turk, we collected memorability scores for hundreds of these visualizations, and discovered that observers are consistent in which visualizations they find memorable and forgettable. We find intuitive results (e.g., attributes like color and the inclusion of a human recognizable object enhance memorability) and less intuitive results (e.g., common graphs are less memorable than unique visualization types). Altogether our findings suggest that quantifying memorability is a general metric of the utility of information, an essential step towards determining how to design effective visualizations.}, doi = {10.1109/tvcg.2013.234}, keywords = {chartjunk}, publisher = {Institute of Electrical and Electronics Engineers ({IEEE})}, url = {https://doi.org/10.1109%2Ftvcg.2013.234}, } @TechReport{Bederson1998, author = {Benjamin B. Bederson and Angela Boltman}, title = {Does Animation Help Users Build Mental Maps of Spatial Information}, year = {1998}, institution = {Human-Computer Interaction Lab, University of Maryland}, url = {http://www.cs.umd.edu/hcil}, abstract = {We examine how animating a viewpoint change in a spatial information system affects a user’s ability to build a mental map of the information in the space. We found that animation improves users' ability to reconstruct the information space, with no penalty on task performance time. 
We believe that this study provides strong evidence for adding animated transitions in many applications with fixed spatial data where the user navigates around the data space.}, keywords = {animation}, } @Article{Archambault2011, author = {D Archambault and H Purchase and B Pinaud}, title = {Animation, Small Multiples, and the Effect of Mental Map Preservation in Dynamic Graphs}, journal = {{IEEE} Transactions on Visualization and Computer Graphics}, year = {2011}, volume = {17}, number = {4}, pages = {539--552}, month = {apr}, abstract = {In this paper, we present the results of a human-computer interaction experiment that compared the performance of the animation of dynamic graphs to the presentation of small multiples and the effect that mental map preservation had on the two conditions. Questions used in the experiment were selected to test both local and global properties of graph evolution over time. The data sets used in this experiment were derived from standard benchmark data sets of the information visualization community. We found that small multiples gave significantly faster performance than animation overall and for each of our five graph comprehension tasks. In addition, small multiples had significantly more errors than animation for the tasks of determining sets of nodes or edges added to the graph during the same timeslice, although a positive time-error correlation coefficient suggests that, in this case, faster responses did not lead to more errors. This result suggests that, for these two tasks, animation is preferable if accuracy is more important than speed. 
Preserving the mental map under either the animation or the small multiples condition had little influence in terms of error rate and response time.}, doi = {10.1109/tvcg.2010.78}, keywords = {animation}, publisher = {Institute of Electrical and Electronics Engineers ({IEEE})}, url = {https://doi.org/10.1109%2Ftvcg.2010.78}, } @Article{Chevalier2014, author = {Fanny Chevalier and Pierre Dragicevic and Steven Franconeri}, title = {The Not-so-Staggering Effect of Staggered Animated Transitions on Visual Tracking}, journal = {{IEEE} Transactions on Visualization and Computer Graphics}, year = {2014}, volume = {20}, number = {12}, pages = {2241--2250}, month = {dec}, abstract = {Interactive visual applications often rely on animation to transition from one display state to another. There are multiple animation techniques to choose from, and it is not always clear which should produce the best visual correspondences between display elements. One major factor is whether the animation relies on staggering-an incremental delay in start times across the moving elements. It has been suggested that staggering may reduce occlusion, while also reducing display complexity and producing less overwhelming animations, though no empirical evidence has demonstrated these advantages. Work in perceptual psychology does show that reducing occlusion, and reducing inter-object proximity (crowding) more generally, improves performance in multiple object tracking. We ran simulations confirming that staggering can in some cases reduce crowding in animated transitions involving dot clouds (as found in, e.g., animated 2D scatterplots). We empirically evaluated the effect of two staggering techniques on tracking tasks, focusing on cases that should most favour staggering. We found that introducing staggering has a negligible, or even negative, impact on multiple object tracking performance. 
The potential benefits of staggering may be outweighed by strong costs: a loss of common-motion grouping information about which objects travel in similar paths, and less predictability about when any specific object would begin to move. Staggering may be beneficial in some conditions, but they have yet to be demonstrated. The present results are a significant step toward a better understanding of animation pacing, and provide direction for further research.}, doi = {10.1109/tvcg.2014.2346424}, keywords = {animation}, publisher = {Institute of Electrical and Electronics Engineers ({IEEE})}, url = {https://doi.org/10.1109%2Ftvcg.2014.2346424}, } @Article{Heer2007, author = {Jeffrey Heer and George Robertson}, title = {Animated Transitions in Statistical Data Graphics}, journal = {{IEEE} Transactions on Visualization and Computer Graphics}, year = {2007}, volume = {13}, number = {6}, pages = {1240--1247}, month = {nov}, abstract = {In this paper we investigate the effectiveness of animated transitions between common statistical data graphics such as bar charts, pie charts, and scatter plots. We extend theoretical models of data graphics to include such transitions, introducing a taxonomy of transition types. We then propose design principles for creating effective transitions and illustrate the application of these principles in DynaVis, a visualization system featuring animated data graphics. 
Two controlled experiments were conducted to assess the efficacy of various transition types, finding that animated transitions can significantly improve graphical perception.}, doi = {10.1109/tvcg.2007.70539}, keywords = {animation}, publisher = {Institute of Electrical and Electronics Engineers ({IEEE})}, url = {https://doi.org/10.1109%2Ftvcg.2007.70539}, } @Article{Talbot2014, author = {Justin Talbot and Vidya Setlur and Anushka Anand}, title = {Four Experiments on the Perception of Bar Charts}, journal = {{IEEE} Transactions on Visualization and Computer Graphics}, year = {2014}, volume = {20}, number = {12}, pages = {2152--2160}, month = {dec}, abstract = {Bar charts are one of the most common visualization types. In a classic graphical perception paper, Cleveland & McGill studied how different bar chart designs impact the accuracy with which viewers can complete simple perceptual tasks. They found that people perform substantially worse on stacked bar charts than on aligned bar charts, and that comparisons between adjacent bars are more accurate than between widely separated bars. However, the study did not explore why these differences occur. In this paper, we describe a series of follow-up experiments to further explore and explain their results. While our results generally confirm Cleveland & McGill's ranking of various bar chart configurations, we provide additional insight into the bar chart reading task and the sources of participants' errors. 
We use our results to propose new hypotheses on the perception of bar charts.}, doi = {10.1109/tvcg.2014.2346320}, keywords = {barcharts}, publisher = {Institute of Electrical and Electronics Engineers ({IEEE})}, url = {https://doi.org/10.1109%2Ftvcg.2014.2346320}, } @Article{Barfield1989, author = {Woodrow Barfield and Robert Robless}, title = {The effects of two- or three-dimensional graphics on the problem-solving performance of experienced and novice decision makers}, journal = {Behaviour & Information Technology}, year = {1989}, volume = {8}, number = {5}, pages = {369--385}, month = {oct}, abstract = {An experiment was performed to investigate the relationship between iwo-dimensional (2-D) or three-dimensional (3-D) graphs displayed on paper or computer and the problem-solving performance of experienced and novice managers. The effects ofthese variables on solution times, confidence in answers and effectivenessof solutions for a production management case were examined. It was predicted that experienced managers would engage in forward chaining as a problem-solving strategy, while novices would use backward chaining as a problem-solvingtechnique (Larkin et al. 1980).Results indicated that solution times were faster for computer than for paper presentations of data, but no significant relationship between response times and dimensionality of graphs was found. Novice subjects produced more accurate answers using 2-D paper presentations of graphs, while experienced managers produced more accurate answers when provided with 3-D graphs on computer. Further, experienced and novice managers were more confident of their answers when provided 2-D graphs as decision aids than with any other mode of presentation. 
Verbal protocols and retrospective reports indicated that in solving the cases experienced managers engaged in forward chaining, backward chaining and means-ends analysis as problem-solving techniques more often than novices.}, doi = {10.1080/01449298908914567}, keywords = {3D}, publisher = {Informa {UK} Limited}, url = {https://doi.org/10.1080%2F01449298908914567}, } @Article{Carswell1991, author = {C. Melody Carswell and Sylvia Frankenberger and Donald Bernhard}, title = {Graphing in depth: perspectives on the use of three-dimensional graphs to represent lower-dimensional data}, journal = {Behaviour \& Information Technology}, year = {1991}, volume = {10}, number = {6}, pages = {459--474}, month = {nov}, internal-note = {NOTE(review): this abstract appears to be duplicated from Barfield1989 -- verify against the actual Carswell et al. (1991) abstract}, abstract = {An experiment was performed to investigate the relationship between two-dimensional (2-D) or three-dimensional (3-D) graphs displayed on paper or computer and the problem-solving performance of experienced and novice managers. The effects of these variables on solution times, confidence in answers and effectiveness of solutions for a production management case were examined. It was predicted that experienced managers would engage in forward chaining as a problem-solving strategy, while novices would use backward chaining as a problem-solving technique (Larkin et al. 1980). Results indicated that solution times were faster for computer than for paper presentations of data, but no significant relationship between response times and dimensionality of graphs was found. Novice subjects produced more accurate answers using 2-D paper presentations of graphs, while experienced managers produced more accurate answers when provided with 3-D graphs on computer. Further, experienced and novice managers were more confident of their answers when provided 2-D graphs as decision aids than with any other mode of presentation. 
Verbal protocols and retrospective reports indicated that in solving the cases experienced managers engaged in forward chaining, backward chaining and means-ends analysis as problem-solving techniques more often than novices.}, doi = {10.1080/01449299108924304}, keywords = {3D}, publisher = {Informa {UK} Limited}, url = {https://doi.org/10.1080%2F01449299108924304}, } @Article{Hughes2001, author = {Brian M. Hughes}, title = {Just Noticeable Differences in {2D} and {3D} Bar Charts: A Psychophysical Analysis of Chart Readability}, journal = {Perceptual and Motor Skills}, year = {2001}, volume = {92}, number = {2}, pages = {495--503}, month = {apr}, abstract = {The comparative readability of 2-dimensional (2D) and 3-dimensional (3D) bar charts was assessed using the method of constant stimuli, and the corresponding Weber constants and just-noticeable-differences were computed. It was predicted that the just-noticeable-difference for 3D charts would be larger than for 2D charts. 36 bar charts (18 2D and 18 3D) were prepared for individual presentation on an overhead projector. Each chart contained two bars, one of standard size (25 units) and a second that varied in size from 24 to 26 units in increments of 0.25 units (1\% of the standard). 57 undergraduates in psychology were shown the 36 charts in a random sequence for 3 sec. each, separated by 3-sec. intervals. Participants recorded comparative size judgements of bars for each chart. Multivariate analyses identify an advantage in accuracy (in the order of magnitude of approximately 10\%) for 2D rather than 3D bar charts, after controlling for sex, age, and use of corrective lenses. The computed Weber constants for these judgements were similar to those computed for visual intensity over 100 years ago, but - as predicted - the just-noticeable-difference for 3D charts was larger (implying that larger differences are needed on 3D charts to distinguish closely related bars). 
Furthermore the use of traditional psycho- physical approaches (such as the method of constant stimuli) in assessing graphical aids would appear to be justified.}, doi = {10.2466/pms.2001.92.2.495}, keywords = {3D, barcharts}, publisher = {{SAGE} Publications}, url = {https://doi.org/10.2466%2Fpms.2001.92.2.495}, } @Article{Zacks1998, author = {Jeff Zacks and Ellen Levy and Barbara Tversky and Diane J. Schiano}, title = {Reading bar graphs: Effects of extraneous depth cues and graphical context.}, journal = {Journal of Experimental Psychology: Applied}, year = {1998}, volume = {4}, number = {2}, pages = {119--138}, abstract = {Manipulating the way a graph is drawn influences viewers' ability to extract information from it. In a series of experiments with simple bar graphs, the authors varied the rendering characteristics and relative heights of the bars and asked participants to estimate the quantities portrayed. The addition of 3-dimensional (3D) perspective depth cues lowered accuracy. This accuracy disadvantage diminished when a short delay was introduced before judgments were reported. The height of the judged bar relative to nearby graphical elements also affected accuracy; this effect was about 1 order of magnitude larger and remained intact when the delay was introduced. Nearby elements also affected viewers' bias (under- or overestimation). These effects do not seem to be due to misestimation of object depth. The results suggest that warnings about accuracy decrements due to 3D shading may be overstated, whereas distortions due to neighboring elements should be of more concern.}, doi = {10.1037/1076-898x.4.2.119}, keywords = {3D, barcharts}, publisher = {American Psychological Association ({APA})}, url = {https://doi.org/10.1037%2F1076-898x.4.2.119}, } @Article{Newman2012, author = {George E. Newman and Brian J. 
Scholl}, title = {Bar graphs depicting averages are perceptually misinterpreted: The within-the-bar bias}, journal = {Psychonomic Bulletin \& Review}, year = {2012}, volume = {19}, number = {4}, pages = {601--607}, month = {may}, abstract = {Perhaps the most common method of depicting data, in both scientific communication and popular media, is the bar graph. Bar graphs often depict measures of central tendency, but they do so asymmetrically: A mean, for example, is depicted not by a point, but by the edge of a bar that originates from a single axis. Here we show that this graphical asymmetry gives rise to a corresponding cognitive asymmetry. When viewers are shown a bar depicting a mean value and are then asked to judge the likelihood of a particular data point being part of its underlying distribution, viewers judge points that fall within the bar as being more likely than points equidistant from the mean, but outside the bar—as if the bar somehow “contained” the relevant data. This “within-the-bar bias” occurred (a) for graphs with and without error bars, (b) for bars that originated from both lower and upper axes, (c) for test points with equally extreme numeric labels, (d) both from memory (when the bar was no longer visible) and in online perception (while the bar was visible during the judgment), (e) both within and between subjects, and (f) in populations including college students, adults from the broader community, and online samples. 
We posit that this bias may arise due to principles of object perception, and we show how it has downstream implications for decision making.}, doi = {10.3758/s13423-012-0247-5}, keywords = {barcharts}, publisher = {Springer Nature}, url = {https://doi.org/10.3758%2Fs13423-012-0247-5}, } @Article{Godau2016, author = {Claudia Godau and Tom Vogelgesang and Robert Gaschler}, title = {Perception of bar graphs - A biased impression?}, journal = {Computers in Human Behavior}, year = {2016}, volume = {59}, pages = {67--73}, month = {jun}, abstract = {Computers provide ubiquitous contact to data graphs. Yet, employing the power of the human perception system bears the risk of being subject to its biases. Data graphs are used to present the means of different conditions and are supposed to convey group information, such as variability across conditions, as well as the grand average. Across three samples, we tested whether there is a bias in the central tendency perceived in bar graphs, 53 participants with a mean age of 27 years (plus replication with N = 38, mean age = 23 years). Participants were provided with bar and point graphs and had to judge their means. We found that the mean value was systematically underestimated in bar graphs (but not in point graphs) across different methods of testing for biased evaluation. In a second experiment (N = 80, mean age = 24 years) we replicated and extended this finding, by testing the effect of outliers on the bias in average estimation. For instance, outliers might trigger controlled processing. Yet, the underestimation of the average was replicated and was not affected by including outliers – despite that the estimate was torn towards the outlier. 
Thus, we should be cautious with relying on bar graphs when a bias free estimate of the grand average is relevant.}, doi = {10.1016/j.chb.2016.01.036}, keywords = {barcharts}, publisher = {Elsevier {BV}}, url = {https://doi.org/10.1016%2Fj.chb.2016.01.036}, } @InProceedings{Pandey2015, author = {Anshul Vikram Pandey and Katharina Rall and Margaret L. Satterthwaite and Oded Nov and Enrico Bertini}, title = {How Deceptive are Deceptive Visualizations?}, booktitle = {Proceedings of the 33rd Annual {ACM} Conference on Human Factors in Computing Systems - {CHI} '15}, year = {2015}, publisher = {{ACM} Press}, abstract = {In this paper, we present an empirical analysis of deceptive visualizations. We start with an in-depth analysis of what deception means in the context of data visualization, and categorize deceptive visualizations based on the type of deception they lead to. We identify popular distortion techniques and the type of visualizations those distortions can be applied to, and formalize why deception occurs with those distortions. We create four deceptive visualizations using the selected distortion techniques, and run a crowdsourced user study to identify the deceptiveness of those visualizations. We then present the findings of our study and show how deceptive each of these visual distortion techniques are, and for what kind of questions the misinterpretation occurs. We also analyze individual differences among participants and present the effect of some of those variables on participants' responses. 
This paper presents a first step in empirically studying deceptive visualizations, and will pave the way for more research in this direction.}, doi = {10.1145/2702123.2702608}, keywords = {deception}, url = {https://doi.org/10.1145%2F2702123.2702608}, } @InProceedings{Andrews2007, author = {Keith Andrews and Janka Kasanicka}, title = {A Comparative Study of Four Hierarchy Browsers using the Hierarchical Visualisation Testing Environment ({HVTE})}, booktitle = {2007 11th International Conference Information Visualization ({IV} '07)}, year = {2007}, month = {jul}, publisher = {{IEEE}}, abstract = {Four hierarchy browsers were compared in a counterbalanced repeated measures study with 32 test users. The four browsers tested were in-house implementations of 1) a Windows Explorer style tree view, 2) an information pyramids browser, 3) a treemap browser, and 4) a hyperbolic browser. Each user performed eight tasks with each browser. Task completion time, subjective ratings, and overall preference data were collected. Almost no significant differences in performance were found, but users significantly preferred the tree view browser. The four browsers are implemented as part of the Hierarchical Visualisation System (HVS), a Java framework for visualising hierarchies. 
The Hierarchical Visualisation Testing Environment (HVTE) is a semi-automated testing environment built on top of HVS, which presents a sequence of tasks from a test case database to the user, together with an associated browser and test hierarchy, and automates the collection of timing data.}, doi = {10.1109/iv.2007.8}, keywords = {hierachy}, url = {https://doi.org/10.1109%2Fiv.2007.8}, } @InProceedings{Kobsa2004, author = {Kobsa, Alfred}, title = {User Experiments with Tree Visualization Systems}, booktitle = {Proceedings of the IEEE Symposium on Information Visualization}, year = {2004}, series = {INFOVIS '04}, pages = {9--16}, address = {Washington, DC, USA}, publisher = {IEEE Computer Society}, abstract = {This paper describes a comparative experiment with five well-known tree visualization systems, and Windows Explorer as a baseline system. Subjects performed tasks relating to the structure of a directory hierarchy, and to attributes of files and directories. Task completion times, correctness and user satisfaction were measured, and video recordings of subjects’ interaction with the systems were made. Significant system and task type effects and an interaction between system and task type were found. Qualitative analyses of the video recordings were thereupon conducted to determine reasons for the observed differences, resulting in several findings and design recommendations as well as implications for future experiments with tree visualization systems.}, acmid = {1038776}, doi = {10.1109/INFOVIS.2004.70}, isbn = {0-7803-8779-3}, keywords = {hierachy}, numpages = {8}, url = {http://dx.doi.org/10.1109/INFOVIS.2004.70}, } @Article{Cleveland1985, author = {W. S. Cleveland and R. 
McGill}, title = {Graphical Perception and Graphical Methods for Analyzing Scientific Data}, journal = {Science}, year = {1985}, volume = {229}, number = {4716}, pages = {828--833}, month = {aug}, abstract = {Graphical perception is the visual decoding of the quantitative and qualitative information encoded on graphs. Recent investigations have uncovered basic principles of human graphical perception that have important implications for the display of data. The computer graphics revolution has stimulated the invention of many graphical methods for analyzing and presenting scientific data, such as box plots, two-tiered error bars, scatterplot smoothing, dot charts, and graphing on a log base 2 scale.}, doi = {10.1126/science.229.4716.828}, publisher = {American Association for the Advancement of Science ({AAAS})}, url = {https://doi.org/10.1126%2Fscience.229.4716.828}, } @Article{Cleveland1987, author = {William S. Cleveland and Robert McGill}, title = {Graphical Perception: The Visual Decoding of Quantitative Information on Graphical Displays of Data}, journal = {Journal of the Royal Statistical Society. Series A (General)}, year = {1987}, volume = {150}, number = {3}, pages = {192}, abstract = {Studies in graphical perception, both theoretical and experimental, provide a scientific foundation for the construction area of statistical graphics. From these studies a paradigm that has important applications for practice has begun to emerge. The paradigm is based on elementary codes: Basic geometric and textural aspects of a graph that encode the quantitative information. The methodology that can be invoked to study graphical perception is illustrated by an investigation of the shape parameter of a two-variable graph, a topic that has had much discussion, but little scientific study, for at least 70 years.}, doi = {10.2307/2981473}, publisher = {{JSTOR}}, url = {https://doi.org/10.2307%2F2981473}, } @Article{Cleveland1982a, author = {William S. Cleveland and Charles S. 
Harris and Robert McGill}, title = {Judgments of Circle Sizes on Statistical Maps}, journal = {Journal of the American Statistical Association}, year = {1982}, volume = {77}, number = {379}, pages = {541--547}, month = {sep}, abstract = {Symbols whose sizes code a variable are often superimposed on a map to show how the value of the variable changes geographically. In many cases the areas of the symbols are proportional to the values of the variable. But previous psychological experiments indicate that people's judgments of area typically are not proportional to area, but rather to area raised to a power less than one. In many of these studies, however, the symbols are presented one at a time. Here we describe an experiment that tests whether this experimental laboratory result extends to statistical maps. We had 24 viewers judge sets of circles presented simultaneously, either with or without maplike grid ticks, labels, scale, and border. With these multiple-circle displays, judgments were related to actual area by power functions with an average exponent close to one, implying that it is appropriate to code symbols by area on statistical maps.}, doi = {10.1080/01621459.1982.10477844}, keywords = {circles}, publisher = {Informa {UK} Limited}, url = {https://doi.org/10.1080%2F01621459.1982.10477844}, } @Article{Gilmartin1981, author = {Patricia P. Gilmartin}, title = {Influences of map context on circle perception}, journal = {Annals of the Association of American Geographers}, year = {1981}, volume = {71}, number = {2}, pages = {253--258}, month = {jun}, abstract = {The nature of the cartographic environment within which a proportional circle is seen can influence the perceived characteristics of the map symbol. The apparent sizes of circles may be biased by: 1) the effect of size contrast between a target circle and closely adjacent circles; 2) the presence or absence on the map of internal borders; and 3) the size of the target circle itself. 
Statistical analyses indicate that size contrasts between a given map symbol and other symbols in the immediate area do alter the perceived size of the symbol. Target circles that were viewed within the context of smaller circles appeared to be larger (by an average of thirteen percent) than the same target circles when they were seen among large context circles. The presence of internal map borders tended to reduce the differences in estimates that resulted from size contrast between a target circle and adjacent circles}, doi = {10.1111/j.1467-8306.1981.tb01351.x}, keywords = {circles}, publisher = {Informa {UK} Limited}, url = {https://doi.org/10.1111%2Fj.1467-8306.1981.tb01351.x}, } @Article{Tversky2002, author = {Barbara Tversky and Julie Bauer Morrison and Mireille Betrancourt}, title = {Animation: can it facilitate?}, journal = {International Journal of Human-Computer Studies}, year = {2002}, volume = {57}, number = {4}, pages = {247--262}, month = {oct}, abstract = {Graphics have been used since ancient times to portray things that are inherently spatiovisual, like maps and building plans. More recently, graphics have been used to portray things that are metaphorically spatiovisual, like graphs and organizational charts. The assumption is that graphics can facilitate comprehension, learning, memory, communication and inference. Assumptions aside, research on static graphics has shown that only carefully designed and appropriate graphics prove to be beneficial for conveying complex systems. Effective graphics conform to the Congruence Principle according to which the content and format of the graphic should correspond to the content and format of the concepts to be conveyed. From this, it follows that animated graphics should be effective in portraying change over time. Yet the research on the efficacy of animated over static graphics is not encouraging. 
In cases where animated graphics seem superior to static ones, scrutiny reveals lack of equivalence between animated and static graphics in content or procedures; the animated graphics convey more information or involve interactivity. Animations of events may be ineffective because animations violate the second principle of good graphics, the Apprehension Principle, according to which graphics should be accurately perceived and appropriately conceived. Animations are often too complex or too fast to be accurately perceived. Moreover, many continuous events are conceived of as sequences of discrete steps. Judicious use of interactivity may overcome both these disadvantages. Animations may be more effective than comparable static graphics in situations other than conveying complex systems, for example, for real time reorientations in time and space.}, doi = {10.1006/ijhc.2002.1017}, keywords = {animation}, publisher = {Elsevier {BV}}, url = {https://doi.org/10.1006%2Fijhc.2002.1017}, } @Article{Beach1966, author = {Lee Roy Beach and Thomas S. Scopp}, title = {Inferences about correlations}, journal = {Psychonomic Science}, year = {1966}, volume = {6}, number = {6}, pages = {253--254}, month = {jun}, abstract = {The study examined intuitive inferences about population correlations on the basis of samples of data from the populations. The Ss saw samples of bivariate observations, inferred the signs of the population correlations, and stated their confidence in the inferences. 
Optimality of inferences and of confidence both increased as the magnitude of the sample correlations increased.}, doi = {10.3758/bf03328053}, keywords = {scatterplots}, publisher = {Springer Nature}, url = {https://doi.org/10.3758%2Fbf03328053}, } @InProceedings{Correll2017, author = {Michael Correll and Jeffrey Heer}, title = {Regression by Eye: Estimating Trends in Bivariate Visualizations}, booktitle = {ACM Human Factors in Computing Systems (CHI)}, year = {2017}, abstract = {Observing trends and predicting future values are common tasks for viewers of bivariate data visualizations. As many charts do not explicitly include trend lines or related statistical summaries, viewers often visually estimate trends directly from a plot. How reliable are the inferences viewers draw when performing such regression by eye? Do particular visualization designs or data features bias trend perception? We present a series of crowdsourced experiments that assess the accuracy of trends estimated using regression by eye across a variety of bivariate visualizations, and examine potential sources of bias in these estimations. 
We find that viewers accurately estimate trends in many standard visualizations of bivariate data, but that both visual features (e.g., “within-the- bar” bias) and data features (e.g., the presence of outliers) can result in visual estimates that systematically diverge from standard least-squares regression models.}, doi = {10.1145/3025453.3025922}, keywords = {scatterplots}, url = {http://idl.cs.washington.edu/papers/regression-by-eye}, } @Article{Kanjanabose2015, author = {Rassadarie Kanjanabose and Alfie Abdul-Rahman and Min Chen}, title = {A Multi-task Comparative Study on Scatter Plots and Parallel Coordinates Plots}, journal = {Computer Graphics Forum}, year = {2015}, volume = {34}, number = {3}, pages = {261--270}, month = {jun}, abstract = {Previous empirical studies for comparing parallel coordinates plots and scatter plots showed some uncertainty about their relative merits. Some of these studies focused on the task of value retrieval, where visualization usually has a limited advantage over reading data directly. In this paper, we report an empirical study that compares user performance, in terms of accuracy and response time, in the context of four different visualization tasks, namely value retrieval, clustering, outlier detection, and change detection. In order to evaluate the relative merits of the two types of plots with a common base line (i.e., reading data directly), we included three forms of stimuli, data tables, scatter plots, and parallel coordinate plots. Our results show that data tables are better suited for the value retrieval task, while parallel coordinates plots generally outperform the two other visual representations in three other tasks. Subjective feedbacks from the users are also consistent with the quantitative analyses. 
As visualization is commonly used for aiding multiple observational and analytical tasks, our results provided new evidence to support the prevailing enthusiasm for parallel coordinates plots in the field of visualization.}, doi = {10.1111/cgf.12638}, keywords = {scatterplots, parallel-coordinates}, publisher = {Wiley-Blackwell}, url = {https://doi.org/10.1111%2Fcgf.12638}, } @Article{Meyer1992, author = {Joachim Meyer and David Shinar}, title = {Estimating Correlations from Scatterplots}, journal = {Human Factors}, year = {1992}, volume = {34}, number = {3}, pages = {335--349}, abstract = {Previous attempts to establish the function relating intuitive estimates of correlations from scatterplots to accepted statistical measures have led to unsatisfying results. In this study two experiments dealt with the effects of the statistical training of the viewer and various characteristics of the display on estimates. Statistical knowledge was related to higher estimates of correlations and the use of a wider range of values, but people with and without statistical knowledge were equally affected by the type of dispersion of the point cloud, the mere display of the regression line, and the slope of the regression line. Results indicate that estimates of correlations from scatterplots are partly based on perceptual processes that are influenced by visual properties of the display and are unrelated to the cognitive structures created by formal statistical training. 
}, doi = {10.1177/001872089203400307}, eprint = {http://dx.doi.org/10.1177/001872089203400307}, url = {http://dx.doi.org/10.1177/001872089203400307}, } @Article{Lewandowsky1989a, author = {Stephan Lewandowsky and Ian Spence}, title = {The Perception of Statistical Graphs}, journal = {Sociological Methods \& Research}, year = {1989}, volume = {18}, number = {2-3}, pages = {200--242}, month = {nov}, abstract = {Graphs have been an essential tool for the analysis and communication of statistical data for about 200 years. Despite widespread use and their importance in science, business, and many other walks of life, relatively little is known about how people perceive and process statistical graphs. This article reviews several empirical studies designed to explore the suitability of various graphs for a variety of purposes, and discusses the relevant theoretical psychological literature. The role of traditional psychophysics is considered, especially in connection with the long-running dispute concerning the relative merits of pie and bar charts. The review also discusses experiments on the perception of scatterplots and the use of multivariate displays, and points out the need for more empirical work.}, doi = {10.1177/0049124189018002002}, publisher = {{SAGE} Publications}, url = {https://doi.org/10.1177%2F0049124189018002002}, } @InProceedings{Sher2017, author = {Varshita Sher and Karen G. Bemis and Ilaria Liccardi and Min Chen}, title = {An Empirical Study on the Reliability of Perceiving Correlation Indices using Scatterplots}, booktitle = {Eurographics Conference on Visualization (EuroVis)}, year = {2017}, abstract = {Scatterplots have been in use for about two centuries, primarily for observing the relationship between two variables and commonly for supporting correlation analysis. 
In this paper, we report an empirical study that examines how humans’ perception of correlation using scatterplots relates to the Pearson’s product-moment correlation coefficient (PPMCC) – a commonly used statistical measure of correlation. In particular, we study human participants’ estimation of correlation under different conditions, e.g., different PPMCC values, different densities of data points, different levels of symmetry of data enclosures, and different patterns of data distribution. As the participants were instructed to estimate the PPMCC of each stimulus scatterplot, the difference between the estimated and actual PPMCC is referred to as an offset. The results of the study show that varying PPMCC values, symmetry of data enclosure, or data distribution does have an impact on the average offsets, while only large variations in density cause an impact that is statistically significant. This study indicates that humans’ perception of correlation using scatterplots does not correlate with computed PPMCC in a consistent manner. The magnitude of offsets may be affected not only by the difference between individuals, but also by geometric features of data enclosures. It suggests that visualizing scatterplots does not provide adequate support to the task of retrieving their corresponding PPMCC indicators, while the underlying model of humans’ perception of correlation using scatterplots ought to feature other variables in addition to PPMCC. 
The paper also includes a theoretical discussion on the cost-benefit of using scatterplots.}, keywords = {scatterplots}, } @Article{Johansson2008, author = {Jimmy Johansson and Camilla Forsell and Mats Lind and Matthew Cooper}, title = {Perceiving Patterns in Parallel Coordinates: Determining Thresholds for Identification of Relationships}, journal = {Information Visualization}, year = {2008}, volume = {7}, number = {2}, pages = {152--162}, month = {jun}, abstract = {This article presents a study that investigates the ability of humans to perceive relationships (patterns) in parallel coordinates, an ability that is crucial to the use of this popular visualization technique. It introduces a visual quality metric, acceptable distortions of patterns, which establishes the level of noise that may be present in data while allowing accurate identification of patterns. This metric was used to assess perceptual performance of standard 2D parallel coordinates and multi-relational 3D parallel coordinates in two experiments. In multi-relational 3D parallel coordinates the axes are placed on a circle with a focusaxis in the centre, allowing a simultaneous analysis between the focus variable and all other variables. The experiments aimed to determine the maximum number of variables that can be, from a user's point of view, efficiently used in a multi-relational 3D parallel coordinates display and to present a first attempt to study users' ability to analyse noisy data in parallel coordinates. The results show that, in terms of the acceptable level of noise in data, a multi-relational 3D parallel coordinates visualization having 11 axes (variables) is as efficient as standard 2D parallel coordinates. 
Visualizing a larger number of variables would possibly require a greater amount of manipulation of the visualization and thus be less efficient.}, doi = {10.1057/palgrave.ivs.9500166}, keywords = {parallel-coordinates}, publisher = {{SAGE} Publications}, url = {https://doi.org/10.1057%2Fpalgrave.ivs.9500166}, } @Article{Kuang2012, author = {X. Kuang and H. Zhang and S. Zhao and M.J. McGuffin}, title = {Tracing Tuples Across Dimensions: A Comparison of Scatterplots and Parallel Coordinate Plots}, journal = {Computer Graphics Forum}, year = {2012}, volume = {31}, number = {3pt4}, pages = {1365--1374}, month = {jun}, abstract = {One of the fundamental tasks for analytic activity is retrieving (i.e., reading) the value of a particular quantity in an information visualization. However, few previous studies have compared user performance in such value retrieval tasks for different visualizations. We present an experimental comparison of user performance (time and error distance) across four multivariate data visualizations. Three variants of scatterplot (SCP) visualizations, namely SCPs with common vertical axes (SCP-common), SCPs with a staircase layout (SCP-staircase), and SCPs with rotated axes between neighboring cells (SCP-rotated), and a baseline parallel coordinate plots (PCP) were compared. Results show that the baseline PCP is better than SCP-rotated and SCP-staircase under all conditions, while the difference between SCP-common and PCP depends on the dimensionality and density of the dataset. PCP shows advantages over SCP-common when the dimensionality and density of the dataset are low, but SCP-common eventually outperforms PCP as data dimensionality and density increase. 
The results suggest guidelines for the use of SCPs and PCPs that can benefit future researchers and practitioners}, doi = {10.1111/j.1467-8659.2012.03129.x}, keywords = {scatterplots, parallel-coordinates}, publisher = {Wiley-Blackwell}, url = {https://doi.org/10.1111%2Fj.1467-8659.2012.03129.x}, } @InProceedings{Siirtola2009, author = {Harri Siirtola and Tuuli Laivo and Tomi Heimonen and Kari-Jouko Räihä}, title = {Visual Perception of Parallel Coordinate Visualizations}, booktitle = {2009 13th International Conference Information Visualisation}, year = {2009}, month = {jul}, publisher = {{IEEE}}, abstract = {Parallel coordinates is a visualization technique that provides an unbiased representation of high-dimensional data. The parallel configuration of axes treats data dimensions uniformly and is well suited for exploratory visualization. However, first-time users of parallel coordinate visualizations can find the representation confusing and difficult to understand.We used eye tracking to study how parallel coordinate visualizations are perceived, and compared the results to the optimal visual scan path required to complete the tasks. 
The results indicate that even first-time users quickly learn how to use parallel coordinate visualizations, pay attention to the correct task-specific areas in the visualization, and become rapidly proficient with it.}, doi = {10.1109/iv.2009.25}, keywords = {scatterplots, parallel-coordinates}, url = {https://doi.org/10.1109%2Fiv.2009.25}, } @Book{Ware2012, title = {Information visualization: perception for design}, publisher = {Morgan Kaufmann}, year = {2012}, author = {Ware, Colin}, address = {Boston}, isbn = {978-0123814647}, } @Book{Kosslyn2006, title = {Graph design for the eye and mind}, publisher = {Oxford University Press}, year = {2006}, author = {Kosslyn, Stephen}, address = {New York}, isbn = {978-0195311846}, } @Book{Cleveland1985a, title = {The elements of graphing data}, publisher = {Wadsworth Advanced Books and Software}, year = {1985}, author = {Cleveland, William}, address = {Monterey, Calif}, isbn = {9780534037307}, } @Article{Micallef2017, author = {Luana Micallef and Gregorio Palmas and Antti Oulasvirta and Tino Weinkauf}, title = {Towards Perceptual Optimization of the Visual Design of Scatterplots}, journal = {{IEEE} Transactions on Visualization and Computer Graphics}, year = {2017}, volume = {23}, number = {6}, pages = {1588--1599}, month = {jun}, abstract = {Designing a good scatterplot can be difficult for non-experts in visualization, because they need to decide on many parameters, such as marker size and opacity, aspect ratio, color, and rendering order. This paper contributes to research exploring the use of perceptual models and quality metrics to set such parameters automatically for enhanced visual quality of a scatterplot. A key consideration in this paper is the construction of a cost function to capture several relevant aspects of the human visual system, examining a scatterplot design for some data analysis task. 
We show how the cost function can be used in an optimizer to search for the optimal visual design for a user’s dataset and task objectives (e.g., “reliable linear correlation estimation is more important than class separation”). The approach is extensible to different analysis tasks. To test its performance in a realistic setting, we pre-calibrated it for correlation estimation, class separation, and outlier detection. The optimizer was able to produce designs that achieved a level of speed and success comparable to that of those using human-designed presets (e.g., in R or MATLAB). Case studies demonstrate that the approach can adapt a design to the data, to reveal patterns without user intervention.}, doi = {10.1109/tvcg.2017.2674978}, keywords = {scatterplots}, publisher = {Institute of Electrical and Electronics Engineers ({IEEE})}, url = {https://doi.org/10.1109%2Ftvcg.2017.2674978}, } @Article{Micallef2012, author = {Luana Micallef and Pierre Dragicevic and Jean-Daniel Fekete}, title = {Assessing the Effect of Visualizations on Bayesian Reasoning through Crowdsourcing}, journal = {{IEEE} Transactions on Visualization and Computer Graphics}, year = {2012}, volume = {18}, number = {12}, pages = {2536--2545}, month = {dec}, abstract = {People have difficulty understanding statistical information and are unaware of their wrong judgments, particularly in Bayesian reasoning. Psychology studies suggest that the way Bayesian problems are represented can impact comprehension, but few visual designs have been evaluated and only populations with a specific background have been involved. In this study, a textual and six visual representations for three classic problems were compared using a diverse subject pool through crowdsourcing. Visualizations included area-proportional Euler diagrams, glyph representations, and hybrid diagrams combining both. 
Our study failed to replicate previous findings in that subjects’ accuracy was remarkably lower and visualizations exhibited no measurable benefit. A second experiment confirmed that simply adding a visualization to a textual Bayesian problem is of little help, even when the text refers to the visualization, but suggests that visualizations are more effective when the text is given without numerical values. We discuss our findings and the need for more such experiments to be carried out on heterogeneous populations of non-experts.}, doi = {10.1109/tvcg.2012.199}, publisher = {Institute of Electrical and Electronics Engineers ({IEEE})}, url = {https://doi.org/10.1109%2Ftvcg.2012.199}, } @Article{Wickham2010, author = {Hadley Wickham and Dianne Cook and Heike Hofmann and Andreas Buja}, title = {Graphical inference for infovis}, journal = {IEEE Transactions on Visualization and Computer Graphics}, year = {2010}, volume = {16}, number = {6}, pages = {973--979}, abstract = {How do we know if what we see is really there? When visualizing data, how do we avoid falling into the trap of apophenia where we see patterns in random noise? Traditionally, infovis has been concerned with discovering new relationships, and statistics with preventing spurious relationships from being reported. We pull these opposing poles closer with two new techniques for rigorous statistical inference of visual discoveries. 
The "Rorschach" helps the analyst calibrate their understanding of uncertainty and the "line-up" provides a protocol for assessing the significance of visual discoveries, protecting against the discovery of spurious structure.}, publisher = {IEEE}, } @Article{Marriott2012, author = {Kim Marriott and Helen Purchase and Michael Wybrow and Cagatay Goncu}, title = {Memorability of visual features in network diagrams}, journal = {IEEE Transactions on Visualization and Computer Graphics}, year = {2012}, volume = {18}, number = {12}, pages = {2477--2485}, abstract = {We investigate the cognitive impact of various layout features: symmetry, alignment, collinearity, axis alignment and orthogonality&#8212;on the recall of network diagrams (graphs). This provides insight into how people internalize these diagrams and what features should or shouldn't be utilised when designing static and interactive network-based visualisations. Participants were asked to study, remember, and draw a series of small network diagrams, each drawn to emphasise a particular visual feature. The visual features were based on existing theories of perception, and the task enabled visual processing at the visceral level only. Our results strongly support the importance of visual features such as symmetry, collinearity and orthogonality, while not showing any significant impact for node-alignment or parallel edges.}, doi = {10.1109/TVCG.2012.245}, publisher = {IEEE}, } @InProceedings{Velez2005, author = {Maria C. Velez and Deborah Silver and Marilyn Tremaine}, title = {Understanding Visualization through Spatial Ability Differences}, booktitle = {VIS 05. IEEE Visualization, 2005.}, year = {2005}, pages = {511--518}, publisher = {Proceedings of IEEE Visualization}, abstract = {Little is known about the cognitive abilities which influence the comprehension of scientific and information visualizations and what properties of the visualization affect comprehension. 
Our goal in this paper is to understand what makes visualizations difficult. We address this goal by examining the spatial ability differences in a diverse population selected for spatial ability variance. For example, how is spatial ability related to visualization comprehension? What makes a particular visualization difficult or time intensive for specific groups of subjects? In this paper, we present the results of an experiment designed to answer these questions. Fifty-six subjects were tested on a basic visualization task and given standard paper tests of spatial abilities. An equal number of males and females were recruited in this study in order to increase spatial ability variance. Our results show that high spatial ability is correlated with accuracy on our three-dimensional visualization test, but not with time. High spatial ability subjects also had less difficulty with object complexity and the hidden properties of an object.}, url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.91.1955}, doi = {10.1109/visual.2005.1532836}, } @Article{Javed2010, author = {Waqas Javed and Bryan McDonnel and Niklas Elmqvist}, title = {Graphical perception of multiple time series.}, journal = {IEEE transactions on visualization and computer graphics}, year = {2010}, volume = {16}, number = {6}, pages = {927--34}, month = {jan}, issn = {1077-2626}, abstract = {Line graphs have been the visualization of choice for temporal data ever since the days of William Playfair (1759-1823), but realistic temporal analysis tasks often include multiple simultaneous time series. In this work, we explore user performance for comparison, slope, and discrimination tasks for different line graph techniques involving multiple time series. Our results show that techniques that create separate charts for each time series--such as small multiples and horizon graphs--are generally more efficient for comparisons across time series with a large visual span. 
On the other hand, shared-space techniques--like standard line graphs--are typically more efficient for comparisons over smaller visual spans where the impact of overlap and clutter is reduced.}, doi = {10.1109/TVCG.2010.162}, pmid = {20975129}, publisher = {Published by the IEEE Computer Society}, url = {http://www.computer.org/portal/web/csdl/doi/10.1109/TVCG.2010.162}, } @InProceedings{Healey, author = {C.G. Healey}, title = {Choosing effective colours for data visualization}, booktitle = {Proceedings of Seventh Annual IEEE Visualization '96}, pages = {263--270,}, publisher = {ACM}, abstract = {We describe a technique for choosing multiple colours for use during data visualization. Our goal is a systematic method for maximizing the total number of colours available for use, while still allowing an observer to rapidly and accurately search a display for any one of the given colours. Previous research suggests that we need to consider three separate effects during colour selection: colour distance, linear separation, and colour category. We describe a simple method for measuring and controlling all of these effects. Our method was tested by performing a set of target identification studies; we analysed the ability of thirty eight observers to find a colour target in displays that contained differently coloured background elements. 
Results showed our method can be used to select a group of colours that will provide good differentiation between data elements during data visualization.}, doi = {10.1109/VISUAL.1996.568118}, isbn = {0-89791-864-9}, url = {http://ieeexplore.ieee.org/xpl/freeabs{\_}all.jsp?arnumber=568118 http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=568118}, } @Article{Ware2008, author = {Colin Ware}, title = {Toward a Perceptual Theory of Flow Visualization}, journal = {IEEE Computer Graphics and Applications}, year = {2008}, volume = {28}, number = {2}, pages = {6--11}, month = {mar}, issn = {0272-1716}, abstract = {Currently, most researchers in visualization pay very little attention to vision science. The exception is when the effective use of color is the subject. Little research in flow visualization includes a discussion of the related perceptual theory. Nor does it include an evaluation of effectiveness of the display techniques that are generated. This is so, despite Laidlaw's paper showing that such an evaluation is relatively straightforward. Of course, it's not always necessary to relate visualization research to perceptual theory. If the purpose of the research is to increase the efficiency of an algorithm, then the proper test is one of efficiency, not of perceptual validity. But when a new representation of data is the subject of research, addressing how perceptually effective it is - either by means of a straightforward empirical comparison with existing methods or analytically, relating the new mapping to perceptual theory - should be a matter of course. 
A strong interdisciplinary approach, including the disciplines of perception, design, and computer science will produce better science and better design in that empirically and theoretically validated visual display techniques will result.}, doi = {10.1109/MCG.2008.39}, url = {http://wwwx.cs.unc.edu/{~}taylorr/Comp715/papers/04459858.pdf http://ieeexplore.ieee.org/xpls/abs{\_}all.jsp?arnumber=4459858 http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=4459858}, } @InProceedings{Huber2005, author = {Daniel E Huber and Christopher G Healey}, title = {Visualizing data with motion}, booktitle = {Visualization, 2005. VIS 05. IEEE}, year = {2005}, pages = {527--534}, organization = {IEEE}, abstract = {This paper describes an experimental study of three perceptual properties of motion: flicker, direction, and velocity. Our goal is to understand how to apply these properties to represent data in a visualization environment. Results from our experiments show that all three properties can encode multiple data values, but that minimum visual differences are needed to ensure rapid and accurate target detection: flicker must be coherent and must have a cycle length of 120 milliseconds or greater, direction must differ by at least 20/spl deg/, and velocity must differ by at least 0.43/spl deg/ of subtended visual angle. We conclude with an overview of how we are applying our results to real-world data, and then discuss future work we plan to pursue.}, doi = {10.1109/VISUAL.2005.1532838}, } @InProceedings{Haroz2006, author = {Steve Haroz and Kwan-Liu Ma}, title = {Natural Visualizations}, booktitle = {Eurovis 2006}, year = {2006}, pages = {43--50}, abstract = {This paper demonstrates the prevalence of a shared characteristic between visualizations and images of nature. We have analyzed visualization competitions and user studies of visualizations and found that the more preferred, better performing visualizations exhibit more natural characteristics. 
Due to our brain being wired to perceive natural images [SO01], testing a visualization for properties similar to those of natural images can help show how well our brain is capable of absorbing the data. In turn, a metric that finds a visualization’s similarity to a natural image may help determine the effectiveness of that visualization. We have found that the results of comparing the sizes and distribution of the objects in a visualization with those of natural standards strongly correlate to one’s preference of that visualization.}, doi = {10.2312/VisSym/EuroVis06/043-050}, isbn = {3-905673-31-2}, url = {http://steveharoz.com/research/natural/NaturalVisualizations.pdf}, } @InProceedings{Haroz2010, author = {Steve Haroz and David Whitney}, title = {Temporal Thresholds for Feature Detection in Flow Visualization}, booktitle = {Proceedings of the 7th Symposium on Applied Perception in Graphics and Visualization}, year = {2010}, series = {APGV '10}, pages = {163--163}, abstract = {Optic flow is the coherent motion of a region in the visual field, and the visual system is astoundingly effective at perceiving this large-scale and complex percept. Whenever you rotate your head or move through a scene, nearly every part of your visual field moves at a different speed and velocity. For example, as you walk through a forest, the trees in the horizon appear to slowly expand, and the trees on your sides appear to rapidly move to the extremities of your vision visual field and disappear. In spite of such a myriad of local motions, all of this information is concisely summarized as moving forward. Such summary encoding of a large variety of elements makes this percept a potentially useful tool to exploit for visualization. 
For that reason, our goal in this study is to test the visual system's ability to use optic flow for a basic visualization task, feature detection.}, doi = {10.1145/1836248.1836285}, } @Article{Haroz2012, author = {Steve Haroz and David Whitney}, title = {How Capacity Limits of Attention Influence Information Visualization Effectiveness}, journal = {IEEE Transactions on Visualization and Computer Graphics}, year = {2012}, volume = {18}, number = {12}, pages = {2402--2410}, month = {dec}, issn = {1077-2626}, abstract = {In this paper, we explore how the capacity limits of attention influence the effectiveness of information visualizations. We conducted a series of experiments to test how visual feature type (color vs. motion), layout, and variety of visual elements impacted user performance. The experiments tested users' abilities to (1) determine if a specified target is on the screen, (2) detect an odd-ball, deviant target, different from the other visible objects, and (3) gain a qualitative overview by judging the number of unique categories on the screen. Our results show that the severe capacity limits of attention strongly modulate the effectiveness of information visualizations, particularly the ability to detect unexpected information. Keeping in mind these capacity limits, we conclude with a set of design guidelines which depend on a visualization's intended use.}, doi = {10.1109/TVCG.2012.233}, url = {http://steveharoz.com/research/attention/}, } @Article{Szafir2016, author = {Danielle Albers Szafir and Steve Haroz and Michael Gleicher and Steven Franconeri}, title = {Four types of ensemble coding in data visualizations.}, journal = {Journal of vision}, year = {2016}, volume = {16}, number = {5}, pages = {11}, month = {mar}, issn = {1534-7362}, abstract = {Ensemble coding supports rapid extraction of visual statistics about distributed visual information. 
Researchers typically study this ability with the goal of drawing conclusions about how such coding extracts information from natural scenes. Here we argue that a second domain can serve as another strong inspiration for understanding ensemble coding: graphs, maps, and other visual presentations of data. Data visualizations allow observers to leverage their ability to perform visual ensemble statistics on distributions of spatial or featural visual information to estimate actual statistics on data. We survey the types of visual statistical tasks that occur within data visualizations across everyday examples, such as scatterplots, and more specialized images, such as weather maps or depictions of patterns in text. We divide these tasks into four categories: identification of sets of values, summarization across those values, segmentation of collections, and estimation of structure. We point to unanswered questions for each category and give examples of such cross-pollination in the current literature. Increased collaboration between the data visualization and perceptual psychology research communities can inspire new solutions to challenges in visualization while simultaneously exposing unsolved problems in perception research.}, doi = {10.1167/16.5.11}, pmid = {26982369}, url = {http://jov.arvojournals.org/article.aspx?doi=10.1167/16.5.11 http://www.ncbi.nlm.nih.gov/pubmed/26982369}, } @Article{Aigner2011, author = {W. Aigner and C. Kainz and R. Ma and S. Miksch}, title = {Bertin was Right: An Empirical Evaluation of Indexing to Compare Multivariate Time-Series Data Using Line Plots}, journal = {Computer Graphics Forum}, year = {2011}, volume = {30}, number = {1}, pages = {215--228}, month = {jan}, abstract = {Line plots are very well suited for visually representing time-series. However, several difficulties arise when multivariate heterogeneous time-series data is displayed and compared visually. 
Especially, if the developments and trends of time-series of different units or value ranges need to be compared, a straightforward overlay could be visually misleading. To mitigate this, visualization pioneer Jacques Bertin presented a method called indexing that transforms data into comparable units for visual representation. In this paper, we want to provide empirical evidence for this method and present a comparative study of the three visual comparison methods linear scale with juxtaposition, log scale with superimposition and indexing. Although for task completion times, indexing only shows slight advantages, the results support the assumption that the indexing method enables the user to perform comparison tasks with a significantly lower error rate. Furthermore, a post-test questionnaire showed that the majority of the participants favour the indexing method over the two other comparison methods.}, doi = {10.1111/j.1467-8659.2010.01845.x}, publisher = {Wiley-Blackwell}, url = {https://doi.org/10.1111%2Fj.1467-8659.2010.01845.x}, } @Article{Hollands2000, author = {J. G. Hollands and Brian P. Dyre}, title = {Bias in proportion judgments: The cyclical power model.}, journal = {Psychological Review}, year = {2000}, volume = {107}, number = {3}, pages = {500--524}, abstract = {When participants make part–whole proportion judgments, systematic bias is commonly observed. In some studies, small proportions are overestimated and large proportions underestimated; in other studies, the reverse pattern occurs. Sometimes the bias pattern repeats cyclically with a higher frequency (e.g., overestimation of proportions less than .25 and between .5 and .75; underestimation otherwise). To account for the various bias patterns, a cyclical power model was derived from Stevens' power law. 
The model proposes that the amplitude of the bias pattern is determined by the Stevens exponent, β (i.e., the stimulus continuum being judged), and that the frequency of the pattern is determined by a choice of intermediate reference points in the stimulus. When β < 1, an over-then-under pattern is predicted; when β > 1, the under-then-over pattern is predicted. Two experiments confirming the model's assumptions are described. A mixed-cycle version of the model is also proposed that predicts observed asymmetries in bias patterns when the set of reference points varies across trials.}, doi = {10.1037/0033-295x.107.3.500}, publisher = {American Psychological Association ({APA})}, url = {https://doi.org/10.1037%2F0033-295x.107.3.500}, } @Article{Ancker2006, author = {J. S. Ancker and Y. Senathirajah and R. Kukafka and J. B. Starren}, title = {Design Features of Graphs in Health Risk Communication: A Systematic Review}, journal = {Journal of the American Medical Informatics Association}, year = {2006}, volume = {13}, number = {6}, pages = {608--618}, month = {nov}, abstract = {This review describes recent experimental and focus group research on graphics as a method of communication about quantitative health risks. Some of the studies discussed in this review assessed effect of graphs on quantitative reasoning, others assessed effects on behavior or behavioral intentions, and still others assessed viewers’ likes and dislikes. Graphical features that improve the accuracy of quantitative reasoning appear to differ from the features most likely to alter behavior or intentions. For example, graphs that make part-to-whole relationships available visually may help people attend to the relationship between the numerator (the number of people affected by a hazard) and the denominator (the entire population at risk), whereas graphs that show only the numerator appear to inflate the perceived risk and may induce risk-averse behavior. 
Viewers often preferred design features such as visual simplicity and familiarity that were not associated with accurate quantitative judgments. Communicators should not assume that all graphics are more intuitive than text; many of the studies found that patients’ interpretations of the graphics were dependent upon expertise or instruction. Potentially useful directions for continuing research include interactions with educational level and numeracy and successful ways to communicate uncertainty about risk.}, doi = {10.1197/jamia.m2115}, publisher = {Oxford University Press (OUP)}, url = {https://doi.org/10.1197%2Fjamia.m2115}, } @Misc{Schonlau2008, author = {Matthias Schonlau and Ellen Peters}, title = {Graph Comprehension -- An experiment in displaying data as bar charts, pie charts and tables with and without the gratuitous 3rd dimension}, year = {2008}, abstract = {We investigated whether the type of data display (bar chart, pie chart, or table) or adding a gratuitous third dimension (shading to give the illusion of depth) affects the accuracy of answers of questions about the data. We conducted a randomized experiment with 897 members of the American Life Panel, a nationally representative US web survey panel. We found that displaying data in a table lead to more accurate answers than the choice of bar charts or pie charts. Adding a gratuitous third dimension had no effect on the accuracy of the answers for the bar chart and a small but significant negative effect for the pie chart. Viewing the graph/table for less than 8 seconds resulted in less accurate answers. Older age was associated with increased average viewing time (1.2 seconds per 10 years increase in age) but did not affect the accuracy of the answers. 
Greater numeracy was associated with more accurate answers.}, keywords = {barcharts, pie-charts, 3D}, url = {https://www.rand.org/content/dam/rand/pubs/working_papers/2008/RAND_WR618.pdf}, } @InCollection{Lewis2006, author = {Daniel Lewis and Steve Haroz and Kwan-Liu Ma}, title = {Layout of Multiple Views for Volume Visualization: A User Study}, booktitle = {Advances in Visual Computing}, publisher = {Springer Berlin Heidelberg}, year = {2006}, pages = {215-226}, abstract = {Volume visualizations can have drastically different appearances when viewed using a variety of transfer functions. A problem then occurs in trying to organize many different views on one screen. We conducted a user study of four layout techniques for these multiple views. We timed participants as they separated different aspects of volume data for both time-invariant and time-variant data using one of four different layout schemes. The layout technique had no impact on performance when used with time-invariant data. With time-variant data, however, the multiple view layouts all resulted in better times than did a single view interface. Surprisingly, different layout techniques for multiple views resulted in no noticeable difference in user performance. In this paper, we describe our study and present the results, which could be used in the design of future volume visualization software to improve the productivity of the scientists who use it.}, doi = {10.1007/11919629_23}, url = {https://doi.org/10.1007%2F11919629_23}, } @Article{Haroz2016, author = {Steve Haroz and Robert Kosara and Steven L. Franconeri}, title = {The Connected Scatterplot for Presenting Paired Time Series}, journal = {IEEE Transactions on Visualization and Computer Graphics}, year = {2016}, volume = {22}, number = {9}, pages = {2174--2186}, month = {sep}, abstract = {The connected scatterplot visualizes two related time series in a scatterplot and connects the points with a line in temporal sequence. 
News media are increasingly using this technique to present data under the intuition that it is understandable and engaging. To explore these intuitions, we (1) describe how paired time series relationships appear in a connected scatterplot, (2) qualitatively evaluate how well people understand trends depicted in this format, (3) quantitatively measure the types and frequency of misinter pretations, and (4) empirically evaluate whether viewers will preferentially view graphs in this format over the more traditional format. The results suggest that low-complexity connected scatterplots can be understood with little explanation, and that viewers are biased towards inspecting connected scatterplots over the more traditional format. We also describe misinterpretations of connected scatterplots and propose further research into mitigating these mistakes for viewers unfamiliar with the technique.}, doi = {10.1109/tvcg.2015.2502587}, publisher = {Institute of Electrical and Electronics Engineers (IEEE)}, url = {https://doi.org/10.1109%2Ftvcg.2015.2502587}, } @InProceedings{Kim2017, author = {Yea-Seul Kim and Katharina Reinecke and Jessica Hullman}, title = {Explaining the Gap: Visualizing One's Predictions Improves Recall and Comprehension of Data}, booktitle = {ACM Human Factors in Computing Systems (CHI)}, year = {2017}, abstract = {Information visualizations use interactivity to enable user-driven querying of visualized data. However, users’ interactions with their internal representations, including their expectations about data, are also critical for a visualization to support learning. We present multiple graphically-based techniques for eliciting and incorporating a user’s prior knowledge about data into visualization interaction. We use controlled experiments to evaluate how graphically eliciting forms of prior knowledge and presenting feedback on the gap between prior knowledge and the observed data impacts a user’s ability to recall and understand the data. 
We find that participants who are prompted to reflect on their prior knowledge by predicting and self-explaining data outperform a control group in recall and comprehension. These effects persist when participants have moderate or little prior knowledge on the datasets. We discuss how the effects differ based on text versus visual presentations of data. We characterize the design space of graphical prediction and feedback techniques and describe design recommendations.}, url = {http://idl.cs.washington.edu/papers/explaining-the-gap}, } @Article{Dimara2017, author = {Evanthia Dimara and Anastasia Bezerianos and Pierre Dragicevic}, title = {The Attraction Effect in Information Visualization}, journal = {IEEE Transactions on Visualization and Computer Graphics}, year = {2017}, volume = {23}, number = {1}, pages = {471--480}, month = {jan}, abstract = {The attraction effect is a well-studied cognitive bias in decision making research, where one's choice between two alternatives is influenced by the presence of an irrelevant (dominated) third alternative. We examine whether this cognitive bias, so far only tested with three alternatives and simple presentation formats such as numerical tables, text and pictures, also appears in visualizations. Since visualizations can be used to support decision making - e.g., when choosing a house to buy or an employee to hire - a systematic bias could have important implications. In a first crowdsource experiment, we indeed partially replicated the attraction effect with three alternatives presented as a numerical table, and observed similar effects when they were presented as a scatterplot. In a second experiment, we investigated if the effect extends to larger sets of alternatives, where the number of alternatives is too large for numerical tables to be practical. Our findings indicate that the bias persists for larger sets of alternatives presented as scatterplots. 
We discuss implications for future research on how to further study and possibly alleviate the attraction effect.}, doi = {10.1109/tvcg.2016.2598594}, publisher = {Institute of Electrical and Electronics Engineers ({IEEE})}, url = {https://doi.org/10.1109%2Ftvcg.2016.2598594}, } @Article{Padilla2017, author = {Lace Padilla and P. Samuel Quinan and Miriah Meyer and Sarah H. Creem-Regehr}, title = {Evaluating the Impact of Binning 2D Scalar Fields}, journal = {IEEE Transactions on Visualization and Computer Graphics}, year = {2017}, volume = {23}, number = {1}, pages = {431--440}, month = {jan}, abstract = {The expressiveness principle for visualization design asserts that a visualization should encode all of the available data, and only the available data, implying that continuous data types should be visualized with a continuous encoding channel. And yet, in many domains binning continuous data is not only pervasive, but it is accepted as standard practice. Prior work provides no clear guidance for when encoding continuous data continuously is preferable to employing binning techniques or how this choice affects data interpretation and decision making. In this paper, we present a study aimed at better understanding the conditions in which the expressiveness principle can or should be violated for visualizing continuous data. We provided participants with visualizations employing either continuous or binned greyscale encodings of geospatial elevation data and compared participants' ability to complete a wide variety of tasks. For various tasks, the results indicate significant differences in decision making, confidence in responses, and task completion time between continuous and binned encodings of the data. In general, participants with continuous encodings were faster to complete many of the tasks, but never outperformed those with binned encodings, while performance accuracy with binned encodings was superior to continuous encodings in some tasks. 
These findings suggest that strict adherence to the expressiveness principle is not always advisable. We discuss both the implications and limitations of our results and outline various avenues for potential work needed to further improve guidelines for using continuous versus binned encodings for continuous data types.}, doi = {10.1109/tvcg.2016.2599106}, publisher = {Institute of Electrical and Electronics Engineers (IEEE)}, url = {https://doi.org/10.1109%2Ftvcg.2016.2599106}, } @Article{Beecham2017, author = {Roger Beecham and Jason Dykes and Wouter Meulemans and Aidan Slingsby and Cagatay Turkay and Jo Wood}, title = {Map {LineUps}: Effects of spatial structure on graphical inference}, journal = {IEEE Transactions on Visualization and Computer Graphics}, year = {2017}, volume = {23}, number = {1}, pages = {391--400}, month = {jan}, abstract = {Fundamental to the effective use of visualization as an analytic and descriptive tool is the assurance that presenting data visually provides the capability of making inferences from what we see. This paper explores two related approaches to quantifying the confidence we may have in making visual inferences from mapped geospatial data. We adapt Wickham et al.'s `Visual Line-up' method as a direct analogy with Null Hypothesis Significance Testing (NHST) and propose a new approach for generating more credible spatial null hypotheses. Rather than using as a spatial null hypothesis the unrealistic assumption of complete spatial randomness, we propose spatially autocorrelated simulations as alternative nulls. We conduct a set of crowdsourced experiments (n=361) to determine the just noticeable difference (JND) between pairs of choropleth maps of geographic units controlling for spatial autocorrelation (Moran's I statistic) and geometric configuration (variance in spatial unit area). 
Results indicate that people's abilities to perceive differences in spatial autocorrelation vary with baseline autocorrelation structure and the geometric configuration of geographic units. These results allow us, for the first time, to construct a visual equivalent of statistical power for geospatial data. Our JND results add to those provided in recent years by Klippel et al. (2011), Harrison et al. (2014) and Kay & Heer (2015) for correlation visualization. Importantly, they provide an empirical basis for an improved construction of visual line-ups for maps and the development of theory to inform geospatial tests of graphical inference.}, doi = {10.1109/tvcg.2016.2598862}, publisher = {Institute of Electrical and Electronics Engineers (IEEE)}, url = {https://www.gicentre.net/beechammaplineups2016}, } @Article{Boy2014, author = {Jeremy Boy and Ronald A. Rensink and Enrico Bertini and Jean-Daniel Fekete}, title = {A Principled Way of Assessing Visualization Literacy}, journal = {{IEEE} Transactions on Visualization and Computer Graphics}, year = {2014}, volume = {20}, number = {12}, pages = {1963--1972}, month = {dec}, doi = {10.1109/tvcg.2014.2346984}, publisher = {Institute of Electrical and Electronics Engineers ({IEEE})}, url = {https://doi.org/10.1109%2Ftvcg.2014.2346984}, } @Article{Pandey2014, author = {Anshul Vikram Pandey and Anjali Manivannan and Oded Nov and Margaret Satterthwaite and Enrico Bertini}, title = {The Persuasive Power of Data Visualization}, journal = {IEEE Transactions on Visualization and Computer Graphics}, year = {2014}, volume = {20}, number = {12}, pages = {2211--2220}, month = {dec}, abstract = {Data visualization has been used extensively to inform users. However, little research has been done to examine the effects of data visualization in influencing users or in making a message more persuasive. In this study, we present experimental research to fill this gap and present an evidence-based analysis of persuasive visualization. 
We built on persuasion research from psychology and user interfaces literature in order to explore the persuasive effects of visualization. In this experimental study we define the circumstances under which data visualization can make a message more persuasive, propose hypotheses, and perform quantitative and qualitative analyses on studies conducted to test these hypotheses. We compare visual treatments with data presented through barcharts and linecharts on the one hand, treatments with data presented through tables on the other, and then evaluate their persuasiveness. The findings represent a first step in exploring the effectiveness of persuasive visualization. }, doi = {10.1109/tvcg.2014.2346419}, publisher = {Institute of Electrical and Electronics Engineers (IEEE)}, url = {http://vgc.poly.edu/projects/persuasion/}, } @Article{Gramazio2014, author = {Connor C. Gramazio and Karen B. Schloss and David H. Laidlaw}, title = {The relation between visualization size, grouping, and user performance}, journal = {IEEE Transactions on Visualization and Computer Graphics}, year = {2014}, volume = {20}, number = {12}, pages = {1953--1962}, month = {dec}, abstract = {In this paper we make the following contributions: (1) we describe how the grouping, quantity, and size of visual marks affects search time based on the results from two experiments; (2) we report how search performance relates to self-reported difficulty in finding the target for different display types; and (3) we present design guidelines based on our findings to facilitate the design of effective visualizations. Both Experiment 1 and 2 asked participants to search for a unique target in colored visualizations to test how the grouping, quantity, and size of marks affects user performance. In Experiment 1, the target square was embedded in a grid of squares and in Experiment 2 the target was a point in a scatterplot. 
Search performance was faster when colors were spatially grouped than when they were randomly arranged. The quantity of marks had little effect on search time for grouped displays ("pop-out"), but increasing the quantity of marks slowed reaction time for random displays. Regardless of color layout (grouped vs. random), response times were slowest for the smallest mark size and decreased as mark size increased to a point, after which response times plateaued. In addition to these two experiments we also include potential application areas, as well as results from a small case study where we report preliminary findings that size may affect how users infer how visualizations should be used. We conclude with a list of design guidelines that focus on how to best create visualizations based on grouping, quantity, and size of visual marks. }, doi = {10.1109/tvcg.2014.2346983}, publisher = {Institute of Electrical and Electronics Engineers (IEEE)}, url = {https://doi.org/10.1109%2Ftvcg.2014.2346983}, } @Article{Gramazio2017, author = {Connor C. Gramazio and David H. Laidlaw and Karen B. Schloss}, title = {Colorgorical: Creating discriminable and preferable color palettes for information visualization}, journal = {IEEE Transactions on Visualization and Computer Graphics}, year = {2017}, volume = {23}, number = {1}, pages = {521--530}, month = {jan}, abstract = {We present an evaluation of Colorgorical, a web-based tool for creating discriminable and aesthetically preferable categorical color palettes. Colorgorical uses iterative semi-random sampling to pick colors from CIELAB space based on user-defined discriminability and preference importances. Colors are selected by assigning each a weighted sum score that applies the user-defined importances to Perceptual Distance, Name Difference, Name Uniqueness, and Pair Preference scoring functions, which compare a potential sample to already-picked palette colors. 
After, a color is added to the palette by randomly sampling from the highest scoring palettes. Users can also specify hue ranges or build off their own starting palettes. This procedure differs from previous approaches that do not allow customization (e.g., pre-made ColorBrewer palettes) or do not consider visualization design constraints (e.g., Adobe Color and ACE). In a Palette Score Evaluation, we verified that each scoring function measured different color information. Experiment 1 demonstrated that slider manipulation generates palettes that are consistent with the expected balance of discriminability and aesthetic preference for 3-, 5-, and 8-color palettes, and also shows that the number of colors may change the effectiveness of pair-based discriminability and preference scores. For instance, if the Pair Preference slider were upweighted, users would judge the palettes as more preferable on average. Experiment 2 compared Colorgorical palettes to benchmark palettes (ColorBrewer, Microsoft, Tableau, Random). Colorgorical palettes are as discriminable and are at least as preferable or more preferable than the alternative palette sets. In sum, Colorgorical allows users to make customized color palettes that are, on average, as effective as current industry standards by balancing the importance of discriminability and aesthetic preference.}, doi = {10.1109/tvcg.2016.2598918}, publisher = {Institute of Electrical and Electronics Engineers (IEEE)}, url = {http://vrl.cs.brown.edu/color}, } @Article{Etemadpour2017, author = {Ronak Etemadpour and Angus Graeme Forbes}, title = {Density-based motion}, journal = {Information Visualization}, year = {2017}, volume = {16}, number = {1}, pages = {3--20}, month = {jan}, abstract = {A common strategy for encoding multidimensional data for visual analysis is to use dimensionality reduction techniques that project data from higher dimensions onto a lower-dimensional space. 
This article examines the use of motion to retain an accurate representation of the point density of clusters that might otherwise be lost when a multidimensional dataset is projected into a two-dimensional space. Specifically, we consider different types of density-based motion, where the magnitude of the motion is directly related to the density of the clusters. We investigate how users interpret motion in two-dimensional scatterplots and whether or not they are able to effectively interpret the point density of the clusters through motion. We conducted a series of user studies with both synthetic and real-world datasets to explore how motion can help users in completing various multidimensional data analysis tasks. Our findings indicate that for some tasks, motion outperforms the static scatterplots; circular path motions in particular give significantly better results compared to the other motions. We also found that users were easily able to distinguish clusters with different densities as long the magnitudes of motion were above a particular threshold. Our results indicate that incorporating density-based motion into visualization analytics systems effectively enables the exploration and analysis of multidimensional datasets.}, doi = {10.1177/1473871615606187}, keywords = {animation}, publisher = {{SAGE} Publications}, url = {https://doi.org/10.1177%2F1473871615606187}, } @InProceedings{Forbes2014, author = {A. G. Forbes and C. Jette and A. Predoehl}, title = {Analyzing intrinsic motion textures created from naturalistic video captures}, booktitle = {2014 International Conference on Information Visualization Theory and Applications (IVAPP)}, year = {2014}, pages = {107-113}, month = {Jan}, abstract = {This paper presents an initial exploration of the plausibility of incorporating subtle motions as a useful modality for encoding (or augmenting the encoding of) data for information visualization tasks. 
Psychophysics research indicates that the human visual system is highly responsive to identifying and differentiating even the subtlest motions intrinsic to an object. We examine aspects of this intrinsic motion, whereby an object stays in one place while a texture applied to that object changes in subtle but perceptible ways. We hypothesize that the use of subtle intrinsic motions (as opposed to more obvious extrinsic motion) will avoid the clutter and visual fatigue that often discourages visualization designers from incorporating motion. Using transformed video captures of naturalistic motions gathered from the world, we conduct a preliminary user study that attempts ascertains the minimum amount of motion that is easily perceptible to a viewer. We introduce metrics which allow us to categorize these motions in terms of flicker (local amplitude and frequency), flutter (global amplitude and frequency), and average maximum contrast between a pixel and its immediate neighbors. Using these metrics (and a few others), we identify plausible ranges of motion that might be appropriate for visualization tasks, either on their own or in conjunction with other modalities (such as color or shape), without increasing visual fatigue. Based on an analysis of these initial preliminary results, we propose that the use of what we term “intrinsic motion textures” may be a promising modality appropriate for a range of visualization tasks.}, keywords = {animation}, } @Article{Alper2011, author = {B. Alper and T. Hollerer and J. Kuchera-Morin and A. 
Forbes}, title = {Stereoscopic Highlighting: 2D Graph Visualization on Stereo Displays}, journal = {{IEEE} Transactions on Visualization and Computer Graphics}, year = {2011}, volume = {17}, number = {12}, pages = {2325--2333}, month = {dec}, abstract = {In this paper we present a new technique and prototype graph visualization system, stereoscopic highlighting, to help answer accessibility and adjacency queries when interacting with a node-link diagram. Our technique utilizes stereoscopic depth to highlight regions of interest in a 2D graph by projecting these parts onto a plane closer to the viewpoint of the user. This technique aims to isolate and magnify specific portions of the graph that need to be explored in detail without resorting to other highlighting techniques like color or motion, which can then be reserved to encode other data attributes. This mechanism of stereoscopic highlighting also enables focus+context views by juxtaposing a detailed image of a region of interest with the overall graph, which is visualized at a further depth with correspondingly less detail. In order to validate our technique, we ran a controlled experiment with 16 subjects comparing static visual highlighting to stereoscopic highlighting on 2D and 3D graph layouts for a range of tasks. Our results show that while for most tasks the difference in performance between stereoscopic highlighting alone and static visual highlighting is not statistically significant, users performed better when both highlighting methods were used concurrently. In more complicated tasks, 3D layout with static visual highlighting outperformed 2D layouts with a single highlighting method. However, it did not outperform the 2D layout utilizing both highlighting techniques simultaneously. 
Based on these results, we conclude that stereoscopic highlighting is a promising technique that can significantly enhance graph visualizations for certain use cases.}, doi = {10.1109/tvcg.2011.234}, keywords = {3D, networks}, publisher = {Institute of Electrical and Electronics Engineers ({IEEE})}, url = {https://doi.org/10.1109%2Ftvcg.2011.234}, } @Article{Peterson2009, author = {Stephen D. Peterson and Magnus Axholt and Stephen R. Ellis}, title = {Objective and subjective assessment of stereoscopically separated labels in augmented reality}, journal = {Computers & Graphics}, year = {2009}, volume = {33}, number = {1}, pages = {23--33}, month = {feb}, abstract = {We present a new technique for managing visual clutter caused by overlapping labels in complex information displays. This technique, label layering, utilizes stereoscopic disparity as a means to segregate labels in depth for increased legibility and clarity. By distributing overlapping labels in depth, we have found that selection time during a visual search task in situations with high levels of visual overlap is reduced by 4 s or 24%. Our data show that the stereoscopically based depth order of the labels must be correlated with the distance order of their corresponding objects, for practical benefits. 
An algorithm using our label layering technique accordingly could be an alternative to traditional label placement algorithms that avoid label overlap at the cost of distracting view plane motion, symbology dimming or label size reduction.}, doi = {10.1016/j.cag.2008.11.006}, keywords = {3D}, publisher = {Elsevier {BV}}, url = {https://doi.org/10.1016%2Fj.cag.2008.11.006}, } @Article{Gschwandtner2016, author = {Theresia Gschwandtner and Markus Bögl and Paolo Federico and Silvia Miksch}, title = {Visual Encodings of Temporal Uncertainty: A Comparative User Study}, journal = {IEEE Transactions on Visualization and Computer Graphics}, year = {2016}, volume = {22}, number = {1}, pages = {539--548}, month = {jan}, issn = {1077-2626}, abstract = {A number of studies have investigated different ways of visualizing uncertainty. However, in the temporal dimension, it is still an open question how to best represent uncertainty, since the special characteristics of time require special visual encodings and may provoke different interpretations. Thus, we have conducted a comprehensive study comparing alternative visual encodings of intervals with uncertain start and end times: gradient plots, violin plots, accumulated probability plots, error bars, centered error bars, and ambiguation. Our results reveal significant differences in error rates and completion time for these different visualization types and different tasks. 
We recommend using ambiguation - using a lighter color value to represent uncertain regions - or error bars for judging durations and temporal bounds, and gradient plots - using fading color or transparency - for judging probability values.}, doi = {10.1109/TVCG.2015.2467752}, keywords = {uncertainty}, } @InProceedings{Federico2016, author = {Paolo Federico and Silvia Miksch}, title = {Evaluation of Two Interaction Techniques for Visualization of Dynamic Graphs}, booktitle = {Graph Drawing and Network Visualization}, year = {2016}, series = {Lecture Notes in Computer Science}, pages = {557--571}, address = {Cham}, month = sep, publisher = {Springer}, abstract = {Several techniques for visualization of dynamic graphs are based on different spatial arrangements of a temporal sequence of node-link diagrams. Many studies in the literature have investigated the importance of maintaining the user’s mental map across this temporal sequence, but usually each layout is considered as a static graph drawing and the effect of user interaction is disregarded. We conducted a task-based controlled experiment to assess the effectiveness of two basic interaction techniques: the adjustment of the layout stability and the highlighting of adjacent nodes and edges. 
We found that generally both interaction techniques increase accuracy, sometimes at the cost of longer completion times, and that the highlighting outclasses the stability adjustment for many tasks except the most complex ones.}, doi = {10.1007/978-3-319-50106-2_43}, isbn = {978-3-319-50105-5 978-3-319-50106-2}, keywords = {networks}, language = {en}, url = {https://link.springer.com/chapter/10.1007/978-3-319-50106-2_43}, urldate = {2017-06-28}, } @Article{Aigner2012, author = {Wolfgang Aigner and Alexander Rind and Stephan Hoffmann}, title = {Comparative Evaluation of an Interactive Time-Series Visualization that Combines Quantitative Data with Qualitative Abstractions}, journal = {Computer Graphics Forum}, year = {2012}, volume = {31}, number = {3}, pages = {995--1004}, abstract = {In many application areas, analysts have to make sense of large volumes of multivariate time-series data. Explorative analysis of this kind of data is often difficult and overwhelming at the level of raw data. Temporal data abstraction reduces data complexity by deriving qualitative statements that reflect domain-specific key characteristics. Visual representations of abstractions and raw data together with appropriate interaction methods can support analysts in making their data easier to understand. Such a visualization technique that applies smooth semantic zooming has been developed in the context of patient data analysis. However, no empirical evidence on its effectiveness and efficiency is available. In this paper, we aim to fill this gap by reporting on a controlled experiment that compares this technique with another visualization method used in the well-known KNAVE-II framework. Both methods integrate quantitative data with qualitative abstractions whereas the first one uses a composite representation with color-coding to display the qualitative data and spatial position coding for the quantitative data. 
The second technique uses juxtaposed representations for quantitative and qualitative data with spatial position coding for both. Results show that the test persons using the composite representation were generally faster, particularly for more complex tasks that involve quantitative values as well as qualitative abstractions.}, doi = {10.1111/j.1467-8659.2012.03092.x}, publisher = {Wiley-Blackwell}, } @Article{Saket2017, author = {B. Saket and A. Srinivasan and E. D. Ragan and A. Endert}, title = {Evaluating Interactive Graphical Encodings for Data Visualization}, journal = {IEEE Transactions on Visualization and Computer Graphics}, year = {2017}, volume = {PP}, number = {99}, pages = {1-1}, issn = {1077-2626}, abstract = {User interfaces for data visualization often consist of two main components: control panels for user interaction and visual representation. A recent trend in visualization is directly embedding user interaction into the visual representations. For example, instead of using control panels to adjust visualization parameters, users can directly adjust basic graphical encodings (e.g., changing distances between points in a scatterplot) to perform similar parameterizations. However, enabling embedded interactions for data visualization requires a strong understanding of how user interactions influence the ability to accurately control and perceive graphical encodings. In this paper, we study the effectiveness of these graphical encodings when serving as the method for interaction. Our user study includes 12 interactive graphical encodings. 
We discuss the results in terms of task performance and interaction effectiveness metrics.}, doi = {10.1109/TVCG.2017.2680452}, keywords = {interaction}, } @Article{Alexander2017, author = {Eric Alexander and Chih-Ching Chang and Mariana Shimabukuro and Steven Franconeri and Christopher Collins and Michael Gleicher}, title = {Perceptual Biases in Font Size as a Data Encoding}, journal = {IEEE Transactions on Visualization and Computer Graphics}, year = {2017}, volume = {PP}, number = {99}, pages = {1-1}, month = {jul}, issn = {1077-2626}, abstract = {Many visualizations, including word clouds, cartographic labels, and word trees, encode data within the sizes of fonts. While font size can be an intuitive dimension for the viewer, using it as an encoding can introduce factors that may bias the perception of the underlying values. Viewers might conflate the size of a word's font with a word's length, the number of letters it contains, or with the larger or smaller heights of particular characters (‘o’ vs. ‘p’ vs. ‘b’). We present a collection of empirical studies showing that such factors - which are irrelevant to the encoded values - can indeed influence comparative judgements of font size, though less than conventional wisdom might suggest. 
We highlight the largest potential biases, and describe a strategy to mitigate them.}, doi = {10.1109/TVCG.2017.2723397}, keywords = {tag clouds, font size, text}, url = {https://doi.org/10.1109%2Ftvcg.2017.2723397}, } @Article{Lind2017, author = {Andreas Johnsen Lind and Stefan Bruckner}, title = {Comparing Cross-Sections and 3D Renderings for Surface Matching Tasks using Physical Ground Truths}, journal = {IEEE Transactions on Visualization and Computer Graphics}, year = {2017}, volume = {23}, number = {1}, month = jan, note = {Accepted for publication, to be presented at IEEE SciVis 2016}, abstract = {Within the visualization community there are some well-known techniques for visualizing 3D spatial data and some general assumptions about how perception affects the performance of these techniques in practice. However, there is a lack of empirical research backing up the possible performance differences among the basic techniques for general tasks. One such assumption is that 3D renderings are better for obtaining an overview, whereas cross sectional visualizations such as the commonly used Multi- Planar Reformation (MPR) are better for supporting detailed analysis tasks. In the present study we investigated this common assumption by examining the difference in performance between MPR and 3D rendering for correctly identifying a known surface. We also examined whether prior experience working with image data affects the participant’s performance, and whether there was any difference between interactive or static versions of the visualizations. Answering this question is important because it can be used as part of a scientific and empirical basis for determining when to use which of the two techniques. An advantage of the present study compared to other studies is that several factors were taken into account to compare the two techniques. 
The problem was examined through an experiment with 45 participants, where physical objects were used as the known surface (ground truth). Our findings showed that: 1. The 3D renderings largely outperformed the cross sections; 2. Interactive visualizations were partially more effective than static visualizations; and 3. The high experience group did not generally outperform the low experience group.}, doi = {10.1109/TVCG.2016.2598602}, event = {IEEE SciVis 2016}, keywords = {3D}, location = {Baltimore, USA}, } @Article{Nothelferinpress, author = {Christine Nothelfer and Michael Gleicher and Steven Franconeri}, title = {Redundant encoding strengthens segmentation and grouping in visual displays of data​}, journal = {Journal of Experimental Psychology: Human Perception and Performance}, year = {in press}, abstract = {The availability and importance of data is accelerating, and our visual system is a critical tool for understanding it. The research field of data visualization seeks design guidelines – often inspired by perceptual psychology – for more efficient visual data analysis. We evaluated a common guideline: when presenting multiple sets of values to a viewer, those sets should be distinguished not just by a single feature, such as color, but redundantly by multiple features, such as color and shape. Despite the broad use of this practice across maps and graphs, it may carry costs, and there is no direct evidence for a benefit. We show that this practice can indeed yield a large benefit for rapidly segmenting objects within a dense display (Experiments 1 and 2), and strengthening visual grouping of display elements (Experiment 3). We predict situations where this benefit might be present, and discuss implications for models of attentional control. 
}, keywords = {redundancy}, url = {https://docs.wixstatic.com/ugd/f70594_300b6541d62849af91cc88dd46cd5c28.pdf​​}, } @InProceedings{Correll2012, author = {Michael Correll and Danielle Albers Szafir and Steven Franconeri and Michael Gleicher}, title = {Comparing averages in time series data}, booktitle = {Proceedings of the SIGCHI Conference on Human Factors in Computing Systems}, year = {2012}, pages = {1095-1104}, month = {may}, publisher = {{ACM} Press}, abstract = {Visualizations often seek to aid viewers in assessing the big picture in the data, that is, to make judgments about aggregate properties of the data. In this paper, we present an empirical study of a representative aggregate judgment task: finding regions of maximum average in a series. We show how a theory of perceptual averaging suggests a visual design other than the typically-used line graph. We describe an experiment that assesses participants’ ability to estimate averages and make judgments based on these averages. The experiment confirms that this color encoding significantly outperforms the standard practice. The experiment also provides evidence for a perceptual averaging theory.}, doi = {10.1145/2207676.2208556}, isbn = {978-1-4503-1015-4}, keywords = {time series, color}, location = {Austin, TX}, url = {http://dl.acm.org/citation.cfm?id=2208556}, } @Article{Gleicher2013, author = {Michael Gleicher and Michael Correll and Christine Nothelfer and Steven Franconeri}, title = {Perception of average value in multiclass scatterplots​}, journal = {IEEE Transactions on Visualization and Computer Graphics}, year = {2013}, volume = {19}, number = {12}, pages = {2316-2325}, month = {dec}, abstract = {The visual system can make highly efficient aggregate judgements about a set of objects, with speed roughly independent of the number of objects considered. 
While there is a rich literature on these mechanisms and their ramifications for visual summarization tasks, this prior work rarely considers more complex tasks requiring multiple judgements over long periods of time, and has not considered certain critical aggregation types, such as the localization of the mean value of a set of points. In this paper, we explore these questions using a common visualization task as a case study: relative mean value judgements within multi-class scatterplots. We describe how the perception literature provides a set of expected constraints on the task, and evaluate these predictions with a large-scale perceptual study with crowd-sourced participants. Judgements are no harder when each set contains more points, redundant and conflicting encodings, as well as additional sets, do not strongly affect performance, and judgements are harder when using less salient encodings. These results have concrete ramifications for the design of scatterplots.}, doi = {10.1109/TVCG.2013.183}, keywords = {scatterplots}, url = {http://dl.acm.org/citation.cfm?id=2553744​}, } @Article{Smith2017, author = {Alison Smith and Tak Yeon Lee and Forough Poursabzi-Sangdeh and Jordan Boyd-Graber and Niklas Elmqvist and Leah Findlater}, title = {Evaluating Visual Representations for Topic Understanding and Their Effects on Manually Generated Labels}, journal = {Transactions of the Association for Computational Linguistics}, year = {2017}, volume = {5}, pages = {1--15}, abstract = {Probabilistic topic models are important tools for indexing, summarizing, and analyzing large document collections by their themes. However, promoting end-user understanding of topics remains an open research problem. We compare labels generated by users given four topic visualization techniques—word lists, word lists with bars, word clouds, and network graphs—against each other and against automatically generated labels. 
Our basis of comparison is participant ratings of how well labels describe documents from the topic. Our study has two phases: a labeling phase where participants label visualized topics and a validation phase where different participants select which labels best describe the topics' documents. Although all visualizations produce similar quality labels, simple visualizations such as word lists allow participants to quickly understand topics, while complex visualizations take longer but expose multi-word expressions that simpler visualizations obscure. Automatic labels lag behind user-created labels, but our dataset of manually labeled topics highlights linguistic patterns (e.g., hypernyms, phrases) that can be used to improve automatic topic labeling algorithms.}, keywords = {Tag Clouds, text}, url = {http://www.aclweb.org/website/old_anthology/Q/Q17/Q17-1001.pdf}, } @InProceedings{Ware2005, author = {Colin Ware and Peter Mitchell}, title = {Reevaluating stereo and motion cues for visualizing graphs in three dimensions}, booktitle = {Proceedings of the 2nd symposium on Appied perception in graphics and visualization - APGV '05}, year = {2005}, publisher = {{ACM} Press}, abstract = {It has been known for some time that larger graphs can be interpreted if viewed in 3D than in 2D. Both kinetic depth cues and stereoscopic depth cues increase the size of the structure that can be interpreted. However, prior studies were carried out using displays that provided a level of detail far short of what the human visual system is capable of resolving. This is especially problematic because human stereoscopic vision is known to be a super-acuity, it operates best under conditions where fine details are present. Therefore we undertook a graph comprehension study using a very high resolution stereoscopic display. We examined the effect of stereo, kinetic depth and using 3D tubes versus lines to display the links. 
The results showed a much greater benefit for 3D viewing than previous studies. For example, with both motion and depth cues, unskilled observers could see paths between nodes in 333 node graphs with a better than 10% error rate. Skilled observers could see up to a 1000 node graph with less than a 10% error rate. This represented an order of magnitude increase over 2D display. These findings are discussed in terms of their implications for information display.}, doi = {10.1145/1080402.1080411}, keywords = {3D, networks}, url = {https://doi.org/10.1145/1080402.1080411}, } @Article{Johansson2016, author = {Jimmy Johansson and Camilla Forsell}, title = {Evaluation of Parallel Coordinates: Overview, Categorization and Guidelines for Future Research}, journal = {{IEEE} Transactions on Visualization and Computer Graphics}, year = {2016}, volume = {22}, number = {1}, pages = {579--588}, month = {jan}, abstract = {The parallel coordinates technique is widely used for the analysis of multivariate data. During recent decades significant research efforts have been devoted to exploring the applicability of the technique and to expand upon it, resulting in a variety of extensions. Of these many research activities, a surprisingly small number concerns user-centred evaluations investigating actual use and usability issues for different tasks, data and domains. The result is a clear lack of convincing evidence to support and guide uptake by users as well as future research directions. To address these issues this paper contributes a thorough literature survey of what has been done in the area of user-centred evaluation of parallel coordinates. These evaluations are divided into four categories based on characterization of use, derived from the survey. 
Based on the data from the survey and the categorization combined with the authors' experience of working with parallel coordinates, a set of guidelines for future research directions is proposed.}, doi = {10.1109/tvcg.2015.2466992}, keywords = {parallel-coordinates}, publisher = {Institute of Electrical and Electronics Engineers ({IEEE})}, url = {https://doi.org/10.1109%2Ftvcg.2015.2466992}, } @Article{Szafir2016Lightness, title={Lightness Constancy in Surface Visualization}, author={Danielle Albers Szafir and Alper Sarikaya and Michael Gleicher}, journal={IEEE Transactions on Visualization and Computer Graphics}, volume={22}, number={9}, pages={2107--2121}, year={2016}, publisher={IEEE}, abstract={Color is a common channel for displaying data in surface visualization, but is affected by the shadows and shading used to convey surface depth and shape. Understanding encoded data in the context of surface structure is critical for effective analysis in a variety of domains, such as in molecular biology. In the physical world, lightness constancy allows people to accurately perceive shadowed colors; however, its effectiveness in complex synthetic environments such as surface visualizations is not well understood. We report a series of crowdsourced and laboratory studies that confirm the existence of lightness constancy effects for molecular surface visualizations using ambient occlusion. We provide empirical evidence of how common visualization design decisions can impact viewers' abilities to accurately identify encoded surface colors. These findings suggest that lightness constancy aids in understanding color encodings in surface visualization and reveal a correlation between visualization techniques that improve color interpretation in shadow and those that enhance perceptions of surface depth. 
These results collectively suggest that understanding constancy in practice can inform effective visualization design.}, doi={10.1109/TVCG.2015.2500240}, keywords={Color, shading, shadow, lightness-constancy, molecular-visualization, surface-visualization}, url={http://ieeexplore.ieee.org/abstract/document/7328340/} } @InProceedings{Szafir2014, author = {Danielle Albers Szafir and Michael Correll and Michael Gleicher}, title = {Task-driven Evaluation of Aggregation in Time Series Visualization}, booktitle = {Proceedings of the SIGCHI Conference on Human Factors in Computing Systems}, year = {2014}, series = {CHI '14}, pages = {551--560}, address = {New York, NY, USA}, publisher = {ACM}, abstract = {Many visualization tasks require the viewer to make judgments about aggregate properties of data. Recent work has shown that viewers can perform such tasks effectively, for example to efficiently compare the maximums or means over ranges of data. However, this work also shows that such effectiveness depends on the designs of the displays. In this paper, we explore this relationship between aggregation task and visualization design to provide guidance on matching tasks with designs. We combine prior results from perceptual science and graphical perception to suggest a set of design variables that influence performance on various aggregate comparison tasks. We describe how choices in these variables can lead to designs that are matched to particular tasks. We use these variables to assess a set of eight different designs, predicting how they will support a set of six aggregate time series comparison tasks. A crowd-sourced evaluation confirms these predictions. 
These results not only provide evidence for how the specific visualizations support various tasks, but also suggest using the identified design variables as a tool for designing visualizations well suited for various types of tasks.}, acmid = {2557200}, doi = {10.1145/2556288.2557200}, isbn = {978-1-4503-2473-1}, keywords = {time series, aggregation}, location = {Toronto, Ontario, Canada}, numpages = {10}, url = {http://doi.acm.org/10.1145/2556288.2557200}, } @Article{Lin2013, author = {Sharon Lin and Julie Fortuna and Chinmay Kulkarni and Maureen Stone and Jeffrey Heer}, title = {Selecting Semantically-Resonant Colors for Data Visualization}, journal = {Computer Graphics Forum}, year = {2013}, volume = {32}, number = {3pt4}, pages = {401--410}, issn = {1467-8659}, abstract = {We introduce an algorithm for automatic selection of semantically-resonant colors to represent data (e.g., using blue for data about ``oceans'', or pink for ``love''). Given a set of categorical values and a target color palette, our algorithm matches each data value with a unique color. Values are mapped to colors by collecting representative images, analyzing image color distributions to determine value-color affinity scores, and choosing an optimal assignment. Our affinity score balances the probability of a color with how well it discriminates among data values. A controlled study shows that expert-chosen semantically-resonant colors improve speed on chart reading tasks compared to a standard palette, and that our algorithm selects colors that lead to similar gains. 
A second study verifies that our algorithm effectively selects colors across a variety of data categories.}, doi = {10.1111/cgf.12127}, keywords = {color}, publisher = {Blackwell Publishing Ltd}, url = {http://dx.doi.org/10.1111/cgf.12127}, } @Article{Bartram2011, author = {Lyn Bartram and Billy Cheung and Maureen Stone}, title = {The Effect of Colour and Transparency on the Perception of Overlaid Grids}, journal = {IEEE Transactions on Visualization and Computer Graphics}, year = {2011}, volume = {17}, number = {12}, pages = {1942--1948}, month = {Dec}, issn = {1077-2626}, abstract = {Overlaid reference elements need to be sufficiently visible to effectively relate to the underlying information, but not so obtrusive that they clutter the presentation. We seek to create guidelines for presenting such structures through experimental studies to define boundary conditions for visual intrusiveness. We base our work on the practice of designers, who use transparency to integrate overlaid grids with their underlying imagery. Previous work discovered a useful range of alpha values for black or white grids overlayed on scatterplot images rendered in shades of gray over gray backgrounds of different lightness values. This work compares black grids to blue and red ones on different image types of scatterplots and maps. We expected that the coloured grids over grayscale images would be more visually salient than black ones, resulting in lower alpha values. Instead, we found that there was no significant difference between the boundaries set for red and black grids, but that the boundaries for blue grids were set consistently higher (more opaque). As in our previous study, alpha values are affected by image density rather than image type, and are consistently lower than many default settings. 
These results have implications for the design of subtle reference structures.}, doi = {10.1109/TVCG.2011.242}, keywords = {color}, } @InProceedings{Correll2013, author = {Michael Correll and Eric Alexander and Michael Gleicher}, title = {Quantity Estimation in Visualizations of Tagged Text}, booktitle = {Proceedings of the SIGCHI Conference on Human Factors in Computing Systems}, year = {2013}, series = {CHI '13}, pages = {2697--2706}, address = {New York, NY, USA}, publisher = {ACM}, abstract = {A valuable task in text visualization is to have viewers make judgments about text that has been annotated (either by hand or by some algorithm such as text clustering or entity extraction). In this work we look at the ability of viewers to make judgments about the relative quantities of tags in annotated text (specifically text tagged with one of a set of qualitatively distinct colors), and examine design choices that can improve performance at extracting statistical information from these texts. We find that viewers can efficiently and accurately estimate the proportions of tag levels over a range of situations; however accuracy can be improved through color choice and area adjustments.}, acmid = {2481373}, doi = {10.1145/2470654.2481373}, isbn = {978-1-4503-1899-0}, keywords = {text, color}, location = {Paris, France}, numpages = {10}, url = {http://doi.acm.org/10.1145/2470654.2481373}, } @InProceedings{Adnan2016, author = {Muhammad Adnan and Mike Just and Lynne Baillie}, title = {Investigating time series visualisations to improve the user experience}, booktitle = {Proceedings of the 2016 CHI Conference on Human Factors in Computing Systems}, year = {2016}, pages = {5444--5455}, organization = {ACM}, abstract = {Research on graphical perception of time series visualisations has focused on visual representation, and not on interaction. 
Even for visual representation, there has been limited study of the impact on users of visual encodings and the strengths and weaknesses of Cartesian and Polar coordinate systems. In order to address this research gap, we performed a comprehensive graphical perception study that measured the effectiveness of time series visualisations with different interactions, visual encodings and coordinate systems for several tasks. Our results show that, while positional and colour visual encodings were better for most tasks, area visual encoding performed better for data comparison. Most importantly, we identified that introducing interactivity within time series visualisations considerably enhances the user experience, without any loss of efficiency or accuracy. We believe that our findings can greatly improve the development of visual analytics tools using time series visualisations in a variety of domains.}, keywords = {time series}, } @Article{Borkin2011, author = {Borkin, Michelle and Gajos, Krzysztof and Peters, Amanda and Mitsouras, Dimitrios and Melchionna, Simone and Rybicki, Frank and Feldman, Charles and Pfister, Hanspeter}, title = {Evaluation of artery visualizations for heart disease diagnosis}, journal = {IEEE Transactions on Visualization and Computer Graphics}, year = {2011}, volume = {17}, number = {12}, pages = {2479--2488}, abstract = { Heart disease is the number one killer in the United States, and finding indicators of the disease at an early stage is critical for treatment and prevention. In this paper we evaluate visualization techniques that enable the diagnosis of coronary artery disease. A key physical quantity of medical interest is endothelial shear stress (ESS). Low ESS has been associated with sites of lesion formation and rapid progression of disease in the coronary arteries. Having effective visualizations of a patient's ESS data is vital for the quick and thorough non-invasive evaluation by a cardiologist. 
We present a task taxonomy for hemodynamics based on a formative user study with domain experts. Based on the results of this study we developed HemoVis, an interactive visualization application for heart disease diagnosis that uses a novel 2D tree diagram representation of coronary artery trees. We present the results of a formal quantitative user study with domain experts that evaluates the effect of 2D versus 3D artery representations and of color maps on identifying regions of low ESS. We show statistically significant results demonstrating that our 2D visualizations are more accurate and efficient than 3D representations, and that a perceptually appropriate color map leads to fewer diagnostic mistakes than a rainbow color map.}, keywords = {3D, color}, publisher = {IEEE}, } @Article{Ware1988, author = {Colin Ware}, title = {Color sequences for univariate maps: Theory, experiments and principles}, journal = {IEEE Computer Graphics and Applications}, year = {1988}, volume = {8}, number = {5}, pages = {41--49}, abstract = {Pseudocoloring for presenting univariate map information on a graphic display system is investigated. The kinds of information available in maps are divided into two classes: metric information denotes the quantity stored at each point on the surface, and form information denotes the shape or structure of the surface. Theoretical principles are proposed to predict which color sequences will be effective at conveying value and form information respectively. According to this theory, a scale that approximates the physical spectrum should be good at conveying value information, because of the reduced effects of simultaneous contrast. It should be poor at conveying form information, however, because the brain prefers form information to come through the lightness-processing channel. Conversely, a gray scale should be poor at conveying value information and good at conveying form information, according to the same theory. 
These predictions are tested in a series of psychophysical experiments that test five color sequences. The results show that simultaneous contrast can be a major source of error when reading maps, but only partially confirm the form hypothesis. Guidelines are given for designing color sequences to be effective in both conveying form and value information. An experimental color sequence is presented to illustrate these guidelines.}, keywords = {color}, publisher = {IEEE}, } @Article{MacEachren2012, author = {Alan M MacEachren and Robert E Roth and James O'Brien and Bonan Li and Derek Swingley and Mark Gahegan}, title = {Visual Semiotics \& Uncertainty Visualization: An Empirical Study}, journal = {IEEE Transactions on Visualization and Computer Graphics}, year = {2012}, volume = {18}, number = {12}, pages = {2496--2505}, abstract = {This paper presents two linked empirical studies focused on uncertainty visualization. The experiments are framed from two conceptual perspectives. First, a typology of uncertainty is used to delineate kinds of uncertainty matched with space, time, and attribute components of data. Second, concepts from visual semiotics are applied to characterize the kind of visual signification that is appropriate for representing those different categories of uncertainty. This framework guided the two experiments reported here. The first addresses representation intuitiveness, considering both visual variables and iconicity of representation. The second addresses relative performance of the most intuitive abstract and iconic representations of uncertainty on a map reading task. Combined results suggest initial guidelines for representing uncertainty and discussion focuses on practical applicability of results.}, keywords = {Uncertainty}, publisher = {IEEE}, } @Article{Ziemkiewicz2008, author = {C. Ziemkiewicz and R. 
Kosara}, title = {The Shaping of Information by Visual Metaphors}, journal = {{IEEE} Transactions on Visualization and Computer Graphics}, year = {2008}, volume = {14}, number = {6}, pages = {1269--1276}, month = {nov}, abstract = {The nature of an information visualization can be considered to lie in the visual metaphors it uses to structure information. The process of understanding a visualization therefore involves an interaction between these external visual metaphors and the user’s internal knowledge representations. To investigate this claim, we conducted an experiment to test the effects of visual and verbal metaphor on the understanding of tree visualizations. Participants answered simple data comprehension questions while viewing either a treemap or a node-link diagram. Questions were worded to reflect a verbal metaphor that was either compatible or incompatible with the visualization a participant was using. The results (based on correctness and response time) suggest that the visual metaphor indeed affects how a user derives information from a visualization. Additionally, we found that the degree to which a user is affected by the metaphor is strongly correlated with the user’s ability to answer task questions correctly. 
These findings are a first step towards illuminating how visual metaphors shape user understanding and have significant implications for the evaluation, application, and theory of visualization.}, doi = {10.1109/tvcg.2008.171}, publisher = {Institute of Electrical and Electronics Engineers ({IEEE})}, url = {https://doi.org/10.1109%2Ftvcg.2008.171}, } @Article{Skau2015, author = {Drew Skau and Lane Harrison and Robert Kosara}, title = {An Evaluation of the Impact of Visual Embellishments in Bar Charts}, journal = {Computer Graphics Forum}, year = {2015}, volume = {34}, number = {3}, pages = {221--230}, month = {jun}, abstract = {As data visualization becomes further intertwined with the field of graphic design and information graphics, small graphical alterations are made to many common chart formats. Despite the growing prevalence of these embellishments, their effects on communication of the charts’ data is unknown. From an overview of the design space, we have outlined some of the common embellishments that are made to bar charts. We have studied the effects of these chart embellishments on the communication of the charts’ data through a series of user studies on Amazon’s Mechanical Turk platform. The results of these studies lead to a better understanding of how each chart type is perceived, and help provide guiding principles for the graphic design of charts.}, doi = {10.1111/cgf.12634}, keywords = {chartjunk}, publisher = {Wiley-Blackwell}, url = {https://doi.org/10.1111%2Fcgf.12634}, } @InProceedings{Barlow2001, author = {Todd Barlow and Padraic Neville}, title = {A comparison of 2-D visualizations of hierarchies}, booktitle = {{IEEE} Symposium on Information Visualization, 2001. {INFOVIS} 2001.}, year = {2001}, publisher = {{IEEE}}, abstract = {This paper describes two experiments that compare four two-dimensional visualizations of hierarchies: organization chart, icicle plot, treemap, and tree ring. 
The visualizations are evaluated in the context of decision tree analyses prevalent in data mining applications. The results suggest that either the tree ring or icicle plot is equivalent to the organization chart.}, doi = {10.1109/infvis.2001.963290}, keywords = {hierarchy}, url = {https://doi.org/10.1109%2Finfvis.2001.963290}, } @Article{Strobelt2016, author = {Hendrik Strobelt and Daniela Oelke and Bum Chul Kwon and Tobias Schreck and Hanspeter Pfister}, title = {Guidelines for Effective Usage of Text Highlighting Techniques}, journal = {IEEE Transactions on Visualization and Computer Graphics}, year = {2016}, volume = {22}, number = {1}, pages = {489--498}, month = {jan}, abstract = {Semi-automatic text analysis involves manual inspection of text. Often, different text annotations (like part-of-speech or named entities) are indicated by using distinctive text highlighting techniques. In typesetting there exist well-known formatting conventions, such as bold typeface, italics, or background coloring, that are useful for highlighting certain parts of a given text. Also, many advanced techniques for visualization and highlighting of text exist; yet, standard typesetting is common, and the effects of standard typesetting on the perception of text are not fully understood. As such, we surveyed and tested the effectiveness of common text highlighting techniques, both individually and in combination, to discover how to maximize pop-out effects while minimizing visual interference between techniques. To validate our findings, we conducted a series of crowdsourced experiments to determine: i) a ranking of nine commonly-used text highlighting techniques; ii) the degree of visual interference between pairs of text highlighting techniques; iii) the effectiveness of techniques for visual conjunctive search. 
Our results show that increasing font size works best as a single highlighting technique, and that there are significant visual interferences between some pairs of highlighting techniques. We discuss the pros and cons of different combinations as a design guideline to choose text highlighting techniques for text viewers.}, doi = {10.1109/tvcg.2015.2467759}, keywords = {text}, publisher = {Institute of Electrical and Electronics Engineers ({IEEE})}, } @Article{Ham2008, author = {F. van Ham and B. Rogowitz}, title = {Perceptual Organization in User-Generated Graph Layouts}, journal = {IEEE Transactions on Visualization and Computer Graphics}, year = {2008}, volume = {14}, number = {6}, pages = {1333--1339}, month = {nov}, abstract = {Many graph layout algorithms optimize visual characteristics to achieve useful representations. Implicitly, their goal is to create visual representations that are more intuitive to human observers. In this paper, we asked users to explicitly manipulate nodes in a network diagram to create layouts that they felt best captured the relationships in the data. This allowed us to measure organizational behavior directly, allowing us to evaluate the perceptual importance of particular visual features, such as edge crossings and edge-lengths uniformity. We also manipulated the interior structure of the node relationships by designing data sets that contained clusters, that is, sets of nodes that are strongly interconnected. By varying the degree to which these clusters were “masked” by extraneous edges we were able to measure observers’ sensitivity to the existence of clusters and how they revealed them in the network diagram. Based on these measurements we found that observers are able to recover cluster structure, that the distance between clusters is inversely related to the strength of the clustering, and that users exhibit the tendency to use edges to visually delineate perceptual groups. 
These results demonstrate the role of perceptual organization in representing graph data and provide concrete recommendations for graph layout algorithms.}, doi = {10.1109/tvcg.2008.155}, keywords = {networks}, publisher = {Institute of Electrical and Electronics Engineers (IEEE)}, url = {https://doi.org/10.1109%2Ftvcg.2008.155}, } @Article{Keller2006, author = {Rene Keller and Claudia M. Eckert and P. John Clarkson}, title = {Matrices or Node-Link Diagrams: Which Visual Representation is Better for Visualising Connectivity Models?}, journal = {Information Visualization}, year = {2006}, volume = {5}, number = {1}, pages = {62--76}, month = {mar}, abstract = {Adjacency matrices or DSMs (design structure matrices) and node-link diagrams are both visual representations of graphs, which are a common form of data in many disciplines. DSMs are used throughout the engineering community for various applications, such as process modelling or change prediction. However, outside this community, DSMs (and other matrix-based representations of graphs) are rarely applied and node-link diagrams are very popular. This paper will examine, which representation is more suitable for visualising graphs. For this purpose, several user experiments were conducted that aimed to answer this research question in the context of product models used, for example in engineering, but the results can be generalised to other applications. These experiments identify key factors on the readability of graph visualisations and confirm work on comparisons of different representations. This study widens the scope of readability comparisons between node-link and matrix-based representations by introducing new user tasks and replacing simulated, undirected graphs with directed ones employing real-world semantics.}, doi = {10.1057/palgrave.ivs.9500116}, keywords = {networks}, publisher = {SAGE Publications}, url = {https://doi.org/10.1057%2Fpalgrave.ivs.9500116}, } @InProceedings{Ghoniem, author = {M. 
Ghoniem and J.-D. Fekete and P. Castagliola}, title = {A Comparison of the Readability of Graphs Using Node-Link and Matrix-Based Representations}, booktitle = {{IEEE} Symposium on Information Visualization}, year = {2004}, publisher = {{IEEE}}, abstract = {In this paper, we describe a taxonomy of generic graph related tasks and an evaluation aiming at assessing the readability of two representations of graphs: matrix-based representations and node-link diagrams. This evaluation bears on seven generic tasks and leads to important recommendations with regard to the representation of graphs according to their size and density. For instance, we show that when graphs are bigger than twenty vertices, the matrix-based visualization performs better than node-link diagrams on most tasks. Only path finding is consistently in favor of node-link diagrams throughout the evaluation.}, doi = {10.1109/infvis.2004.1}, keywords = {networks}, url = {https://doi.org/10.1109%2Finfvis.2004.1}, } @Article{Ghoniem2005, author = {Mohammad Ghoniem and Jean-Daniel Fekete and Philippe Castagliola}, title = {On the Readability of Graphs Using Node-Link and Matrix-Based Representations: A Controlled Experiment and Statistical Analysis}, journal = {Information Visualization}, year = {2005}, volume = {4}, number = {2}, pages = {114--135}, month = {may}, abstract = {In this article, we describe a taxonomy of generic graph related tasks along with a computer-based evaluation designed to assess the readability of two representations of graphs: matrix-based representations and node-link diagrams. This evaluation encompasses seven generic tasks and leads to insightful recommendations for the representation of graphs according to their size and density. Typically, we show that when graphs are bigger than twenty vertices, the matrix-based visualization outperforms node-link diagrams on most tasks. 
Only path finding is consistently in favor of node-link diagrams throughout the evaluation.}, doi = {10.1057/palgrave.ivs.9500092}, keywords = {networks}, publisher = {SAGE Publications}, url = {https://doi.org/10.1057%2Fpalgrave.ivs.9500092}, } @Article{Tory2007, author = {Melanie Tory and David Sprague and Fuqu Wu and Wing Yan So and Tamara Munzner}, title = {Spatialization Design: Comparing Points and Landscapes}, journal = {IEEE Transactions on Visualization and Computer Graphics}, year = {2007}, volume = {13}, number = {6}, pages = {1262--1269}, month = {nov}, abstract = {Spatializations represent non-spatial data using a spatial layout similar to a map. We present an experiment comparing different visual representations of spatialized data, to determine which representations are best for a non-trivial search and point estimation task. Primarily, we compare point-based displays to 2D and 3D information landscapes. We also compare a colour (hue) scale to a grey (lightness) scale. For the task we studied, point-based spatializations were far superior to landscapes, and 2D landscapes were superior to 3D landscapes. Little or no benefit was found for redundantly encoding data using colour or greyscale combined with landscape height. 3D landscapes with no colour scale (height-only) were particularly slow and inaccurate. A colour scale was found to be better than a greyscale for all display types, but a greyscale was helpful compared to height-only. These results suggest that point-based spatializations should be chosen over landscape representations, at least for tasks involving only point data itself rather than derived information about the data space.}, doi = {10.1109/tvcg.2007.70596}, keywords = {scatterplot, 3D, color}, publisher = {Institute of Electrical and Electronics Engineers (IEEE)}, url = {https://doi.org/10.1109%2Ftvcg.2007.70596}, } @Article{Poco2011, author = {J. Poco and R. Etemadpour and F.V. Paulovich and T.V. Long and P. Rosenthal and M.C.F. 
Oliveira and L. Linsen and R. Minghim}, title = {A Framework for Exploring Multidimensional Data with 3D Projections}, journal = {Computer Graphics Forum}, year = {2011}, volume = {30}, number = {3}, pages = {1111--1120}, month = {jun}, abstract = {Visualization of high-dimensional data requires a mapping to a visual space. Whenever the goal is to preserve similarity relations a frequent strategy is to use 2D projections, which afford intuitive interactive exploration, e.g., by users locating and selecting groups and gradually drilling down to individual objects. In this paper, we propose a framework for projecting high-dimensional data to 3D visual spaces, based on a generalization of the Least-Square Projection (LSP). We compare projections to 2D and 3D visual spaces both quantitatively and through a user study considering certain exploration tasks. The quantitative analysis confirms that 3D projections outperform 2D projections in terms of precision. The user study indicates that certain tasks can be more reliably and confidently answered with 3D projections. Nonetheless, as 3D projections are displayed on 2D screens, interaction is more difficult. Therefore, we incorporate suitable interaction functionalities into a framework that supports 3D transformations, predefined optimal 2D views, coordinated 2D and 3D views, and hierarchical 3D cluster definition and exploration. For visually encoding data clusters in a 3D setup, we employ color coding of projected data points as well as four types of surface renderings. A second user study evaluates the suitability of these visual encodings. 
Several examples illustrate the framework's applicability for both visual exploration of multidimensional abstract (non-spatial) data as well as the feature space of multi-variate spatial data.}, doi = {10.1111/j.1467-8659.2011.01960.x}, keywords = {scatterplot, 3D}, publisher = {Wiley-Blackwell}, url = {https://doi.org/10.1111%2Fj.1467-8659.2011.01960.x}, } @Article{Sedlmair2013, author = {Michael Sedlmair and Tamara Munzner and Melanie Tory}, title = {Empirical Guidance on Scatterplot and Dimension Reduction Technique Choices}, journal = {{IEEE} Transactions on Visualization and Computer Graphics}, year = {2013}, volume = {19}, number = {12}, pages = {2634--2643}, month = {dec}, abstract = {To verify cluster separation in high-dimensional data, analysts often reduce the data with a dimension reduction (DR) technique, and then visualize it with 2D Scatterplots, interactive 3D Scatterplots, or Scatterplot Matrices (SPLOMs). With the goal of providing guidance between these visual encoding choices, we conducted an empirical data study in which two human coders manually inspected a broad set of 816 scatterplots derived from 75 datasets, 4 DR techniques, and the 3 previously mentioned scatterplot techniques. Each coder scored all color-coded classes in each scatterplot in terms of their separability from other classes. We analyze the resulting quantitative data with a heatmap approach, and qualitatively discuss interesting scatterplot examples. Our findings reveal that 2D scatterplots are often 'good enough', that is, neither SPLOM nor interactive 3D adds notably more cluster separability with the chosen DR technique. If 2D is not good enough, the most promising approach is to use an alternative DR technique in 2D. Beyond that, SPLOM occasionally adds additional value, and interactive 3D rarely helps but often hurts in terms of poorer class separation and usability. We summarize these results as a workflow model and implications for design. 
Our results offer guidance to analysts during the DR exploration process.}, doi = {10.1109/tvcg.2013.153}, keywords = {3D, scatterplot}, publisher = {Institute of Electrical and Electronics Engineers ({IEEE})}, } @InProceedings{Etemadpour2013, author = {Ronak Etemadpour and Eric Monson and Lars Linsen}, title = {The Effect of Stereoscopic Immersive Environments on Projection-Based Multi-dimensional Data Visualization}, booktitle = {2013 17th International Conference on Information Visualisation}, year = {2013}, month = {jul}, publisher = {{IEEE}}, abstract = {Multidimensional data impose a challenge for visual analyses. Commonly, dimensionality reduction techniques are used to project the multidimensional data into a 2D visual space. Poco et al. [9] showed that projection into a 3D visual space can increase the performance of common visual analysis tasks due to a higher projection precision. They also backed up their findings with a user study. However, when conducting the user study they displayed the 3D visual space on a 2D screen, which may impede the correct perception of the third dimension. In this paper, we present a study that investigates the effect of stereoscopic environments when used for the visual analysis of multidimensional data after projection into a 3D visual space. We conducted a controlled user study to compare correctness, timing, and confidence in segregation and precision tasks when performed in stereoscopic immersive environments and on a nonstereoscopic 2D screen. In terms of the stereoscopic immersive environments, we operated on and compared results obtained with two setup: a single screen and a six-sided highly immersive system, in both of which interaction was performed with a 3D input device. We investigated whether the stereoscopic immersive environments have an effect on user performance depending on the visual encodings. 
We used both 3D scatter plots and cluster visualizations in the form of enclosing surfaces or hulls for the visual analysis tasks.}, doi = {10.1109/iv.2013.51}, keywords = {scatterplot, 3D}, } @Article{John2001, author = {St. John, Mark and Michael B. Cowen and Harvey S. Smallman and Heather M. Oonk}, title = {The Use of 2D and 3D Displays for Shape-Understanding versus Relative-Position Tasks}, journal = {Human Factors: The Journal of the Human Factors and Ergonomics Society}, year = {2001}, volume = {43}, number = {1}, pages = {79--98}, month = {mar}, abstract = {Research on when and how to use three-dimensional (3D) perspective views on flat screens for operational tasks such as air traffic control is complex. We propose a functional distinction between tasks: those that require shape understanding versus those that require precise judgments of relative position. The distortions inherent in 3D displays hamper judging relative positions, whereas the integration of dimensions in 3D displays facilitates shape understanding. We confirmed these hypotheses with two initial experiments involving simple block shapes. The shape-understanding tasks were identification or mental rotation. The relative-position tasks were locating shadows and determining directions and distances between objects. We then extended the results to four experiments involving complex natural terrain. We compare our distinction with the integral/separable task distinction of Haskel and Wickens (1993). 
Applications for this research include displays for air traffic control, geoplots for military command and control, and potentially, any display of 3D information.}, doi = {10.1518/001872001775992534}, keywords = {3D}, publisher = {{SAGE} Publications}, url = {https://doi.org/10.1518%2F001872001775992534}, } @InCollection{Cockburn2000, author = {Andy Cockburn and Bruce McKenzie}, title = {An Evaluation of Cone Trees}, booktitle = {People and Computers {XIV} - Usability or Else!}, publisher = {Springer London}, year = {2000}, pages = {425--436}, abstract = {Cone Trees are an appealing interactive 3D visualisation technique for hierarchical data structures. They were originally intended to maximise effective use of available screen space and to better exploit the abilities of the human perceptual system. Prior work has focused on the fidelity of the visualisation rather than providing empirical user studies. This paper describes the design, implementation and evaluation of a lowfidelity animated and rapidly interactive 3D cone tree system. Results of the evaluation show that our subjects were slower at locating data using cone trees than when using a ‘normal’ tree browser, and that their performance deteriorated rapidly as the branching factor of the data-structure increased. 
Qualitative results, however, indicate that the subjects were enthusiastic about the cone tree visualisation and that they felt it provided a better ‘feel’ for the structure of the information space.}, doi = {10.1007/978-1-4471-0515-2_28}, keywords = {3D, tree}, url = {https://doi.org/10.1007%2F978-1-4471-0515-2_28}, } @InProceedings{Jansen2013, author = {Yvonne Jansen and Pierre Dragicevic and Jean-Daniel Fekete}, title = {Evaluating the efficiency of physical visualizations}, booktitle = {Proceedings of the SIGCHI Conference on Human Factors in Computing Systems - CHI '13}, year = {2013}, publisher = {ACM Press}, abstract = {Data sculptures are an increasingly popular form of physical visualization whose purposes are essentially artistic, communicative or educational. But can physical visualizations help carry out actual information visualization tasks? We present the first infovis study comparing physical to on-screen visualizations. We focus on 3D visualizations, as these are common among physical visualizations but known to be problematic on computers. Taking 3D bar charts as an example, we show that moving visualizations to the physical world can improve users' efficiency at information retrieval tasks. In contrast, augmenting on-screen visualizations with stereoscopic rendering alone or with prop-based manipulation was of limited help. The efficiency of physical visualizations seems to stem from features that are unique to physical objects, such as their ability to be touched and their perfect visual realism. These findings provide empirical motivation for current research on fast digital fabrication and self-reconfiguring interfaces.}, doi = {10.1145/2470654.2481359}, keywords = {physical visualizations, 3D}, } @Article{Kristensson2009, author = {P.O. Kristensson and N. Dahlback and D. Anundi and M. Bjornstad and H. Gillberg and J. Haraldsson and I. Martensson and M. Nordvall and J. 
Stahl}, title = {An Evaluation of Space Time Cube Representation of Spatiotemporal Patterns}, journal = {{IEEE} Transactions on Visualization and Computer Graphics}, year = {2009}, volume = {15}, number = {4}, pages = {696--702}, month = {jul}, abstract = {Space time cube representation is an information visualization technique where spatiotemporal data points are mapped into a cube. Information visualization researchers have previously argued that space time cube representation is beneficial in revealing complex spatiotemporal patterns in a data set to users. The argument is based on the fact that both time and spatial information are displayed simultaneously to users, an effect difficult to achieve in other representations. However, to our knowledge the actual usefulness of space time cube representation in conveying complex spatiotemporal patterns to users has not been empirically validated. To fill this gap, we report on a between-subjects experiment comparing novice users' error rates and response times when answering a set of questions using either space time cube or a baseline 2D representation. For some simple questions, the error rates were lower when using the baseline representation. For complex questions where the participants needed an overall understanding of the spatiotemporal structure of the data set, the space time cube representation resulted in on average twice as fast response times with no difference in error rates compared to the baseline. These results provide an empirical foundation for the hypothesis that space time cube representation benefits users analyzing complex spatiotemporal patterns.}, doi = {10.1109/tvcg.2008.194}, keywords = {3D}, publisher = {Institute of Electrical and Electronics Engineers ({IEEE})}, } @Article{Amini2015, author = {Fereshteh Amini and Sebastien Rufiange and Zahid Hossain and Quentin Ventura and Pourang Irani and Michael J. 
McGuffin}, title = {The Impact of Interactivity on Comprehending 2D and 3D Visualizations of Movement Data}, journal = {{IEEE} Transactions on Visualization and Computer Graphics}, year = {2015}, volume = {21}, number = {1}, pages = {122--135}, month = {jan}, abstract = {GPS, RFID, and other technologies have made it increasingly common to track the positions of people and objects over time as they move through two-dimensional spaces. Visualizing such spatio-temporal movement data is challenging because each person or object involves three variables (two spatial variables as a function of the time variable), and simply plotting the data on a 2D geographic map can result in overplotting and occlusion that hides details. This also makes it difficult to understand correlations between space and time. Software such as GeoTime can display such data with a three-dimensional visualization, where the 3rd dimension is used for time. This allows for the disambiguation of spatially overlapping trajectories, and in theory, should make the data clearer. However, previous experimental comparisons of 2D and 3D visualizations have so far found little advantage in 3D visualizations, possibly due to the increased complexity of navigating and understanding a 3D view. We present a new controlled experimental comparison of 2D and 3D visualizations, involving commonly performed tasks that have not been tested before, and find advantages in 3D visualizations for more complex tasks. In particular, we tease out the effects of various basic interactions and find that the 2D view relies significantly on “scrubbing” the timeline, whereas the 3D view relies mainly on 3D camera navigation. 
Our work helps to improve understanding of 2D and 3D visualizations of spatio-temporal data, particularly with respect to interactivity.}, doi = {10.1109/tvcg.2014.2329308}, keywords = {3D, interactivity}, publisher = {Institute of Electrical and Electronics Engineers (IEEE)}, } @Article{Isenberg2011, author = {P. Isenberg and A. Bezerianos and Pierre Dragicevic and Jean-Daniel Fekete}, title = {A Study on Dual-Scale Data Charts}, journal = {IEEE Transactions on Visualization and Computer Graphics}, year = {2011}, volume = {17}, number = {12}, pages = {2469--2478}, month = {dec}, abstract = {We present the results of a user study that compares different ways of representing Dual-Scale data charts. Dual-Scale charts incorporate two different data resolutions into one chart in order to emphasize data in regions of interest or to enable the comparison of data from distant regions. While some design guidelines exist for these types of charts, there is currently little empirical evidence on which to base their design. We fill this gap by discussing the design space of Dual-Scale cartesian-coordinate charts and by experimentally comparing the performance of different chart types with respect to elementary graphical perception tasks such as comparing lengths and distances. Our study suggests that cut-out charts which include collocated full context and focus are the best alternative, and that superimposed charts in which focus and context overlap on top of each other should be avoided.}, doi = {10.1109/tvcg.2011.160}, keywords = {dual-scale}, publisher = {Institute of Electrical and Electronics Engineers (IEEE)}, } @Article{Stock_1991, author = {William A. Stock and John T. 
Behrens}, title = {Box, Line, and Midgap Plots: Effects of Display Characteristics on the Accuracy and Bias of Estimates of Whisker Length}, journal = {Journal of Educational Statistics}, year = {1991}, month = {mar}, volume = {16}, number = {1}, pages = {1--20}, abstract = {Examined here were the accuracy and bias of estimates of whisker length based on box, line, and midgap plots. For each type of graph, a different sample of undergraduates viewed 48 single-plot graphs. For each plot, subjects were given the length of an interquartile spread and asked to estimate the length of a whisker. Plots varied in spatial orientation (horizontal or vertical), interquartile spread, the ratio of whisker length to interquartile spread, and whisker judged. Overall, estimates of whisker length for box and line plots were more accurate and less biased than those for midgap plots. Interquartile spread, the ratio of whisker length to interquartile spread, and the interaction of these two factors significantly influenced both accuracy and bias. Boxplots displayed a predicted pattern of over- and underestimation. We discuss how the present results complement related work of others (e.g., Cleveland \& McGill, 1984) on depiction of quantity. We conclude that midgap plots are less optimal displays than box and line plots.}, doi = {10.3102/10769986016001001} } @Article{2017_eurovis_narrative-flow, author = {Sean McKenna and Nathalie Henry Riche and Bongshin Lee and Jeremy Boy and Miriah Meyer}, title = {Visual Narrative Flow: Exploring Factors Shaping Data Visualization Story Reading Experiences}, journal = {Computer Graphics Forum (EuroVis '17)}, year = {2017}, volume = {36}, number = {3}, pages = {377--387}, doi = {10.1111/cgf.13195}, abstract = {Many factors can shape the flow of visual data‐driven stories, and thereby the way readers experience those stories. 
Through the analysis of 80 existing stories found on popular websites, we systematically investigate and identify seven characteristics of these stories, which we name “flow‐factors,” and we illustrate how they feed into the broader concept of “visual narrative flow.” These flow‐factors are navigation input, level of control, navigation progress, story layout, role of visualization, story progression, and navigation feedback. We also describe a series of studies we conducted, which shed initial light on how different visual narrative flows impact the reading experience. We report on two exploratory studies, in which we gathered reactions and preferences of readers for stepper‐ vs. scroller‐driven flows. We then report on a crowdsourced study with 240 participants, in which we explore the effect of the combination of different flow‐factors on readers’ engagement. Our results indicate that visuals and navigation feedback (e.g., static vs. animated transitions) have an impact on readers’ engagement, while level of control (e.g., discrete vs. continuous) may not.}, url = {https://mckennapsean.com/projects/narrative-flow/}, keywords = {storytelling, scrollytelling}, } @InProceedings{Kong_2018, author = {Ha-Kyung Kong and Zhicheng Liu and Karrie Karahalios}, title = {Frames and Slants in Titles of Visualizations on Controversial Topics}, booktitle = {Proceedings of the 2018 {CHI} Conference on Human Factors in Computing Systems - {CHI} '18}, year = {2018}, publisher = {{ACM} Press}, doi = {10.1145/3173574.3174012}, abstract = {Slanted framing in news article titles induce bias and influence recall. While recent studies found that viewers focus extensively on titles when reading visualizations, the impact of titles in visualization remains underexplored. We study frames in visualization titles, and how the slanted framing of titles and the viewer's pre-existing attitude impact recall, perception of bias, and change of attitude. 
When asked to compose visualization titles, people used five existing news frames, an open-ended frame, and a statistics frame. We found that the slant of the title influenced the perceived main message of a visualization, with viewers deriving opposing messages from the same visualization. The results did not show any significant effect on attitude change. We highlight the danger of subtle statistics frames and viewers' unwarranted conviction of the neutrality of visualizations. Finally, we present a design implication for the generation of visualization titles and one for the viewing of titles.}, url = {http://www.zcliu.org/vistitles/CHI18-VisTitles.pdf}, keywords = {framing}, } @InProceedings{Romat_2018, author = {Hugo Romat and Caroline Appert and Benjamin Bach and Nathalie Henry-Riche and Emmanuel Pietriga}, title = {Animated Edge Textures in Node-Link Diagrams}, booktitle = {Proceedings of the 2018 {CHI} Conference on Human Factors in Computing Systems - {CHI} '18}, year = {2018}, publisher = {{ACM} Press}, doi = {10.1145/3173574.3173761}, abstract = {Network edge data attributes are usually encoded using color, opacity, stroke thickness and stroke pattern, or some combination thereof. In addition to these static variables, it is also possible to animate dynamic particles flowing along the edges. This opens a larger design space of animated edge textures, featuring additional visual encodings that have potential not only in terms of visual mapping capacity but also playfulness and aesthetics. Such animated edge textures have been used in several commercial and design-oriented visualizations, but to our knowledge almost always in a relatively ad hoc manner. 
We introduce a design space and Web-based framework for generating animated edge textures, and report on an initial evaluation of particle properties - particle speed, pattern and frequency - in terms of visual perception.}, url = {http://ilda.saclay.inria.fr/flownet/}, keywords = {animation, networks}, } @InProceedings{Liu_2018, author = {Yang Liu and Jeffrey Heer}, title = {Somewhere Over the Rainbow}, booktitle = {Proceedings of the 2018 {CHI} Conference on Human Factors in Computing Systems - {CHI} '18}, year = {2018}, publisher = {{ACM} Press}, doi = {10.1145/3173574.3174172}, abstract = {An essential goal of quantitative color encoding is the accurate mapping of perceptual dimensions of color to the logical structure of data. Prior research identifies weaknesses of “rainbow” colormaps and advocates for ramping in luminance, while recent work contributes multi-hue colormaps generated using perceptually-uniform color models. We contribute a comparative analysis of different colormap types, with a focus on comparing single- and multi-hue schemes. We present a suite of experiments in which subjects perform relative distance judgments among color triplets drawn systematically from each of four single-hue and five multi-hue colormaps. We characterize speed and accuracy across each colormap, and identify conditions that degrade performance. We also find that a combination of perceptual color space and color naming measures more accurately predict user performance than either alone, though the overall accuracy is poor. 
Based on these results, we distill recommendations on how to design more effective color encodings for scalar data.}, url = {https://idl.cs.washington.edu/files/2018-QuantitativeColor-CHI.pdf}, keywords = {color}, } @Article{Kim2018, author = {Younghoon Kim and Jeffrey Heer}, title = {Assessing Effects of Task and Data Distribution on the Effectiveness of Visual Encodings}, journal = {Computer Graphics Forum}, year = {2018}, volume = {37}, number = {3}, pages = {157--167}, doi = {10.1111/cgf.13409}, abstract = {In addition to the choice of visual encodings, the effectiveness of a data visualization may vary with the analytical task being performed and the distribution of data values. To better assess these effects and create refined rankings of visual encodings, we conduct an experiment measuring subject performance across task types (e.g., comparing individual versus aggregate values) and data distributions (e.g., with varied cardinalities and entropies). We compare performance across 12 encoding specifications of trivariate data involving 1 categorical and 2 quantitative fields, including the use of x, y, color, size, and spatial subdivision (i.e., faceting). Our results extend existing models of encoding effectiveness and suggest improved approaches for automated design. For example, we find that colored scatterplots (with positionally-coded quantities and color-coded categories) perform well for comparing individual points, but perform poorly for summary tasks as the number of categories increases.}, url = {https://pdfs.semanticscholar.org/6979/c6e6f385263cfd5dfc34d70e30dddd07778d.pdf}, } @Article{Soni2018, author = {Utkarsh Soni and Yafeng Lu and Brett Hansen and Helen C. 
Purchase and Stephen Kobourov and Ross Maciejewski}, title = {The Perception of Graph Properties in Graph Layouts}, journal = {Computer Graphics Forum}, year = {2018}, volume = {37}, number = {3}, pages = {169--181}, doi = {10.1111/cgf.13410}, abstract = {When looking at drawings of graphs, questions about graph density, community structures, local clustering and other graph properties may be of critical importance for analysis. While graph layout algorithms have focused on minimizing edge crossing, symmetry, and other such layout properties, there is not much known about how these algorithms relate to a user’s ability to perceive graph properties for a given graph layout. In this study, we apply previously established methodologies for perceptual analysis to identify which graph drawing layout will help the user best perceive a particular graph property. We conduct a large scale (n = 588) crowdsourced experiment to investigate whether the perception of two graph properties (graph density and average local clustering coefficient) can be modeled using Weber’s law. We study three graph layout algorithms from three representative classes (Force Directed FD, Circular, and Multi-Dimensional Scaling MDS), and the results of this experiment establish the precision of judgment for these graph layouts and properties. Our findings demonstrate that the perception of graph density can be modeled with Weber’s law. Furthermore, the perception of the average clustering coefficient can be modeled as an inverse of Weber’s law, and the MDS layout showed a significantly different precision of judgment than the FD layout}, keywords = {networks}, url = {http://www2.cs.arizona.edu/~kobourov/ASU-UA-graphs.pdf}, } @Article{Baumer_2018, doi = {10.1145/3214353}, url = {https://doi.org/10.1145%2F3214353}, year = 2018, month = {aug}, publisher = {Association for Computing Machinery ({ACM})}, volume = {25}, number = {4}, pages = {1--26}, author = {Eric P. S. 
Baumer and Jaime Snyder and Geri K. Gay}, title = {Interpretive Impacts of Text Visualization}, journal = {{ACM} Transactions on Computer-Human Interaction}, keywords = {framing}, abstract = { Information visualizations are often evaluated as a tool in terms of their ability to support performance of a specific task. This article argues that value can be gained by instead evaluating visualizations from a communicative perspective. Specifically, it explores how text visualization can influence the impacts that framing has on the perception of political issues. Using data from a controlled laboratory study, the results presented here demonstrate that exposure to a text visualization can mitigate framing effects. Furthermore, it also shows a transfer effect, where participants who saw the visualization remained uninfluenced by framing in subsequent texts, even when the visualization was absent. These results carry implications for the methods used to evaluate information visualization systems, for understanding the cognitive and interpretive mechanisms by which framing effects occur, and for exploring the design space of interactive text visualization.} } @InProceedings{Kong_2019, doi = {10.1145/3290605.3300576}, url = {https://doi.org/10.1145%2F3290605.3300576}, year = 2019, publisher = {{ACM} Press}, author = {Ha-Kyung Kong and Zhicheng Liu and Karrie Karahalios}, title = {Trust and Recall of Information across Varying Degrees of Title-Visualization Misalignment}, booktitle = {Proceedings of the 2019 {CHI} Conference on Human Factors in Computing Systems - {CHI} '19}, abstract = {Visualizations are emerging as a means of spreading digital misinformation. Prior work has shown that visualization interpretation can be manipulated through slanted titles that favor only one side of the visual story, yet people still think the visualization is impartial. 
In this work, we study whether such effects continue to exist when titles and visualizations exhibit greater degrees of misalignment: titles whose message differs from the visually cued message in the visualization, and titles whose message contradicts the visualization. We found that although titles with a contradictory slant triggered more people to identify bias compared to titles with a miscued slant, visualizations were persistently perceived as impartial by the majority. Further, people's recall of the visualization's message more frequently aligned with the titles than the visualization. Based on these results, we discuss the potential of leveraging textual components to detect and combat visual-based misinformation with text-based slants.}, keywords = {framing, titles} } @InProceedings{Veras_2019, doi = {10.1145/3290605.3300771}, url = {https://doi.org/10.1145%2F3290605.3300771}, year = 2019, publisher = {{ACM} Press}, author = {Rafael Veras and Christopher Collins}, title = {Saliency Deficit and Motion Outlier Detection in Animated Scatterplots}, booktitle = {Proceedings of the 2019 {CHI} Conference on Human Factors in Computing Systems - {CHI} '19}, abstract = {We report the results of a crowdsourced experiment that measured the accuracy of motion outlier detection in multivariate, animated scatterplots. The targets were outliers either in speed or direction of motion, and were presented with varying levels of saliency in dimensions that are irrelevant to the task of motion outlier detection (e.g., color, size, position). We found that participants had trouble finding the outlier when it lacked irrelevant salient features and that visual channels contribute unevenly to the odds of an outlier being correctly detected. Direction of motion contributes the most to accurate detection of speed outliers, and position contributes the most to accurate detection of direction outliers. 
We introduce the concept of saliency deficit in which item importance in the data space is not reflected in the visualization due to a lack of saliency. We conclude that motion outlier detection is not well supported in multivariate animated scatterplots.}, keywords = {animation, scatterplot} } @InProceedings{Zhao_2019, doi = {10.1145/3290605.3300462}, url = {https://doi.org/10.1145%2F3290605.3300462}, year = 2019, publisher = {{ACM} Press}, author = {Mingqian Zhao and Huamin Qu and Michael Sedlmair}, title = {Neighborhood Perception in Bar Charts}, booktitle = {Proceedings of the 2019 {CHI} Conference on Human Factors in Computing Systems - {CHI} '19}, keywords = {barcharts}, abstract = {In this paper, we report three user experiments that investigate in how far the perception of a bar in a bar chart changes based on the height of its neighboring bars. We hypothesized that the perception of the very same bar, for instance, might differ when it is surrounded by the top highest vs. the top lowest bars. Our results show that such neighborhood effects exist: a target bar surrounded by high neighbor bars, is perceived to be lower as the same bar surrounded with low neighbors. Yet, the effect size of this neighborhood effect is small compared to other data-inherent effects: the judgment accuracy largely depends on the target bar rank, number of data items, and other data characteristics of the dataset. 
Based on the findings, we discuss design implications for perceptually optimizing bar charts.} } @InProceedings{Kong_2019a, doi = {10.1145/3290605.3300280}, url = {https://doi.org/10.1145%2F3290605.3300280}, year = 2019, publisher = {{ACM} Press}, author = {Ha-Kyung Kong and Wenjie Zhu and Zhicheng Liu and Karrie Karahalios}, title = {Understanding Visual Cues in Visualizations Accompanied by Audio Narrations}, booktitle = {Proceedings of the 2019 {CHI} Conference on Human Factors in Computing Systems - {CHI} '19}, keywords = {audio, annotation}, abstract = {It is often assumed that visual cues, which highlight specific parts of a visualization to guide the audience's attention, facilitate visualization storytelling and presentation. This assumption has not been systematically studied. We present an in-lab experiment and a Mechanical Turk study to examine the effects of integral and separable visual cues on the recall and comprehension of visualizations that are accompanied by audio narration. Eye-tracking data in the in-lab experiment confirm that cues helped the viewers focus on relevant parts of the visualization faster. We found that in general, visual cues did not have a significant effect on learning outcomes, but for specific cue techniques (e.g. glow) or specific chart types (e.g. heatmap), cues significantly improved comprehension. 
Based on these results, we discuss how presenters might select visual cues depending on the role of the cues and the visualization type.} } @InProceedings{Smart_2019, doi = {10.1145/3290605.3300899}, url = {https://doi.org/10.1145%2F3290605.3300899}, year = 2019, publisher = {{ACM} Press}, author = {Stephen Smart and Danielle Albers Szafir}, title = {Measuring the Separability of Shape, Size, and Color in Scatterplots}, booktitle = {Proceedings of the 2019 {CHI} Conference on Human Factors in Computing Systems - {CHI} '19}, keywords = {scatterplots}, abstract = {Scatterplots commonly use multiple visual channels to encode multivariate datasets. Such visualizations often use size, shape, and color as these dimensions are considered separable--dimensions represented by one channel do not significantly interfere with viewers' abilities to perceive data in another. However, recent work shows the size of marks significantly impacts color difference perceptions, leading to broader questions about the separability of these channels. In this paper, we present a series of crowdsourced experiments measuring how mark shape, size, and color influence data interpretation in multiclass scatterplots. Our results indicate that mark shape significantly influences color and size perception, and that separability among these channels functions asymmetrically: shape more strongly influences size and color perceptions in scatterplots than size and color influence shape. 
Models constructed from the resulting data can help designers anticipate viewer perceptions to build more effective visualizations.} } @InProceedings{Mylavarapu_2019, doi = {10.1145/3290605.3300422}, url = {https://doi.org/10.1145%2F3290605.3300422}, year = 2019, publisher = {{ACM} Press}, author = {Pranathi Mylavarapu and Adil Yalcin and Xan Gregg and Niklas Elmqvist}, title = {Ranked-List Visualization}, booktitle = {Proceedings of the 2019 {CHI} Conference on Human Factors in Computing Systems - {CHI} '19}, keywords = {ranked-list}, abstract = {Visualization of ranked lists is a common occurrence, but many in-the-wild solutions fly in the face of vision science and visualization wisdom. For example, treemaps and bubble charts are commonly used for this purpose, despite the fact that the data is not hierarchical and that length is easier to perceive than area. Furthermore, several new visual representations have recently been suggested in this area, including wrapped bars, packed bars, piled bars, and Zvinca plots. To quantify the differences and trade-offs for these ranked-list visualizations, we here report on a crowdsourced graphical perception study involving six such visual representations, including the ubiquitous scrolled barchart, in three tasks: ranking (assessing a single item), comparison (two items), and average (assessing global distribution). Results show that wrapped bars may be the best choice for visualizing ranked lists, and that treemaps are surprisingly accurate despite the use of area rather than length to represent value.} } @article{Hullman_2011, doi = {10.1109/tvcg.2011.255}, url = {https://doi.org/10.1109%2Ftvcg.2011.255}, year = 2011, month = {dec}, publisher = {Institute of Electrical and Electronics Engineers ({IEEE})}, volume = {17}, number = {12}, pages = {2231--2240}, author = {J. Hullman and N. 
Diakopoulos}, title = {Visualization Rhetoric: Framing Effects in Narrative Visualization}, journal = {{IEEE} Transactions on Visualization and Computer Graphics}, abstract = {Narrative visualizations combine conventions of communicative and exploratory information visualization to convey an intended story. We demonstrate visualization rhetoric as an analytical framework for understanding how design techniques that prioritize particular interpretations in visualizations that "tell a story" can significantly affect end-user interpretation. We draw a parallel between narrative visualization interpretation and evidence from framing studies in political messaging, decision-making, and literary studies. Devices for understanding the rhetorical nature of narrative information visualizations are presented, informed by the rigorous application of concepts from critical theory, semiotics, journalism, and political theory. We draw attention to how design tactics represent additions or omissions of information at various levels-the data, visual representation, textual annotations, and interactivity-and how visualizations denote and connote phenomena with reference to unstated viewing conventions and codes. Classes of rhetorical techniques identified via a systematic analysis of recent narrative visualizations are presented, and characterized according to their rhetorical contribution to the visualization. We describe how designers and researchers can benefit from the potentially positive aspects of visualization rhetoric in designing engaging, layered narrative visualizations and how our framework can shed light on how a visualization design prioritizes specific interpretations. 
We identify areas where future inquiry into visualization rhetoric can improve understanding of visualization interpretation.} }
@article{Borgo_2018,
  doi       = {10.1111/cgf.13444},
  url       = {https://doi.org/10.1111%2Fcgf.13444},
  year      = 2018,
  month     = jun,
  publisher = {Wiley},
  volume    = {37},
  number    = {3},
  pages     = {573--595},
  author    = {R. Borgo and L. Micallef and B. Bach and F. McGee and B. Lee},
  title     = {Information Visualization Evaluation Using Crowdsourcing},
  journal   = {Computer Graphics Forum},
  abstract  = {Visualization researchers have been increasingly leveraging crowdsourcing approaches to overcome a number of limitations of controlled laboratory experiments, including small participant sample sizes and narrow demographic backgrounds of study participants. However, as a community, we have little understanding on when, where, and how researchers use crowdsourcing approaches for visualization research. In this paper, we review the use of crowdsourcing for evaluation in visualization research. We analyzed 190 crowdsourcing experiments, reported in 82 papers that were published in major visualization conferences and journals between 2006 and 2017. We tagged each experiment along 36 dimensions that we identified for crowdsourcing experiments. We grouped our dimensions into six important aspects: study design \& procedure, task type, participants, measures \& metrics, quality assurance, and reproducibility. We report on the main findings of our review and discuss challenges and opportunities for improvements in conducting crowdsourcing studies for visualization research.},
  keywords  = {crowdsourcing},
}
@inproceedings{Kosara-EuroVisShort-2019b,
  doi       = {10.2312/evs.20191162},
  url       = {https://diglib.eg.org/handle/10.2312/evs20191162},
  author    = {Kosara, Robert},
  title     = {The Impact of Distribution and Chart Type on Part-to-Whole Comparisons},
  booktitle = {EuroVis 2019 - Short Papers},
  publisher = {The Eurographics Association},
  year      = {2019},
  abstract  = {Pie charts and treemaps are commonly used in business settings to show part-to-whole relationships. In a study, we compare pie charts, treemaps, stacked bars, and two circular charts when answering part-to-whole questions with multiple slices and different distributions of values. We find that the circular charts, including the unusual variations, perform better than the treemap, and that their performance depends on whether participants are asked to judge the largest slice or a smaller one.},
  keywords  = {Pie-charts},
}
@inproceedings{Kosara-EuroVisShort-2019a,
  doi       = {10.2312/evs.20191163},
  url       = {https://diglib.eg.org/handle/10.2312/evs20191163},
  author    = {Kosara, Robert},
  title     = {Circular Part-to-Whole Charts Using the Area Visual Cue},
  booktitle = {EuroVis 2019 - Short Papers},
  publisher = {The Eurographics Association},
  year      = {2019},
  abstract  = {Studies of chart types can reveal unexplored design spaces, like the circular diagrams used in recent studies on pie charts. In this paper, we explore several variations of part-to-whole charts that use area to represent a fraction within a circle. We find one chart that performs very similarly to the pie chart, even though it is visually more complex. Centered shapes turn out to lead to much worse accuracy than any other stimuli, even the same shape when not centered. These first results point to the need for more systematic explorations of the design spaces around existing charts.},
  keywords  = {Pie-charts},
}
@inproceedings{Kim2019,
  booktitle = {EuroVis 2019 - Short Papers},
  editor    = {Johansson, Jimmy and Sadlo, Filip and Marai, G. Elisabeta},
  title     = {Color Names Across Languages: Salient Colors and Term Translation in Multilingual Color Naming Models},
  author    = {Kim, Younghoon and Thayer, Kyle and Gorsky, Gabriella Silva and Heer, Jeffrey},
  year      = {2019},
  publisher = {The Eurographics Association},
  isbn      = {978-3-03868-090-1},
  doi       = {10.2312/evs.20191166},
  abstract  = {Color names facilitate the identification and communication of colors, but may vary across languages. We contribute a set of human color name judgments across 14 common written languages and build probabilistic models that find different sets of nameable (salient) colors across languages. For example, we observe that unlike English and Chinese, Russian and Korean have more than one nameable blue color among fully-saturated RGB colors. In addition, we extend these probabilistic models to translate color terms from one language to another via a shared perceptual color space. We compare Korean-English translations from our model to those from online translation tools and find that our method better preserves perceptual similarity of the colors corresponding to the source and target terms. We conclude with implications for visualization and future research.},
  keywords  = {color},
}
@article{Szafir_2018,
  doi       = {10.1109/tvcg.2017.2744359},
  url       = {https://doi.org/10.1109%2Ftvcg.2017.2744359},
  year      = 2018,
  month     = jan,
  publisher = {Institute of Electrical and Electronics Engineers ({IEEE})},
  volume    = {24},
  number    = {1},
  pages     = {392--401},
  author    = {Danielle Albers Szafir},
  title     = {Modeling Color Difference for Visualization Design},
  journal   = {{IEEE} Transactions on Visualization and Computer Graphics},
  keywords  = {color},
  abstract  = {Color is frequently used to encode values in visualizations. For color encodings to be effective, the mapping between colors and values must preserve important differences in the data. However, most guidelines for effective color choice in visualization are based on either color perceptions measured using large, uniform fields in optimal viewing environments or on qualitative intuitions. These limitations may cause data misinterpretation in visualizations, which frequently use small, elongated marks. Our goal is to develop quantitative metrics to help people use color more effectively in visualizations. We present a series of crowdsourced studies measuring color difference perceptions for three common mark types: points, bars, and lines. Our results indicate that peoples' abilities to perceive color differences varies significantly across mark types. Probabilistic models constructed from the resulting data can provide objective guidance for designers, allowing them to anticipate viewer perceptions in order to inform effective encoding design.},
}
@article{Bujack_2018,
  doi       = {10.1109/tvcg.2017.2743978},
  url       = {https://doi.org/10.1109%2Ftvcg.2017.2743978},
  year      = 2018,
  month     = jan,
  publisher = {Institute of Electrical and Electronics Engineers ({IEEE})},
  volume    = {24},
  number    = {1},
  pages     = {923--933},
  author    = {Roxana Bujack and Terece L. Turton and Francesca Samsel and Colin Ware and David H. Rogers and James Ahrens},
  title     = {The Good, the Bad, and the Ugly: A Theoretical Framework for the Assessment of Continuous Colormaps},
  journal   = {{IEEE} Transactions on Visualization and Computer Graphics},
  keywords  = {color},
  abstract  = {A myriad of design rules for what constitutes a ``good'' colormap can be found in the literature. Some common rules include order, uniformity, and high discriminative power. However, the meaning of many of these terms is often ambiguous or open to interpretation. At times, different authors may use the same term to describe different concepts or the same rule is described by varying nomenclature. These ambiguities stand in the way of collaborative work, the design of experiments to assess the characteristics of colormaps, and automated colormap generation. In this paper, we review current and historical guidelines for colormap design. We propose a specified taxonomy and provide unambiguous mathematical definitions for the most common design rules.},
}
@article{Burlinson_2018,
  doi       = {10.1109/tvcg.2017.2745086},
  url       = {https://doi.org/10.1109%2Ftvcg.2017.2745086},
  year      = 2018,
  month     = jan,
  publisher = {Institute of Electrical and Electronics Engineers ({IEEE})},
  volume    = {24},
  number    = {1},
  pages     = {574--583},
  author    = {David Burlinson and Kalpathi Subramanian and Paula Goolkasian},
  title     = {Open vs. Closed Shapes: New Perceptual Categories?},
  journal   = {{IEEE} Transactions on Visualization and Computer Graphics},
  abstract  = {Effective communication using visualization relies in part on the use of viable encoding strategies. For example, a viewer's ability to rapidly and accurately discern between two or more categorical variables in a chart or figure is contingent upon the distinctiveness of the encodings applied to each variable.
Research in perception suggests that color is a more salient visual feature when compared to shape and although that finding is supported by visualization studies, characteristics of shape also yield meaningful differences in distinctiveness. We propose that open or closed shapes (that is, whether shapes are composed of line segments that are bounded across a region of space or not) represent a salient characteristic that influences perceptual processing. Three experiments were performed to test the reliability of the open/closed category; the first two from the perspective of attentional allocation, and the third experiment in the context of multi-class scatterplot displays. In the first, a flanker paradigm was used to test whether perceptual load and open/closed feature category would modulate the effect of the flanker on target processing. Results showed an influence of both variables. The second experiment used a Same/Different reaction time task to replicate and extend those findings. Results from both show that responses are faster and more accurate when closed rather than open shapes are processed as targets, and there is more processing interference when two competing shapes come from the same rather than different open or closed feature categories. The third experiment employed three commonly used visual analytic tasks - perception of average value, numerosity, and linear relationships with both single and dual displays of open and closed symbols. Our findings show that for numerosity and trend judgments, in particular, that different symbols from the same open or closed feature category cause more perceptual interference when they are presented together in a plot than symbols from different categories. 
Moreover, the extent of the interference appears to depend upon whether the participant is focused on processing open or closed symbols.} }
@article{Valdez_2018,
  doi       = {10.1109/tvcg.2017.2744138},
  url       = {https://doi.org/10.1109%2Ftvcg.2017.2744138},
  year      = 2018,
  month     = jan,
  publisher = {Institute of Electrical and Electronics Engineers ({IEEE})},
  volume    = {24},
  number    = {1},
  pages     = {584--594},
  author    = {Calero Valdez, Andre and Ziefle, Martina and Sedlmair, Michael},
  title     = {Priming and Anchoring Effects in Visualization},
  journal   = {{IEEE} Transactions on Visualization and Computer Graphics},
  keywords  = {scatterplots, priming, anchoring},
}
@article{Saket_2018,
  doi       = {10.1109/tvcg.2017.2680452},
  url       = {https://doi.org/10.1109%2Ftvcg.2017.2680452},
  year      = 2018,
  month     = mar,
  publisher = {Institute of Electrical and Electronics Engineers ({IEEE})},
  volume    = {24},
  number    = {3},
  pages     = {1316--1330},
  author    = {Bahador Saket and Arjun Srinivasan and Eric D. Ragan and Alex Endert},
  title     = {Evaluating Interactive Graphical Encodings for Data Visualization},
  journal   = {{IEEE} Transactions on Visualization and Computer Graphics},
  keywords  = {interactivity},
  abstract  = {User interfaces for data visualization often consist of two main components: control panels for user interaction and visual representation. A recent trend in visualization is directly embedding user interaction into the visual representations. For example, instead of using control panels to adjust visualization parameters, users can directly adjust basic graphical encodings (e.g., changing distances between points in a scatterplot) to perform similar parameterizations. However, enabling embedded interactions for data visualization requires a strong understanding of how user interactions influence the ability to accurately control and perceive graphical encodings. In this paper, we study the effectiveness of these graphical encodings when serving as the method for interaction. Our user study includes 12 interactive graphical encodings. We discuss the results in terms of task performance and interaction effectiveness metrics.},
}
@article{2018-others-expectations,
  title    = {Data Through Others' Eyes: The Impact of Visualizing Others' Expectations on Visualization Interpretation},
  author   = {Yea-Seul Kim and Katharina Reinecke and Jessica Hullman},
  journal  = {IEEE Trans. Visualization \& Comp. Graphics (Proc. InfoVis)},
  year     = {2018},
  url      = {http://idl.cs.washington.edu/papers/others-expectations},
  abstract = {In addition to visualizing input data, interactive visualizations have the potential to be social artifacts that reveal other people's perspectives on the data. However, how such social information embedded in a visualization impacts a viewer's interpretation of the data remains unknown. Inspired by recent interactive visualizations that display people's expectations of data against the data, we conducted a controlled experiment to evaluate the effect of showing social information in the form of other people's expectations on people's ability to recall the data, the degree to which they adjust their expectations to align with the data, and their trust in the accuracy of the data. We found that social information that exhibits a high degree of consensus lead participants to recall the data more accurately relative to participants who were exposed to the data alone. Additionally, participants trusted the accuracy of the data less and were more likely to maintain their initial expectations when other people's expectations aligned with their own initial expectations but not with the data. We conclude by characterizing the design space for visualizing others' expectations alongside data.},
}
@article{Nusrat_2018,
  doi       = {10.1109/tvcg.2016.2642109},
  url       = {https://doi.org/10.1109%2Ftvcg.2016.2642109},
  year      = 2018,
  month     = feb,
  publisher = {Institute of Electrical and Electronics Engineers ({IEEE})},
  volume    = {24},
  number    = {2},
  pages     = {1077--1090},
  author    = {Sabrina Nusrat and Md. Jawaherul Alam and Stephen Kobourov},
  title     = {Evaluating Cartogram Effectiveness},
  journal   = {{IEEE} Transactions on Visualization and Computer Graphics},
  keywords  = {Cartograms},
  abstract  = {Cartograms are maps in which areas of geographic regions, such as countries and states, appear in proportion to some variable of interest, such as population or income. Cartograms are popular visualizations for geo-referenced data that have been used for over a century to illustrate patterns and trends in the world around us. Despite the popularity of cartograms, and the large number of cartogram types, there are few studies evaluating the effectiveness of cartograms in conveying information. Based on a recent task taxonomy for cartograms, we evaluate four major types of cartograms: contiguous, non-contiguous, rectangular, and Dorling cartograms. We first evaluate the effectiveness of these cartogram types by quantitative performance analysis (time and error). Second, we collect qualitative data with an attitude study and by analyzing subjective preferences. Third, we compare the quantitative and qualitative results with the results of a metrics-based cartogram evaluation. Fourth, we analyze the results of our study in the context of cartography, geography, visual perception, and demography. Finally, we consider implications for design and possible improvements.},
}
@article{Bach_2018,
  doi       = {10.1109/tvcg.2017.2745941},
  url       = {https://doi.org/10.1109%2Ftvcg.2017.2745941},
  year      = 2018,
  month     = jan,
  publisher = {Institute of Electrical and Electronics Engineers ({IEEE})},
  volume    = {24},
  number    = {1},
  pages     = {457--467},
  author    = {Benjamin Bach and Ronell Sicat and Johanna Beyer and Maxime Cordeil and Hanspeter Pfister},
  title     = {The Hologram in My Hand: How Effective is Interactive Exploration of {3D} Visualizations in Immersive Tangible Augmented Reality?},
  journal   = {{IEEE} Transactions on Visualization and Computer Graphics},
  keywords  = {3D, AR},
  abstract  = {We report on a controlled user study comparing three visualization environments for common 3D exploration. Our environments differ in how they exploit natural human perception and interaction capabilities. We compare an augmented-reality head-mounted display (Microsoft HoloLens), a handheld tablet, and a desktop setup. The novel head-mounted HoloLens display projects stereoscopic images of virtual content into a user's real world and allows for interaction in-situ at the spatial position of the 3D hologram. The tablet is able to interact with 3D content through touch, spatial positioning, and tangible markers, however, 3D content is still presented on a 2D surface. Our hypothesis is that visualization environments that match human perceptual and interaction capabilities better to the task at hand improve understanding of 3D visualizations. To better understand the space of display and interaction modalities in visualization environments, we first propose a classification based on three dimensions: perception, interaction, and the spatial and cognitive proximity of the two. Each technique in our study is located at a different position along these three dimensions. We asked 15 participants to perform four tasks, each task having different levels of difficulty for both spatial perception and degrees of freedom for interaction. Our results show that each of the tested environments is more effective for certain tasks, but that generally the desktop environment is still fastest and most precise in almost all cases.},
}
@article{Fuchs_2017,
  doi       = {10.1109/tvcg.2016.2549018},
  url       = {https://doi.org/10.1109%2Ftvcg.2016.2549018},
  year      = 2017,
  month     = jul,
  publisher = {Institute of Electrical and Electronics Engineers ({IEEE})},
  volume    = {23},
  number    = {7},
  pages     = {1863--1879},
  author    = {Johannes Fuchs and Petra Isenberg and Anastasia Bezerianos and Daniel Keim},
  title     = {A Systematic Review of Experimental Studies on Data Glyphs},
  journal   = {{IEEE} Transactions on Visualization and Computer Graphics},
  keywords  = {glyphs},
  abstract  = {We systematically reviewed 64 user-study papers on data glyphs to help researchers and practitioners gain an informed understanding of tradeoffs in the glyph design space. The glyphs we consider are individual representations of multi-dimensional data points, often meant to be shown in small-multiple settings. Over the past 60 years many different glyph designs were proposed and many of these designs have been subjected to perceptual or comparative evaluations. Yet, a systematic overview of the types of glyphs and design variations tested, the tasks under which they were analyzed, or even the study goals and results does not yet exist. In this paper we provide such an overview by systematically sampling and tabulating the literature on data glyph studies, listing their designs, questions, data, and tasks. In addition we present a concise overview of the types of glyphs and their design characteristics analyzed by researchers in the past, and a synthesis of the study results. Based on our meta analysis of all results we further contribute a set of design implications and a discussion on open research directions.},
}
@article{Hullman_2018,
  doi       = {10.1109/tvcg.2017.2743898},
  url       = {https://doi.org/10.1109%2Ftvcg.2017.2743898},
  year      = 2018,
  month     = jan,
  publisher = {Institute of Electrical and Electronics Engineers ({IEEE})},
  volume    = {24},
  number    = {1},
  pages     = {446--456},
  author    = {Jessica Hullman and Matthew Kay and Yea-Seul Kim and Samana Shrestha},
  title     = {Imagining Replications: Graphical Prediction {\&} Discrete Visualizations Improve Recall {\&} Estimation of Effect Uncertainty},
  keywords  = {memorability, imagining},
  journal   = {{IEEE} Transactions on Visualization and Computer Graphics},
  abstract  = {People often have erroneous intuitions about the results of uncertain processes, such as scientific experiments. Many uncertainty visualizations assume considerable statistical knowledge, but have been shown to prompt erroneous conclusions even when users possess this knowledge. Active learning approaches have been shown to improve statistical reasoning, but are rarely applied in visualizing uncertainty in scientific reports. We present a controlled study to evaluate the impact of an interactive, graphical uncertainty prediction technique for communicating uncertainty in experiment results. Using our technique, users sketch their prediction of the uncertainty in experimental effects prior to viewing the true sampling distribution from an experiment. We find that having a user graphically predict the possible effects from experiment replications is an effective way to improve one's ability to make predictions about replications of new experiments. Additionally, visualizing uncertainty as a set of discrete outcomes, as opposed to a continuous probability distribution, can improve recall of a sampling distribution from a single experiment. Our work has implications for various applications where it is important to elicit peoples' estimates of probability distributions and to communicate uncertainty effectively.},
}
@article{Dragicevic_2018,
  doi       = {10.1109/tvcg.2017.2744298},
  url       = {https://doi.org/10.1109%2Ftvcg.2017.2744298},
  year      = 2018,
  month     = jan,
  publisher = {Institute of Electrical and Electronics Engineers ({IEEE})},
  volume    = {24},
  number    = {1},
  pages     = {781--790},
  author    = {Pierre Dragicevic and Yvonne Jansen},
  title     = {Blinded with Science or Informed by Charts? A Replication Study},
  journal   = {{IEEE} Transactions on Visualization and Computer Graphics},
  abstract  = {We provide a reappraisal of Tal and Wansink's study ``Blinded with Science'', where seemingly trivial charts were shown to increase belief in drug efficacy, presumably because charts are associated with science. Through a series of four replications conducted on two crowdsourcing platforms, we investigate an alternative explanation, namely, that the charts allowed participants to better assess the drug's efficacy. Considered together, our experiments suggest that the chart seems to have indeed promoted understanding, although the effect is likely very small. Meanwhile, we were unable to replicate the original study's findings, as text with chart appeared to be no more persuasive - and sometimes less persuasive - than text alone. This suggests that the effect may not be as robust as claimed and may need specific conditions to be reproduced. Regardless, within our experimental settings and considering our study as a whole, the chart's contribution to understanding was clearly larger than its contribution to persuasion.},
}
@article{borland2007rainbow,
  title     = {Rainbow color map (still) considered harmful},
  author    = {Borland, David and Taylor, II, Russell M.},
  journal   = {{IEEE} Computer Graphics and Applications},
  url       = {https://ieeexplore.ieee.org/document/4118486},
  volume    = {27},
  number    = {2},
  pages     = {14--17},
  year      = {2007},
  publisher = {IEEE},
  keywords  = {color},
  abstract  = {In this article, we reiterate the characteristics that make the rainbow color map a poor choice, provide examples that clearly illustrate these deficiencies even on simple data sets, and recommend better color maps for several categories of display. The goal is to make the rainbow color map as rare in visualization as the goto statement is in programming - which complicates the task of analyzing and verifying program correctness.},
}
@inproceedings{Rogowitz:2001:BPQ:601671.601699,
  author    = {Rogowitz, Bernice E. and Kalvin, Alan D.},
  title     = {The ``Which Blair Project'': A Quick Visual Method for Evaluating Perceptual Color Maps},
  booktitle = {Proceedings of the Conference on Visualization '01},
  series    = {VIS '01},
  year      = {2001},
  isbn      = {0-7803-7200-X},
  location  = {San Diego, California},
  pages     = {183--190},
  numpages  = {8},
  url       = {http://dl.acm.org/citation.cfm?id=601671.601699},
  acmid     = {601699},
  publisher = {IEEE Computer Society},
  address   = {Washington, DC, USA},
  keywords  = {color},
  abstract  = {We have developed a fast, perceptual method for selecting color scales for data visualization that takes advantage of our sensitivity to luminance variations in human faces. To do so, we conducted experiments in which we mapped various color scales onto the intensity values of a digitized photograph of a face and asked observers to rate each image. We found a very strong correlation between the perceived naturalness of the images and the degree to which the underlying color scales increased monotonically in luminance. Color scales that did not include a monotonically-increasing luminance component produced no positive rating scores. Since color scales with monotonic luminance profiles are widely recommended for visualizing continuous scalar data, a purely visual technique for identifying such color scales could be very useful, especially in situations where color calibration is not integrated into the visualization environment, such as over the Internet.},
}
@article{Ware_1988,
  author   = {Colin Ware},
  journal  = {{IEEE} Computer Graphics and Applications},
  title    = {Color sequences for univariate maps: theory, experiments and principles},
  year     = {1988},
  volume   = {8},
  number   = {5},
  pages    = {41--49},
  keywords = {color},
  doi      = {10.1109/38.7760},
  issn     = {0272-1716},
  month    = sep,
  abstract = {Pseudocoloring for presenting univariate map information on a graphic display system is investigated. The kinds of information available in maps are divided into two classes: metric information denotes the quantity stored at each point on the surface, and form information denotes the shape or structure of the surface. Theoretical principles are proposed to predict which color sequences will be effective at conveying value and form information respectively. According to this theory, a scale that approximates the physical spectrum should be good at conveying value information, because of the reduced effects of simultaneous contrast. It should be poor at conveying form information, however, because the brain prefers form information to come through the lightness-processing channel. Conversely, a gray scale should be poor at conveying value information and good at conveying form information, according to the same theory. These predictions are tested in a series of psychophysical experiments that test five color sequences.
The results show that simultaneous contrast can be a major source of error when reading maps, but only partially confirm the form hypothesis. Guidelines are given for designing color sequences to be effective in both conveying form and value information. An experimental color sequence is presented to illustrate these guidelines.} } @Article{spence_1999, title={Using color to code quantity in spatial displays.}, author={Spence, Ian and Kutlesa, Natasha and Rose, David L}, journal={Journal of Experimental Psychology: Applied}, volume={5}, number={4}, pages={393}, year={1999}, publisher={American Psychological Association}, keywords={color}, abstract = {Participants made simple and complex judgments in 2 experiments that examined the use of color to code quantity in spatial displays. The coding assignments were chosen to evaluate the principle of perceptual linearity in color space. In Experiment 1, participants compared all possible pairs of colors used to represent magnitudes. Comparisons were made most rapidly with a scale that varied only brightness (B) and most accurately with a scale that covaried hue (H) with saturation (S) and brightness (H+S+B scale). In Experiment 2, clusters were identified fastest with the H+S+B scale, followed by brightness and bipolar scales, whereas a nonlinear, hue-only scale was slowest and produced the least accurate judgments. Coding assignments close to perceptual linearity were best for both simple and complex judgments in data visualization. 
However, hue conferred an advantage if the task involved segregation or classification.} }
@article{Gogolou_2019,
  doi       = {10.1109/tvcg.2018.2865077},
  url       = {https://doi.org/10.1109%2Ftvcg.2018.2865077},
  year      = 2019,
  month     = jan,
  publisher = {Institute of Electrical and Electronics Engineers ({IEEE})},
  volume    = {25},
  number    = {1},
  pages     = {523--533},
  author    = {Anna Gogolou and Theophanis Tsandilas and Themis Palpanas and Anastasia Bezerianos},
  title     = {Comparing Similarity Perception in Time Series Visualizations},
  journal   = {{IEEE} Transactions on Visualization and Computer Graphics},
  keywords  = {time-series},
  abstract  = {A common challenge faced by many domain experts working with time series data is how to identify and compare similar patterns. This operation is fundamental in high-level tasks, such as detecting recurring phenomena or creating clusters of similar temporal sequences. While automatic measures exist to compute time series similarity, human intervention is often required to visually inspect these automatically generated results. The visualization literature has examined similarity perception and its relation to automatic similarity measures for line charts, but has not yet considered if alternative visual representations, such as horizon graphs and colorfields, alter this perception. Motivated by how neuroscientists evaluate epileptiform patterns, we conducted two experiments that study how these three visualization techniques affect similarity perception in EEG signals. We seek to understand if the time series results returned from automatic similarity measures are perceived in a similar manner, irrespective of the visualization technique; and if what people perceive as similar with each visualization aligns with different automatic measures and their similarity constraints. Our findings indicate that horizon graphs align with similarity measures that allow local variations in temporal position or speed (i.e., dynamic time warping) more than the two other techniques. On the other hand, horizon graphs do not align with measures that are insensitive to amplitude and y-offset scaling (i.e., measures based on z-normalization), but the inverse seems to be the case for line charts and colorfields. Overall, our work indicates that the choice of visualization affects what temporal patterns we consider as similar, i.e., the notion of similarity in time series is not visualization independent.},
}
@article{Schloss_2019,
  doi       = {10.1109/tvcg.2018.2865147},
  url       = {https://doi.org/10.1109%2Ftvcg.2018.2865147},
  year      = 2019,
  month     = jan,
  publisher = {Institute of Electrical and Electronics Engineers ({IEEE})},
  volume    = {25},
  number    = {1},
  pages     = {810--819},
  author    = {Karen B. Schloss and Connor C. Gramazio and Allison T. Silverman and Madeline L. Parker and Audrey S. Wang},
  title     = {Mapping Color to Meaning in Colormap Data Visualizations},
  journal   = {{IEEE} Transactions on Visualization and Computer Graphics},
  keywords  = {color},
  abstract  = {To interpret data visualizations, people must determine how visual features map onto concepts. For example, to interpret colormaps, people must determine how dimensions of color (e.g., lightness, hue) map onto quantities of a given measure (e.g., brain activity, correlation magnitude). This process is easier when the encoded mappings in the visualization match people's predictions of how visual features will map onto concepts, their inferred mappings. To harness this principle in visualization design, it is necessary to understand what factors determine people's inferred mappings. In this study, we investigated how inferred color-quantity mappings for colormap data visualizations were influenced by the background color. Prior literature presents seemingly conflicting accounts of how the background color affects inferred color-quantity mappings. The present results help resolve those conflicts, demonstrating that sometimes the background has an effect and sometimes it does not, depending on whether the colormap appears to vary in opacity. When there is no apparent variation in opacity, participants infer that darker colors map to larger quantities (dark-is-more bias). As apparent variation in opacity increases, participants become biased toward inferring that more opaque colors map to larger quantities (opaque-is-more bias). These biases work together on light backgrounds and conflict on dark backgrounds. Under such conflicts, the opaque-is-more bias can negate, or even supersede the dark-is-more bias. The results suggest that if a design goal is to produce colormaps that match people's inferred mappings and are robust to changes in background color, it is beneficial to use colormaps that will not appear to vary in opacity on any background color, and to encode larger quantities in darker colors.},
}
@article{Wang_2019,
  doi       = {10.1109/tvcg.2018.2864912},
  url       = {https://doi.org/10.1109%2Ftvcg.2018.2864912},
  year      = 2019,
  month     = jan,
  publisher = {Institute of Electrical and Electronics Engineers ({IEEE})},
  volume    = {25},
  number    = {1},
  pages     = {820--829},
  author    = {Yunhai Wang and Xin Chen and Tong Ge and Chen Bao and Michael Sedlmair and Chi-Wing Fu and Oliver Deussen and Baoquan Chen},
  title     = {Optimizing Color Assignment for Perception of Class Separability in Multiclass Scatterplots},
  journal   = {{IEEE} Transactions on Visualization and Computer Graphics},
  keywords  = {color},
  abstract  = {Appropriate choice of colors significantly aids viewers in understanding the structures in multiclass scatterplots and becomes more important with a growing number of data points and groups.
An appropriate color mapping is also an important parameter for the creation of an aesthetically pleasing scatterplot. Currently, users of visualization software routinely rely on color mappings that have been pre-defined by the software. A default color mapping, however, cannot ensure an optimal perceptual separability between groups, and sometimes may even lead to a misinterpretation of the data. In this paper, we present an effective approach for color assignment based on a set of given colors that is designed to optimize the perception of scatterplots. Our approach takes into account the spatial relationships, density, degree of overlap between point clusters, and also the background color. For this purpose, we use a genetic algorithm that is able to efficiently find good color assignments. We implemented an interactive color assignment system with three extensions of the basic method that incorporates top K suggestions, user-defined color subsets, and classes of interest for the optimization. To demonstrate the effectiveness of our assignment technique, we conducted a numerical study and a controlled user study to compare our approach with default color assignments; our findings were verified by two expert studies. 
The results show that our approach is able to support users in distinguishing cluster numbers faster and more precisely than default assignment methods.} } @article{Saket_2019, doi = {10.1109/tvcg.2018.2829750}, url = {https://doi.org/10.1109%2Ftvcg.2018.2829750}, year = 2019, month = {jul}, publisher = {Institute of Electrical and Electronics Engineers ({IEEE})}, volume = {25}, number = {7}, pages = {2505--2512}, author = {Bahador Saket and Alex Endert and Cagatay Demiralp}, title = {Task-Based Effectiveness of Basic Visualizations}, journal = {{IEEE} Transactions on Visualization and Computer Graphics}, abstract = {Visualizations of tabular data are widely used; understanding their effectiveness in different task and data contexts is fundamental to scaling their impact. However, little is known about how basic tabular data visualizations perform across varying data analysis tasks. In this paper, we report results from a crowdsourced experiment to evaluate the effectiveness of five small scale (5-34 data points) two-dimensional visualization types-Table, Line Chart, Bar Chart, Scatterplot, and Pie Chart-across ten common data analysis tasks using two datasets. We find the effectiveness of these visualization types significantly varies across task, suggesting that visualization design would benefit from considering context-dependent effectiveness. Based on our findings, we derive recommendations on which visualizations to choose based on different tasks. 
We finally train a decision tree on the data we collected to drive a recommender, showcasing how to effectively engineer experimental user data into practical visualization systems.} } @article{Ryan_2019, doi = {10.1109/tvcg.2018.2865264}, url = {https://doi.org/10.1109%2Ftvcg.2018.2865264}, year = 2019, month = {jan}, publisher = {Institute of Electrical and Electronics Engineers ({IEEE})}, volume = {25}, number = {1}, pages = {872--881}, author = {Gabriel Ryan and Abigail Mosca and Remco Chang and Eugene Wu}, title = {At a Glance: Pixel Approximate Entropy as a Measure of Line Chart Complexity}, journal = {{IEEE} Transactions on Visualization and Computer Graphics}, abstract = {When inspecting information visualizations under time critical settings, such as emergency response or monitoring the heart rate in a surgery room, the user only has a small amount of time to view the visualization “at a glance”. In these settings, it is important to provide a quantitative measure of the visualization to understand whether or not the visualization is too “complex” to accurately judge at a glance. This paper proposes Pixel Approximate Entropy (PAE), which adapts the approximate entropy statistical measure commonly used to quantify regularity and unpredictability in time-series data, as a measure of visual complexity for line charts. We show that PAE is correlated with user-perceived chart complexity, and that increased chart PAE correlates with reduced judgement accuracy. We also find that the correlation between PAE values and participants' judgment increases when the user has less time to examine the line charts.} } @article{Yang_2019, doi = {10.1109/tvcg.2018.2810918}, url = {https://doi.org/10.1109%2Ftvcg.2018.2810918}, year = 2019, month = {mar}, publisher = {Institute of Electrical and Electronics Engineers ({IEEE})}, volume = {25}, number = {3}, pages = {1474--1488}, author = {Fumeng Yang and Lane T. Harrison and Ronald A. Rensink and Steven L.
Franconeri and Remco Chang}, title = {Correlation Judgment and Visualization Features: A Comparative Study}, journal = {{IEEE} Transactions on Visualization and Computer Graphics}, keywords = {scatterplots, correlation}, abstract = {Recent visualization research efforts have incorporated experimental techniques and perceptual models from the vision science community. Perceptual laws such as Weber's law, for example, have been used to model the perception of correlation in scatterplots. While this thread of research has progressively refined the modeling of the perception of correlation in scatterplots, it remains unclear as to why such perception can be modeled using relatively simple functions, e.g., linear and log-linear. In this paper, we investigate a longstanding hypothesis that people use visual features in a chart as a proxy for statistical measures like correlation. For a given scatterplot, we extract 49 candidate visual features and evaluate which best align with existing models and participant judgments. The results support the hypothesis that people attend to a small number of visual features when discriminating correlation in scatterplots. 
We discuss how this result may account for prior conflicting findings, and how visual features provide a baseline for future model-based approaches in visualization evaluation and design.} } @article{Hullman_2019, doi = {10.1109/tvcg.2018.2864889}, url = {https://doi.org/10.1109%2Ftvcg.2018.2864889}, year = 2019, month = {jan}, publisher = {Institute of Electrical and Electronics Engineers ({IEEE})}, volume = {25}, number = {1}, pages = {903--913}, author = {Jessica Hullman and Xiaoli Qiao and Michael Correll and Alex Kale and Matthew Kay}, title = {In Pursuit of Error: A Survey of Uncertainty Visualization Evaluation}, journal = {{IEEE} Transactions on Visualization and Computer Graphics}, keywords = {uncertainty}, abstract = {Understanding and accounting for uncertainty is critical to effectively reasoning about visualized data. However, evaluating the impact of an uncertainty visualization is complex due to the difficulties that people have interpreting uncertainty and the challenge of defining correct behavior with uncertainty information. Currently, evaluators of uncertainty visualization must rely on general purpose visualization evaluation frameworks which can be ill-equipped to provide guidance with the unique difficulties of assessing judgments under uncertainty. To help evaluators navigate these complexities, we present a taxonomy for characterizing decisions made in designing an evaluation of an uncertainty visualization. Our taxonomy differentiates six levels of decisions that comprise an uncertainty visualization evaluation: the behavioral targets of the study, expected effects from an uncertainty visualization, evaluation goals, measures, elicitation techniques, and analysis approaches. 
Applying our taxonomy to 86 user studies of uncertainty visualizations, we find that existing evaluation practice, particularly in visualization research, focuses on Performance and Satisfaction-based measures that assume more predictable and statistically-driven judgment behavior than is suggested by research on human judgment and decision making. We reflect on common themes in evaluation practice concerning the interpretation and semantics of uncertainty, the use of confidence reporting, and a bias toward evaluating performance as accuracy rather than decision quality. We conclude with a concrete set of recommendations for evaluators designed to reduce the mismatch between the conceptualization of uncertainty in visualization versus other fields.} } @article{Song_2019, doi = {10.1109/tvcg.2018.2864914}, url = {https://doi.org/10.1109%2Ftvcg.2018.2864914}, year = 2019, month = {jan}, publisher = {Institute of Electrical and Electronics Engineers ({IEEE})}, volume = {25}, number = {1}, pages = {914--924}, author = {Hayeong Song and Danielle Albers Szafir}, title = {Where's My Data? Evaluating Visualizations with Missing Data}, journal = {{IEEE} Transactions on Visualization and Computer Graphics}, keywords = {missing-data}, abstract = {Many real-world datasets are incomplete due to factors such as data collection failures or misalignments between fused datasets. Visualizations of incomplete datasets should allow analysts to draw conclusions from their data while effectively reasoning about the quality of the data and resulting conclusions. We conducted a pair of crowdsourced studies to measure how the methods used to impute and visualize missing data may influence analysts' perceptions of data quality and their confidence in their conclusions. Our experiments used different design choices for line graphs and bar charts to estimate averages and trends in incomplete time series datasets. 
Our results provide preliminary guidance for visualization designers to consider when working with incomplete data in different domains and scenarios.} } @inproceedings{blythe1995effect, title={The effect of graph layout on inference from social network data}, author={Blythe, Jim and McGrath, Cathleen and Krackhardt, David}, doi = {10.1007/BFb0021789}, booktitle={International symposium on graph drawing}, pages={40--51}, year={1995}, organization={Springer}, abstract = { Social network analysis uses techniques from graph theory to analyze the structure of relationships among social actors such as individuals or groups. We investigate the effect of the layout of a social network on the inferences drawn by observers about the number of social groupings evident and the centrality of various actors in the network. We conducted an experiment in which eighty subjects provided answers about three drawings. The subjects were not told that the drawings were chosen from five different layouts of the same graph. We found that the layout has a significant effect on their inferences and present some initial results about the way certain Euclidean features will affect perceptions of structural features of the network. There is no "best" layout for a social network; when layouts are designed one must take into account the most important features of the network to be presented as well as the network itself. }, keywords = {networks} } @incollection{Dengler1998, doi = {10.1007/3-540-37623-2_37}, url = {https://doi.org/10.1007/3-540-37623-2_37}, year = {1998}, publisher = {Springer Berlin Heidelberg}, pages = {441--443}, author = {Edmund Dengler and William Cowan}, title = {Human Perception of Laid-Out Graphs}, booktitle = {Graph Drawing}, abstract={Combinatorial graphs are increasingly used for information presentation. They provide high information density and intuitive display of multiple relationships, while offering low cost because they can be created algorithmically. 
Essential to algorithmic graph layout is a set of rules that encode layout objectives. How these rules are related to inferences drawn from the graph by human observers is a largely unexplored issue. Thus, success or failure by algorithmic standards is only uncertainly related to perceptual effectiveness of the resulting layout. Human experimentation is the only way to correct this deficiency. This poster describes empirical research conducted in 1994. Forty-six respondents, separated into naive and computer-aware groups, freely viewed a collection of graph layouts, providing semantic conclusions they reached on the basis of the layout, in the absence of any semantic attribution to nodes in themselves. We were interested in two questions. First, are semantic attributions consistent or random? If the former semantic objectives must be considered when creating layout rules or objective functions for automated graph layout. Second, if consistent semantic attributions exist, what are they? The remaining paragraphs of this abstract describe our results and conclusions. Most importantly, all our observers agreed strongly as to the semantic content of specific graph layouts. There was no difference in interpretation between the group consisting of experienced programmers, and the group who had little exposure to computers. We were interested in possible differences because combinatorial graphs are extremely common in computer science curriculum material, and it’s possible that a group of programmers might agree because they had been exposed to a common set of layout conventions. 
The agreement between our two groups demonstrates that semantic conventions extend widely.}, keywords = {networks} } @article{mcgrath1996seeing, title={Seeing groups in graph layouts}, author={McGrath, Cathleen and Blythe, Jim and Krackhardt, David}, journal={Connections}, volume={19}, number={2}, pages={22--29}, year={1996}, url = {http://www.andrew.cmu.edu/user/krack/documents/pubs/1996/1996%20Seeing%20Groups%20in%20Graph%20Layout.pdf}, abstract = {Social networkers frequently make use of drawings to communicate information and ideas about networks. However, the impact of the layout of a network on the conclusions that a viewer is likely to draw has so far received very little scrutiny. In this paper, we extend work begun in (Blythe et al, 1995) and (McGrath et al, 1996) to understand how the layout of graphs depicting social network data influences the inferences viewers draw about social networks. Our previous work focused on the perception of prominence or bridging of a particular node. Here we focus on perceptions of clustering among nodes. Previous empirical work studying graph layout and social networks has shown that layout influences viewers' perception of the prominence, or importance of individuals in the network (Blythe et al, 1995). Purchase et al. (1995) report on experimental work validating general graph layout aesthetics. Both of these empirical studies of human perception of graphs build on earlier work on graph drawing aesthetics (see Battista et al. (1994) for a survey of this work). Finding groups in networks of people is an important part of social network analysis. According to Scott (1991): One of the most enduring concerns of those working in social network analysis has been the attempt to discover the various 'cliques' and cohesive sub-groups into which a network can be divided. 
We extend experimental work testing viewers' understanding of graphs based on layout by using an interactive system that allows us to closely track the responses and response time of people answering questions about the graphs.}, keywords = {networks} } @article{MCGRATH1997223, title = "The effect of spatial arrangement on judgments and errors in interpreting graphs", journal = "Social Networks", volume = "19", number = "3", pages = "223--242", year = "1997", issn = "0378-8733", doi = "10.1016/S0378-8733(96)00299-7", url = "http://www.sciencedirect.com/science/article/pii/S0378873396002997", author = "Cathleen McGrath and Jim Blythe and David Krackhardt", abstract = "The spatial arrangement of social network data in graphs can influence viewers' perceptions of structural characteristics such as prominence, bridging and grouping. To study the extent of this effect, we conducted an experiment with 80 graduate students. Each student viewed three of five different spatial arrangements of the same network. We found that viewers' perceptions of structural features of the network changed as the spatial arrangement of the network changed.", keywords = {networks} } @article{Purchase:1997:ESB:264216.264222, author = {Purchase, H. C. and Cohen, R. F. and James, M. I.}, title = {An Experimental Study of the Basis for Graph Drawing Algorithms}, journal = {J. Exp. Algorithmics}, issue_date = {1997}, volume = {2}, month = jan, year = {1997}, issn = {1084-6654}, articleno = {4}, url = {http://doi.acm.org/10.1145/264216.264222}, doi = {10.1145/264216.264222}, acmid = {264222}, publisher = {ACM}, address = {New York, NY, USA}, abstract = {Designers of graph drawing algorithms and systems claim to illuminate application data by producing layouts that optimise measurable aesthetic qualities. 
Examples of these aesthetics include symmetry (where possible, a symmetrical view of the graph should be displayed), minimise arc crossing (the number of arc crossings in the display should be minimised), and minimise bends (the total number of bends in polyline arcs should be minimised). The aim of this paper is to describe our work to validate these claims by performing empirical studies of human understanding of graphs drawn using various layout aesthetics. This work is important since it helps indicate to algorithm and system designers what are the aesthetic qualities most important to aid understanding, and consequently to build more effective systems.}, keywords = {networks} } @inproceedings{Archambault2012, doi = {10.1109/pacificvis.2012.6183578}, url = {https://doi.org/10.1109/pacificvis.2012.6183578}, year = {2012}, month = feb, publisher = {{IEEE}}, author = {Daniel Archambault and Helen C. Purchase}, title = {The mental map and memorability in dynamic graphs}, booktitle = {2012 {IEEE} Pacific Visualization Symposium}, abstract = {In dynamic graph drawing, preserving the mental map, or ensuring that the location of nodes do not change significantly as the information evolves over time is considered an important property by algorithm designers. Many prior experiments have attempted to verify this principle, with surprisingly little success. These experiments have used several different algorithmic methods, a variety of graph interpretation questions on both real and fabricated data, and different presentation methods. However, none of the results have conclusively demonstrated the importance of mental map preservation on task performance. Our experiment measures the efficacy of the dynamic graph drawing in a different manner: we look at how memorable the evolving graph is, rather than how easy it is to interpret. As observed in the previous studies, we found no significant difference in terms of response time or error rate when preserving the mental map. 
While preserving the mental map is a good idea in principle, we find that it may not always support performance. However, our qualitative data suggests that, in terms of the user's perception, preserving the mental map makes memorability tasks easier. Our qualitative data also suggests that there may be two features of the dynamic graph drawing that may assist in their memorability: interesting subgraphs that remain visible over time and interesting patterns in node movement. The former is supported by preserving the mental map while the latter is not.}, keywords = {networks} } @incollection{Huang2006, doi = {10.1007/11618058_24}, url = {https://doi.org/10.1007/11618058_24}, year = {2006}, publisher = {Springer Berlin Heidelberg}, pages = {262--273}, author = {Weidong Huang and Seok-Hee Hong and Peter Eades}, title = {Layout Effects on Sociogram Perception}, booktitle = {Graph Drawing}, abstract = {This paper describes a within-subjects experiment in which we compare the relative effectiveness of five sociogram drawing conventions in communicating underlying network substance, based on user task performance and usability preference, in order to examine effects of different spatial layout formats on human sociogram perception. We also explore the impact of edge crossings, a widely accepted readability aesthetic. Subjective data were gathered based on the methodology of Purchase et al.[14] Objective data were collected through an online system. We found that both edge crossings and conventions pose significant affects on user preference and task performance of finding groups, but either has little impact on the perception of actor status. On the other hand, the node positioning and angular resolution might be more important in perceiving actor status. 
In visualizing social networks, it is important to note that the techniques that are highly preferred by users do not necessarily lead to best task performance.}, keywords = {networks} } @incollection{Purchase1996, doi = {10.1007/bfb0021827}, url = {https://doi.org/10.1007/bfb0021827}, year = {1996}, publisher = {Springer Berlin Heidelberg}, pages = {435--446}, author = {Helen C. Purchase and Robert F. Cohen and Murray James}, title = {Validating graph drawing aesthetics}, booktitle = {Graph Drawing}, abstract = {Designers of graph drawing algorithms and systems claim to illuminate application data by producing layouts that optimize measurable aesthetic qualities. Examples of these aesthetics include symmetry (where possible, a symmetrical view of the graph should be displayed), minimize edge crossings (the number of edge crossings in the display should be minimized), and minimize bends (the total number of bends in polyline edges should be minimized). The aim of this paper is to describe our work to validate these claims by performing empirical studies of human understanding of graphs drawn using various layout aesthetics. This work is important since it helps indicate to algorithm and system designers what are the aesthetic qualities most important to aid understanding, and consequently to build more effective systems.}, keywords = {networks} } @incollection{Archambault2013, doi = {10.1007/978-3-642-36763-2_42}, url = {https://doi.org/10.1007/978-3-642-36763-2_42}, year = {2013}, publisher = {Springer Berlin Heidelberg}, pages = {475--486}, author = {Daniel Archambault and Helen C. Purchase}, title = {Mental Map Preservation Helps User Orientation in Dynamic Graphs}, booktitle = {Graph Drawing}, abstract={We present the results of a formal experiment that tests the ability of a participant to orient themselves in a dynamically evolving graph. Examples of these tasks include finding a specific location or route between two locations. 
We find that preserving the mental map for the tasks tested is significantly faster and produces fewer errors. As the number of targets increase, this result holds.}, keywords = {networks} } @inproceedings{Huang:2006:PRS:1151903.1151932, author = {Huang, Weidong and Hong, Seok-Hee and Eades, Peter}, title = {How People Read Sociograms: A Questionnaire Study}, booktitle = {Proceedings of the 2006 Asia-Pacific Symposium on Information Visualisation - Volume 60}, series = {APVis '06}, year = {2006}, isbn = {1-920682-41-4}, location = {Tokyo, Japan}, pages = {199--206}, numpages = {8}, url = {http://dl.acm.org/citation.cfm?id=1151903.1151932}, acmid = {1151932}, publisher = {Australian Computer Society, Inc.}, address = {Darlinghurst, Australia, Australia}, abstract = {Visualizing social network data into sociograms plays an important role in communicating information about network characteristics. Previous studies have shown that human perceptions of network features can be affected by the layout of a sociogram [McGrath et al. 1996, 1997]. An empirical user study has been conducted to investigate effectiveness of five different network visualization conventions and impact of edge crossings on sociogram perceptions, using both quantitative performance and preference measures and qualitative questionnaire study. This paper reports results and findings of the questionnaire study. We relate qualitative questionnaire results with quantitative findings and discuss their implications for sociogram design. We found that subjects had a strong preference of placing nodes on the top or in the center to highlight importance, and clustering nodes in the same group and separating groups to highlight groups. They had tendency to believe that nodes in the center or on the top are more important, and nodes in close proximity belong to the same group. 
Some preliminary recommendations for sociogram design and hypotheses about human reading behaviors are proposed.}, keywords = {networks} } @inproceedings{Huang:2005:PRG:1082315.1082324, author = {Huang, Weidong and Eades, Peter}, title = {How People Read Graphs}, booktitle = {Proceedings of the 2005 Asia-Pacific Symposium on Information Visualisation - Volume 45}, series = {APVis '05}, year = {2005}, isbn = {1-920-68227-9}, location = {Sydney, Australia}, pages = {51--58}, numpages = {8}, url = {http://dl.acm.org/citation.cfm?id=1082315.1082324}, acmid = {1082324}, publisher = {Australian Computer Society, Inc.}, address = {Darlinghurst, Australia, Australia}, abstract = {The graph layout problem has long been a major concern for effectiveness of conveying information. To propose user-centred aesthetic criteria for a "good" layout, it is important to have knowledge on how people read graphs; how a particular graph layout characteristic can affect people's reading performance. On the other hand, despite the increasingly wide use of graphs in everyday life, yet we know surprisingly little about how people actually read graphs. The present eye tracking study in this paper is an attempt to perform an initial investigation into this issue and provide data that can help build the basic understanding of how people read graphs.}, keywords = {networks} } @article{Purchase2012, doi = {10.1109/tvcg.2010.269}, url = {https://doi.org/10.1109/tvcg.2010.269}, year = {2012}, month = jan, publisher = {Institute of Electrical and Electronics Engineers ({IEEE})}, volume = {18}, number = {1}, pages = {81--92}, author = {H. C. Purchase and C. Pilcher and B. Plimmer}, title = {Graph Drawing Aesthetics{\textemdash}Created by Users, Not Algorithms}, journal = {{IEEE} Transactions on Visualization and Computer Graphics}, abstract = {Prior empirical work on layout aesthetics for graph drawing algorithms has concentrated on the interpretation of existing graph drawings. 
We report on experiments which focus on the creation and layout of graph drawings: participants were asked to draw graphs based on adjacency lists, and to lay them out "nicely.” Two interaction methods were used for creating the drawings: a sketch interface which allows for easy, natural hand movements, and a formal point-and-click interface similar to a typical graph editing system. We find, in common with many other studies, that removing edge crossings is the most significant aesthetic, but also discover that aligning nodes and edges to an underlying grid is important. We observe that the aesthetics favored by participants during creation of a graph drawing are often not evident in the final product and that the participants did not make a clear distinction between the processes of creation and layout. Our results suggest that graph drawing systems should integrate automatic layout with the user's manual editing process, and provide facilities to support grid-based graph creation.}, keywords = {networks} } @incollection{Purchase1997, doi = {10.1007/3-540-63938-1_67}, url = {https://doi.org/10.1007/3-540-63938-1_67}, year = {1997}, publisher = {Springer Berlin Heidelberg}, pages = {248--261}, author = {Helen Purchase}, title = {Which aesthetic has the greatest effect on human understanding?}, booktitle = {Graph Drawing}, abstract = {In the creation of graph drawing algorithms and systems, designers claim that by producing layouts that optimise certain aesthetic qualities, the graphs are easier to understand. Such aesthetics include maximise symmetry, minimise edge crosses and minimise bends. A previous study aimed to validate these claims with respect to three aesthetics, using paper-based experiments [11]. 
The study reported here is superior in many ways: five aesthetics are considered, attempts are made to place a priority order on the relative importance of the aesthetics, the experiments are run on-line, and the ease of understanding the drawings is measured in time, as well as in the number of errors. In addition, greater consideration is given to the possible effect of confounding factors in the graph drawings. The results indicate that reducing the number of edge crosses is by far the most important aesthetic, while minimising the number of bends and maximising symmetry have a lesser effect. The effects of maximising the minimum angle between edges leaving a node and of fixing edges and nodes to an orthogonal grid are not statistically significant. This work is important since it helps to demonstrate to algorithm and system designers the aesthetic qualities most important for aiding human understanding, the most appropriate compromises to make when there is a conflict in aesthetics, and consequently, how to build more effective systems.},
  keywords  = {networks},
}

@Article{Ghani2012,
  author    = {S. Ghani and N. Elmqvist and J. S. Yi},
  title     = {Perception of Animated Node-Link Diagrams for Dynamic Graphs},
  journal   = {Computer Graphics Forum},
  year      = {2012},
  volume    = {31},
  number    = {3pt3},
  pages     = {1205--1214},
  month     = jun,
  publisher = {Wiley},
  doi       = {10.1111/j.1467-8659.2012.03113.x},
  url       = {https://doi.org/10.1111/j.1467-8659.2012.03113.x},
  abstract  = {Effective visualization of dynamic graphs remains an open research topic, and many state-of-the-art tools use animated node-link diagrams for this purpose. Despite its intuitiveness, the effectiveness of animation in node-link diagrams has been questioned, and several empirical studies have shown that animation is not necessarily superior to static visualizations. However, the exact mechanics of perceiving animated node-link diagrams are still unclear. In this paper, we study the impact of different dynamic graph metrics on user perception of the animation. After deriving candidate visual graph metrics, we perform an exploratory user study where participants are asked to reconstruct the event sequence in animated node-link diagrams. Based on these findings, we conduct a second user study where we investigate the most important visual metrics in depth. Our findings show that node speed and target separation are prominent visual metrics to predict the performance of event sequencing tasks.},
  keywords  = {networks, animation},
}

@InProceedings{Huang2008,
  author    = {Weidong Huang and Peter Eades and Seok-Hee Hong},
  title     = {Beyond time and error},
  booktitle = {Proceedings of the 2008 conference on {BEyond} time and errors novel {evaLuation} methods for Information Visualization - {BELIV} {\textquotesingle}08},
  year      = {2008},
  publisher = {{ACM} Press},
  doi       = {10.1145/1377966.1377970},
  url       = {https://doi.org/10.1145/1377966.1377970},
  abstract  = {Time and error are commonly used to measure the effectiveness of graph drawings. However, such measures are limited in providing more fundamental knowledge that is useful for general visualization design. We therefore apply a cognitive approach in evaluations. This approach evaluates graph drawings from a cognitive perspective, measuring more than just time and error. Three user studies are conducted to demonstrate the usefulness of this approach.},
  keywords  = {networks},
}

@Article{PURCHASE1998,
  author    = {Helen C. Purchase},
  title     = {Performance of Layout Algorithms: Comprehension, not Computation},
  journal   = {Journal of Visual Languages {\&} Computing},
  year      = {1998},
  volume    = {9},
  number    = {6},
  pages     = {647--657},
  month     = dec,
  publisher = {Elsevier {BV}},
  doi       = {10.1006/jvlc.1998.0093},
  url       = {https://doi.org/10.1006/jvlc.1998.0093},
  abstract  = {Many algorithms address the problem of rendering an abstract graph structure as a diagram in as efficient and as elegant a manner as possible. The criteria for judging the worth of these algorithms are typically the extent to which they conform to common aesthetic criteria (e.g. minimising the number of crossings, maximising symmetry), or their computational efficiency. The algorithms are not usually judged on their ability to produce diagrams that maximise humans’ performance on tasks which require their use. This paper presents an example experimental methodology for considering the relative worth of eight layout algorithms with respect to human performance, together with details of an experiment using a single graph. The results indicate that, with the exception of one algorithm, there is no statistical difference between the performance data of the algorithms when applied to this graph, indicating that they produce drawings of comparable difficulty. This result is despite the different aesthetic bases for the algorithms.},
  keywords  = {networks},
}

@Article{Purchase2000,
  author    = {Helen C. Purchase},
  title     = {Effective information visualisation: a study of graph drawing aesthetics and algorithms},
  journal   = {Interacting with Computers},
  year      = {2000},
  volume    = {13},
  number    = {2},
  pages     = {147--162},
  month     = dec,
  publisher = {Oxford University Press ({OUP})},
  doi       = {10.1016/s0953-5438(00)00032-1},
  url       = {https://doi.org/10.1016/s0953-5438(00)00032-1},
  abstract  = {Information visualisation systems which generate diagrams representing discrete relational information must consider potential users if they are to be effective. Many algorithms which render an abstract graph structure as a diagram are valued for their conformance to aesthetic criteria (e.g. reducing the number of edge crossings, maximising symmetry), or for computational efficiency. They are not usually judged on their ability to produce diagrams that maximise human performance. This paper presents the results of experiments investigating the relative worth (from an HCI point of view) of graph drawing aesthetics and algorithms using a single graph. The results indicate that while some individual aesthetics affect human performance, it is difficult to say that one algorithm is ‘better’ than another from a relational understanding point of view. Designers of automatic layout algorithms, and the systems which embody such algorithms, can benefit from this study and this human-centred approach, by adapting their methods to focus on user concerns, rather than computational ones.},
  keywords  = {networks},
}

@InCollection{Romat_2019,
  author    = {Hugo Romat and Dylan Lebout and Emmanuel Pietriga and Caroline Appert},
  title     = {Influence of Color and Size of Particles on Their Perceived Speed in Node-Link Diagrams},
  booktitle = {Human-Computer Interaction {\textendash} {INTERACT} 2019},
  year      = {2019},
  pages     = {619--637},
  publisher = {Springer International Publishing},
  doi       = {10.1007/978-3-030-29384-0_37},
  abstract  = {Edges in networks often represent transfer relationships between vertices. When visualizing such networks as node-link diagrams, animated particles flowing along the links can effectively convey this notion of transfer. Variables that govern the motion of particles, their speed in particular, may be used to visually represent edge data attributes. Few guidelines exist to inform the design of these particle-based network visualizations, however. Empirical studies so far have only looked at the different motion variables in isolation, independently from other visual variables controlling the appearance of particles, such as their color or size. In this paper, we report on a study of the influence of several visual variables on users’ perception of the speed of particles. Our results show that particles’ luminance, chromaticity and width do not interfere with their perceived speed. But variations in their length make it more difficult for users to compare the relative speed of particles across edges.},
  keywords  = {animation, networks, color},
}

@Comment{jabref-meta: databaseType:bibtex;}

@Comment{jabref-meta: fileDirectory:/Users/jsb/vis-perception;}