diff --git "a/best_paper/use_this_matches.json" "b/best_paper/use_this_matches.json" new file mode 100644--- /dev/null +++ "b/best_paper/use_this_matches.json" @@ -0,0 +1,48822 @@ +[ + { + "paper": "3122089757", + "venue": "1133523790", + "year": "2020", + "title": "scaling attributed network embedding to massive graphs", + "label": [ + "114466953", + "45374587", + "46135064", + "48044578", + "177264268", + "137955351", + "11413529", + "2778770139", + "137836250" + ], + "author": [ + "2742253777", + "2951341187", + "2157867657", + "2280962535", + "3080986785", + "2168903744" + ], + "reference": [ + "114517082", + "1593045043", + "1932742904", + "1956559956", + "2000769684", + "2025603201", + "2064675550", + "2069153192", + "2099471712", + "2119821739", + "2133299088", + "2155461593", + "2242161203", + "2557283755", + "2585247128", + "2605234117", + "2613171001", + "2622489478", + "2624431344", + "2798909945", + "2808000122", + "2808466528", + "2809660921", + "2893944917", + "2897117569", + "2908461307", + "2914080035", + "2926442184", + "2950352474", + "2951451004", + "2951486171", + "2962756421", + "2965080627", + "2970929262", + "2985340067", + "2998336824", + "3007813770", + "3010920176", + "3030286867", + "3102647957", + "3103668824", + "3105705953" + ], + "abstract": "given a graph g where each node is associated with a set of attributes attributed network embedding ane maps each node v g to a compact vector xv which can be used in downstream machine learnin", + "title_raw": "Scaling attributed network embedding to massive graphs", + "abstract_raw": "Given a graph G where each node is associated with a set of attributes, attributed network embedding (ANE) maps each node v G to a compact vector Xv, which can be used in downstream machine learnin...", + "link": "https://scholar.google.com/scholar?as_q=&num=10&btnG=Search+Scholar&as_epq=Scaling+Attributed+Network+Embedding+to+Massive+Graphs&as_oq=&as_eq=&as_occt=any&as_sauthors=Yang", + "scraped_abstract": "Given 
a graph G where each node is associated with a set of attributes, attributed network embedding (ANE) maps each node v \u2208 G to a compact vector Xv, which can be used in downstream machine learning tasks. Ideally, Xv should capture node v's affinity to each attribute, which considers not only v's own attribute associations, but also those of its connected nodes along edges in G. It is challenging to obtain high-utility embeddings that enable accurate predictions; scaling effective ANE computation to massive graphs with millions of nodes pushes the difficulty of the problem to a whole new level. Existing solutions largely fail on such graphs, leading to prohibitive costs, low-quality embeddings, or both. This paper proposes PANE, an effective and scalable approach to ANE computation for massive graphs that achieves state-of-the-art result quality on multiple benchmark datasets, measured by the accuracy of three common prediction tasks: attribute inference, link prediction, and node classification. In particular, for the large MAG data with over 59 million nodes, 0.98 billion edges, and 2000 attributes, PANE is the only known viable solution that obtains effective embeddings on a single server, within 12 hours. PANE obtains high scalability and effectiveness through three main algorithmic designs. First, it formulates the learning objective based on a novel random walk model for attributed networks. The resulting optimization task is still challenging on large graphs. Second, PANE includes a highly efficient solver for the above optimization problem, whose key module is a carefully designed initialization of the embeddings, which drastically reduces the number of iterations required to converge. Finally, PANE utilizes multi-core CPUs through non-trivial parallelization of the above solver, which achieves scalability while retaining the high quality of the resulting embeddings. 
Extensive experiments, comparing 10 existing approaches on 8 real datasets, demonstrate that PANE consistently outperforms all existing methods in terms of result quality, while being orders of magnitude faster.", + "citation_best": 36 + }, + { + "paper": "2996908057", + "venue": "1184914352", + "year": "2020", + "title": "winogrande an adversarial winograd schema challenge at scale", + "label": [ + "150899416", + "119857082", + "137293760", + "114289077", + "61249035", + "137955351", + "193221554", + "2779276763", + "62230096" + ], + "author": [ + "2497204219", + "1967926312", + "2071644166", + "2133417374" + ], + "reference": [], + "abstract": "the winograd schema challenge wsc levesque davis and morgenstern 2011 a benchmark for commonsense reasoning is a set of 273 expert crafted pronoun resolution problems originally designed to be unsolvable for statistical models that rely on selectional preferences or word associations however recent advances in neural language models have already reached around 90 accuracy on variants of wsc this raises an important question whether these models have truly acquired robust commonsense capabilities or whether they rely on spurious biases in the datasets that lead to an overestimation of the true capabilities of machine commonsense to investigate this question we introduce winogrande a large scale dataset of 44k problems inspired by the original wsc design but adjusted to improve both the scale and the hardness of the dataset the key steps of the dataset construction consist of 1 a carefully designed crowdsourcing procedure followed by 2 systematic bias reduction using a novel aflite algorithm that generalizes human detectable word associations to machine detectable embedding associations the best state of the art methods on winogrande achieve 59 4 79 1 which are 15 35 absolute below human performance of 94 0 depending on the amount of the training data allowed 2 100 respectively furthermore we establish new state of the art 
results on five related benchmarks wsc 90 1 dpr 93 1 copa 90 6 knowref 85 6 and winogender 97 1 these results have dual implications on one hand they demonstrate the effectiveness of winogrande when used as a resource for transfer learning on the other hand they raise a concern that we are likely to be overestimating the true capabilities of machine commonsense across all these benchmarks we emphasize the importance of algorithmic bias reduction in existing and future benchmarks to mitigate such overestimation", + "title_raw": "WinoGrande: An Adversarial Winograd Schema Challenge at Scale", + "abstract_raw": "The Winograd Schema Challenge (WSC) (Levesque, Davis, and Morgenstern 2011), a benchmark for commonsense reasoning, is a set of 273 expert-crafted pronoun resolution problems originally designed to be unsolvable for statistical models that rely on selectional preferences or word associations. However, recent advances in neural language models have already reached around 90% accuracy on variants of WSC. This raises an important question whether these models have truly acquired robust commonsense capabilities or whether they rely on spurious biases in the datasets that lead to an overestimation of the true capabilities of machine commonsense.To investigate this question, we introduce WinoGrande, a large-scale dataset of 44k problems, inspired by the original WSC design, but adjusted to improve both the scale and the hardness of the dataset. The key steps of the dataset construction consist of (1) a carefully designed crowdsourcing procedure, followed by (2) systematic bias reduction using a novel AfLite algorithm that generalizes human-detectable word associations to machine-detectable embedding associations. 
The best state-of-the-art methods on WinoGrande achieve 59.4 \u2013 79.1%, which are \u223c15-35% (absolute) below human performance of 94.0%, depending on the amount of the training data allowed (2% \u2013 100% respectively).Furthermore, we establish new state-of-the-art results on five related benchmarks \u2014 WSC (\u2192 90.1%), DPR (\u2192 93.1%), COPA(\u2192 90.6%), KnowRef (\u2192 85.6%), and Winogender (\u2192 97.1%). These results have dual implications: on one hand, they demonstrate the effectiveness of WinoGrande when used as a resource for transfer learning. On the other hand, they raise a concern that we are likely to be overestimating the true capabilities of machine commonsense across all these benchmarks. We emphasize the importance of algorithmic bias reduction in existing and future benchmarks to mitigate such overestimation.", + "link": "https://www.semanticscholar.org/paper/8f7133b2e3851b09d659b91e8faa761ec206413f", + "scraped_abstract": null, + "citation_best": 396 + }, + { + "paper": "3035507081", + "venue": "1188739475", + "year": "2020", + "title": "beyond accuracy behavioral testing of nlp models with checklist", + "label": [ + "40140605", + "204321447" + ], + "author": [ + "2986741849", + "2789722361", + "1988556028", + "2279876130" + ], + "reference": [ + "2076253536", + "2108816886", + "2282821441", + "2324595780", + "2799007037", + "2852714836", + "2906152891", + "2922234936", + "2941666437", + "2949858875", + "2953039212", + "2953307569", + "2962900737", + "2963126845", + "2963310665", + "2963323070", + "2963394326", + "2963482440", + "2963661177", + "2963748441", + "2965373594", + "2970442950", + "2982756474", + "2989344603", + "2990704537", + "2996601440" + ], + "abstract": "although measuring held out accuracy has been the primary approach to evaluate generalization it often overestimates the performance of nlp models while alternative approaches for evaluating models either focus on individual tasks or on specific behaviors 
inspired by principles of behavioral testing in software engineering we introduce checklist a task agnostic methodology for testing nlp models checklist includes a matrix of general linguistic capabilities and test types that facilitate comprehensive test ideation as well as a software tool to generate a large and diverse number of test cases quickly we illustrate the utility of checklist with tests for three tasks identifying critical failures in both commercial and state of art models in a user study a team responsible for a commercial sentiment analysis model found new and actionable bugs in an extensively tested model in another user study nlp practitioners with checklist created twice as many tests and found almost three times as many bugs as users without it", + "title_raw": "Beyond Accuracy: Behavioral Testing of NLP Models with CheckList.", + "abstract_raw": "Although measuring held-out accuracy has been the primary approach to evaluate generalization, it often overestimates the performance of NLP models, while alternative approaches for evaluating models either focus on individual tasks or on specific behaviors. Inspired by principles of behavioral testing in software engineering, we introduce CheckList, a task-agnostic methodology for testing NLP models. CheckList includes a matrix of general linguistic capabilities and test types that facilitate comprehensive test ideation, as well as a software tool to generate a large and diverse number of test cases quickly. We illustrate the utility of CheckList with tests for three tasks, identifying critical failures in both commercial and state-of-art models. In a user study, a team responsible for a commercial sentiment analysis model found new and actionable bugs in an extensively tested model. 
In another user study, NLP practitioners with CheckList created twice as many tests, and found almost three times as many bugs as users without it.", + "link": "https://scholar.google.com/scholar?as_q=&num=10&btnG=Search+Scholar&as_epq=Beyond+Accuracy:+Behavioral+Testing+of+NLP+models+with+Checklist&as_oq=&as_eq=&as_occt=any&as_sauthors=Ribeiro", + "scraped_abstract": null, + "citation_best": 8 + }, + { + "paper": "3009121097", + "venue": "1163450153", + "year": "2020", + "title": "a design engineering approach for quantitatively exploring context aware sentence retrieval for nonspeaking individuals with motor disabilities", + "label": [ + "2777530160", + "107457646", + "108265739", + "61249035" + ], + "author": [ + "2091367221", + "3047766742", + "2125862785", + "2153840550" + ], + "reference": [ + "1532325895", + "1601205600", + "1969707202", + "1972143341", + "2009942287", + "2073514611", + "2094563468", + "2116163483", + "2135620230", + "2137704435", + "2144211451", + "2153252192", + "2160484899", + "2165027873", + "2187259397", + "2256209570", + "2312217865", + "2604140970" + ], + "abstract": "nonspeaking individuals with motor disabilities typically have very low communication rates this paper proposes a design engineering approach for quantitatively exploring context aware sentence retrieval as a promising complementary input interface working in tandem with a word prediction keyboard we motivate the need for complementary design engineering methodology in the design of augmentative and alternative communication and explain how such methods can be used to gain additional design insights we then study the theoretical performance envelopes of a context aware sentence retrieval system identifying potential keystroke savings as a function of the parameters of the subsystems such as the accuracy of the underlying auto complete word prediction algorithm and the accuracy of sensed context information under varying assumptions we find that context aware sentence 
retrieval has the potential to provide users with considerable improvements in keystroke savings under reasonable parameter assumptions of the underlying subsystems this highlights how complementary design engineering methods can reveal additional insights into design for augmentative and alternative communication", + "title_raw": "A Design Engineering Approach for Quantitatively Exploring Context-Aware Sentence Retrieval for Nonspeaking Individuals with Motor Disabilities", + "abstract_raw": "Nonspeaking individuals with motor disabilities typically have very low communication rates. This paper proposes a design engineering approach for quantitatively exploring context-aware sentence retrieval as a promising complementary input interface, working in tandem with a word-prediction keyboard. We motivate the need for complementary design engineering methodology in the design of augmentative and alternative communication and explain how such methods can be used to gain additional design insights. We then study the theoretical performance envelopes of a context-aware sentence retrieval system, identifying potential keystroke savings as a function of the parameters of the subsystems, such as the accuracy of the underlying auto-complete word prediction algorithm and the accuracy of sensed context information under varying assumptions. We find that context-aware sentence retrieval has the potential to provide users with considerable improvements in keystroke savings under reasonable parameter assumptions of the underlying subsystems. This highlights how complementary design engineering methods can reveal additional insights into design for augmentative and alternative communication.", + "link": "https://www.semanticscholar.org/paper/d8bacc613f359e2d672df09f9582a5c6fa0f8a64", + "scraped_abstract": "Nonspeaking individuals with motor disabilities typically have very low communication rates. 
This paper proposes a design engineering approach for quantitatively exploring context-aware sentence retrieval as a promising complementary input interface, working in tandem with a word-prediction keyboard. We motivate the need for complementary design engineering methodology in the design of augmentative and alternative communication and explain how such methods can be used to gain additional design insights. We then study the theoretical performance envelopes of a context-aware sentence retrieval system, identifying potential keystroke savings as a function of the parameters of the subsystems, such as the accuracy of the underlying auto-complete word prediction algorithm and the accuracy of sensed context information under varying assumptions. We find that context-aware sentence retrieval has the potential to provide users with considerable improvements in keystroke savings under reasonable parameter assumptions of the underlying subsystems. This highlights how complementary design engineering methods can reveal additional insights into design for augmentative and alternative communication.", + "citation_best": 27 + }, + { + "paper": "3030254708", + "venue": "1163450153", + "year": "2020", + "title": "articulating experience reflections from experts applying micro phenomenology to design research in hci", + "label": [ + "2779561248", + "201025465", + "107457646", + "100609095" + ], + "author": [ + "2223746108", + "3031082360", + "1478380734", + "2095733154" + ], + "reference": [ + "625693175", + "1998969635", + "2005037843", + "2005783023", + "2009770806", + "2014858229", + "2017373968", + "2018570226", + "2021178794", + "2022360138", + "2032666449", + "2049124404", + "2052030431", + "2055592348", + "2064894702", + "2070558619", + "2084016497", + "2089635306", + "2092767720", + "2099636708", + "2109917277", + "2119552352", + "2131043567", + "2155987308", + "2164115694", + "2168568775", + "2342459749", + "2524189730", + "2606370266", + "2610536640", + 
"2621763900", + "2623764574", + "2734473211", + "2792723369", + "2798651530", + "2808073017", + "2940885058", + "3022307797" + ], + "abstract": "third wave hci initiated a slow transformation in the methods of ux research from widely used quantitative approaches to more recently employed qualitative techniques articulating the nuances complexity and diversity of a user s experience beyond surface descriptions remains a challenge within design one qualitative method micro phenomenology has been used in hci design research since 2001 yet no systematic understanding of micro phenomenology has been presented particularly from the perspective of hci design researchers who actively use it in design contexts we interviewed 5 hci design experts who utilize micro phenomenology and present their experiences with the method we illustrate how this method has been applied by the selected experts through developing a practice and present conditions under which the descriptions of the experience unfold and the values that this method can provide to hci design field our contribution highlights the value of micro phenomenology in articulating the experience of designers and participants developing vocabulary for multi sensory experiences and unfolding embodied tacit knowledge", + "title_raw": "Articulating Experience: Reflections from Experts Applying Micro-Phenomenology to Design Research in HCI", + "abstract_raw": "Third wave HCI initiated a slow transformation in the methods of UX research: from widely used quantitative approaches to more recently employed qualitative techniques. Articulating the nuances, complexity, and diversity of a user's experience beyond surface descriptions remains a challenge within design. One qualitative method \u0097 micro-phenomenology \u0097 has been used in HCI/Design research since 2001. 
Yet, no systematic understanding of micro-phenomenology has been presented, particularly from the perspective of HCI/Design researchers who actively use it in design contexts. We interviewed 5 HCI/Design experts who utilize micro-phenomenology and present their experiences with the method. We illustrate how this method has been applied by the selected experts through developing a practice, and present conditions under which the descriptions of the experience unfold, and the values that this method can provide to HCI/Design field. Our contribution highlights the value of micro-phenomenology in articulating the experience of designers and participants, developing vocabulary for multi-sensory experiences, and unfolding embodied tacit knowledge.", + "link": "https://www.semanticscholar.org/paper/ec1d2c530aa984a11443089dcacfa2e76a3be9f1", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "3017640030", + "venue": "1163450153", + "year": "2020", + "title": "beyond the prototype understanding the challenge of scaling hardware device production", + "label": [ + "9390403" + ], + "author": [ + "2114031067", + "2148014207" + ], + "reference": [ + "1520829806", + "1979290264", + "1984182046", + "1995601995", + "2003941864", + "2028187927", + "2030390645", + "2054044780", + "2057073649", + "2062658884", + "2090351970", + "2091678085", + "2102511761", + "2103339808", + "2116817356", + "2137371941", + "2138899136", + "2147068791", + "2149006949", + "2318293530", + "2346517070", + "2392542003", + "2611488115", + "2761973445", + "2765259813", + "2940939137", + "2940959397", + "2981031913", + "3000996643" + ], + "abstract": "the hardware research and development communities have invested heavily in tools and materials that facilitate the design and prototyping of electronic devices numerous easy to access and easy to use tools have streamlined the prototyping of interactive and embedded devices for experts and led to a remarkable growth in non expert builders 
however there has been little exploration of challenges associated with moving beyond a prototype and creating hundreds or thousands of exact replicas a process that is still challenging for many we interviewed 25 individuals with experience taking prototype hardware devices into low volume production we systematically investigated the common issues faced and mitigation strategies adopted we present our findings in four main categories 1 gaps in technical knowledge 2 gaps in non technical knowledge 3 minimum viable rigor in manufacturing preparation and 4 building relationships and a professional network our study unearthed several opportunities for new tools and processes to support the transition beyond a working prototype to cost effective low volume manufacturing these would complement the aforementioned tools and materials that support design and prototyping", + "title_raw": "Beyond the Prototype: Understanding the Challenge of Scaling Hardware Device Production", + "abstract_raw": "The hardware research and development communities have invested heavily in tools and materials that facilitate the design and prototyping of electronic devices. Numerous easy-to-access and easy-to-use tools have streamlined the prototyping of interactive and embedded devices for experts and led to a remarkable growth in non-expert builders. However, there has been little exploration of challenges associated with moving beyond a prototype and creating hundreds or thousands of exact replicas - a process that is still challenging for many. We interviewed 25 individuals with experience taking prototype hardware devices into low volume production. We systematically investigated the common issues faced and mitigation strategies adopted. We present our findings in four main categories: (1) gaps in technical knowledge; (2) gaps in non-technical knowledge; (3) minimum viable rigor in manufacturing preparation; and (4) building relationships and a professional network. 
Our study unearthed several opportunities for new tools and processes to support the transition beyond a working prototype to cost effective low-volume manufacturing. These would complement the aforementioned tools and materials that support design and prototyping.", + "link": "https://www.semanticscholar.org/paper/b1e6cbedd0fbb52b7fcde7df5605c92fa4bbde04", + "scraped_abstract": "The hardware research and development communities have invested heavily in tools and materials that facilitate the design and prototyping of electronic devices. Numerous easy-to-access and easy-to-use tools have streamlined the prototyping of interactive and embedded devices for experts and led to a remarkable growth in non-expert builders. However, there has been little exploration of challenges associated with moving beyond a prototype and creating hundreds or thousands of exact replicas - a process that is still challenging for many. We interviewed 25 individuals with experience taking prototype hardware devices into low volume production. We systematically investigated the common issues faced and mitigation strategies adopted. We present our findings in four main categories: (1) gaps in technical knowledge; (2) gaps in non-technical knowledge; (3) minimum viable rigor in manufacturing preparation; and (4) building relationships and a professional network. Our study unearthed several opportunities for new tools and processes to support the transition beyond a working prototype to cost effective low-volume manufacturing. 
These would complement the aforementioned tools and materials that support design and prototyping.", + "citation_best": 22 + }, + { + "paper": "3032514484", + "venue": "1163450153", + "year": "2020", + "title": "bug or feature covert impairments to human computer interaction", + "label": [ + "104114177", + "1009929", + "184297639", + "107457646", + "2776990265", + "7374053" + ], + "author": [ + "2303005455" + ], + "reference": [ + "128509308", + "180808015", + "202714577", + "319698796", + "580699849", + "975377894", + "1486263771", + "1490259388", + "1542629324", + "1546080376", + "1647671624", + "1667580224", + "1774682829", + "1855259589", + "1941267885", + "1971948928", + "1974535758", + "1990175024", + "2004195178", + "2034225924", + "2042751882", + "2050483477", + "2050787672", + "2055326384", + "2060906279", + "2063289012", + "2081428746", + "2083300987", + "2088301450", + "2089852938", + "2093197020", + "2099789718", + "2103223087", + "2103854406", + "2104372013", + "2109137639", + "2112824399", + "2115592467", + "2116567696", + "2122544819", + "2146944478", + "2152099471", + "2152398460", + "2169229156", + "2169856608", + "2179427518", + "2216664396", + "2250366030", + "2293737032", + "2303413189", + "2343698281", + "2399380318", + "2513151097", + "2528437680", + "2535970271", + "2611474773", + "2640306816", + "2795529671", + "2807123141", + "2884962025", + "2901576596", + "2941211423", + "2942280647", + "2945920325", + "3103552852", + "3125207128" + ], + "abstract": "computer users commonly experience interaction anomalies such as the text cursor jumping to another location in a document perturbed mouse pointer motion or a disagreement between tactile input and touch screen location these anomalies impair interaction and require the user to take corrective measures such as resetting the text cursor or correcting the trajectory of the pointer to reach a desired target impairments can result from software bugs physical hardware defects and extraneous input 
however some designs alter the course of interaction through covert impairments anomalies introduced intentionally and without the user s knowledge there are various motivations for doing so rooted in disparate fields including biometrics electronic voting and entertainment we examine this kind of deception by systematizing four different ways computer interaction may become impaired and three different goals of the designer providing insight to the design of systems that implement covert impairments", + "title_raw": "Bug or Feature? Covert Impairments to Human Computer Interaction", + "abstract_raw": "Computer users commonly experience interaction anomalies, such as the text cursor jumping to another location in a document, perturbed mouse pointer motion, or a disagreement between tactile input and touch screen location. These anomalies impair interaction and require the user to take corrective measures, such as resetting the text cursor or correcting the trajectory of the pointer to reach a desired target. Impairments can result from software bugs, physical hardware defects, and extraneous input. However, some designs alter the course of interaction through covert impairments, anomalies introduced intentionally and without the user's knowledge. There are various motivations for doing so rooted in disparate fields including biometrics, electronic voting, and entertainment. We examine this kind of deception by systematizing four different ways computer interaction may become impaired and three different goals of the designer, providing insight to the design of systems that implement covert impairments.", + "link": "https://www.semanticscholar.org/paper/cb3fa78e7c85a5bf71d44e8172be83b4f1e1d454", + "scraped_abstract": "Computer users commonly experience interaction anomalies, such as the text cursor jumping to another location in a document, perturbed mouse pointer motion, or a disagreement between tactile input and touch screen location. 
These anomalies impair interaction and require the user to take corrective measures, such as resetting the text cursor or correcting the trajectory of the pointer to reach a desired target. Impairments can result from software bugs, physical hardware defects, and extraneous input. However, some designs alter the course of interaction through covert impairments, anomalies introduced intentionally and without the user's knowledge. There are various motivations for doing so rooted in disparate fields including biometrics, electronic voting, and entertainment. We examine this kind of deception by systematizing four different ways computer interaction may become impaired and three different goals of the designer, providing insight to the design of systems that implement covert impairments.", + "citation_best": 6 + }, + { + "paper": "3014972121", + "venue": "1163450153", + "year": "2020", + "title": "co designing checklists to understand organizational challenges and opportunities around fairness in ai", + "label": [ + "105339364" + ], + "author": [ + "2504639629", + "2101373462", + "2152685116", + "2893136266" + ], + "reference": [ + "604960715", + "1006997171", + "1491887163", + "1525067466", + "1859579244", + "1921009626", + "1976129032", + "1993979945", + "1996333597", + "2000117884", + "2001541484", + "2006447892", + "2016782401", + "2051339604", + "2073653117", + "2100960835", + "2118183099", + "2119472226", + "2126847279", + "2137080312", + "2138806549", + "2139834614", + "2142013321", + "2144663895", + "2148781751", + "2160343841", + "2162448023", + "2163102864", + "2277509373", + "2475843659", + "2519750805", + "2557671501", + "2599025709", + "2611748211", + "2742049669", + "2781306982", + "2790628304", + "2795038878", + "2802642435", + "2808450727", + "2884972792", + "2887489621", + "2895484796", + "2897154134", + "2898911770", + "2899367046", + "2900572965", + "2905125924", + "2911227954", + "2912732633", + "2916904544", + "2940545533", + "2940823115", + 
"2941090159", + "2942659792", + "2942721282", + "2952203301", + "2952696298", + "2953522645", + "2964276035", + "2973345269", + "2995006168", + "3099361686", + "3100046612", + "3100279624", + "3121189393", + "3122075665", + "3122189619" + ], + "abstract": "many organizations have published principles intended to guide the ethical development and deployment of ai systems however their abstract nature makes them difficult to operationalize some organizations have therefore produced ai ethics checklists as well as checklists for more specific concepts such as fairness as applied to ai systems but unless checklists are grounded in practitioners needs they may be misused to understand the role of checklists in ai ethics we conducted an iterative co design process with 48 practitioners focusing on fairness we co designed an ai fairness checklist and identified desiderata and concerns for ai fairness checklists in general we found that ai fairness checklists could provide organizational infrastructure for formalizing ad hoc processes and empowering individual advocates we highlight aspects of organizational culture that may impact the efficacy of ai fairness checklists and suggest future design directions", + "title_raw": "Co-Designing Checklists to Understand Organizational Challenges and Opportunities around Fairness in AI", + "abstract_raw": "Many organizations have published principles intended to guide the ethical development and deployment of AI systems; however, their abstract nature makes them difficult to operationalize. Some organizations have therefore produced AI ethics checklists, as well as checklists for more specific concepts, such as fairness, as applied to AI systems. But unless checklists are grounded in practitioners' needs, they may be misused. To understand the role of checklists in AI ethics, we conducted an iterative co-design process with 48 practitioners, focusing on fairness. 
We co-designed an AI fairness checklist and identified desiderata and concerns for AI fairness checklists in general. We found that AI fairness checklists could provide organizational infrastructure for formalizing ad-hoc processes and empowering individual advocates. We highlight aspects of organizational culture that may impact the efficacy of AI fairness checklists, and suggest future design directions.", + "link": "https://www.semanticscholar.org/paper/58bb221c1e375f254826b7b7341f74057e87676c", + "scraped_abstract": null, + "citation_best": 301 + }, + { + "paper": "3013330822", + "venue": "1163450153", + "year": "2020", + "title": "color and animation preferences for a light band ehmi in interactions between automated vehicles and pedestrians", + "label": [ + "502989409", + "170130773", + "107457646", + "108265739" + ], + "author": [ + "2757243025", + "2633159238", + "971489800", + "2146780842", + "3110189257" + ], + "reference": [ + "1980985548", + "2018260930", + "2074474221", + "2141708418", + "2148062318", + "2182299929", + "2185399867", + "2462466550", + "2470955424", + "2507800322", + "2558786080", + "2592496287", + "2606385607", + "2612105706", + "2612879771", + "2677137679", + "2751103733", + "2759166613", + "2763646004", + "2768774120", + "2783946743", + "2794233897", + "2890958932", + "2901419506", + "2935196311", + "2939338567", + "2943591006", + "2968841686", + "2973769932", + "2978875798" + ], + "abstract": "in this paper we report user preferences regarding color and animation patterns to support the interaction between automated vehicles avs and pedestrians through an external human machine interface ehmi existing concepts of ehmi differ among other things in their use of colors or animations to express an av s yielding intention in the absence of empirical research there is a knowledge gap regarding which color and animation leads to highest usability and preferences in traffic negotiation situations we conducted an online survey n 400 to 
investigate the comprehensibility of a light band ehmi with a combination of 5 color and 3 animation patterns for a yielding av results show that cyan is considered a neutral color for communicating a yielding intention additionally a uniformly flashing or pulsing animation is preferred compared to any pattern that animates sideways these insights can contribute in the future design and standardization of ehmis", + "title_raw": "Color and Animation Preferences for a Light Band eHMI in Interactions Between Automated Vehicles and Pedestrians", + "abstract_raw": "In this paper, we report user preferences regarding color and animation patterns to support the interaction between Automated Vehicles (AVs) and pedestrians through an external Human-Machine-Interface (eHMI). Existing concepts of eHMI differ -- among other things -- in their use of colors or animations to express an AV's yielding intention. In the absence of empirical research, there is a knowledge gap regarding which color and animation leads to highest usability and preferences in traffic negotiation situations. We conducted an online survey (N=400) to investigate the comprehensibility of a light band eHMI with a combination of 5 color and 3 animation patterns for a yielding AV. Results show that cyan is considered a neutral color for communicating a yielding intention. Additionally, a uniformly flashing or pulsing animation is preferred compared to any pattern that animates sideways. These insights can contribute in the future design and standardization of eHMIs.", + "link": "https://www.semanticscholar.org/paper/47af51de5d703a695fea5e4f3d3102e964dc2b54", + "scraped_abstract": "In this paper, we report user preferences regarding color and animation patterns to support the interaction between Automated Vehicles (AVs) and pedestrians through an external Human-Machine-Interface (eHMI). 
Existing concepts of eHMI differ -- among other things -- in their use of colors or animations to express an AV's yielding intention. In the absence of empirical research, there is a knowledge gap regarding which color and animation leads to highest usability and preferences in traffic negotiation situations. We conducted an online survey (N=400) to investigate the comprehensibility of a light band eHMI with a combination of 5 color and 3 animation patterns for a yielding AV. Results show that cyan is considered a neutral color for communicating a yielding intention. Additionally, a uniformly flashing or pulsing animation is preferred compared to any pattern that animates sideways. These insights can contribute in the future design and standardization of eHMIs.", + "citation_best": 113 + }, + { + "paper": "3029211878", + "venue": "1163450153", + "year": "2020", + "title": "connecting distributed families camera work for three party mobile video calls", + "label": [ + "200632571", + "108827166" + ], + "author": [ + "3030038250", + "1980560805", + "2099767421" + ], + "reference": [ + "1441143464", + "1965376821", + "1993239396", + "1993258900", + "1994408718", + "2000628771", + "2005373731", + "2018324857", + "2022699881", + "2027260883", + "2085750959", + "2098105587", + "2106423183", + "2114616727", + "2123741245", + "2126024701", + "2136326827", + "2140706899", + "2146124776", + "2171204315", + "2587350437", + "2610486606", + "2618152073", + "2624578412", + "2803031567" + ], + "abstract": "mobile video calling technologies have become a critical link to connect distributed families however these technologies have been principally designed for video calling between two parties whereas family video calls involve young children often comprise three parties namely a co present adult a parent or grandparent helping with the interaction between the child and another remote adult we examine how manipulation of phone cameras and management of co present children is used 
to stage parent child interactions we present results from a video ethnographic study based on 40 video recordings of video calls between left behind children and their migrant parents in china our analysis reveals a key practice of facilitation work performed by grandparents as a crucial feature of three party calls facilitation work offers a new concept for hci s broader conceptualisation of mobile video calling suggesting revisions that design might take into consideration for triadic interactions in general", + "title_raw": "Connecting Distributed Families: Camera Work for Three-party Mobile Video Calls", + "abstract_raw": "Mobile video calling technologies have become a critical link to connect distributed families. However, these technologies have been principally designed for video calling between two parties, whereas family video calls involve young children often comprise three parties, namely a co-present adult (a parent or grandparent) helping with the interaction between the child and another remote adult. We examine how manipulation of phone cameras and management of co-present children is used to stage parent-child interactions. We present results from a video-ethnographic study based on 40 video recordings of video calls between 'left-behind' children and their migrant parents in China. Our analysis reveals a key practice of 'facilitation work', performed by grandparents, as a crucial feature of three-party calls. Facilitation work offers a new concept for HCI's broader conceptualisation of mobile video calling, suggesting revisions that design might take into consideration for triadic interactions in general.", + "link": "https://www.semanticscholar.org/paper/fbd31b58de8e6de8647b65e34a982d99f03e930b", + "scraped_abstract": "Mobile video calling technologies have become a critical link to connect distributed families. 
However, these technologies have been principally designed for video calling between two parties, whereas family video calls involve young children often comprise three parties, namely a co-present adult (a parent or grandparent) helping with the interaction between the child and another remote adult. We examine how manipulation of phone cameras and management of co-present children is used to stage parent-child interactions. We present results from a video-ethnographic study based on 40 video recordings of video calls between 'left-behind' children and their migrant parents in China. Our analysis reveals a key practice of 'facilitation work', performed by grandparents, as a crucial feature of three-party calls. Facilitation work offers a new concept for HCI's broader conceptualisation of mobile video calling, suggesting revisions that design might take into consideration for triadic interactions in general.", + "citation_best": 33 + }, + { + "paper": "3030161855", + "venue": "1163450153", + "year": "2020", + "title": "creating augmented and virtual reality applications current practices challenges and opportunities", + "label": [ + "107457646", + "136197465", + "194969405", + "16963264", + "153715457", + "2776867947", + "2781390188" + ], + "author": [ + "2801102128", + "2039874956", + "2314594501", + "287661457", + "2001101459" + ], + "reference": [ + "1493315108", + "1589029596", + "1607675442", + "1808011207", + "1963557108", + "1968853180", + "1979253984", + "1984315811", + "1994547327", + "1995760065", + "2015564561", + "2022797053", + "2024880014", + "2028424422", + "2034499400", + "2061585524", + "2066930153", + "2093495548", + "2095807562", + "2098923055", + "2120089691", + "2122122381", + "2127972053", + "2132168989", + "2138350998", + "2145280169", + "2148447775", + "2156106848", + "2157070927", + "2157922094", + "2159771345", + "2171666482", + "2215079391", + "2412306367", + "2533856174", + "2587972759", + "2610406819", + "2765106443", + "2765478675", + 
"2775663585", + "2796111556", + "2796156077", + "2796294902", + "2802609531", + "2808833240", + "2897393156", + "2941201726", + "2941270448", + "2943535317" + ], + "abstract": "augmented reality ar and virtual reality vr devices are becoming easier to access and use but the barrier to entry for creating ar vr applications remains high although the recent spike in hci research on novel ar vr tools is promising we lack insights into how ar vr creators use today s state of the art authoring tools as well as the types of challenges that they face we interviewed 21 ar vr creators which we grouped into hobbyists domain experts and professional designers despite having a variety of motivations and skillsets they described similar challenges in designing and building ar vr applications we synthesize 8 key barriers that ar vr creators face nowadays starting from prototyping the initial experiences to dealing with the many unknowns during implementation to facing difficulties in testing applications based on our analysis we discuss the importance of considering end user developers as a growing population of ar vr creators how we can build learning opportunities into ar vr tools and the need for building ar vr toolchains that integrate debugging and testing", + "title_raw": "Creating Augmented and Virtual Reality Applications: Current Practices, Challenges, and Opportunities", + "abstract_raw": "Augmented Reality (AR) and Virtual Reality (VR) devices are becoming easier to access and use, but the barrier to entry for creating AR/VR applications remains high. Although the recent spike in HCI research on novel AR/VR tools is promising, we lack insights into how AR/VR creators use today's state-of-the-art authoring tools as well as the types of challenges that they face. We interviewed 21 AR/VR creators, which we grouped into hobbyists, domain experts, and professional designers. 
Despite having a variety of motivations and skillsets, they described similar challenges in designing and building AR/VR applications. We synthesize 8 key barriers that AR/VR creators face nowadays, starting from prototyping the initial experiences to dealing with \"the many unknowns\" during implementation, to facing difficulties in testing applications. Based on our analysis, we discuss the importance of considering end-user developers as a growing population of AR/VR creators, how we can build learning opportunities into AR/VR tools, and the need for building AR/VR toolchains that integrate debugging and testing.", + "link": "https://www.semanticscholar.org/paper/a1409fb1b764d8f986ee0e501f9c97e239a3eee0", + "scraped_abstract": "Augmented Reality (AR) and Virtual Reality (VR) devices are becoming easier to access and use, but the barrier to entry for creating AR/VR applications remains high. Although the recent spike in HCI research on novel AR/VR tools is promising, we lack insights into how AR/VR creators use today's state-of-the-art authoring tools as well as the types of challenges that they face. We interviewed 21 AR/VR creators, which we grouped into hobbyists, domain experts, and professional designers. Despite having a variety of motivations and skillsets, they described similar challenges in designing and building AR/VR applications. We synthesize 8 key barriers that AR/VR creators face nowadays, starting from prototyping the initial experiences to dealing with \"the many unknowns\" during implementation, to facing difficulties in testing applications. 
Based on our analysis, we discuss the importance of considering end-user developers as a growing population of AR/VR creators, how we can build learning opportunities into AR/VR tools, and the need for building AR/VR toolchains that integrate debugging and testing.", + "citation_best": 185 + }, + { + "paper": "3030528496", + "venue": "1163450153", + "year": "2020", + "title": "design study lite methodology expediting design studies and enabling the synergy of visualization pedagogy and social good", + "label": [ + "37836645", + "36464697", + "16963264", + "65682993", + "172367668" + ], + "author": [ + "3032430196", + "2900883138", + "2947246435", + "2195176598", + "1286943279" + ], + "reference": [ + "52809394", + "1526426957", + "1970569592", + "1973973460", + "1987617541", + "2010473024", + "2042400831", + "2047621276", + "2058203255", + "2073214011", + "2073800769", + "2084776154", + "2110158245", + "2118029778", + "2136279581", + "2141033859", + "2142493242", + "2157114416", + "2161114148", + "2611910045", + "2752126964", + "2951741171" + ], + "abstract": "design studies are frequently used to conduct problem driven visualization research by working with real world domain experts in visualization pedagogy design studies are often introduced but rarely practiced due to their large time requirements this limits students to a classroom curriculum often involving projects that may not have implications beyond the classroom thus we present the design study lite methodology a novel framework for implementing design studies with novice students in 14 weeks we utilized the design study lite methodology in conjunction with service learning to teach five data visualization courses and demonstrate that it benefits not only the students but also the community through service to non profit partners in this paper we provide a detailed breakdown of the methodology and how service learning can be incorporated with it we also include an extensive reflection on the methodology 
and provide recommendations for future applications of the framework for teaching visualization courses and research", + "title_raw": "Design Study \"Lite\" Methodology: Expediting Design Studies and Enabling the Synergy of Visualization Pedagogy and Social Good", + "abstract_raw": "Design studies are frequently used to conduct problem-driven visualization research by working with real-world domain experts. In visualization pedagogy, design studies are often introduced but rarely practiced due to their large time requirements. This limits students to a classroom curriculum, often involving projects that may not have implications beyond the classroom. Thus we present the Design Study \"Lite\" Methodology, a novel framework for implementing design studies with novice students in 14 weeks. We utilized the Design Study \"Lite\" Methodology in conjunction with Service-Learning to teach five Data Visualization courses and demonstrate that it benefits not only the students but also the community through service to non-profit partners. In this paper, we provide a detailed breakdown of the methodology and how Service-Learning can be incorporated with it. We also include an extensive reflection on the methodology and provide recommendations for future applications of the framework for teaching visualization courses and research.", + "link": "https://www.semanticscholar.org/paper/7908182914cf22c814e3725b6687234dd4d6db21", + "scraped_abstract": "Design studies are frequently used to conduct problem-driven visualization research by working with real-world domain experts. In visualization pedagogy, design studies are often introduced but rarely practiced due to their large time requirements. This limits students to a classroom curriculum, often involving projects that may not have implications beyond the classroom. Thus we present the Design Study \"Lite\" Methodology, a novel framework for implementing design studies with novice students in 14 weeks. 
We utilized the Design Study \"Lite\" Methodology in conjunction with Service-Learning to teach five Data Visualization courses and demonstrate that it benefits not only the students but also the community through service to non-profit partners. In this paper, we provide a detailed breakdown of the methodology and how Service-Learning can be incorporated with it. We also include an extensive reflection on the methodology and provide recommendations for future applications of the framework for teaching visualization courses and research.", + "citation_best": 26 + }, + { + "paper": "3029149150", + "venue": "1163450153", + "year": "2020", + "title": "designing ambient narrative based interfaces to reflect and motivate physical activity", + "label": [ + "2522767166", + "25621077", + "169093310" + ], + "author": [ + "1894141224", + "3030962138", + "3030680764", + "2125269170", + "3032217263", + "3031841233", + "2935610732", + "2972683810", + "3029527346", + "3029401516", + "2943273397", + "3032888158", + "2635130508", + "29409133" + ], + "reference": [ + "1524652962", + "1581103196", + "1757644187", + "1936508772", + "1967591898", + "1978460036", + "1978630615", + "1983774990", + "1992335563", + "2003659858", + "2006992350", + "2009546847", + "2009721998", + "2018018739", + "2022411152", + "2025298047", + "2045841047", + "2048386111", + "2055350541", + "2056591916", + "2058546262", + "2062865797", + "2067812586", + "2077701726", + "2093767824", + "2094401981", + "2096528587", + "2099257324", + "2104565869", + "2110991980", + "2111756682", + "2114439881", + "2118410924", + "2118772539", + "2119401330", + "2120909070", + "2123755015", + "2129077831", + "2129343065", + "2135019055", + "2141202086", + "2142912849", + "2146588344", + "2149204228", + "2152823229", + "2156221064", + "2165096662", + "2165982949", + "2167872834", + "2168420763", + "2171129594", + "2189491484", + "2407300725", + "2467021583", + "2519860851", + "2611338523", + "2626560014", + "2756147537", + 
"2940664027", + "2989934271", + "2994647068", + "3104394646", + "3123476072" + ], + "abstract": "numerous technologies now exist for promoting more active lifestyles however while quantitative data representations e g charts graphs and statistical reports typify most health tools growing evidence suggests such feedback can not only fail to motivate behavior but may also harm self integrity and fuel negative mindsets about exercise our research seeks to devise alternative more qualitative schemes for encoding personal information in particular this paper explores the design of data driven narratives given the intuitive and persuasive power of stories we present whoiszuki a smartphone application that visualizes physical activities and goals as components of a multi chapter quest where the main character s progress is tied to the user s we report on our design process involving online surveys in lab studies and in the wild deployments aimed at refining the interface and the narrative and gaining a deep understanding of people s experiences with this type of feedback from these insights we contribute recommendations to guide future development of narrative based applications for motivating healthy behavior", + "title_raw": "Designing Ambient Narrative-Based Interfaces to Reflect and Motivate Physical Activity", + "abstract_raw": "Numerous technologies now exist for promoting more active lifestyles. However, while quantitative data representations (e.g., charts, graphs, and statistical reports) typify most health tools, growing evidence suggests such feedback can not only fail to motivate behavior but may also harm self-integrity and fuel negative mindsets about exercise. Our research seeks to devise alternative, more qualitative schemes for encoding personal information. In particular, this paper explores the design of data-driven narratives, given the intuitive and persuasive power of stories. 
We present WhoIsZuki, a smartphone application that visualizes physical activities and goals as components of a multi-chapter quest, where the main character's progress is tied to the user's. We report on our design process involving online surveys, in-lab studies, and in-the-wild deployments, aimed at refining the interface and the narrative and gaining a deep understanding of people's experiences with this type of feedback. From these insights, we contribute recommendations to guide future development of narrative-based applications for motivating healthy behavior.", + "link": "https://www.semanticscholar.org/paper/ab284eb5b25e2fe8a2d81f5cbfc738e9d55ff20c", + "scraped_abstract": null, + "citation_best": 48 + }, + { + "paper": "3030460604", + "venue": "1163450153", + "year": "2020", + "title": "exploring how game genre in student designed games influences computational thinking development", + "label": [ + "2780368719", + "503285160", + "2856821" + ], + "author": [ + "2628283070", + "3031107595", + "3128659610", + "2149102271", + "2779481380", + "2125453455", + "2486024159", + "2520644651", + "2122946840" + ], + "reference": [ + "62065704", + "294653858", + "1537860447", + "1566844395", + "1687473687", + "1963557108", + "1983436187", + "1984280712", + "1986013978", + "1996538286", + "2007508730", + "2014261905", + "2018037215", + "2018548598", + "2032172470", + "2033540643", + "2037149633", + "2039650955", + "2042351467", + "2046605032", + "2059640080", + "2063686855", + "2068086207", + "2082940629", + "2083925149", + "2084944215", + "2087099867", + "2092403008", + "2093789852", + "2094740931", + "2100029531", + "2101398579", + "2105665100", + "2114410422", + "2118560749", + "2119759105", + "2124942656", + "2136291974", + "2255937970", + "2275318040", + "2294905309", + "2324497025", + "2339183141", + "2441881010", + "2487378932", + "2489758317", + "2491875576", + "2531504105", + "2555617532", + "2566696933", + "2591669123", + "2603696004", + "2606994865", + 
"2611516937", + "2728043297", + "2752522198", + "2760706586", + "2784702174", + "2789849566", + "2791155911", + "2795739371", + "2830776011", + "2909872056", + "2948485546", + "2952143692", + "2970277596", + "2989293288", + "2999632887" + ], + "abstract": "game design is increasingly used in modern education to foster computational thinking ct yet it is unclear how and if the game genre of student designed games impact ct and programming we explore how game genre impacts ct development and programming routines in scratch games designed by 8th grade students using a metrics based approach i e dr scratch our findings show that designing particular games e g action storytelling impact ct and programming development we observe for instance that ct skills develop and consolidate fast after which students can focus on aspects more specific to game design based on the results we suggest that researchers and educators in constructionist learning consider the impact of game genre when designing game based curricula for the learning of programming and ct", + "title_raw": "Exploring How Game Genre in Student-Designed Games Influences Computational Thinking Development", + "abstract_raw": "Game design is increasingly used in modern education to foster Computational Thinking (CT). Yet, it is unclear how and if the game genre of student-designed games impact CT and programming. We explore how game genre impacts CT development and programming routines in Scratch games designed by 8th-grade students using a metrics-based approach (i.e., Dr. Scratch). Our findings show that designing particular games (e.g., action, storytelling) impact CT and programming development. We observe, for instance, that CT skills develop and consolidate fast, after which students can focus on aspects more specific to game design. 
Based on the results, we suggest that researchers and educators in constructionist learning consider the impact of game genre when designing game-based curricula for the learning of programming and CT.", + "link": "https://www.semanticscholar.org/paper/dd25b01a270bdb21c4f18f0c75e403b13784d11a", + "scraped_abstract": null, + "citation_best": 25 + }, + { + "paper": "3003276753", + "venue": "1163450153", + "year": "2020", + "title": "me vs super wo man effects of customization and identification in a vr exergame", + "label": [ + "78646695", + "183003079", + "107457646", + "194969405" + ], + "author": [ + "3003652145", + "3003819001", + "3004187492", + "2131547447", + "203114230" + ], + "reference": [ + "50067576", + "286052686", + "645827274", + "646341065", + "1482483329", + "1486776460", + "1564010284", + "1660479143", + "1737133910", + "1880211625", + "1923391821", + "1952915971", + "1964267333", + "1966911549", + "1969992223", + "1972995154", + "1986119901", + "1997599993", + "2001709237", + "2006451753", + "2012281587", + "2012563727", + "2013871799", + "2016962479", + "2020925527", + "2028843820", + "2030684656", + "2031129736", + "2031591317", + "2032177978", + "2036302618", + "2037723572", + "2037870389", + "2039015824", + "2044892642", + "2046622454", + "2050710017", + "2060763381", + "2062975533", + "2069643143", + "2070329089", + "2071878387", + "2072558210", + "2074149232", + "2077744672", + "2078241739", + "2081463760", + "2083860679", + "2084235337", + "2086081369", + "2087948696", + "2090152964", + "2092539420", + "2092625138", + "2095683801", + "2095843376", + "2097677476", + "2097984233", + "2101371764", + "2106063457", + "2108905626", + "2111704340", + "2114395098", + "2117195836", + "2118108696", + "2118361221", + "2125068113", + "2125441287", + "2129512678", + "2129552065", + "2131174334", + "2133084664", + "2133616538", + "2134334767", + "2135375808", + "2137894368", + "2138771401", + "2142849824", + "2143870509", + "2150454725", + "2151572312", + 
"2160144355", + "2162153512", + "2165108227", + "2170899200", + "2171190321", + "2173545186", + "2233946182", + "2234224674", + "2274526007", + "2317230710", + "2374724373", + "2404781118", + "2478535890", + "2494131256", + "2506937591", + "2511478054", + "2572947697", + "2593296946", + "2607839303", + "2769375465", + "2793207213", + "2795552328", + "2796312189", + "2940640317", + "3011177227" + ], + "abstract": "customised avatars are a powerful tool to increase identification engagement and intrinsic motivation in digital games we investigated the effects of customisation in a self competitive vr exergame by modelling players and their previous performance in the game with customised avatars in a first study we found that similar to non exertion games customisation significantly increased identification and intrinsic motivation as well as physical performance in the exergame in a second study we identified a more complex relationship with the customisation style idealised avatars increased wishful identification but decreased exergame performance compared to realistic avatars in a third study we found that enhancing realistic avatars with idealised characteristics increased wishful identification but did not have any adverse effects we discuss the findings based on feedforward and self determination theory proposing notions of intrinsic identification fostering a sense of self and extrinsic identification drawing away from the self to explain the results", + "title_raw": "Me vs. Super(wo)man: Effects of Customization and Identification in a VR Exergame", + "abstract_raw": "Customised avatars are a powerful tool to increase identification, engagement and intrinsic motivation in digital games. We investigated the effects of customisation in a self-competitive VR exergame by modelling players and their previous performance in the game with customised avatars. 
In a first study we found that, similar to non-exertion games, customisation significantly increased identification and intrinsic motivation, as well as physical performance in the exergame. In a second study we identified a more complex relationship with the customisation style: idealised avatars increased wishful identification but decreased exergame performance compared to realistic avatars. In a third study, we found that 'enhancing' realistic avatars with idealised characteristics increased wishful identification, but did not have any adverse effects. We discuss the findings based on feedforward and self-determination theory, proposing notions of intrinsic identification (fostering a sense of self) and extrinsic identification (drawing away from the self) to explain the results.", + "link": "https://www.semanticscholar.org/paper/f636cc0eb0e5d9f78f542618aa57a430691165c8", + "scraped_abstract": "Customised avatars are a powerful tool to increase identification, engagement and intrinsic motivation in digital games. We investigated the effects of customisation in a self-competitive VR exergame by modelling players and their previous performance in the game with customised avatars. In a first study we found that, similar to non-exertion games, customisation significantly increased identification and intrinsic motivation, as well as physical performance in the exergame. In a second study we identified a more complex relationship with the customisation style: idealised avatars increased wishful identification but decreased exergame performance compared to realistic avatars. In a third study, we found that 'enhancing' realistic avatars with idealised characteristics increased wishful identification, but did not have any adverse effects. 
We discuss the findings based on feedforward and self-determination theory, proposing notions of intrinsic identification (fostering a sense of self) and extrinsic identification (drawing away from the self) to explain the results.", + "citation_best": 53 + }, + { + "paper": "3030700920", + "venue": "1163450153", + "year": "2020", + "title": "mrat the mixed reality analytics toolkit", + "label": [ + "107457646", + "79158427", + "2777138346", + "206776904", + "127705205" + ], + "author": [ + "287661457", + "2138305382", + "2942938971", + "3029758231", + "2495104075", + "3028905776", + "3029705902", + "2004014559", + "3032255839", + "3028939136", + "3032498047", + "3190526863", + "3031783244", + "3032607908" + ], + "reference": [ + "1587018963", + "1593163947", + "1965863284", + "2015465704", + "2036674910", + "2050571058", + "2059049498", + "2067257553", + "2071086633", + "2100549663", + "2102005779", + "2122122381", + "2147213239", + "2153958417", + "2159771345", + "2202862721", + "2281242420", + "2799120477", + "2940746775", + "2940924130", + "2941563158", + "2941624906", + "2941630349", + "2941915966", + "2942094826", + "2942106174", + "2942440020", + "2943535317", + "3103127709" + ], + "abstract": "significant tool support exists for the development of mixed reality mr applications however there is a lack of tools for analyzing mr experiences we elicit requirements for future tools through interviews with 8 university research instructional and media teams using ar vr in a variety of domains while we find a common need for capturing how users perform tasks in mr the primary differences were in terms of heuristics and metrics relevant to each project particularly in the early project stages teams were uncertain about what data should and even could be collected with mr technologies we designed the mixed reality analytics toolkit mrat to instrument mr apps via visual editors without programming and enable rapid data collection and filtering for visualizations of 
mr user sessions with mrat we contribute flexible interaction tracking and task definition concepts an extensible set of heuristic techniques and metrics to measure task success and visual inspection tools with in situ visualizations in mr focusing on a multi user cross device mr crisis simulation and triage training app as a case study we then show the benefits of using mrat not only for user testing of mr apps but also performance tuning throughout the design process", + "title_raw": "MRAT: The Mixed Reality Analytics Toolkit", + "abstract_raw": "Significant tool support exists for the development of mixed reality (MR) applications; however, there is a lack of tools for analyzing MR experiences. We elicit requirements for future tools through interviews with 8 university research, instructional, and media teams using AR/VR in a variety of domains. While we find a common need for capturing how users perform tasks in MR, the primary differences were in terms of heuristics and metrics relevant to each project. Particularly in the early project stages, teams were uncertain about what data should, and even could, be collected with MR technologies. We designed the Mixed Reality Analytics Toolkit (MRAT) to instrument MR apps via visual editors without programming and enable rapid data collection and filtering for visualizations of MR user sessions. With MRAT, we contribute flexible interaction tracking and task definition concepts, an extensible set of heuristic techniques and metrics to measure task success, and visual inspection tools with in-situ visualizations in MR. 
Focusing on a multi-user, cross-device MR crisis simulation and triage training app as a case study, we then show the benefits of using MRAT, not only for user testing of MR apps, but also performance tuning throughout the design process.", + "link": "https://www.semanticscholar.org/paper/af95004e60c9f54ffef34a634b1d80b8d60c4b30", + "scraped_abstract": null, + "citation_best": 49 + }, + { + "paper": "3029640333", + "venue": "1163450153", + "year": "2020", + "title": "pensight enhanced interaction with a pen top camera", + "label": [ + "13854087", + "43521106", + "2776694159", + "207347870", + "31972630", + "108583219" + ], + "author": [ + "39539933", + "2898088992", + "3176594460", + "2154794983" + ], + "reference": [ + "1499050920", + "1965447681", + "1986606504", + "1986707552", + "1988430201", + "1990438997", + "2000649959", + "2008834805", + "2021291149", + "2067870298", + "2070885641", + "2097361084", + "2099800354", + "2106941316", + "2108225567", + "2108598243", + "2108715885", + "2109075207", + "2113033897", + "2116691011", + "2118603960", + "2139257564", + "2145491077", + "2146631093", + "2149962092", + "2150187269", + "2152528000", + "2169435375", + "2194775991", + "2209204668", + "2252227770", + "2343052201", + "2402069821", + "2535521690", + "2559085405", + "2605982830", + "2610251148", + "2611233583", + "2611257466", + "2761042874", + "2765846985", + "2766880125", + "2894714727", + "2896800351", + "2897765997", + "2905199702", + "2941054658", + "2941359057", + "2941909295", + "2987420683" + ], + "abstract": "we propose mounting a downward facing camera above the top end of a digital tablet pen this creates a unique and practical viewing angle for capturing the pen holding hand and the immediate surroundings which can include the other hand the fabrication of a prototype device is described and the enabled interaction design space is explored including dominant and non dominant hand pose recognition tablet grip detection hand gestures capturing physical 
content in the environment and detecting users and pens a deep learning computer vision pipeline is developed for classification regression and keypoint detection to enable these interactions example applications demonstrate usage scenarios and a qualitative user evaluation confirms the potential of the approach", + "title_raw": "PenSight: Enhanced Interaction with a Pen-Top Camera", + "abstract_raw": "We propose mounting a downward-facing camera above the top end of a digital tablet pen. This creates a unique and practical viewing angle for capturing the pen-holding hand and the immediate surroundings which can include the other hand. The fabrication of a prototype device is described and the enabled interaction design space is explored, including dominant and non-dominant hand pose recognition, tablet grip detection, hand gestures, capturing physical content in the environment, and detecting users and pens. A deep learning computer vision pipeline is developed for classification, regression, and keypoint detection to enable these interactions. 
Example applications demonstrate usage scenarios and a qualitative user evaluation confirms the potential of the approach.", + "link": "https://www.semanticscholar.org/paper/347cca553c0cd891b376c7b77145c3ea18e549ce", + "scraped_abstract": null, + "citation_best": 25 + }, + { + "paper": "3029785311", + "venue": "1163450153", + "year": "2020", + "title": "techniques for flexible responsive visualization design", + "label": [ + "183003079", + "107457646", + "36464697", + "186967261" + ], + "author": [ + "2228940112", + "2303505900", + "2110342502" + ], + "reference": [ + "2053859636", + "2055948336", + "2110591634", + "2135415614", + "2160382748", + "2795857247", + "2897426720", + "2940876996" + ], + "abstract": "responsive visualizations adapt to effectively present information based on the device context such adaptations are essential for news content that is increasingly consumed on mobile devices however existing tools provide little support for responsive visualization design we analyze a corpus of 231 responsive news visualizations and discuss formative interviews with five journalists about responsive visualization design these interviews motivate four central design guidelines enable simultaneous cross device edits facilitate device specific customization show cross device previews and support propagation of edits based on these guidelines we present a prototype system that allows users to preview and edit multiple visualization versions simultaneously we demonstrate the utility of the system features by recreating four real world responsive visualizations from our corpus", + "title_raw": "Techniques for Flexible Responsive Visualization Design", + "abstract_raw": "Responsive visualizations adapt to effectively present information based on the device context. Such adaptations are essential for news content that is increasingly consumed on mobile devices. However, existing tools provide little support for responsive visualization design. 
We analyze a corpus of 231 responsive news visualizations and discuss formative interviews with five journalists about responsive visualization design. These interviews motivate four central design guidelines: enable simultaneous cross-device edits, facilitate device-specific customization, show cross-device previews, and support propagation of edits. Based on these guidelines, we present a prototype system that allows users to preview and edit multiple visualization versions simultaneously. We demonstrate the utility of the system features by recreating four real-world responsive visualizations from our corpus.", + "link": "https://www.semanticscholar.org/paper/8b81ddbc457189c793feec802374f4d8160199d2", + "scraped_abstract": null, + "citation_best": 56 + }, + { + "paper": "3029893033", + "venue": "1163450153", + "year": "2020", + "title": "texsketch active diagramming through pen and ink annotations", + "label": [ + "186399060", + "177212765", + "107457646", + "2777904410" + ], + "author": [ + "2225895094", + "2171980484", + "2104052658", + "2305277957" + ], + "reference": [ + "1558433517", + "1963571134", + "1965693187", + "1966042082", + "1968134969", + "1969319597", + "1970082458", + "1985594972", + "1991500058", + "2009220433", + "2010950062", + "2030533114", + "2037514117", + "2040048368", + "2051950131", + "2069581045", + "2083847157", + "2090990377", + "2094567668", + "2115578279", + "2115685289", + "2118726258", + "2125952021", + "2133788799", + "2141618888", + "2148713609", + "2151191734", + "2154679615", + "2155811000", + "2164430989", + "2167361454", + "2170541075", + "2175498352", + "2753821824", + "2768531527", + "2912360407", + "3119340533" + ], + "abstract": "learning from text is a constructive activity in which sentence level information is combined by the reader to build coherent mental models with increasingly complex texts forming a mental model becomes challenging due to a lack of background knowledge and limits in working memory and attention 
to address this we are taught knowledge externalization strategies such as active reading and diagramming unfortunately paper and pencil approaches may not always be appropriate and software solutions create friction through difficult input modalities limited workflow support and barriers between reading and diagramming for all but the simplest text building coherent diagrams can be tedious and difficult we propose active diagramming an approach extending familiar active reading strategies to the task of diagram construction our prototype texsketch combines pen and ink interactions with natural language processing to reduce the cost of producing diagrams while maintaining the cognitive effort necessary for comprehension our user study finds that readers can effectively create diagrams without disrupting reading", + "title_raw": "texSketch: Active Diagramming through Pen-and-Ink Annotations", + "abstract_raw": "Learning from text is a constructive activity in which sentence-level information is combined by the reader to build coherent mental models. With increasingly complex texts, forming a mental model becomes challenging due to a lack of background knowledge, and limits in working memory and attention. To address this, we are taught knowledge externalization strategies such as active reading and diagramming. Unfortunately, paper-and-pencil approaches may not always be appropriate, and software solutions create friction through difficult input modalities, limited workflow support, and barriers between reading and diagramming. For all but the simplest text, building coherent diagrams can be tedious and difficult. We propose Active Diagramming, an approach extending familiar active reading strategies to the task of diagram construction. Our prototype, texSketch, combines pen-and-ink interactions with natural language processing to reduce the cost of producing diagrams while maintaining the cognitive effort necessary for comprehension. 
Our user study finds that readers can effectively create diagrams without disrupting reading.", + "link": "https://www.semanticscholar.org/paper/b1474115e472cbc0f0ca7800b47f771bd16cadd4", + "scraped_abstract": null, + "citation_best": 28 + }, + { + "paper": "3029091907", + "venue": "1163450153", + "year": "2020", + "title": "trigeminal based temperature illusions", + "label": [ + "107457646", + "152086174" + ], + "author": [ + "2910884304", + "2751525545", + "2128317731" + ], + "reference": [ + "97531205", + "111007040", + "167870992", + "264017219", + "1481989529", + "1562645017", + "1773343540", + "1884577175", + "1963648925", + "1973129274", + "1973778497", + "1988860455", + "1995330222", + "2002849465", + "2003541405", + "2015521515", + "2020953245", + "2045783685", + "2053901442", + "2059745508", + "2060453803", + "2066625506", + "2073789878", + "2076019452", + "2078249385", + "2094829462", + "2095548925", + "2097756473", + "2103701937", + "2106064607", + "2127218939", + "2131647620", + "2148225483", + "2153429336", + "2163203445", + "2272113778", + "2319307938", + "2321166002", + "2331408567", + "2345685671", + "2346747312", + "2397519240", + "2481501602", + "2524418100", + "2546189711", + "2611096819", + "2611509278", + "2727578252", + "2737702410", + "2752309345", + "2767891136", + "2789958075", + "2792814336", + "2796270307", + "2798837038", + "2800730642", + "2802070785", + "2895479873", + "2896581863", + "2902521861", + "2917744131", + "2923060138", + "2941965057" + ], + "abstract": "we explore a temperature illusion that uses low powered electronics and enables the miniaturization of simple warm and cool sensations our illusion relies on the properties of certain scents such as the coolness of mint or hotness of peppers these odors trigger not only the olfactory bulb but also the nose s trigeminal nerve which has receptors that respond to both temperature and chemicals to exploit this we engineered a wearable device based on micropumps and an atomizer 
that emits up to three custom made thermal scents directly to the user s nose breathing in these scents causes the user to feel warmer or cooler we demonstrate how our device renders warmth and cooling sensations in virtual experiences in our first study we evaluated six candidate thermal scents we found two hot cold pairs with one pair being less identifiable by odor in our second study pparticipants rated vr experiences with our device trigeminal stimulants as significantly warmer or cooler than the baseline conditions lastly we believe this offers an alternative to existing thermal feedback devices which unfortunately rely on power hungry heat lamps or peltier elements", + "title_raw": "Trigeminal-based Temperature Illusions", + "abstract_raw": "We explore a temperature illusion that uses low-powered electronics and enables the miniaturization of simple warm and cool sensations. Our illusion relies on the properties of certain scents, such as the coolness of mint or hotness of peppers. These odors trigger not only the olfactory bulb, but also the nose's trigeminal nerve, which has receptors that respond to both temperature and chemicals. To exploit this, we engineered a wearable device based on micropumps and an atomizer that emits up to three custom-made \"thermal\" scents directly to the user's nose. Breathing in these scents causes the user to feel warmer or cooler. We demonstrate how our device renders warmth and cooling sensations in virtual experiences. In our first study, we evaluated six candidate \"thermal\" scents. We found two hot-cold pairs, with one pair being less identifiable by odor. In our second study, pParticipants rated VR experiences with our device trigeminal stimulants as significantly warmer or cooler than the baseline conditions. 
Lastly, we believe this offers an alternative to existing thermal feedback devices, which unfortunately rely on power-hungry heat-lamps or Peltier-elements.", + "link": "https://www.semanticscholar.org/paper/c9b2d4236b56ba7ad37b078e1c894b0e79b711c4", + "scraped_abstract": "We explore a temperature illusion that uses low-powered electronics and enables the miniaturization of simple warm and cool sensations. Our illusion relies on the properties of certain scents, such as the coolness of mint or hotness of peppers. These odors trigger not only the olfactory bulb, but also the nose's trigeminal nerve, which has receptors that respond to both temperature and chemicals. To exploit this, we engineered a wearable device based on micropumps and an atomizer that emits up to three custom-made \"thermal\" scents directly to the user's nose. Breathing in these scents causes the user to feel warmer or cooler. We demonstrate how our device renders warmth and cooling sensations in virtual experiences. In our first study, we evaluated six candidate \"thermal\" scents. We found two hot-cold pairs, with one pair being less identifiable by odor. In our second study, pParticipants rated VR experiences with our device trigeminal stimulants as significantly warmer or cooler than the baseline conditions. 
Lastly, we believe this offers an alternative to existing thermal feedback devices, which unfortunately rely on power-hungry heat-lamps or Peltier-elements.", + "citation_best": 83 + }, + { + "paper": "3031927435", + "venue": "1163450153", + "year": "2020", + "title": "wireality enabling complex tangible geometries in virtual reality with worn multi string haptics", + "label": [ + "35173682", + "107457646", + "1462715", + "194969405", + "152086174", + "171268870" + ], + "author": [ + "3029918594", + "2305394960", + "3028610108", + "2123491528" + ], + "reference": [ + "57383190", + "1493450182", + "2020966348", + "2042512356", + "2055310310", + "2106383691", + "2116396066", + "2118639279", + "2142774015", + "2286501983", + "2534521461", + "2536013731", + "2536605168", + "2565842466", + "2611096819", + "2611884502", + "2754552787", + "2766544714", + "2784809774", + "2788551228", + "2791390850", + "2793637051", + "2795381942", + "2796086529", + "2809871420", + "2897561735", + "2897976513", + "2899555115", + "2905238358", + "2940592368", + "2940901411", + "2941383056", + "2942440020", + "2951850174", + "2958740673" + ], + "abstract": "today s virtual reality vr systems allow users to explore immersive new worlds and experiences through sight unfortunately most vr systems lack haptic feedback and even high end consumer systems use only basic vibration motors this clearly precludes realistic physical interactions with virtual objects larger obstacles such as walls railings and furniture are not simulated at all in response we developed wireality a self contained worn system that allows for individual joints on the hands to be accurately arrested in 3d space through the use of retractable wires that can be programmatically locked this allows for convincing tangible interactions with complex geometries such as wrapping fingers around a railing our approach is lightweight low cost and low power criteria important for future worn consumer uses in our studies we further show 
that our system is fast acting spatially accurate high strength comfortable and immersive", + "title_raw": "Wireality: Enabling Complex Tangible Geometries in Virtual Reality with Worn Multi-String Haptics", + "abstract_raw": "Today's virtual reality (VR) systems allow users to explore immersive new worlds and experiences through sight. Unfortunately, most VR systems lack haptic feedback, and even high-end consumer systems use only basic vibration motors. This clearly precludes realistic physical interactions with virtual objects. Larger obstacles, such as walls, railings, and furniture are not simulated at all. In response, we developed Wireality, a self-contained worn system that allows for individual joints on the hands to be accurately arrested in 3D space through the use of retractable wires that can be programmatically locked. This allows for convincing tangible interactions with complex geometries, such as wrapping fingers around a railing. Our approach is lightweight, low-cost, and low-power, criteria important for future, worn consumer uses. In our studies, we further show that our system is fast-acting, spatially-accurate, high-strength, comfortable, and immersive.", + "link": "https://www.semanticscholar.org/paper/ee7f663d0a3752904584b2a27084e9dd726c83a4", + "scraped_abstract": "Today's virtual reality (VR) systems allow users to explore immersive new worlds and experiences through sight. Unfortunately, most VR systems lack haptic feedback, and even high-end consumer systems use only basic vibration motors. This clearly precludes realistic physical interactions with virtual objects. Larger obstacles, such as walls, railings, and furniture are not simulated at all. In response, we developed Wireality, a self-contained worn system that allows for individual joints on the hands to be accurately arrested in 3D space through the use of retractable wires that can be programmatically locked. 
This allows for convincing tangible interactions with complex geometries, such as wrapping fingers around a railing. Our approach is lightweight, low-cost, and low-power, criteria important for future, worn consumer uses. In our studies, we further show that our system is fast-acting, spatially-accurate, high-strength, comfortable, and immersive.", + "citation_best": 104 + }, + { + "paper": "3011590361", + "venue": "1163450153", + "year": "2020", + "title": "wrex a unified programming by example interaction for synthesizing readable code for data scientists", + "label": [ + "2778999678", + "167955471", + "49585438", + "107457646", + "2776937632", + "136197465" + ], + "author": [ + "2820645236", + "1989569963", + "2285416364", + "2163972128", + "310804771" + ], + "reference": [ + "1551385575", + "2012401665", + "2044102377", + "2049311030", + "2064766209", + "2065394549", + "2112501366", + "2132525863", + "2132667707", + "2142126234", + "2143677795", + "2144951274", + "2146105230", + "2164611950", + "2199882249", + "2425230667", + "2550471858", + "2612824201", + "2766697724", + "2796040126", + "2798578675", + "2889073286", + "2896298055", + "2964264982" + ], + "abstract": "data wrangling is a difficult and time consuming activity in computational notebooks and existing wrangling tools do not fit the exploratory workflow for data scientists in these environments we propose a unified interaction model based on programming by example that generates readable code for a variety of useful data transformations implemented as a jupyter notebook extension called wrex user study results demonstrate that data scientists are significantly more effective and efficient at data wrangling with wrex over manual programming qualitative participant feedback indicates that wrex was useful and reduced barriers in having to recall or look up the usage of various data transform functions the synthesized code allowed data scientists to verify the intended data transformation increased 
their trust and confidence in wrex and fit seamlessly within their cell based notebook workflows this work suggests that presenting readable code to professional data scientists is an indispensable component of offering data wrangling tools in notebooks", + "title_raw": "Wrex: A Unified Programming-by-Example Interaction for Synthesizing Readable Code for Data Scientists", + "abstract_raw": "Data wrangling is a difficult and time-consuming activity in computational notebooks, and existing wrangling tools do not fit the exploratory workflow for data scientists in these environments. We propose a unified interaction model based on programming-by-example that generates readable code for a variety of useful data transformations, implemented as a Jupyter notebook extension called Wrex. User study results demonstrate that data scientists are significantly more effective and efficient at data wrangling with Wrex over manual programming. Qualitative participant feedback indicates that Wrex was useful and reduced barriers in having to recall or look up the usage of various data transform functions. The synthesized code allowed data scientists to verify the intended data transformation, increased their trust and confidence in Wrex, and fit seamlessly within their cell-based notebook workflows. 
This work suggests that presenting readable code to professional data scientists is an indispensable component of offering data wrangling tools in notebooks.", + "link": "https://www.semanticscholar.org/paper/b01ac6b990770092c6784f6eda8f3e94e2feb5a8", + "scraped_abstract": null, + "citation_best": 100 + }, + { + "paper": "3035523051", + "venue": "1158167855", + "year": "2020", + "title": "unsupervised learning of probably symmetric deformable 3d objects from images in the wild", + "label": [ + "101738243", + "154945302", + "64729616", + "8038995", + "31510193", + "31972630", + "52102323", + "141379421" + ], + "author": [ + "2770720883", + "2251807875", + "332962150" + ], + "reference": [ + "19301072", + "97083571", + "183071939", + "1520997877", + "1567532702", + "1834627138", + "1977295328", + "2013599012", + "2017814585", + "2051297709", + "2067164770", + "2083880226", + "2097307110", + "2107037917", + "2118304946", + "2124600577", + "2147334734", + "2171740948", + "2190691619", + "2321727850", + "2520707372", + "2542323081", + "2546066744", + "2561074213", + "2582734987", + "2600383743", + "2604672468", + "2609883120", + "2784996692", + "2812468425", + "2883221003", + "2889582485", + "2889980536", + "2903206492", + "2945729334", + "2952610664", + "2962742544", + "2962760512", + "2962835968", + "2962946389", + "2963022858", + "2963409406", + "2963527086", + "2963590054", + "2963654727", + "2963823554", + "2963850211", + "2963958774", + "2963995996", + "2964020152", + "2964053173", + "2968940310", + "2969485315", + "2970086547", + "2973948937", + "2974067445", + "2978506573", + "2981081013", + "2990173985", + "3000817459", + "3004414671", + "3101531717", + "3103596843" + ], + "abstract": "we propose a method to learn 3d deformable object categories from raw single view images without external supervision the method is based on an autoencoder that factors each input image into depth albedo viewpoint and illumination in order to disentangle these components without 
supervision we use the fact that many object categories have at least in principle a symmetric structure we show that reasoning about illumination allows us to exploit the underlying object symmetry even if the appearance is not symmetric due to shading furthermore we model objects that are probably but not certainly symmetric by predicting a symmetry probability map learned end to end with the other components of the model our experiments show that this method can recover very accurately the 3d shape of human faces cat faces and cars from single view images without any supervision or a prior shape model on benchmarks we demonstrate superior accuracy compared to another method that uses supervision at the level of 2d image correspondences", + "title_raw": "Unsupervised Learning of Probably Symmetric Deformable 3D Objects From Images in the Wild", + "abstract_raw": "We propose a method to learn 3D deformable object categories from raw single-view images, without external supervision. The method is based on an autoencoder that factors each input image into depth, albedo, viewpoint and illumination. In order to disentangle these components without supervision, we use the fact that many object categories have, at least in principle, a symmetric structure. We show that reasoning about illumination allows us to exploit the underlying object symmetry even if the appearance is not symmetric due to shading. Furthermore, we model objects that are probably, but not certainly, symmetric by predicting a symmetry probability map, learned end-to-end with the other components of the model. Our experiments show that this method can recover very accurately the 3D shape of human faces, cat faces and cars from single-view images, without any supervision or a prior shape model. 
On benchmarks, we demonstrate superior accuracy compared to another method that uses supervision at the level of 2D image correspondences.", + "link": " https://www.semanticscholar.org/paper/2245620c912d669dd6ceb325c127ecbba01b1516", + "scraped_abstract": null, + "citation_best": 220 + }, + { + "paper": "3126291038", + "venue": "1150208541", + "year": "2020", + "title": "an equivalence between private classification and online prediction", + "label": [ + "112972136", + "80444323", + "2777723229", + "203313322", + "23130292", + "46686674" + ], + "author": [ + "2025031154", + "839208152", + "2114518581" + ], + "reference": [ + "136004222", + "607505555", + "1508384000", + "1570963478", + "1790582767", + "1981635503", + "1993116423", + "2019363670", + "2074586650", + "2077723394", + "2100960835", + "2129113961", + "2139338362", + "2167372639", + "2169401877", + "2245160765", + "2397629669", + "2513180554", + "2768483212", + "2932647247", + "2963473170", + "2963486588", + "2970500190", + "2971608487", + "2971613228", + "2982531080", + "3005389535", + "3023250268", + "3046593760", + "3046893838" + ], + "abstract": "we prove that every concept class with finite littlestone dimension can be learned by an approximate differentially private algorithm this answers an open question of alon et al stoc 2019 who proved the converse statement this question was also asked by neel et al focs 2019 together these two results yield an equivalence between online learnability and private pac learnability we introduce a new notion of algorithmic stability called global stability which is essential to our proof and may be of independent interest we also discuss an application of our results to boosting the privacy and accuracy parameters of differentially private learners", + "title_raw": "An Equivalence Between Private Classification and Online Prediction", + "abstract_raw": "We prove that every concept class with finite Littlestone dimension can be learned by an (approximate) 
differentially-private algorithm. This answers an open question of Alon et al. (STOC 2019) who proved the converse statement (this question was also asked by Neel et al. (FOCS 2019)). Together these two results yield an equivalence between online learnability and private PAC learnability. We introduce a new notion of algorithmic stability called \u201cglobal stability\u201d which is essential to our proof and may be of independent interest. We also discuss an application of our results to boosting the privacy and accuracy parameters of differentially-private learners.", + "link": "https://scholar.google.com/scholar?as_q=&num=10&btnG=Search+Scholar&as_epq=An+Equivalence+Between+Private+Classification+and+Online+Prediction&as_oq=&as_eq=&as_occt=any&as_sauthors=Bun", + "scraped_abstract": "We prove that every concept class with finite Littlestone dimension can be learned by an (approximate) differentially-private algorithm. This answers an open question of Alon et al. (STOC 2019) who proved the converse statement (this question was also asked by Neel et al. (FOCS 2019)). Together these two results yield an equivalence between online learnability and private PAC learnability. We introduce a new notion of algorithmic stability called \u201cglobal stability\u201d which is essential to our proof and may be of independent interest. 
We also discuss an application of our results to boosting the privacy and accuracy parameters of differentially-private learners.", + "citation_best": 3 + }, + { + "paper": "3104260925", + "venue": "1199533187", + "year": "2020", + "title": "a principled approach to graphql query cost analysis", + "label": [ + "97686452", + "124246873", + "37836645", + "192028432", + "116253237", + "93996380", + "127613066", + "23123220" + ], + "author": [ + "3099098258", + "757425139", + "1586471906", + "2606103259", + "2027215001", + "1983204165" + ], + "reference": [ + "2789091536", + "2804365050", + "2982298417", + "3103328430", + "3105133011" + ], + "abstract": "the landscape of web apis is evolving to meet new client requirements and to facilitate how providers fulfill them a recent web api model is graphql which is both a query language and a runtime using graphql client queries express the data they want to retrieve or mutate and servers respond with exactly those data or changes graphql s expressiveness is risky for service providers because clients can succinctly request stupendous amounts of data and responding to overly complex queries can be costly or disrupt service availability recent empirical work has shown that many service providers are at risk using traditional api management methods is not sufficient and practitioners lack principled means of estimating and measuring the cost of the graphql queries they receive in this work we present a linear time graphql query analysis that can measure the cost of a query without executing it our approach can be applied in a separate api management layer and used with arbitrary graphql backends in contrast to existing static approaches our analysis supports common graphql conventions that affect query cost and our analysis is provably correct based on our formal specification of graphql semantics we demonstrate the potential of our approach using a novel graphql query response corpus for two commercial graphql apis our query 
analysis consistently obtains upper cost bounds tight enough relative to the true response sizes to be actionable for service providers in contrast existing static graphql query analyses exhibit over estimates and under estimates because they fail to support graphql conventions", + "title_raw": "A Principled Approach to GraphQL Query Cost Analysis", + "abstract_raw": "The landscape of web APIs is evolving to meet new client requirements and to facilitate how providers fulfill them. A recent web API model is GraphQL, which is both a query language and a runtime. Using GraphQL, client queries express the data they want to retrieve or mutate, and servers respond with exactly those data or changes. GraphQL's expressiveness is risky for service providers because clients can succinctly request stupendous amounts of data, and responding to overly complex queries can be costly or disrupt service availability. Recent empirical work has shown that many service providers are at risk. Using traditional API management methods is not sufficient, and practitioners lack principled means of estimating and measuring the cost of the GraphQL queries they receive. In this work, we present a linear-time GraphQL query analysis that can measure the cost of a query without executing it. Our approach can be applied in a separate API management layer and used with arbitrary GraphQL backends. In contrast to existing static approaches, our analysis supports common GraphQL conventions that affect query cost, and our analysis is provably correct based on our formal specification of GraphQL semantics. We demonstrate the potential of our approach using a novel GraphQL query-response corpus for two commercial GraphQL APIs. Our query analysis consistently obtains upper cost bounds, tight enough relative to the true response sizes to be actionable for service providers. 
In contrast, existing static GraphQL query analyses exhibit over-estimates and under-estimates because they fail to support GraphQL conventions.", + "link": "https://scholar.google.com/scholar?as_q=&num=10&btnG=Search+Scholar&as_epq=A+Principled+Approach+to+GraphQL+Query+Cost+Analysis&as_oq=&as_eq=&as_occt=any&as_sauthors=Cha", + "scraped_abstract": null, + "citation_best": 9 + }, + { + "paper": "3101845936", + "venue": "1199533187", + "year": "2020", + "title": "boosting fuzzer efficiency an information theoretic perspective", + "label": [ + "114289077", + "46686674", + "149091818", + "80444323", + "111065885" + ], + "author": [ + "2154856611", + "2902904979", + "2282250597" + ], + "reference": [ + "1546956568", + "1990414292", + "1995875735", + "2050238751", + "2060573639", + "2065684071", + "2067416361", + "2088917779", + "2095093991", + "2106065105", + "2106235393", + "2128128820", + "2130514924", + "2138428785", + "2340281863", + "2757104921", + "2765435026", + "2865298191", + "2891235722", + "2947109320", + "2963147982", + "2964241064", + "2979357014", + "3046946156", + "3047947484", + "3089794841", + "3099627437", + "3102086861", + "3104664063", + "3109101477" + ], + "abstract": "", + "title_raw": "Boosting fuzzer efficiency: an information theoretic perspective.", + "abstract_raw": "", + "link": "https://scholar.google.com/scholar?as_q=&num=10&btnG=Search+Scholar&as_epq=Boosting+fuzzer+efficiency:+an+information+theoretic+perspective&as_oq=&as_eq=&as_occt=any&as_sauthors=Bohme", + "scraped_abstract": null, + "citation_best": 80 + }, + { + "paper": "3099627437", + "venue": "1199533187", + "year": "2020", + "title": "community expectations for research artifacts and evaluation processes", + "label": [ + "34127721", + "2522767166" + ], + "author": [ + "2990417269", + "3205847921", + "2145688174" + ], + "reference": [ + "1983830972", + "2032593675", + "2034158106", + "2092388562", + "2113533445", + "2125759561", + "2167926541", + "2283438976", + "2406493898", + 
"2740060156", + "2768459524", + "2900842731", + "2913200795", + "2942995248", + "2951495752", + "3030678554", + "3091970108", + "3121939465" + ], + "abstract": "", + "title_raw": "Community expectations for research artifacts and evaluation processes.", + "abstract_raw": "", + "link": "https://scholar.google.com/scholar?as_q=&num=10&btnG=Search+Scholar&as_epq=Community+expectations+for+research+artifacts+and+evaluation+processes&as_oq=&as_eq=&as_occt=any&as_sauthors=Hermann", + "scraped_abstract": "Background. Artifact evaluation has been introduced into the software engineering and programming languages research community with a pilot at ESEC/FSE 2011 and has since then enjoyed a healthy adoption throughout the conference landscape. Objective. In this qualitative study, we examine the expectations of the community toward research artifacts and their evaluation processes. Method. We conducted a survey including all members of artifact evaluation committees of major conferences in the software engineering and programming language field since the first pilot and compared the answers to expectations set by calls for artifacts and reviewing guidelines. Results. While we find that some expectations exceed the ones expressed in calls and reviewing guidelines, there is no consensus on quality thresholds for artifacts in general. We observe very specific quality expectations for specific artifact types for review and later usage, but also a lack of their communication in calls. We also find problematic inconsistencies in the terminology used to express artifact evaluation\u2019s most important purpose \u2013 replicability. Conclusion. 
We derive several actionable suggestions which can help to mature artifact evaluation in the inspected community and also to aid its introduction into other communities in computer science.", + "citation_best": 31 + }, + { + "paper": "3104663419", + "venue": "1199533187", + "year": "2020", + "title": "deep learning library testing via effective model generation", + "label": [ + "119857082", + "167955471", + "149091818", + "173801870", + "108583219", + "2776159882" + ], + "author": [ + "2747044602", + "3041465469", + "2277988947", + "3171446376", + "2888277670" + ], + "reference": [ + "109452506", + "1965969360", + "1996977322", + "2002386085", + "2032754744", + "2065555413", + "2119112357", + "2135841285", + "2143612262", + "2168231600", + "2169004268", + "2170260129", + "2343875716", + "2370472429", + "2473248416", + "2514806119", + "2565186948", + "2565778127", + "2611386757", + "2616028256", + "2725449579", + "2739959195", + "2755657284", + "2761359361", + "2782311202", + "2795354529", + "2799640043", + "2804337238", + "2850992922", + "2859484040", + "2888307014", + "2898868990", + "2899445138", + "2926962417", + "2942544869", + "2947133760", + "2947815220", + "2954629067", + "2954903132", + "2957905354", + "2958754741", + "2963327228", + "2963913218", + "2964164993", + "2968370566", + "2969772318", + "2972204217", + "2973084513", + "3000315285", + "3007855180", + "3041012898", + "3047008933", + "3105347387", + "3122945969", + "3124767051", + "3146215426" + ], + "abstract": "", + "title_raw": "Deep learning library testing via effective model generation.", + "abstract_raw": "", + "link": "https://scholar.google.com/scholar?as_q=&num=10&btnG=Search+Scholar&as_epq=Deep+learning+library+testing+via+effective+model+generation&as_oq=&as_eq=&as_occt=any&as_sauthors=Wang", + "scraped_abstract": "Deep learning (DL) techniques are rapidly developed and have been widely adopted in practice. 
However, similar to traditional software systems, DL systems also contain bugs, which could cause serious impacts especially in safety-critical domains. Recently, many research approaches have focused on testing DL models, while little attention has been paid for testing DL libraries, which is the basis of building DL models and directly affects the behavior of DL systems. In this work, we propose a novel approach, LEMON, to testing DL libraries. In particular, we (1) design a series of mutation rules for DL models, with the purpose of exploring different invoking sequences of library code and hard-to-trigger behaviors; and (2) propose a heuristic strategy to guide the model generation process towards the direction of amplifying the inconsistent degrees of the inconsistencies between different DL libraries caused by bugs, so as to mitigate the impact of potential noise introduced by uncertain factors in DL libraries. We conducted an empirical study to evaluate the effectiveness of LEMON with 20 release versions of 4 widely-used DL libraries, i.e., TensorFlow, Theano, CNTK, MXNet. The results demonstrate that LEMON detected 24 new bugs in the latest release versions of these libraries, where 7 bugs have been confirmed and one bug has been fixed by developers. 
Besides, the results confirm that the heuristic strategy for model generation indeed effectively guides LEMON in amplifying the inconsistent degrees for bugs.", + "citation_best": 111 + }, + { + "paper": "3109004940", + "venue": "1199533187", + "year": "2020", + "title": "detecting numerical bugs in neural network architectures", + "label": [ + "50644808", + "113775141", + "2780654840", + "48044578", + "2777904410", + "108583219", + "97686452" + ], + "author": [ + "2825538765", + "2809494200", + "2754983945", + "2150281962", + "2147693338", + "1920070090" + ], + "reference": [ + "60370665", + "2015362443", + "2031373197", + "2043100293", + "2060697066", + "2108241838", + "2121415300", + "2543296129", + "2594877703", + "2616028256", + "2721006554", + "2791251367", + "2793633339", + "2794609696", + "2799423598", + "2804337238", + "2850992922", + "2884426148", + "2900153411", + "2909765152", + "2917325587", + "2922015121", + "2938701611", + "2954903132", + "2963207607", + "2963327228", + "2963673089", + "2963735478", + "2963857521", + "2963913218", + "2964253222", + "2968594320", + "2981639301", + "3099972168", + "3100198463" + ], + "abstract": "", + "title_raw": "Detecting numerical bugs in neural network architectures", + "abstract_raw": "", + "link": "https://scholar.google.com/scholar?as_q=&num=10&btnG=Search+Scholar&as_epq=Detecting+numerical+bugs+in+neural+network+architectures&as_oq=&as_eq=&as_occt=any&as_sauthors=Zhang", + "scraped_abstract": null, + "citation_best": 49 + }, + { + "paper": "3103697033", + "venue": "1199533187", + "year": "2020", + "title": "on decomposing a deep neural network into modules", + "label": [ + "49585438", + "154945302", + "50644808", + "190502265", + "149091818", + "2777904410", + "108583219" + ], + "author": [ + "2947513754", + "2046826343" + ], + "reference": [ + "54398672", + "1537017777", + "1539693001", + "1591471358", + "1991604845", + "2010425280", + "2029414465", + "2040348150", + "2055103902", + "2063867591", + 
"2108999965", + "2112796928", + "2115403315", + "2116120550", + "2117153488", + "2124961556", + "2134119432", + "2152464310", + "2412094331", + "2504108613", + "2734358244", + "2750384547", + "2891768589", + "2902986194", + "3144030986" + ], + "abstract": "", + "title_raw": "On decomposing a deep neural network into modules", + "abstract_raw": "", + "link": "https://scholar.google.com/scholar?as_q=&num=10&btnG=Search+Scholar&as_epq=On+decomposing+a+deep+neural+network+into+modules&as_oq=&as_eq=&as_occt=any&as_sauthors=Pan", + "scraped_abstract": "Deep learning is being incorporated in many modern software systems. Deep learning approaches train a deep neural network (DNN) model using training examples, and then use the DNN model for prediction. While the structure of a DNN model as layers is observable, the model is treated in its entirety as a monolithic component. To change the logic implemented by the model, e.g. to add/remove logic that recognizes inputs belonging to a certain class, or to replace the logic with an alternative, the training examples need to be changed and the DNN needs to be retrained using the new set of examples. We argue that decomposing a DNN into DNN modules\u2014 akin to decomposing a monolithic software code into modules\u2014can bring the benefits of modularity to deep learning. In this work, we develop a methodology for decomposing DNNs for multi-class problems into DNN modules. For four canonical problems, namely MNIST, EMNIST, FMNIST, and KMNIST, we demonstrate that such decomposition enables reuse of DNN modules to create different DNNs, enables replacement of one DNN module in a DNN with another without needing to retrain. 
The DNN models formed by composing DNN modules are at least as good as traditional monolithic DNNs in terms of test accuracy for our problems.", + "citation_best": 29 + }, + { + "paper": "3109892451", + "venue": "1199533187", + "year": "2020", + "title": "testing self adaptive software with probabilistic guarantees on performance metrics", + "label": [ + "119857082", + "49937458", + "178059732", + "37836645", + "17777890", + "177264268", + "89187990", + "118505674" + ], + "author": [ + "2899492396", + "2115106485" + ], + "reference": [ + "65917576", + "130710483", + "161987229", + "619787199", + "1498464051", + "1515851193", + "1528449109", + "1697671524", + "1908723292", + "1947529348", + "1964137226", + "1967755445", + "1968246386", + "1973291434", + "1984744606", + "2004778468", + "2045002843", + "2054636385", + "2055688117", + "2073787051", + "2075063780", + "2086502833", + "2087758025", + "2094226673", + "2098639318", + "2113498208", + "2119304681", + "2121495423", + "2133665775", + "2155691344", + "2160960108", + "2161737886", + "2166352318", + "2167412190", + "2170078167", + "2171264441", + "2243694434", + "2244233262", + "2269457277", + "2463595550", + "2502254989", + "2546536826", + "2547248845", + "2614814129", + "2616400932", + "2623584706", + "2724110097", + "2740409037", + "2769555393", + "2883346038", + "2884584112", + "2891325180", + "2899445138", + "2911097524", + "2922290115", + "2953682625", + "2957370936", + "2963670055", + "2964073054", + "2967287319", + "2979702785", + "3103745061" + ], + "abstract": "this paper discusses the problem of testing the performance of the adaptation layer in a self adaptive system the problem is notoriously hard due to the high degree of uncertainty and variability inherent in an adaptive software application in particular providing any type of formal guarantee for this problem is extremely difficult in this paper we propose the use of a rigorous probabilistic approach to overcome the mentioned difficulties and 
provide probabilistic guarantees on the software performance we describe the set up needed for the application of a probabilistic approach we then discuss the traditional tools from statistics that could be applied to analyse the results highlighting their limitations and motivating why they are unsuitable for the given problem we propose the use of a novel tool the scenario theory to overcome said limitations we conclude the paper with a thorough empirical evaluation of the proposed approach using two adaptive software applications the tele assistance service and the self adaptive video encoder with the first we empirically expose the trade off between data collection and confidence in the testing campaign with the second we demonstrate how to compare different adaptation strategies less", + "title_raw": "Testing self-adaptive software with probabilistic guarantees on performance metrics", + "abstract_raw": "This paper discusses the problem of testing the performance of the adaptation layer in a self-adaptive system. The problem is notoriously hard, due to the high degree of uncertainty and variability inherent in an adaptive software application. In particular, providing any type of formal guarantee for this problem is extremely difficult. In this paper we propose the use of a rigorous probabilistic approach to overcome the mentioned difficulties and provide probabilistic guarantees on the software performance. We describe the set up needed for the application of a probabilistic approach. We then discuss the traditional tools from statistics that could be applied to analyse the results, highlighting their limitations and motivating why they are unsuitable for the given problem. We propose the use of a novel tool - the scenario theory - to overcome said limitations. We conclude the paper with a thorough empirical evaluation of the proposed approach, using two adaptive software applications: the Tele-Assistance Service and the Self-Adaptive Video Encoder. 
With the first, we empirically expose the trade-off between data collection and confidence in the testing campaign. With the second, we demonstrate how to compare different adaptation strategies. (Less)", + "link": "https://scholar.google.com/scholar?as_q=&num=10&btnG=Search+Scholar&as_epq=Testing+self-adaptive+software+with+probabilistic+guarantees+on+performance+metrics&as_oq=&as_eq=&as_occt=any&as_sauthors=Mandrioli", + "scraped_abstract": null, + "citation_best": 10 + }, + { + "paper": "3034940772", + "venue": "1180662882", + "year": "2020", + "title": "on learning sets of symmetric elements", + "label": [ + "115961682", + "2777693668", + "177264268", + "83665646", + "159363923", + "80444323" + ], + "author": [ + "2428319768", + "2188538367", + "2806907591", + "2482610019" + ], + "reference": [], + "abstract": "", + "title_raw": "On Learning Sets of Symmetric Elements", + "abstract_raw": "", + "link": "https://www.semanticscholar.org/paper/7716887e55ff239710c943b1d57859679fad7c3d", + "scraped_abstract": "Learning from unordered sets is a fundamental learning setup, recently attracting increasing attention. Research in this area has focused on the case where elements of the set are represented by feature vectors, and far less emphasis has been given to the common case where set elements themselves adhere to their own symmetries. That case is relevant to numerous applications, from deblurring image bursts to multi-view 3D shape recognition and reconstruction. \nIn this paper, we present a principled approach to learning sets of general symmetric elements. We first characterize the space of linear layers that are equivariant both to element reordering and to the inherent symmetries of elements, like translation in the case of images. We further show that networks that are composed of these layers, called Deep Sets for Symmetric Elements layers (DSS), are universal approximators of both invariant and equivariant functions. 
DSS layers are also straightforward to implement. Finally, we show that they improve over existing set-learning architectures in a series of experiments with images, graphs, and point-clouds.", + "citation_best": 34 + }, + { + "paper": "3034925446", + "venue": "1180662882", + "year": "2020", + "title": "tuning free plug and play proximal algorithm for inverse imaging problems", + "label": [ + "124851039", + "108583219", + "11413529", + "97541855", + "200632571", + "2780200862", + "177769412" + ], + "author": [ + "2931804203", + "2891557695", + "2150086182", + "2431565455", + "2025056398", + "2607399926" + ], + "reference": [], + "abstract": "", + "title_raw": "Tuning-free Plug-and-Play Proximal Algorithm for Inverse Imaging Problems", + "abstract_raw": "", + "link": "https://www.semanticscholar.org/paper/4c2fd562120de9cf8c60f8f8831f3de24ef4271b", + "scraped_abstract": "Plug-and-play (PnP) is a non-convex framework that combines ADMM or other proximal algorithms with advanced denoiser priors. Recently, PnP has achieved great empirical success, especially with the integration of deep learning-based denoisers. However, a key problem of PnP based approaches is that they require manual parameter tweaking. It is necessary to obtain high-quality results across the high discrepancy in terms of imaging conditions and varying scene content. In this work, we present a tuning-free PnP proximal algorithm, which can automatically determine the internal parameters including the penalty parameter, the denoising strength and the terminal time. A key part of our approach is to develop a policy network for automatic search of parameters, which can be effectively learned via mixed model-free and model-based deep reinforcement learning. We demonstrate, through numerical and visual experiments, that the learned policy can customize different parameters for different states, and often more efficient and effective than existing handcrafted criteria. 
Moreover, we discuss the practical considerations of the plugged denoisers, which together with our learned policy yield state-of-the-art results. This is prevalent on both linear and nonlinear exemplary inverse imaging problems, and in particular, we show promising results on Compressed Sensing MRI and phase retrieval.", + "citation_best": 2 + }, + { + "paper": "3002398329", + "venue": "1174403976", + "year": "2020", + "title": "white box fairness testing through adversarial sampling", + "label": [ + "119857082", + "50644808", + "45374587", + "48044578", + "77019957", + "87551280", + "180932941", + "73555534" + ], + "author": [ + "2794553365", + "2773187611", + "2302978205", + "2904077701", + "2114282303", + "2169268051", + "2927341831", + "2974544495" + ], + "reference": [ + "1665214252", + "2014352947", + "2049633694", + "2096733369", + "2100960835", + "2150593711", + "2180612164", + "2282821441", + "2309693750", + "2342840547", + "2402144811", + "2525596522", + "2569531558", + "2616028256", + "2730550703", + "2799640043", + "2892229407", + "2898851569", + "2962851944", + "2963207607", + "2963542245", + "2963779314", + "2964116855", + "2964153729", + "2964164993", + "3105507623" + ], + "abstract": "although deep neural networks dnns have demonstrated astonishing performance in many applications there are still concerns on their dependability one desirable property of dnn for applications with societal impact is fairness i e non discrimination in this work we propose a scalable approach for searching individual discriminatory instances of dnn compared with state of the art methods our approach only employs lightweight procedures like gradient computation and clustering which makes it significantly more scalable than existing methods experimental results show that our approach explores the search space more effectively 9 times and generates much more individual discriminatory instances 25 times using much less time half to 1 7", + "title_raw": "White-box Fairness 
Testing through Adversarial Sampling", + "abstract_raw": "Although deep neural networks (DNNs) have demonstrated astonishing performance in many applications, there are still concerns on their dependability. One desirable property of DNN for applications with societal impact is fairness (i.e., non-discrimination). In this work, we propose a scalable approach for searching individual discriminatory instances of DNN. Compared with state-of-the-art methods, our approach only employs lightweight procedures like gradient computation and clustering, which makes it significantly more scalable than existing methods. Experimental results show that our approach explores the search space more effectively (9 times) and generates much more individual discriminatory instances (25 times) using much less time (half to 1/7).", + "link": "https://www.semanticscholar.org/paper/eb0aac907dd0331655c713d892ec71a54ee45531", + "scraped_abstract": "Although deep neural networks (DNNs) have demonstrated astonishing performance in many applications, there are still concerns on their dependability. One desirable property of DNN for applications with societal impact is fairness (i.e., non-discrimination). In this work, we propose a scalable approach for searching individual discriminatory instances of DNN. Compared with state-of-the-art methods, our approach only employs lightweight procedures like gradient computation and clustering, which makes it significantly more scalable than existing methods. 
Experimental results show that our approach explores the search space more effectively (9 times) and generates much more individual discriminatory instances (25 times) using much less time (half to 1/7).", + "citation_best": 104 + }, + { + "paper": "3005780259", + "venue": "1174403976", + "year": "2020", + "title": "an empirical study on program failures of deep learning jobs", + "label": [ + "167955471", + "168065819", + "108583219", + "115903868" + ], + "author": [ + "2984402671", + "2953297821", + "2619848081", + "2989150659", + "2139123949", + "2108289083" + ], + "reference": [ + "1589576271", + "1757796397", + "1867761151", + "2039676055", + "2087419855", + "2088727885", + "2097117768", + "2101512909", + "2114053493", + "2158698691", + "2164948578", + "2168206938", + "2186615578", + "2296335794", + "2402144811", + "2475334473", + "2513383847", + "2613904329", + "2616028256", + "2622263826", + "2732547613", + "2743948853", + "2752512710", + "2753713840", + "2769279756", + "2772678505", + "2789444109", + "2798515322", + "2850992922", + "2896457183", + "2899071864", + "2919594608", + "2963150697", + "2963327228", + "2963403751", + "2963403868", + "2963785012", + "2964174152", + "2968594320", + "2970971581", + "3103894541" + ], + "abstract": "deep learning has made significant achievements in many application areas to train and test models more efficiently enterprise developers submit and run their deep learning programs on a shared multi tenant platform however some of the programs fail after a long execution time due to code script defects which reduces the development productivity and wastes expensive resources such as gpu storage and network i o this paper presents the first comprehensive empirical study on program failures of deep learning jobs 4960 real failures are collected from a deep learning platform in microsoft we manually examine their failure messages and classify them into 20 categories in addition we identify the common root causes and bug fix 
solutions on a sample of 400 failures to better understand the current testing and debugging practices for deep learning we also conduct developer interviews our major findings include 1 48 0 of the failures occur in the interaction with the platform rather than in the execution of code logic mostly due to the discrepancies between local and platform execution environments 2 deep learning specific failures 13 5 are mainly caused by inappropriate model parameters structures and framework api misunderstanding 3 current debugging practices are not efficient for fault localization in many cases and developers need more deep learning specific tools based on our findings we further suggest possible research topics and tooling support that could facilitate future deep learning development", + "title_raw": "An Empirical Study on Program Failures of Deep Learning Jobs", + "abstract_raw": "Deep learning has made significant achievements in many application areas. To train and test models more efficiently, enterprise developers submit and run their deep learning programs on a shared, multi-tenant platform. However, some of the programs fail after a long execution time due to code/script defects, which reduces the development productivity and wastes expensive resources such as GPU, storage, and network I/O. This paper presents the first comprehensive empirical study on program failures of deep learning jobs. 4960 real failures are collected from a deep learning platform in Microsoft. We manually examine their failure messages and classify them into 20 categories. In addition, we identify the common root causes and bug-fix solutions on a sample of 400 failures. To better understand the current testing and debugging practices for deep learning, we also conduct developer interviews. 
Our major findings include: (1) 48.0% of the failures occur in the interaction with the platform rather than in the execution of code logic, mostly due to the discrepancies between local and platform execution environments; (2) Deep learning specific failures (13.5%) are mainly caused by inappropriate model parameters/structures and framework API misunderstanding; (3) Current debugging practices are not efficient for fault localization in many cases, and developers need more deep learning specific tools. Based on our findings, we further suggest possible research topics and tooling support that could facilitate future deep learning development.", + "link": "https://www.semanticscholar.org/paper/0d10237707b3ce5f0236d4da013eb45ad18ac12b", + "scraped_abstract": "Deep learning has made significant achievements in many application areas. To train and test models more efficiently, enterprise developers submit and run their deep learning programs on a shared, multi-tenant platform. However, some of the programs fail after a long execution time due to code/script defects, which reduces the development productivity and wastes expensive resources such as GPU, storage, and network I/O. This paper presents the first comprehensive empirical study on program failures of deep learning jobs. 4960 real failures are collected from a deep learning platform in Microsoft. We manually examine their failure messages and classify them into 20 categories. In addition, we identify the common root causes and bug-fix solutions on a sample of 400 failures. To better understand the current testing and debugging practices for deep learning, we also conduct developer interviews. 
Our major findings include: (1) 48.0% of the failures occur in the interaction with the platform rather than in the execution of code logic, mostly due to the discrepancies between local and platform execution environments; (2) Deep learning specific failures (13.5%) are mainly caused by inappropriate model parameters/structures and framework API misunderstanding; (3) Current debugging practices are not efficient for fault localization in many cases, and developers need more deep learning specific tools. Based on our findings, we further suggest possible research topics and tooling support that could facilitate future deep learning development.", + "citation_best": 77 + }, + { + "paper": "3090625562", + "venue": "1174403976", + "year": "2020", + "title": "towards the use of the readily available tests from the release pipeline as performance tests are we there yet", + "label": [ + "9903902", + "135945739", + "178059732", + "529173508", + "43126263", + "43521106", + "149091818", + "117447612" + ], + "author": [ + "3090985124", + "2767504857", + "2118619547" + ], + "reference": [ + "109397963", + "1902482618", + "1965377045", + "1981055536", + "1992254614", + "1997726341", + "2007705030", + "2023740829", + "2028897749", + "2058879750", + "2062666593", + "2087515886", + "2104364184", + "2105300539", + "2115380655", + "2145458045", + "2147386665", + "2148082729", + "2148615889", + "2153530968", + "2155653793", + "2157499124", + "2157662360", + "2163931728", + "2277866467", + "2325662965", + "2399881321", + "2474835145", + "2546913659", + "2557595292", + "2579246879", + "2605807148", + "2605883949", + "2606444700", + "2606496611", + "2618917786", + "2767269462", + "2796132572", + "2884229567", + "2889590244", + "2911964244", + "2923271003", + "2934678393", + "2936421018", + "2997591727", + "3152352337" + ], + "abstract": "performance is one of the important aspects of software quality performance issues exist widely in software systems and the process of fixing the 
performance issues is an essential step in the release cycle of software systems although performance testing is widely adopted in practice it is still expensive and time consuming in particular the performance testing is usually conducted after the system is built in a dedicated testing environment the challenges of performance testing make it difficult to fit into the common devops process in software development on the other hand there exist a large number of tests readily available that are executed regularly within the release pipeline during software development in this paper we perform an exploratory study to determine whether such readily available tests are capable of serving as performance tests in particular we would like to see whether the performance of these tests can demonstrate performance improvements obtained from fixing real life performance issues we collect 127 performance issues from hadoop and cassandra and evaluate the performance of the readily available tests from the commits before and after the performance issue fixes we find that most of the improvements from the fixes to performance issues can be demonstrated using the readily available tests in the release pipeline however only a very small portion of the tests can be used for demonstrating the improvements by manually examining the tests we identify eight reasons that a test cannot demonstrate performance improvements even though it covers the changed source code of the issue fix finally we build random forest classifiers determining the important metrics influencing the readily available tests not being able to demonstrate performance improvements from issue fixes we find that the test code itself and the source code covered by the test are important factors while the factors related to the code changes in the performance issues fixes have a low importance practitioners may focus on designing and improving the tests instead of fine tuning tests for different performance issues fixes 
our findings can be used as a guideline for practitioners to reduce the amount of effort spent on leveraging and designing tests that run in the release pipeline for performance assurance activities", + "title_raw": "Towards the Use of the Readily Available Tests from the Release Pipeline as Performance Tests. Are We There Yet", + "abstract_raw": "Performance is one of the important aspects of software quality. Performance issues exist widely in software systems, and the process of fixing the performance issues is an essential step in the release cycle of software systems. Although performance testing is widely adopted in practice, it is still expensive and time-consuming. In particular, the performance testing is usually conducted after the system is built in a dedicated testing environment. The challenges of performance testing make it difficult to fit into the common DevOps process in software development. On the other hand, there exist a large number of tests readily available, that are executed regularly within the release pipeline during software development. In this paper, we perform an exploratory study to determine whether such readily available tests are capable of serving as performance tests. In particular, we would like to see whether the performance of these tests can demonstrate performance improvements obtained from fixing real-life performance issues. We collect 127 performance issues from Hadoop and Cassandra, and evaluate the performance of the readily available tests from the commits before and after the performance issue fixes. We find that most of the improvements from the fixes to performance issues can be demonstrated using the readily available tests in the release pipeline. However, only a very small portion of the tests can be used for demonstrating the improvements. 
By manually examining the tests, we identify eight reasons that a test cannot demonstrate performance improvements even though it covers the changed source code of the issue fix. Finally, we build random forest classifiers determining the important metrics influencing the readily available tests (not) being able to demonstrate performance improvements from issue fixes. We find that the test code itself and the source code covered by the test are important factors, while the factors related to the code changes in the performance issues fixes have a low importance. Practitioners may focus on designing and improving the tests, instead of fine-tuning tests for different performance issues fixes. Our findings can be used as a guideline for practitioners to reduce the amount of effort spent on leveraging and designing tests that run in the release pipeline for performance assurance activities.", + "link": "https://www.semanticscholar.org/paper/ddf582429fff1ba9abd24d5cd0ff80b6f200ed02", + "scraped_abstract": "Performance is one of the important aspects of software quality. Performance issues exist widely in software systems, and the process of fixing the performance issues is an essential step in the release cycle of software systems. Although performance testing is widely adopted in practice, it is still expensive and time-consuming. In particular, the performance testing is usually conducted after the system is built in a dedicated testing environment. The challenges of performance testing make it difficult to fit into the common DevOps process in software development. On the other hand, there exist a large number of tests readily available, that are executed regularly within the release pipeline during software development. In this paper, we perform an exploratory study to determine whether such readily available tests are capable of serving as performance tests. 
In particular, we would like to see whether the performance of these tests can demonstrate performance improvements obtained from fixing real-life performance issues. We collect 127 performance issues from Hadoop and Cassandra, and evaluate the performance of the readily available tests from the commits before and after the performance issue fixes. We find that most of the improvements from the fixes to performance issues can be demonstrated using the readily available tests in the release pipeline. However, only a very small portion of the tests can be used for demonstrating the improvements. By manually examining the tests, we identify eight reasons that a test cannot demonstrate performance improvements even though it covers the changed source code of the issue fix. Finally, we build random forest classifiers determining the important metrics influencing the readily available tests (not) being able to demonstrate performance improvements from issue fixes. We find that the test code itself and the source code covered by the test are important factors, while the factors related to the code changes in the performance issues fixes have a low importance. Practitioners may focus on designing and improving the tests, instead of fine-tuning tests for different performance issues fixes. 
Our findings can be used as a guideline for practitioners to reduce the amount of effort spent on leveraging and designing tests that run in the release pipeline for performance assurance activities.", + "citation_best": 0 + }, + { + "paper": "3091518781", + "venue": "1174403976", + "year": "2020", + "title": "time travel testing of android apps", + "label": [ + "119857082", + "53942775", + "557433098", + "57482682" + ], + "author": [ + "2239612353", + "2154856611", + "2687476115", + "2153766775" + ], + "reference": [ + "176206521", + "1500546894", + "2013856010", + "2055703785", + "2071751474", + "2088749975", + "2091932246", + "2101800210", + "2119760639", + "2161963160", + "2162468176", + "2164170598", + "2227887088", + "2356780433", + "2463553622", + "2514303331", + "2530507015", + "2571682498", + "2617064411", + "2619271281", + "2621123093", + "2740742367", + "2766540688", + "2795227216", + "2888246077", + "2888272748", + "2951058005", + "2955215835", + "2974819274", + "2999907851", + "3000499753", + "3102650716" + ], + "abstract": "android testing tools generate sequences of input events to exercise the state space of the app under test existing search based techniques systematically evolve a population of event sequences so as to achieve certain objectives such as maximal code coverage the hope is that the mutation of fit event sequences leads to the generation of even fitter sequences however the evolution of event sequences may be ineffective our key insight is that pertinent app states which contributed to the original sequence s fitness may not be reached by a mutated event sequence the original path through the state space is truncated at the point of mutation in this paper we propose instead to evolve a population of states which can be captured upon discovery and resumed when needed the hope is that generating events on a fit program state leads to the transition to even fitter states for instance we can quickly deprioritize testing the main screen 
state which is visited by most event sequences and instead focus our limited resources on testing more interesting states that are otherwise difficult to reach we call our approach time travel testing because of this ability to travel back to any state that has been observed in the past we implemented time travel testing into timemachine a time travel enabled version of the successful automated android testing tool monkey in our experiments on a large number of open and closed source android apps timemachine outperforms the state of the art search based model based android testing tools sapienz and stoat both in terms of coverage achieved and crashes found we call our approach time travel testing because of this ability to travel back to any state that has been observed in the past we implemented time travel testing into timemachine a time travel enabled version of the successful automated android testing tool monkey in our experiments on a large number of open and closed source android apps timemachine outperforms the state of the art search based model based android testing tools sapienz and stoat both in terms of coverage achieved and crashes found", + "title_raw": "Time-travel Testing of Android Apps", + "abstract_raw": "Android testing tools generate sequences of input events to exercise the state space of the app-under-test. Existing search-based techniques systematically evolve a population of event sequences so as to achieve certain objectives such as maximal code coverage. The hope is that the mutation of fit event sequences leads to the generation of even fitter sequences. However, the evolution of event sequences may be ineffective. Our key insight is that pertinent app states which contributed to the original sequence's fitness may not be reached by a mutated event sequence. The original path through the state space is truncated at the point of mutation. 
In this paper, we propose instead to evolve a population of states which can be captured upon discovery and resumed when needed. The hope is that generating events on a fit program state leads to the transition to even fitter states. For instance, we can quickly deprioritize testing the main screen state which is visited by most event sequences, and instead focus our limited resources on testing more interesting states that are otherwise difficult to reach. We call our approach time-travel testing because of this ability to travel back to any state that has been observed in the past. We implemented time-travel testing into TimeMachine, a time-travel enabled version of the successful, automated Android testing tool Monkey. In our experiments on a large number of open- and closed source Android apps, TimeMachine outperforms the state-of-the-art search-based/model-based Android testing tools Sapienz and Stoat, both in terms of coverage achieved and crashes found. We call our approach time-travel testing because of this ability to travel back to any state that has been observed in the past. We implemented time-travel testing into TimeMachine, a time-travel enabled version of the successful, automated Android testing tool Monkey. In our experiments on a large number of open- and closed source Android apps, TimeMachine outperforms the state-of-the-art search-based/model-based Android testing tools Sapienz and Stoat, both in terms of coverage achieved and crashes found.", + "link": "https://www.semanticscholar.org/paper/fd2abfc767195843f3df50dfb50313b472257a8a", + "scraped_abstract": "Android testing tools generate sequences of input events to exercise the state space of the app-under-test. Existing search-based techniques systematically evolve a population of event sequences so as to achieve certain objectives such as maximal code coverage. The hope is that the mutation of fit event sequences leads to the generation of even fitter sequences. 
However, the evolution of event sequences may be ineffective. Our key insight is that pertinent app states which contributed to the original sequence's fitness may not be reached by a mutated event sequence. The original path through the state space is truncated at the point of mutation. In this paper, we propose instead to evolve a population of states which can be captured upon discovery and resumed when needed. The hope is that generating events on a fit program state leads to the transition to even fitter states. For instance, we can quickly deprioritize testing the main screen state which is visited by most event sequences, and instead focus our limited resources on testing more interesting states that are otherwise difficult to reach. We call our approach time-travel testing because of this ability to travel back to any state that has been observed in the past. We implemented time-travel testing into TimeMachine, a time-travel enabled version of the successful, automated Android testing tool Monkey. In our experiments on a large number of open- and closed source Android apps, TimeMachine outperforms the state-of-the-art search-based/model-based Android testing tools Sapienz and Stoat, both in terms of coverage achieved and crashes found. We call our approach time-travel testing because of this ability to travel back to any state that has been observed in the past. We implemented time-travel testing into TimeMachine, a time-travel enabled version of the successful, automated Android testing tool Monkey. 
In our experiments on a large number of open- and closed source Android apps, TimeMachine outperforms the state-of-the-art search-based/model-based Android testing tools Sapienz and Stoat, both in terms of coverage achieved and crashes found.", + "citation_best": 83 + }, + { + "paper": "3045552507", + "venue": "1174403976", + "year": "2020", + "title": "here we go again why is it difficult for developers to learn another programming language", + "label": [ + "153083717", + "2778514511", + "199360897", + "56666940", + "2777561058" + ], + "author": [ + "2889384537", + "3045522353", + "1989569963", + "347693595" + ], + "reference": [ + "95226708", + "187103360", + "1483471644", + "1525811688", + "1535959380", + "1540823594", + "1820505434", + "1968956798", + "1984964495", + "1989111274", + "1993926093", + "1995969371", + "1997377527", + "1999862073", + "2001468118", + "2012761058", + "2014243053", + "2016839846", + "2018720828", + "2054941659", + "2055520200", + "2056586145", + "2081277739", + "2084050405", + "2096329292", + "2097116556", + "2111635546", + "2120704737", + "2122401044", + "2127988403", + "2135933369", + "2153297147", + "2154671002", + "2159076163", + "2164777277", + "2167619672", + "2168795140", + "2171668631", + "2560286917", + "2763783445", + "2766624855", + "2885048964", + "2888945069", + "2891928410", + "2899324569", + "2911692446", + "2922338646", + "2973993233", + "3021779197", + "3103014234" + ], + "abstract": "once a programmer knows one language they can leverage concepts and knowledge already learned and easily pick up another programming language but is that always the case to understand if programmers have difficulty learning additional programming languages we conducted an empirical study of stack overflow questions across 18 different programming languages we hypothesized that previous knowledge could potentially interfere with learning a new programming language from our inspection of 450 stack overflow questions we found 276 instances 
of interference that occurred due to faulty assumptions originating from knowledge about a different language to understand why these difficulties occurred we conducted semi structured interviews with 16 professional programmers the interviews revealed that programmers make failed attempts to relate a new programming language with what they already know our findings inform design implications for technical authors toolsmiths and language designers such as designing documentation and automated tools that reduce interference anticipating uncommon language transitions during language design and welcoming programmers not just into a language but its entire ecosystem", + "title_raw": "Here We Go Again: Why Is It Difficult for Developers to Learn Another Programming Language?", + "abstract_raw": "Once a programmer knows one language, they can leverage concepts and knowledge already learned, and easily pick up another programming language. But is that always the case? To understand if programmers have difficulty learning additional programming languages, we conducted an empirical study of Stack Overflow questions across 18 different programming languages. We hypothesized that previous knowledge could potentially interfere with learning a new programming language. From our inspection of 450 Stack Overflow questions, we found 276 instances of interference that occurred due to faulty assumptions originating from knowledge about a different language. To understand why these difficulties occurred, we conducted semi-structured interviews with 16 professional programmers. The interviews revealed that programmers make failed attempts to relate a new programming language with what they already know. 
Our findings inform design implications for technical authors, toolsmiths, and language designers, such as designing documentation and automated tools that reduce interference, anticipating uncommon language transitions during language design, and welcoming programmers not just into a language, but its entire ecosystem.", + "link": "https://www.semanticscholar.org/paper/112353453760498067a78e5e53220b7a11df9db4", + "scraped_abstract": "Once a programmer knows one language, they can leverage concepts and knowledge already learned, and easily pick up another programming language. But is that always the case? To understand if programmers have difficulty learning additional programming languages, we conducted an empirical study of Stack Overflow questions across 18 different programming languages. We hypothesized that previous knowledge could potentially interfere with learning a new programming language. From our inspection of 450 Stack Overflow questions, we found 276 instances of interference that occurred due to faulty assumptions originating from knowledge about a different language. To understand why these difficulties occurred, we conducted semi-structured interviews with 16 professional programmers. The interviews revealed that programmers make failed attempts to relate a new programming language with what they already know. 
Our findings inform design implications for technical authors, toolsmiths, and language designers, such as designing documentation and automated tools that reduce interference, anticipating uncommon language transitions during language design, and welcoming programmers not just into a language, but its entire ecosystem.", + "citation_best": 0 + }, + { + "paper": "3105398568", + "venue": "1174403976", + "year": "2020", + "title": "big code big vocabulary open vocabulary models for source code", + "label": [ + "137293760", + "167955471", + "529173508", + "548217200", + "519991488", + "2778143727", + "204321447", + "195324797", + "43126263" + ], + "author": [ + "2921149106", + "2935244848", + "2764018286", + "2113665458", + "2019551114" + ], + "reference": [ + "46679369", + "179875071", + "1501139663", + "1655078475", + "1771830246", + "1860267373", + "1924770834", + "1938755728", + "1970607969", + "1972141422", + "1973681806", + "1974020522", + "1993318811", + "1994573369", + "2010608861", + "2018389835", + "2032942114", + "2060384944", + "2064675550", + "2074032109", + "2077537588", + "2095705004", + "2100664567", + "2128737833", + "2140609933", + "2142403498", + "2143861926", + "2143960295", + "2148190602", + "2153579005", + "2156723666", + "2157331557", + "2158195707", + "2165747537", + "2175297521", + "2251012068", + "2259472270", + "2308618763", + "2402619042", + "2444132761", + "2497764072", + "2511803001", + "2516621648", + "2547880329", + "2571859396", + "2579161546", + "2605202003", + "2740130862", + "2740220421", + "2768572539", + "2795516572", + "2795753518", + "2806551457", + "2806718802", + "2884276923", + "2884681705", + "2887364112", + "2891185194", + "2907705732", + "2921792613", + "2927177344", + "2954149564", + "2954274464", + "2954451301", + "2954823997", + "2954950681", + "2962739339", + "2962784628", + "2962883166", + "2962894772", + "2962995178", + "2963026768", + "2963099225", + "2963341956", + "2963403868", + "2963935794", + "2963951265", + 
"2964315653", + "2976890614", + "2979792666", + "3099302725", + "3104874136", + "3146720657", + "3149750418" + ], + "abstract": "statistical language modeling techniques have successfully been applied to large source code corpora yielding a variety of new software development tools such as tools for code suggestion improving readability and api migration a major issue with these techniques is that code introduces new vocabulary at a far higher rate than natural language as new identifier names proliferate both large vocabularies and out of vocabulary issues severely affect neural language models nlms of source code degrading their performance and rendering them unable to scale in this paper we address this issue by 1 studying how various modelling choices impact the resulting vocabulary on a large scale corpus of 13 362 projects 2 presenting an open vocabulary source code nlm that can scale to such a corpus 100 times larger than in previous work and 3 showing that such models outperform the state of the art on three distinct code corpora java c python to our knowledge these are the largest nlms for code that have been reported all datasets code and trained models used in this work are publicly available", + "title_raw": "Big Code != Big Vocabulary: Open-Vocabulary Models for Source Code", + "abstract_raw": "Statistical language modeling techniques have successfully been applied to large source code corpora, yielding a variety of new software development tools, such as tools for code suggestion, improving readability, and API migration. A major issue with these techniques is that code introduces new vocabulary at a far higher rate than natural language, as new identifier names proliferate. Both large vocabularies and out-of-vocabulary issues severely affect Neural Language Models (NLMs) of source code, degrading their performance and rendering them unable to scale. 
In this paper, we address this issue by: 1) studying how various modelling choices impact the resulting vocabulary on a large-scale corpus of 13,362 projects; 2) presenting an open vocabulary source code NLM that can scale to such a corpus, 100 times larger than in previous work; and 3) showing that such models outperform the state of the art on three distinct code corpora (Java, C, Python). To our knowledge, these are the largest NLMs for code that have been reported. All datasets, code, and trained models used in this work are publicly available.", + "link": "https://www.semanticscholar.org/paper/3944354c42ddfff7414ad06022f96c72858d5fa6", + "scraped_abstract": "Statistical language modeling techniques have successfully been applied to large source code corpora, yielding a variety of new software development tools, such as tools for code suggestion, improving readability, and API migration. A major issue with these techniques is that code introduces new vocabulary at a far higher rate than natural language, as new identifier names proliferate. Both large vocabularies and out-of-vocabulary issues severely affect Neural Language Models (NLMs) of source code, degrading their performance and rendering them unable to scale. In this paper, we address this issue by: 1) studying how various modelling choices impact the resulting vocabulary on a large-scale corpus of 13,362 projects; 2) presenting an open vocabulary source code NLM that can scale to such a corpus, 100 times larger than in previous work; and 3) showing that such models outperform the state of the art on three distinct code corpora (Java, C, Python). To our knowledge, these are the largest NLMs for code that have been reported. 
All datasets, code, and trained models used in this work are publicly available.", + "citation_best": 87 + }, + { + "paper": "3103843169", + "venue": "1174403976", + "year": "2020", + "title": "unblind your apps predicting natural language labels for mobile gui components by deep learning", + "label": [ + "557433098", + "37836645", + "207347870", + "89505385", + "136764020", + "195324797" + ], + "author": [ + "3007330025", + "2520883552", + "2779303333", + "2302662278", + "3121335775", + "2223340429", + "2097793164" + ], + "reference": [ + "19399978", + "761890246", + "1538131130", + "1861492603", + "1889081078", + "1895577753", + "1956340063", + "1965555277", + "1972978214", + "2039184942", + "2069442545", + "2082177508", + "2101105183", + "2110065044", + "2143860600", + "2150824314", + "2151967815", + "2161987701", + "2163605009", + "2168842329", + "2194775991", + "2213051614", + "2214322531", + "2242083635", + "2293111166", + "2313513770", + "2475883615", + "2481985212", + "2513201734", + "2513969877", + "2525084721", + "2610917376", + "2613718673", + "2742636803", + "2774988559", + "2794908093", + "2805591798", + "2896625930", + "2897267527", + "2899371602", + "2911344747", + "2911831256", + "2922099667", + "2942870440", + "2956017828", + "2962992787", + "2963403868", + "2963461515", + "2963686907", + "2964121744", + "2985406956", + "2989724224", + "3091044163" + ], + "abstract": "according to the world health organization who it is estimated that approximately 1 3 billion people live with some forms of vision impairment globally of whom 36 million are blind due to their disability engaging these minority into the society is a challenging problem the recent rise of smart mobile phones provides a new solution by enabling blind users convenient access to the information and service for understanding the world users with vision impairment can adopt the screen reader embedded in the mobile operating systems to read the content of each screen within the app and use 
gestures to interact with the phone however the prerequisite of using screen readers is that developers have to add natural language labels to the image based components when they are developing the app unfortunately more than 77 apps have issues of missing labels according to our analysis of 10 408 android apps most of these issues are caused by developers lack of awareness and knowledge in considering the minority and even if developers want to add the labels to ui components they may not come up with concise and clear description as most of them are of no visual issues to overcome these challenges we develop a deep learning based model called labeldroid to automatically predict the labels of image based buttons by learning from large scale commercial apps in google play the experimental results show thatour model can make accurate predictions and the generated labels are of higher quality than that from real android developers", + "title_raw": "Unblind Your Apps: Predicting Natural-Language Labels for Mobile GUI Components by Deep Learning", + "abstract_raw": "According to the World Health Organization(WHO), it is estimated that approximately 1.3 billion people live with some forms of vision impairment globally, of whom 36 million are blind. Due to their disability, engaging these minority into the society is a challenging problem. The recent rise of smart mobile phones provides a new solution by enabling blind users' convenient access to the information and service for understanding the world. Users with vision impairment can adopt the screen reader embedded in the mobile operating systems to read the content of each screen within the app, and use gestures to interact with the phone. However, the prerequisite of using screen readers is that developers have to add natural-language labels to the image-based components when they are developing the app. 
Unfortunately, more than 77% apps have issues of missing labels, according to our analysis of 10,408 Android apps. Most of these issues are caused by developers' lack of awareness and knowledge in considering the minority. And even if developers want to add the labels to UI components, they may not come up with concise and clear description as most of them are of no visual issues. To overcome these challenges, we develop a deep-learning based model, called Labeldroid, to automatically predict the labels of image-based buttons by learning from large-scale commercial apps in Google Play. The experimental results show thatour model can make accurate predictions and the generated labels are of higher quality than that from real Android developers.", + "link": "https://www.semanticscholar.org/paper/a32ca8fb3ad34374dfb3ef4967d13b89d7d3ffd2", + "scraped_abstract": "According to the World Health Organization(WHO), it is estimated that approximately 1.3 billion people live with some forms of vision impairment globally, of whom 36 million are blind. Due to their disability, engaging these minority into the society is a challenging problem. The recent rise of smart mobile phones provides a new solution by enabling blind users' convenient access to the information and service for understanding the world. Users with vision impairment can adopt the screen reader embedded in the mobile operating systems to read the content of each screen within the app, and use gestures to interact with the phone. However, the prerequisite of using screen readers is that developers have to add natural-language labels to the image-based components when they are developing the app. Unfortunately, more than 77% apps have issues of missing labels, according to our analysis of 10,408 Android apps. Most of these issues are caused by developers' lack of awareness and knowledge in considering the minority. 
And even if developers want to add the labels to UI components, they may not come up with concise and clear description as most of them are of no visual issues. To overcome these challenges, we develop a deep-learning based model, called Labeldroid, to automatically predict the labels of image-based buttons by learning from large-scale commercial apps in Google Play. The experimental results show thatour model can make accurate predictions and the generated labels are of higher quality than that from real Android developers.", + "citation_best": 47 + }, + { + "paper": "3100403944", + "venue": "1174403976", + "year": "2020", + "title": "translating video recordings of mobile app usages into replayable scenarios", + "label": [ + "100850083", + "557433098", + "107457646", + "7374053", + "2776151529", + "2777904410" + ], + "author": [ + "2230368832", + "3027676927", + "2337296193", + "2786423358", + "2146731111", + "67342874" + ], + "reference": [ + "639708223", + "1849277567", + "1861492603", + "1894826246", + "1984361257", + "1995362840", + "2001605182", + "2016996406", + "2018654787", + "2025721496", + "2027999475", + "2035030692", + "2052493036", + "2082491926", + "2088049833", + "2097117768", + "2102605133", + "2123873494", + "2136695057", + "2154221125", + "2160517961", + "2163605009", + "2194775991", + "2241093273", + "2345397167", + "2362396924", + "2513201734", + "2547513165", + "2601548810", + "2615260684", + "2618807062", + "2734399289", + "2741894851", + "2767785010", + "2770563577", + "2794908093", + "2884291721", + "2888588953", + "2899070402", + "2962724148", + "2962835968", + "2963011053", + "2963139460", + "2963295463", + "2963943519", + "2967289945", + "3098403179", + "3102650716" + ], + "abstract": "screen recordings of mobile applications are easy to obtain and capture a wealth of information pertinent to software developers e g bugs or feature requests making them a popular mechanism for crowdsourced app feedback thus these videos are becoming a 
common artifact that developers must manage in light of unique mobile development constraints including swift release cycles and rapidly evolving platforms automated techniques for analyzing all types of rich software artifacts provide benefit to mobile developers unfortunately automatically analyzing screen recordings presents serious challenges due to their graphical nature compared to other types of textual artifacts to address these challenges this paper introduces v2s a lightweight automated approach for translating video recordings of android app usages into replayable scenarios v2s is based primarily on computer vision techniques and adapts recent solutions for object detection and image classification to detect and classify user actions captured in a video and convert these into a replayable test scenario we performed an extensive evaluation of v2s involving 175 videos depicting 3 534 gui based actions collected from users exercising features and reproducing bugs from over 80 popular android apps our results illustrate that v2s can accurately replay scenarios from screen recordings and is capable of reproducing 89 of our collected videos with minimal overhead a case study with three industrial partners illustrates the potential usefulness of v2s from the viewpoint of developers", + "title_raw": "Translating Video Recordings of Mobile App Usages into Replayable Scenarios", + "abstract_raw": "Screen recordings of mobile applications are easy to obtain and capture a wealth of information pertinent to software developers (e.g., bugs or feature requests), making them a popular mechanism for crowdsourced app feedback. Thus, these videos are becoming a common artifact that developers must manage. In light of unique mobile development constraints, including swift release cycles and rapidly evolving platforms, automated techniques for analyzing all types of rich software artifacts provide benefit to mobile developers. 
Unfortunately, automatically analyzing screen recordings presents serious challenges, due to their graphical nature, compared to other types of (textual) artifacts. To address these challenges, this paper introduces V2S, a lightweight, automated approach for translating video recordings of Android app usages into replayable scenarios. V2S is based primarily on computer vision techniques and adapts recent solutions for object detection and image classification to detect and classify user actions captured in a video, and convert these into a replayable test scenario. We performed an extensive evaluation of V2S involving 175 videos depicting 3,534 GUI-based actions collected from users exercising features and reproducing bugs from over 80 popular Android apps. Our results illustrate that V2S can accurately replay scenarios from screen recordings, and is capable of reproducing \u2248 89% of our collected videos with minimal overhead. A case study with three industrial partners illustrates the potential usefulness of V2S from the viewpoint of developers.", + "link": "https://www.semanticscholar.org/paper/ca268090f41e48b5775ee60653d86567bd1787c5", + "scraped_abstract": "Screen recordings of mobile applications are easy to obtain and capture a wealth of information pertinent to software developers (e.g., bugs or feature requests), making them a popular mechanism for crowdsourced app feedback. Thus, these videos are becoming a common artifact that developers must manage. In light of unique mobile development constraints, including swift release cycles and rapidly evolving platforms, automated techniques for analyzing all types of rich software artifacts provide benefit to mobile developers. Unfortunately, automatically analyzing screen recordings presents serious challenges, due to their graphical nature, compared to other types of (textual) artifacts. 
To address these challenges, this paper introduces V2S, a lightweight, automated approach for translating video recordings of Android app usages into replayable scenarios. V2S is based primarily on computer vision techniques and adapts recent solutions for object detection and image classification to detect and classify user actions captured in a video, and convert these into a replayable test scenario. We performed an extensive evaluation of V2S involving 175 videos depicting 3,534 GUI-based actions collected from users exercising features and reproducing bugs from over 80 popular Android apps. Our results illustrate that V2S can accurately replay scenarios from screen recordings, and is capable of reproducing \u2248 89% of our collected videos with minimal overhead. A case study with three industrial partners illustrates the potential usefulness of V2S from the viewpoint of developers.", + "citation_best": 53 + }, + { + "paper": "3034497099", + "venue": "1203999783", + "year": "2020", + "title": "synthesizing aspect driven recommendation explanations from reviews", + "label": [ + "2522767166" + ], + "author": [ + "3034755947", + "2024254804" + ], + "reference": [ + "1518152488", + "2024165284", + "2027731328", + "2028988057", + "2116959421", + "2150824314", + "2152184085", + "2337403844", + "2507974895", + "2573167395", + "2739992143", + "2740167620", + "2742657630", + "2798277467", + "2893085659", + "2897405591", + "2912678722", + "2962685628", + "2962765866", + "2971196067", + "3099023595", + "3101422495" + ], + "abstract": "", + "title_raw": "Synthesizing Aspect-Driven Recommendation Explanations from Reviews", + "abstract_raw": "", + "link": "https://www.semanticscholar.org/paper/a945a2feb44b4b2e038e9f2abdbbf2a9e15f4584", + "scraped_abstract": null, + "citation_best": 12 + }, + { + "paper": "3081170586", + "venue": "1130985203", + "year": "2020", + "title": "on sampled metrics for item recommendation", + "label": [ + "177264268", + "197927960" + ], + 
"author": [ + "1109698060", + "1585981875" + ], + "reference": [ + "1965755064", + "2042281163", + "2055945388", + "2101409192", + "2152808281", + "2565948352", + "2605350416", + "2783603395", + "2892888989", + "2950577311", + "2963714345", + "2964324019", + "2966349618", + "3097991661" + ], + "abstract": "the task of item recommendation requires ranking a large catalogue of items given a context item recommendation algorithms are evaluated using ranking metrics that depend on the positions of relevant items to speed up the computation of metrics recent work often uses sampled metrics where only a smaller set of random items and the relevant items are ranked this paper investigates sampled metrics in more detail and shows that they are inconsistent with their exact version in the sense that they do not persist relative statements e g recommender a is better than b not even in expectation moreover the smaller the sampling size the less difference there is between metrics and for very small sampling size all metrics collapse to the auc metric we show that it is possible to improve the quality of the sampled metrics by applying a correction obtained by minimizing different criteria such as bias or mean squared error we conclude with an empirical evaluation of the naive sampled metrics and their corrected variants to summarize our work suggests that sampling should be avoided for metric calculation however if an experimental study needs to sample the proposed corrections can improve the quality of the estimate", + "title_raw": "On Sampled Metrics for Item Recommendation", + "abstract_raw": "The task of item recommendation requires ranking a large catalogue of items given a context. Item recommendation algorithms are evaluated using ranking metrics that depend on the positions of relevant items. To speed up the computation of metrics, recent work often uses sampled metrics where only a smaller set of random items and the relevant items are ranked. 
This paper investigates sampled metrics in more detail and shows that they are inconsistent with their exact version, in the sense that they do not persist relative statements, e.g., recommender A is better than B, not even in expectation. Moreover, the smaller the sampling size, the less difference there is between metrics, and for very small sampling size, all metrics collapse to the AUC metric. We show that it is possible to improve the quality of the sampled metrics by applying a correction, obtained by minimizing different criteria such as bias or mean squared error. We conclude with an empirical evaluation of the naive sampled metrics and their corrected variants. To summarize, our work suggests that sampling should be avoided for metric calculation, however if an experimental study needs to sample, the proposed corrections can improve the quality of the estimate.", + "link": "https://www.semanticscholar.org/paper/41f9fd1c997473d8c50a4f7f61eb317ac7a6f25c", + "scraped_abstract": "The task of item recommendation requires ranking a large catalogue of items given a context. Item recommendation algorithms are evaluated using ranking metrics that depend on the positions of relevant items. To speed up the computation of metrics, recent work often uses sampled metrics where only a smaller set of random items and the relevant items are ranked. This paper investigates sampled metrics in more detail and shows that they are inconsistent with their exact version, in the sense that they do not persist relative statements, e.g., recommender A is better than B, not even in expectation. Moreover, the smaller the sampling size, the less difference there is between metrics, and for very small sampling size, all metrics collapse to the AUC metric. We show that it is possible to improve the quality of the sampled metrics by applying a correction, obtained by minimizing different criteria such as bias or mean squared error. 
We conclude with an empirical evaluation of the naive sampled metrics and their corrected variants. To summarize, our work suggests that sampling should be avoided for metric calculation, however if an experimental study needs to sample, the proposed corrections can improve the quality of the estimate.", + "citation_best": 330 + }, + { + "paper": "3021455140", + "venue": "1123349196", + "year": "2020", + "title": "hummingbird energy efficient gps receiver for small satellites", + "label": [ + "60229501", + "193648706", + "14279187", + "21822782", + "79403827" + ], + "author": [ + "2111314172", + "2810408929", + "2107208207", + "2025697695", + "2037036683" + ], + "reference": [ + "130840055", + "180647260", + "1558470758", + "1992973611", + "2056601122", + "2078125800", + "2089091794", + "2108085482", + "2168956667", + "2185862955", + "2256438262", + "2345634401", + "2411129447", + "2486278406", + "2517003582", + "2531385306", + "2607406613", + "2613922593", + "2761903093", + "2769085832" + ], + "abstract": "global positioning system is a widely adopted localization technique with the increasing demand for small satellites the need for a low power gps for satellites is also increasing to enable many state of the art applications the exact position of the satellites is necessary however building low power gps receivers which operate in low earth orbit pose significant challenges this is mainly due to the high speed 7 8 km s of small satellites while duty cycling the receiver is a possible solution the high relative doppler shift between the gps satellites and the small satellite contributes to the increase in time to first fix ttff thus increasing the energy consumption further if the gps receiver is tumbling along with the small satellite on which it is mounted longer ttff may lead to no gps fix due to disorientation of the receiver antenna in this paper we elucidate the design of a low cost low power gps receiver for small satellite applications we also propose an 
energy optimization algorithm called f3to improve the ttff which is the main contributor to the energy consumption during cold start with simulations and in orbit evaluation from a launched nanosatellite with our gps and high end gps simulators we show that up to 96 16 of energy savings consuming only 1 25th energy compared to the state of the art can be achieved using our algorithm without compromising much 10 m on the navigation accuracy the ttff achieved is at most 33 s", + "title_raw": "Hummingbird: energy efficient GPS receiver for small satellites", + "abstract_raw": "Global Positioning System is a widely adopted localization technique. With the increasing demand for small satellites, the need for a low-power GPS for satellites is also increasing. To enable many state-of-the-art applications, the exact position of the satellites is necessary. However, building low-power GPS receivers which operate in low earth orbit pose significant challenges. This is mainly due to the high speed (~7.8 km/s) of small satellites. While duty-cycling the receiver is a possible solution, the high relative Doppler shift between the GPS satellites and the small satellite contributes to the increase in Time To First Fix (TTFF), thus increasing the energy consumption. Further, if the GPS receiver is tumbling along with the small satellite on which it is mounted, longer TTFF may lead to no GPS fix due to disorientation of the receiver antenna. In this paper, we elucidate the design of a low-cost, low-power GPS receiver for small satellite applications. We also propose an energy optimization algorithm called F3to improve the TTFF which is the main contributor to the energy consumption during cold start. 
With simulations and in-orbit evaluation from a launched nanosatellite with our \u03bcGPS and high-end GPS simulators, we show that up to 96.16% of energy savings (consuming only ~ 1/25th energy compared to the state of the art) can be achieved using our algorithm without compromising much (~10 m) on the navigation accuracy. The TTFF achieved is at most 33 s.", + "link": "https://scholar.google.com/scholar?as_q=&num=10&btnG=Search+Scholar&as_epq=Hummingbird:+Energy+Efficient+GPS+Receiver+for+Small+Satellites&as_oq=&as_eq=&as_occt=any&as_sauthors=Narayana", + "scraped_abstract": "Global Positioning System is a widely adopted localization technique. With the increasing demand for small satellites, the need for a low-power GPS for satellites is also increasing. To enable many state-of-the-art applications, the exact position of the satellites is necessary. However, building low-power GPS receivers which operate in low earth orbit pose significant challenges. This is mainly due to the high speed (~7.8 km/s) of small satellites. While duty-cycling the receiver is a possible solution, the high relative Doppler shift between the GPS satellites and the small satellite contributes to the increase in Time To First Fix (TTFF), thus increasing the energy consumption. Further, if the GPS receiver is tumbling along with the small satellite on which it is mounted, longer TTFF may lead to no GPS fix due to disorientation of the receiver antenna. In this paper, we elucidate the design of a low-cost, low-power GPS receiver for small satellite applications. We also propose an energy optimization algorithm called F3to improve the TTFF which is the main contributor to the energy consumption during cold start. 
With simulations and in-orbit evaluation from a launched nanosatellite with our \u03bcGPS and high-end GPS simulators, we show that up to 96.16% of energy savings (consuming only ~ 1/25th energy compared to the state of the art) can be achieved using our algorithm without compromising much (~10 m) on the navigation accuracy. The TTFF achieved is at most 33 s.", + "citation_best": 0 + }, + { + "paper": "3023792721", + "venue": "1123349196", + "year": "2020", + "title": "m cube a millimeter wave massive mimo software radio", + "label": [ + "65165936", + "171115542", + "207987634", + "45764600", + "555944384", + "108037233", + "55494473", + "21822782" + ], + "author": [ + "2511222405", + "3023630854", + "2649615838", + "2324632098", + "2240635727" + ], + "reference": [ + "65255600", + "1521564387", + "1551517259", + "1976593671", + "1994738996", + "2015155523", + "2016999686", + "2066218102", + "2110468965", + "2111953900", + "2113638573", + "2115147171", + "2124969302", + "2138752820", + "2157977389", + "2159347248", + "2188587999", + "2261663070", + "2311907317", + "2469690627", + "2525282885", + "2525608966", + "2526381904", + "2605315041", + "2625834558", + "2761083975", + "2761854559", + "2761994205", + "2764164848", + "2781054684", + "2789370391", + "2887976451", + "2889969682", + "2891322913", + "2892065752", + "2895784442", + "2896094378", + "2896938066", + "2985849463", + "3022992923", + "3098632498", + "3098827349" + ], + "abstract": "millimeter wave mmwave technologies represent a cornerstone for emerging wireless network infrastructure and for rf sensing systems in security health and automotive domains through a mimo array of phased arrays with hundreds of antenna elements mmwave can boost wireless bit rates to 100 gbps and potentially achieve near vision sensing resolution however the lack of an experimental platform has been impeding research in this field this paper fills the gap with m3 m cube the first mmwave massive mimo software radio m3 features a 
fully reconfigurable array of phased arrays with up to 8 rf chains and 288 antenna elements despite the orders of magnitude larger antenna arrays its cost is orders of magnitude lower even when compared with state of the art single rf chain mmwave software radios the key design principle behind m3 is to hijack a low cost commodity 802 11ad radio separate the control path and data path inside regenerate the phased array control signals and recreate the data signals using a programmable baseband extensive experiments have demonstrated the effectiveness of the m3 design and its usefulness for research in mmwave massive mimo communication and sensing", + "title_raw": "M-Cube: a millimeter-wave massive MIMO software radio", + "abstract_raw": "Millimeter-wave (mmWave) technologies represent a cornerstone for emerging wireless network infrastructure, and for RF sensing systems in security, health, and automotive domains. Through a MIMO array of phased arrays with hundreds of antenna elements, mmWave can boost wireless bit-rates to 100+ Gbps, and potentially achieve near-vision sensing resolution. However, the lack of an experimental platform has been impeding research in this field. This paper fills the gap with M3 (M-Cube), the first mmWave massive MIMO software radio. M3 features a fully reconfigurable array of phased arrays, with up to 8 RF chains and 288 antenna elements. Despite the orders of magnitude larger antenna arrays, its cost is orders of magnitude lower, even when compared with state-of-the-art single RF chain mmWave software radios. The key design principle behind M3 is to hijack a low-cost commodity 802.11ad radio, separate the control path and data path inside, regenerate the phased array control signals, and recreate the data signals using a programmable baseband. 
Extensive experiments have demonstrated the effectiveness of the M3 design, and its usefulness for research in mmWave massive MIMO communication and sensing.", + "link": "https://scholar.google.com/scholar?as_q=&num=10&btnG=Search+Scholar&as_epq=M-Cube:+a+millimeter-wave+massive+MIMO+software+radio&as_oq=&as_eq=&as_occt=any&as_sauthors=Zhao", + "scraped_abstract": "Millimeter-wave (mmWave) technologies represent a cornerstone for emerging wireless network infrastructure, and for RF sensing systems in security, health, and automotive domains. Through a MIMO array of phased arrays with hundreds of antenna elements, mmWave can boost wireless bit-rates to 100+ Gbps, and potentially achieve near-vision sensing resolution. However, the lack of an experimental platform has been impeding research in this field. This paper fills the gap with M3 (M-Cube), the first mmWave massive MIMO software radio. M3 features a fully reconfigurable array of phased arrays, with up to 8 RF chains and 288 antenna elements. Despite the orders of magnitude larger antenna arrays, its cost is orders of magnitude lower, even when compared with state-of-the-art single RF chain mmWave software radios. The key design principle behind M3 is to hijack a low-cost commodity 802.11ad radio, separate the control path and data path inside, regenerate the phased array control signals, and recreate the data signals using a programmable baseband. 
Extensive experiments have demonstrated the effectiveness of the M3 design, and its usefulness for research in mmWave massive MIMO communication and sensing.", + "citation_best": 0 + }, + { + "paper": "3098350529", + "venue": "1127325140", + "year": "2020", + "title": "no regret learning dynamics for extensive form correlated equilibrium", + "label": [ + "99221444", + "50817715" + ], + "author": [ + "2698743197", + "2741186379", + "2336248083", + "2024572568" + ], + "reference": [], + "abstract": "", + "title_raw": "No-Regret Learning Dynamics for Extensive-Form Correlated Equilibrium", + "abstract_raw": "", + "link": "https://scholar.google.com/scholar?as_q=&num=10&btnG=Search+Scholar&as_epq=No-Regret+Learning+Dynamics+for+Extensive-Form+Correlated+Equilibrium&as_oq=&as_eq=&as_occt=any&as_sauthors=Celli", + "scraped_abstract": null, + "citation_best": 11 + }, + { + "paper": "3098903812", + "venue": "1127325140", + "year": "2020", + "title": "language models are few shot learners", + "label": [ + "2777530160", + "137293760", + "60509570", + "61249035", + "2777884278", + "204321447" + ], + "author": [ + "2976778545", + "3029199448", + "2631148962", + "3029806967", + "2127852364", + "2560622064", + "2498206862", + "2594324588", + "3016482836", + "2916923792", + "3032278108", + "3016268343", + "3031815860", + "2609171090", + "2566464304", + "2634285778", + "2311703592", + "2953498957", + "3028694232", + "3029177488", + "3029609219", + "3029541869", + "3029272132", + "2898206926", + "3000793068", + "2997198986", + "3011344281", + "2092797056", + "2479703635", + "215131072", + "669186563" + ], + "reference": [], + "abstract": "", + "title_raw": "Language Models are Few-Shot Learners", + "abstract_raw": "", + "link": "https://scholar.google.com/scholar?as_q=&num=10&btnG=Search+Scholar&as_epq=Language+Models+are+Few-Shot+Learners&as_oq=&as_eq=&as_occt=any&as_sauthors=Brown", + "scraped_abstract": "Recent work has demonstrated substantial gains on many NLP tasks and 
benchmarks by pre-training on a large corpus of text followed by fine-tuning on a specific task. While typically task-agnostic in architecture, this method still requires task-specific fine-tuning datasets of thousands or tens of thousands of examples. By contrast, humans can generally perform a new language task from only a few examples or from simple instructions - something which current NLP systems still largely struggle to do. Here we show that scaling up language models greatly improves task-agnostic, few-shot performance, sometimes even reaching competitiveness with prior state-of-the-art fine-tuning approaches. Specifically, we train GPT-3, an autoregressive language model with 175 billion parameters, 10x more than any previous non-sparse language model, and test its performance in the few-shot setting. For all tasks, GPT-3 is applied without any gradient updates or fine-tuning, with tasks and few-shot demonstrations specified purely via text interaction with the model. GPT-3 achieves strong performance on many NLP datasets, including translation, question-answering, and cloze tasks, as well as several tasks that require on-the-fly reasoning or domain adaptation, such as unscrambling words, using a novel word in a sentence, or performing 3-digit arithmetic. At the same time, we also identify some datasets where GPT-3's few-shot learning still struggles, as well as some datasets where GPT-3 faces methodological issues related to training on large web corpora. Finally, we find that GPT-3 can generate samples of news articles which human evaluators have difficulty distinguishing from articles written by humans. 
We discuss broader societal impacts of this finding and of GPT-3 in general.", + "citation_best": 12370 + }, + { + "paper": "3013640143", + "venue": "1158363782", + "year": "2020", + "title": "understanding detecting and localizing partial failures in large system software", + "label": [ + "115903868", + "2779960034" + ], + "author": [ + "2944236663", + "2646704398", + "2113752898" + ], + "reference": [ + "597502782", + "1455804204", + "1514045764", + "1545684573", + "1609618519", + "1963279632", + "1981514768", + "1985229168", + "1990249073", + "2010805714", + "2080696000", + "2109061641", + "2116021422", + "2118004641", + "2124617909", + "2133943294", + "2140991542", + "2148165526", + "2163372502", + "2166755212", + "2167880834", + "2168118398", + "2310116763", + "2337392435", + "2522470548", + "2616861556", + "2623323969", + "2791493318", + "2895690683", + "2899050079", + "2899066029", + "3106729728" + ], + "abstract": "", + "title_raw": "Understanding, Detecting and Localizing Partial Failures in Large System Software", + "abstract_raw": "", + "link": "https://www.semanticscholar.org/paper/ef41331eea9eb2e4d972b56230b8f2937ab422f1", + "scraped_abstract": null, + "citation_best": 17 + }, + { + "paper": "3108362055", + "venue": "1185109434", + "year": "2020", + "title": "hxdp efficient software packet processing on fpga nics", + "label": [ + "149635348", + "49154492", + "553261973", + "120317029", + "42935608", + "2779818221", + "78766204", + "2779581428", + "2776834041" + ], + "author": [ + "2969460025", + "2886426610", + "2041772485", + "2066056238", + "2060344939", + "2307403921", + "3024280804", + "3097705148", + "2567704736", + "2151233986" + ], + "reference": [], + "abstract": "", + "title_raw": "hXDP: Efficient Software Packet Processing on {FPGA} NICs", + "abstract_raw": "", + "link": 
"https://scholar.google.com/scholar?as_q=&num=10&btnG=Search+Scholar&as_epq=hXDP:+Efficient+Software+Packet+Processing+on+FPGA+NICs&as_oq=&as_eq=&as_occt=any&as_sauthors=Brunella", + "scraped_abstract": null, + "citation_best": 12 + }, + { + "paper": "3093054587", + "venue": "1185109434", + "year": "2020", + "title": "byzantine ordered consensus without byzantine oligarchy", + "label": [ + "91062100", + "55439883", + "168021876", + "2778029271", + "49265948", + "80444323", + "20528329" + ], + "author": [ + "3092819672", + "2164544756", + "2667658809", + "2158515488", + "2292936287" + ], + "reference": [], + "abstract": "the specific order of commands agreed upon when running state machine replication smr is immaterial to fault tolerance all that is required is for all correct deterministic replicas to follow it in the permissioned blockchains that rely on byzantine fault tolerant bft smr however nodes have a stake in the specific sequence that ledger records as well as in preventing other parties from manipulating the sequencing to their advantage the traditional specification of smr correctness however has no language to express these concerns this paper introduces byzantine ordered consensus a new primitive that augments the correctness specification of bft smr to include specific guarantees on the total orders it produces and a new architecture for bft smr that by factoring out ordering from consensus can enforce these guarantees and prevent byzantine nodes from controlling ordering decisions a byzantine oligarchy these contributions are instantiated in pompe a bft smr protocol that is guaranteed to order commands in a way that respects a natural extension of linearizability", + "title_raw": "Byzantine Ordered Consensus without Byzantine Oligarchy.", + "abstract_raw": "The specific order of commands agreed upon when running state machine replication (SMR) is immaterial to fault-tolerance: all that is required is for all correct deterministic replicas to follow 
it. In the permissioned blockchains that rely on Byzantine fault tolerant (BFT) SMR, however, nodes have a stake in the specific sequence that ledger records, as well as in preventing other parties from manipulating the sequencing to their advantage. The traditional specification of SMR correctness, however, has no language to express these concerns. This paper introduces Byzantine ordered consensus, a new primitive that augments the correctness specification of BFT SMR to include specific guarantees on the total orders it produces; and a new architecture for BFT SMR that, by factoring out ordering from consensus, can enforce these guarantees and prevent Byzantine nodes from controlling ordering decisions (a Byzantine oligarchy). These contributions are instantiated in Pompe, a BFT SMR protocol that is guaranteed to order commands in a way that respects a natural extension of linearizability.", + "link": "https://scholar.google.com/scholar?as_q=&num=10&btnG=Search+Scholar&as_epq=Byzantine+Ordered+Consensus+without+Byzantine+Oligarchy&as_oq=&as_eq=&as_occt=any&as_sauthors=Zhang", + "scraped_abstract": null, + "citation_best": 14 + }, + { + "paper": "3095716351", + "venue": "1185109434", + "year": "2020", + "title": "virtual consensus in delos", + "label": [ + "136764020" + ], + "author": [ + "2658606637", + "1992040104", + "2645045122", + "3097010034", + "3095451682", + "3096914841", + "3094797943", + "3097301373", + "3096437285", + "3096520798", + "3095038659", + "3097796921", + "3095928172", + "3094867429", + "3097782491", + "3096128199", + "2141556116" + ], + "reference": [ + "1546816891", + "1549820118", + "1642392512", + "1971191556", + "1975814632", + "1992479210", + "2000832815", + "2005216655", + "2010805714", + "2016404663", + "2035362408", + "2065180040", + "2067740651", + "2075854425", + "2101939036", + "2109213558", + "2118153508", + "2118726720", + "2119738171", + "2134337235", + "2156580773", + "2173966303", + "2185792076", + "2516886136", + 
"2576670572", + "2604734262", + "2612759782", + "2898926654", + "2899422128", + "2929031942", + "2962756066", + "3005310413", + "3009246723", + "3018702812" + ], + "abstract": "", + "title_raw": "Virtual Consensus in Delos", + "abstract_raw": "", + "link": "https://scholar.google.com/scholar?as_q=&num=10&btnG=Search+Scholar&as_epq=Virtual+Consensus+in+Delos&as_oq=&as_eq=&as_occt=any&as_sauthors=Balakrishnan", + "scraped_abstract": null, + "citation_best": 7 + }, + { + "paper": "3033394649", + "venue": "1127352206", + "year": "2020", + "title": "data driven inference of representation invariants", + "label": [ + "165464430", + "167955471", + "50341643", + "2780440489", + "80444323", + "138958017", + "175971053" + ], + "author": [ + "2762900551", + "2911920872", + "294596595", + "2161850330" + ], + "reference": [ + "100631053", + "2013596093", + "2128303158", + "2135322712", + "2142756246", + "2170577595", + "2185676247", + "2311603331", + "2571456046", + "2741361950", + "2767948565", + "2779005308", + "2798232457", + "2904362559", + "2955879452", + "2962805785", + "2986598723", + "3102469351", + "3192343331" + ], + "abstract": "a representation invariant is a property that holds of all values of abstract type produced by a module representation invariants play important roles in software engineering and program verification in this paper we develop a counterexample driven algorithm for inferring a representation invariant that is sufficient to imply a desired specification for a module the key novelty is a type directed notion of visible inductiveness which ensures that the algorithm makes progress toward its goal as it alternates between weakening and strengthening candidate invariants the algorithm is parameterized by an example based synthesis engine and a verifier and we prove that it is sound and complete for first order modules over finite types assuming that the synthesizer and verifier are as well we implement these ideas in a tool called hanoi which 
synthesizes representation invariants for recursive data types hanoi not only handles invariants for first order code but higher order code as well in its back end hanoi uses an enumerative synthesizer called myth and an enumerative testing tool as a verifier because hanoi uses testing for verification it is not sound though our empirical evaluation shows that it is successful on the benchmarks we investigated", + "title_raw": "Data-driven inference of representation invariants", + "abstract_raw": "A representation invariant is a property that holds of all values of abstract type produced by a module. Representation invariants play important roles in software engineering and program verification. In this paper, we develop a counterexample-driven algorithm for inferring a representation invariant that is sufficient to imply a desired specification for a module. The key novelty is a type-directed notion of visible inductiveness, which ensures that the algorithm makes progress toward its goal as it alternates between weakening and strengthening candidate invariants. The algorithm is parameterized by an example-based synthesis engine and a verifier, and we prove that it is sound and complete for first-order modules over finite types, assuming that the synthesizer and verifier are as well. We implement these ideas in a tool called Hanoi, which synthesizes representation invariants for recursive data types. Hanoi not only handles invariants for first-order code, but higher-order code as well. In its back end, Hanoi uses an enumerative synthesizer called Myth and an enumerative testing tool as a verifier. 
Because Hanoi uses testing for verification, it is not sound, though our empirical evaluation shows that it is successful on the benchmarks we investigated.", + "link": "https://scholar.google.com/scholar?as_q=&num=10&btnG=Search+Scholar&as_epq=Data-Driven+Inference+of+Representation+Invariants&as_oq=&as_eq=&as_occt=any&as_sauthors=Miltner", + "scraped_abstract": "A representation invariant is a property that holds of all values of abstract type produced by a module. Representation invariants play important roles in software engineering and program verification. In this paper, we develop a counterexample-driven algorithm for inferring a representation invariant that is sufficient to imply a desired specification for a module. The key novelty is a type-directed notion of visible inductiveness, which ensures that the algorithm makes progress toward its goal as it alternates between weakening and strengthening candidate invariants. The algorithm is parameterized by an example-based synthesis engine and a verifier, and we prove that it is sound and complete for first-order modules over finite types, assuming that the synthesizer and verifier are as well. We implement these ideas in a tool called Hanoi, which synthesizes representation invariants for recursive data types. Hanoi not only handles invariants for first-order code, but higher-order code as well. In its back end, Hanoi uses an enumerative synthesizer called Myth and an enumerative testing tool as a verifier. 
Because Hanoi uses testing for verification, it is not sound, though our empirical evaluation shows that it is successful on the benchmarks we investigated.", + "citation_best": 23 + }, + { + "paper": "3034158217", + "venue": "1127352206", + "year": "2020", + "title": "armada low effort verification of high performance concurrent programs", + "label": [ + "167955471", + "87468716", + "97824396", + "10784920", + "184337299", + "115903868" + ], + "author": [ + "2150137935", + "3033747810", + "1772270056", + "1595681779", + "1985363956", + "2896079737", + "2224265930", + "3033888624" + ], + "reference": [ + "175195251", + "971186954", + "1480909796", + "1509000726", + "1525350307", + "1552367747", + "1552921094", + "1561412378", + "2001738739", + "2072062729", + "2090551028", + "2095762545", + "2095770127", + "2099470183", + "2104245532", + "2130427425", + "2131135493", + "2132107743", + "2132761501", + "2141607910", + "2150224578", + "2152390090", + "2162627428", + "2168704035", + "2564852534", + "2578546025", + "2596377803", + "2768537380", + "2780286008", + "2798365728", + "2883180298", + "2898893133", + "2899000846", + "2901454403", + "2982041059", + "3011177614" + ], + "abstract": "safely writing high performance concurrent programs is notoriously difficult to aid developers we introduce armada a language and tool designed to formally verify such programs with relatively little effort via a c like language and a small step state machine based semantics armada gives developers the flexibility to choose arbitrary memory layout and synchronization primitives so they are never constrained in their pursuit of performance to reduce developer effort armada leverages smt powered automation and a library of powerful reasoning techniques including rely guarantee tso elimination reduction and alias analysis all these techniques are proven sound and armada can be soundly extended with additional strategies over time using armada we verify four concurrent case studies and 
show that we can achieve performance equivalent to that of unverified code", + "title_raw": "Armada: low-effort verification of high-performance concurrent programs", + "abstract_raw": "Safely writing high-performance concurrent programs is notoriously difficult. To aid developers, we introduce Armada, a language and tool designed to formally verify such programs with relatively little effort. Via a C-like language and a small-step, state-machine-based semantics, Armada gives developers the flexibility to choose arbitrary memory layout and synchronization primitives so they are never constrained in their pursuit of performance. To reduce developer effort, Armada leverages SMT-powered automation and a library of powerful reasoning techniques, including rely-guarantee, TSO elimination, reduction, and alias analysis. All these techniques are proven sound, and Armada can be soundly extended with additional strategies over time. Using Armada, we verify four concurrent case studies and show that we can achieve performance equivalent to that of unverified code.", + "link": "https://scholar.google.com/scholar?as_q=&num=10&btnG=Search+Scholar&as_epq=Armada:+Low-Effort+Verification+of+High-Performance+Concurrent+Programs&as_oq=&as_eq=&as_occt=any&as_sauthors=Lorch", + "scraped_abstract": null, + "citation_best": 19 + }, + { + "paper": "3034071395", + "venue": "1127352206", + "year": "2020", + "title": "validating smt solvers via semantic fusion", + "label": [ + "164155591", + "168773769", + "1462715", + "199360897", + "200632571", + "39920170", + "111065885" + ], + "author": [ + "2572568037", + "2787977068", + "2102704429" + ], + "reference": [ + "1480909796", + "1541238879", + "1579437898", + "1710734607", + "1966021031", + "2040158828", + "2055477538", + "2057156093", + "2065793351", + "2095445208", + "2096449544", + "2143331802", + "2155877593", + "2159595840", + "2170737051", + "2256863551", + "2276356546", + "2324595780", + "2530895108", + "2532737545", + "2600774173", 
+ "2759550170", + "2761359361", + "2853432192", + "2883887424", + "2888538076", + "2962458576", + "2967572128", + "2997653900", + "2998740266", + "3004482543", + "3005142042", + "3008321987", + "3011737828", + "3102504333" + ], + "abstract": "we introduce semantic fusion a general effective methodology for validating satisfiability modulo theory smt solvers our key idea is to fuse two existing equisatisfiable i e both satisfiable or unsatisfiable formulas into a new formula that combines the structures of its ancestors in a novel manner and preserves the satisfiability by construction this fused formula is then used for validating smt solvers we realized semantic fusion as yinyang a practical smt solver testing tool during four months of extensive testing yinyang has found 45 confirmed unique bugs in the default arithmetic and string solvers of z3 and cvc4 the two state of the art smt solvers among these 41 have already been fixed by the developers the majority 29 45 of these bugs expose critical soundness issues our bug reports and testing effort have been well appreciated by smt solver developers", + "title_raw": "Validating SMT solvers via semantic fusion", + "abstract_raw": "We introduce Semantic Fusion, a general, effective methodology for validating Satisfiability Modulo Theory (SMT) solvers. Our key idea is to fuse two existing equisatisfiable (i.e., both satisfiable or unsatisfiable) formulas into a new formula that combines the structures of its ancestors in a novel manner and preserves the satisfiability by construction. This fused formula is then used for validating SMT solvers. We realized Semantic Fusion as YinYang, a practical SMT solver testing tool. During four months of extensive testing, YinYang has found 45 confirmed, unique bugs in the default arithmetic and string solvers of Z3 and CVC4, the two state-of-the-art SMT solvers. Among these, 41 have already been fixed by the developers. 
The majority (29/45) of these bugs expose critical soundness issues. Our bug reports and testing effort have been well-appreciated by SMT solver developers.", + "link": "https://scholar.google.com/scholar?as_q=&num=10&btnG=Search+Scholar&as_epq=Validating+SMT+Solvers+via+Semantic+Fusion&as_oq=&as_eq=&as_occt=any&as_sauthors=Winterer", + "scraped_abstract": null, + "citation_best": 57 + }, + { + "paper": "3033957710", + "venue": "1127352206", + "year": "2020", + "title": "from folklore to fact comparing implementations of stacks and continuations", + "label": [ + "26713055", + "106663253", + "100602654", + "176486055", + "2780870223", + "169590947", + "193702766", + "199360897", + "88626702" + ], + "author": [ + "2803679564", + "324107302" + ], + "reference": [ + "1494199306", + "1495087637", + "1508280916", + "1511466363", + "1519496539", + "1571962375", + "1580664042", + "1584951182", + "1622288619", + "1828229499", + "1966975259", + "1969235224", + "1972174903", + "1979240050", + "1992640206", + "1994257154", + "2016849154", + "2026879022", + "2033675214", + "2036136174", + "2037023397", + "2046525202", + "2047869411", + "2057065238", + "2059416532", + "2062372058", + "2063591933", + "2070531891", + "2082709037", + "2095450548", + "2095951614", + "2098298678", + "2104098106", + "2111534760", + "2114922959", + "2116053402", + "2119816515", + "2131129592", + "2141157907", + "2151792522", + "2156268797", + "2161318861", + "2169347615", + "2913888506", + "3006378633", + "3103479364" + ], + "abstract": "the efficient implementation of function calls and non local control transfers is a critical part of modern language implementations and is important in the implementation of everything from recursion higher order functions concurrency and coroutines to task based parallelism in a compiler these features can be supported by a variety of mechanisms including call stacks segmented stacks and heap allocated continuation closures an implementor of a high level language 
with advanced control features might ask the question what is the best choice for my implementation unfortunately the current literature does not provide much guidance since previous studies suffer from various flaws in methodology and are outdated for modern hardware in the absence of recent well normalized measurements and a holistic overview of their implementation specifics the path of least resistance when choosing a strategy is to trust folklore but the folklore is also suspect this paper attempts to remedy this situation by providing an apples to apples comparison of six different approaches to implementing call stacks and continuations this comparison uses the same source language compiler pipeline llvm backend and runtime system with the only differences being those required by the differences in implementation strategy we compare the implementation challenges of the different approaches their sequential performance and their suitability to support advanced control mechanisms including supporting heavily threaded code in addition to the comparison of implementation strategies the paper s contributions also include a number of useful implementation techniques that we discovered along the way", + "title_raw": "From folklore to fact: comparing implementations of stacks and continuations", + "abstract_raw": "The efficient implementation of function calls and non-local control transfers is a critical part of modern language implementations and is important in the implementation of everything from recursion, higher-order functions, concurrency and coroutines, to task-based parallelism. In a compiler, these features can be supported by a variety of mechanisms, including call stacks, segmented stacks, and heap-allocated continuation closures. 
An implementor of a high-level language with advanced control features might ask the question ``what is the best choice for my implementation?'' Unfortunately, the current literature does not provide much guidance, since previous studies suffer from various flaws in methodology and are outdated for modern hardware. In the absence of recent, well-normalized measurements and a holistic overview of their implementation specifics, the path of least resistance when choosing a strategy is to trust folklore, but the folklore is also suspect. This paper attempts to remedy this situation by providing an ``apples-to-apples'' comparison of six different approaches to implementing call stacks and continuations. This comparison uses the same source language, compiler pipeline, LLVM-backend, and runtime system, with the only differences being those required by the differences in implementation strategy. We compare the implementation challenges of the different approaches, their sequential performance, and their suitability to support advanced control mechanisms, including supporting heavily threaded code. In addition to the comparison of implementation strategies, the paper's contributions also include a number of useful implementation techniques that we discovered along the way.", + "link": "https://scholar.google.com/scholar?as_q=&num=10&btnG=Search+Scholar&as_epq=From+Folklore+to+Fact:+Comparing+Implementations+of+Stacks+and+Continuations&as_oq=&as_eq=&as_occt=any&as_sauthors=Farvardin", + "scraped_abstract": "The efficient implementation of function calls and non-local control transfers is a critical part of modern language implementations and is important in the implementation of everything from recursion, higher-order functions, concurrency and coroutines, to task-based parallelism. In a compiler, these features can be supported by a variety of mechanisms, including call stacks, segmented stacks, and heap-allocated continuation closures. 
An implementor of a high-level language with advanced control features might ask the question ``what is the best choice for my implementation?'' Unfortunately, the current literature does not provide much guidance, since previous studies suffer from various flaws in methodology and are outdated for modern hardware. In the absence of recent, well-normalized measurements and a holistic overview of their implementation specifics, the path of least resistance when choosing a strategy is to trust folklore, but the folklore is also suspect. This paper attempts to remedy this situation by providing an ``apples-to-apples'' comparison of six different approaches to implementing call stacks and continuations. This comparison uses the same source language, compiler pipeline, LLVM-backend, and runtime system, with the only differences being those required by the differences in implementation strategy. We compare the implementation challenges of the different approaches, their sequential performance, and their suitability to support advanced control mechanisms, including supporting heavily threaded code. 
In addition to the comparison of implementation strategies, the paper's contributions also include a number of useful implementation techniques that we discovered along the way.", + "citation_best": 12 + }, + { + "paper": "3033818254", + "venue": "1127352206", + "year": "2020", + "title": "fast graph simplification for interleaved dyck reachability", + "label": [ + "192034797", + "136643341", + "49515151", + "48044578", + "63116202", + "80444323", + "97686452" + ], + "author": [ + "3034066047", + "2168384499", + "341386833" + ], + "reference": [ + "148396834", + "1604043045", + "1966032455", + "1997981086", + "2024779397", + "2035260626", + "2036208810", + "2060345611", + "2060439939", + "2065088439", + "2080573945", + "2093080079", + "2094716892", + "2096587139", + "2110833886", + "2117948828", + "2119137801", + "2133497528", + "2135565307", + "2156883715", + "2169304846", + "2247002685", + "2561266941", + "2899166628", + "2900044545", + "2997088857", + "2998965762", + "3008075797", + "3009945260", + "3126200532", + "3176441537" + ], + "abstract": "many program analysis problems can be formulated as graph reachability problems interleaved dyck language reachability interleaved dyck language reachability interdyck reachability is a fundamental framework to express a wide variety of program analysis problems over edge labeled graphs the interdyck language represents an intersection of multiple matched parenthesis languages i e dyck languages in practice program analyses typically leverage one dyck language to achieve context sensitivity and other dyck languages to model data dependences such as field sensitivity and pointer references dereferences in the ideal case an interdyck reachability framework should model multiple dyck languages simultaneously unfortunately precise interdyck reachability is undecidable any practical solution must over approximate the exact answer in the literature a lot of work has been proposed to over approximate the interdyck reachability 
formulation this paper offers a new perspective on improving both the precision and the scalability of interdyck reachability we aim to simplify the underlying input graph g our key insight is based on the observation that if an edge is not contributing to any interdyck path we can safely eliminate it from g our technique is orthogonal to the interdyck reachability formulation and can serve as a pre processing step with any over approximating approaches for interdyck reachability we have applied our graph simplification algorithm to pre processing the graphs from a recent interdyck reachability based taint analysis for android our evaluation on three popular interdyck reachability algorithms yields promising results in particular our graph simplification method improves both the scalability and precision of all three interdyck reachability algorithms sometimes dramatically", + "title_raw": "Fast graph simplification for interleaved Dyck-reachability", + "abstract_raw": "Many program-analysis problems can be formulated as graph-reachability problems. Interleaved Dyck language reachability. Interleaved Dyck language reachability (InterDyck-reachability) is a fundamental framework to express a wide variety of program-analysis problems over edge-labeled graphs. The InterDyck language represents an intersection of multiple matched-parenthesis languages (i.e., Dyck languages). In practice, program analyses typically leverage one Dyck language to achieve context-sensitivity, and other Dyck languages to model data dependences, such as field-sensitivity and pointer references/dereferences. In the ideal case, an InterDyck-reachability framework should model multiple Dyck languages simultaneously. Unfortunately, precise InterDyck-reachability is undecidable. Any practical solution must over-approximate the exact answer. In the literature, a lot of work has been proposed to over-approximate the InterDyck-reachability formulation. 
This paper offers a new perspective on improving both the precision and the scalability of InterDyck-reachability: we aim to simplify the underlying input graph G. Our key insight is based on the observation that if an edge is not contributing to any InterDyck-path, we can safely eliminate it from G. Our technique is orthogonal to the InterDyck-reachability formulation, and can serve as a pre-processing step with any over-approximating approaches for InterDyck-reachability. We have applied our graph simplification algorithm to pre-processing the graphs from a recent InterDyck-reachability-based taint analysis for Android. Our evaluation on three popular InterDyck-reachability algorithms yields promising results. In particular, our graph-simplification method improves both the scalability and precision of all three InterDyck-reachability algorithms, sometimes dramatically.", + "link": "https://scholar.google.com/scholar?as_q=&num=10&btnG=Search+Scholar&as_epq=Fast+Graph+Simplification+for+Interleaved+Dyck-Reachability&as_oq=&as_eq=&as_occt=any&as_sauthors=Li", + "scraped_abstract": "Many program-analysis problems can be formulated as graph-reachability problems. Interleaved Dyck language reachability. Interleaved Dyck language reachability (InterDyck-reachability) is a fundamental framework to express a wide variety of program-analysis problems over edge-labeled graphs. The InterDyck language represents an intersection of multiple matched-parenthesis languages (i.e., Dyck languages). In practice, program analyses typically leverage one Dyck language to achieve context-sensitivity, and other Dyck languages to model data dependences, such as field-sensitivity and pointer references/dereferences. In the ideal case, an InterDyck-reachability framework should model multiple Dyck languages simultaneously. Unfortunately, precise InterDyck-reachability is undecidable. Any practical solution must over-approximate the exact answer. 
In the literature, a lot of work has been proposed to over-approximate the InterDyck-reachability formulation. This paper offers a new perspective on improving both the precision and the scalability of InterDyck-reachability: we aim to simplify the underlying input graph G. Our key insight is based on the observation that if an edge is not contributing to any InterDyck-path, we can safely eliminate it from G. Our technique is orthogonal to the InterDyck-reachability formulation, and can serve as a pre-processing step with any over-approximating approaches for InterDyck-reachability. We have applied our graph simplification algorithm to pre-processing the graphs from a recent InterDyck-reachability-based taint analysis for Android. Our evaluation on three popular InterDyck-reachability algorithms yields promising results. In particular, our graph-simplification method improves both the scalability and precision of all three InterDyck-reachability algorithms, sometimes dramatically.", + "citation_best": 8 + }, + { + "paper": "3031693517", + "venue": "1184151122", + "year": "2020", + "title": "a framework for adversarially robust streaming algorithms", + "label": [ + "97970142", + "80444323", + "2781039887", + "95546049", + "128669082" + ], + "author": [ + "2286491985", + "2792367697", + "2142501412", + "2008719867" + ], + "reference": [ + "8957553", + "1493892051", + "1546372806", + "1592346261", + "1988355221", + "1995484833", + "1997010704", + "1998272044", + "2006355640", + "2015926116", + "2052800956", + "2063844956", + "2080745194", + "2083100635", + "2103126020", + "2104561628", + "2107917944", + "2129978406", + "2132032691", + "2142224482", + "2159041477", + "2160681854", + "2563544973", + "2963777735", + "2963799254" + ], + "abstract": "we investigate the adversarial robustness of streaming algorithms in this context an algorithm is considered robust if its performance guarantees hold even if the stream is chosen adaptively by an adversary that observes the 
outputs of the algorithm along the stream and can react in an online manner while deterministic streaming algorithms are inherently robust many central problems in the streaming literature do not admit sublinear space deterministic algorithms on the other hand classical space efficient randomized algorithms for these problems are generally not adversarially robust this raises the natural question of whether there exist efficient adversarially robust randomized streaming algorithms for these problems in this work we show that the answer is positive for various important streaming problems in the insertion only model including distinct elements and more generally f p estimation fp heavy hitters entropy estimation and others for all of these problems we develop adversarially robust 1 e approximation algorithms whose required space matches that of the best known non robust algorithms up to a poly log n 1 e multiplicative factor and in some cases even up to a constant factor towards this end we develop several generic tools allowing one to efficiently transform a non robust streaming algorithm into a robust one in various scenarios", + "title_raw": "A Framework for Adversarially Robust Streaming Algorithms", + "abstract_raw": "We investigate the adversarial robustness of streaming algorithms. In this context, an algorithm is considered robust if its performance guarantees hold even if the stream is chosen adaptively by an adversary that observes the outputs of the algorithm along the stream and can react in an online manner. While deterministic streaming algorithms are inherently robust, many central problems in the streaming literature do not admit sublinear-space deterministic algorithms; on the other hand, classical space-efficient randomized algorithms for these problems are generally not adversarially robust. This raises the natural question of whether there exist efficient adversarially robust (randomized) streaming algorithms for these problems. 
In this work, we show that the answer is positive for various important streaming problems in the insertion-only model, including distinct elements and more generally $F_p$-estimation, Fp-heavy hitters, entropy estimation, and others. For all of these problems, we develop adversarially robust (1+e)-approximation algorithms whose required space matches that of the best known non-robust algorithms up to a poly(log n, 1/e) multiplicative factor (and in some cases even up to a constant factor). Towards this end, we develop several generic tools allowing one to efficiently transform a non-robust streaming algorithm into a robust one in various scenarios.", + "link": "https://scholar.google.com/scholar?as_q=&num=10&btnG=Search+Scholar&as_epq=A+Framework+for+Adversarially+Robust+Streaming+Algorithms&as_oq=&as_eq=&as_occt=any&as_sauthors=Ben-Eliezer", + "scraped_abstract": null, + "citation_best": 4 + }, + { + "paper": "3046609696", + "venue": "1163618098", + "year": "2020", + "title": "trrespass exploiting the many sides of target row refresh", + "label": [ + "7366592", + "38652104" + ], + "author": [ + "3003579281", + "3047200515", + "2288330127", + "2109265691", + "2160510735", + "2064923061", + "2269201832", + "2044536833" + ], + "reference": [ + "1456826529", + "1551993571", + "1873382978", + "1964316448", + "1970426108", + "1992487929", + "2119092821", + "2129991978", + "2157116240", + "2158319074", + "2158620667", + "2163563130", + "2169370030", + "2170257519", + "2233523872", + "2250217037", + "2274271966", + "2276529763", + "2280288224", + "2293640271", + "2311847288", + "2329308213", + "2331766458", + "2332254524", + "2399117482", + "2418958843", + "2420049379", + "2489077495", + "2491829854", + "2505343551", + "2516668814", + "2525431465", + "2529008862", + "2537014044", + "2593313312", + "2612835180", + "2621772158", + "2624299682", + "2625267941", + "2648282931", + "2734814518", + "2751092112", + "2766489088", + "2769824527", + "2795139041", + "2795222486", + 
"2801268510", + "2806638034", + "2814895833", + "2847114817", + "2881206495", + "2886541648", + "2889929196", + "2895886949", + "2897830755", + "2899469948", + "2920016180", + "2932858260", + "2939057911", + "2945839280", + "2949951157", + "2953044030", + "2954241526", + "2962726564", + "2964118667", + "2964923388", + "2974891422", + "3015685940", + "3016266693", + "3105886106", + "3141810756" + ], + "abstract": "after a plethora of high profile rowhammer attacks cpu and dram vendors scrambled to deliver what was meant to be the definitive hardware solution against the rowhammer problem target row refresh trr a common belief among practitioners is that for the latest generation of ddr4 systems that are protected by trr rowhammer is no longer an issue in practice however in reality very little is known about trr how does trr exactly prevent rowhammer which parts of a system are responsible for operating the trr mechanism does trr completely solve the rowhammer problem or does it have weaknesses in this paper we demystify the inner workings of trr and debunk its security guarantees we show that what is advertised as a single mitigation mechanism is actually a series of different solutions coalesced under the umbrella term target row refresh we inspect and disclose via a deep analysis different existing trr solutions and demonstrate that modern implementations operate entirely inside dram chips despite the difficulties of analyzing in dram mitigations we describe novel techniques for gaining insights into the operation of these mitigation mechanisms these insights allow us to build trrespass a scalable black box rowhammer fuzzer that we evaluate on 42 recent ddr4 modules trrespass shows that even the latest generation ddr4 chips with in dram trr immune to all known rowhammer attacks are often still vulnerable to new trr aware variants of rowhammer that we develop in particular trrespass finds that on present day ddr4 modules rowhammer is still possible when many 
aggressor rows are used as many as 19 in some cases with a method we generally refer to as many sided rowhammer overall our analysis shows that 13 out of the 42 modules from all three major dram vendors i e samsung micron and hynix are vulnerable to our trr aware rowhammer access patterns and thus one can still mount existing state of the art system level rowhammer attacks in addition to ddr4 we also experiment with lpddr4 x 1 chips and show that they are susceptible to rowhammer bit flips too our results provide concrete evidence that the pursuit of better rowhammer mitigations must continue", + "title_raw": "TRRespass: Exploiting the Many Sides of Target Row Refresh", + "abstract_raw": "After a plethora of high-profile RowHammer attacks, CPU and DRAM vendors scrambled to deliver what was meant to be the definitive hardware solution against the RowHammer problem: Target Row Refresh (TRR). A common belief among practitioners is that, for the latest generation of DDR4 systems that are protected by TRR, RowHammer is no longer an issue in practice. However, in reality, very little is known about TRR. How does TRR exactly prevent RowHammer? Which parts of a system are responsible for operating the TRR mechanism? Does TRR completely solve the RowHammer problem or does it have weaknesses? In this paper, we demystify the inner workings of TRR and debunk its security guarantees. We show that what is advertised as a single mitigation mechanism is actually a series of different solutions coalesced under the umbrella term Target Row Refresh. We inspect and disclose, via a deep analysis, different existing TRR solutions and demonstrate that modern implementations operate entirely inside DRAM chips. Despite the difficulties of analyzing in-DRAM mitigations, we describe novel techniques for gaining insights into the operation of these mitigation mechanisms. 
These insights allow us to build TRRespass, a scalable black-box RowHammer fuzzer that we evaluate on 42 recent DDR4 modules. TRRespass shows that even the latest generation DDR4 chips with in-DRAM TRR, immune to all known RowHammer attacks, are often still vulnerable to new TRR-aware variants of RowHammer that we develop. In particular, TRRespass finds that, on present-day DDR4 modules, RowHammer is still possible when many aggressor rows are used (as many as 19 in some cases), with a method we generally refer to as Many-sided RowHammer. Overall, our analysis shows that 13 out of the 42 modules from all three major DRAM vendors (i.e., Samsung, Micron, and Hynix) are vulnerable to our TRR-aware RowHammer access patterns, and thus one can still mount existing state-of-the-art system-level RowHammer attacks. In addition to DDR4, we also experiment with LPDDR4(X)1 chips and show that they are susceptible to RowHammer bit flips too. Our results provide concrete evidence that the pursuit of better RowHammer mitigations must continue.", + "link": "https://www.semanticscholar.org/paper/6316e1473e478c5ff5099b3b3b77b529d36f9f77", + "scraped_abstract": "After a plethora of high-profile RowHammer attacks, CPU and DRAM vendors scrambled to deliver what was meant to be the definitive hardware solution against the RowHammer problem: Target Row Refresh (TRR). A common belief among practitioners is that, for the latest generation of DDR4 systems that are protected by TRR, RowHammer is no longer an issue in practice. However, in reality, very little is known about TRR. How does TRR exactly prevent RowHammer? Which parts of a system are responsible for operating the TRR mechanism? Does TRR completely solve the RowHammer problem or does it have weaknesses? In this paper, we demystify the inner workings of TRR and debunk its security guarantees. 
We show that what is advertised as a single mitigation mechanism is actually a series of different solutions coalesced under the umbrella term Target Row Refresh. We inspect and disclose, via a deep analysis, different existing TRR solutions and demonstrate that modern implementations operate entirely inside DRAM chips. Despite the difficulties of analyzing in-DRAM mitigations, we describe novel techniques for gaining insights into the operation of these mitigation mechanisms. These insights allow us to build TRRespass, a scalable black-box RowHammer fuzzer that we evaluate on 42 recent DDR4 modules. TRRespass shows that even the latest generation DDR4 chips with in-DRAM TRR, immune to all known RowHammer attacks, are often still vulnerable to new TRR-aware variants of RowHammer that we develop. In particular, TRRespass finds that, on present-day DDR4 modules, RowHammer is still possible when many aggressor rows are used (as many as 19 in some cases), with a method we generally refer to as Many-sided RowHammer. Overall, our analysis shows that 13 out of the 42 modules from all three major DRAM vendors (i.e., Samsung, Micron, and Hynix) are vulnerable to our TRR-aware RowHammer access patterns, and thus one can still mount existing state-of-the-art system-level RowHammer attacks. In addition to DDR4, we also experiment with LPDDR4(X)1 chips and show that they are susceptible to RowHammer bit flips too. 
Our results provide concrete evidence that the pursuit of better RowHammer mitigations must continue.", + "citation_best": 1 + }, + { + "paper": "3046728914", + "venue": "1152462849", + "year": "2020", + "title": "routing on multiple optimality criteria", + "label": [ + "199845137", + "29436982", + "87044965", + "46135064", + "104954878", + "74172769" + ], + "author": [ + "2159088785", + "2624343452" + ], + "reference": [ + "119169328", + "1503302851", + "1573513254", + "1760148955", + "1994909604", + "2011824721", + "2018377390", + "2018930611", + "2021234005", + "2051956023", + "2068113337", + "2095964941", + "2096774592", + "2097346423", + "2099236627", + "2104602019", + "2105997281", + "2108827250", + "2124430419", + "2137081436", + "2137736907", + "2140037362", + "2144537141", + "2145485255", + "2147575919", + "2148662748", + "2148769607", + "2158832488", + "2180824136", + "2289495142", + "2298498790", + "2403202602", + "2428827905", + "2488197787", + "2901062671", + "2911229907", + "2943101560", + "2974116768" + ], + "abstract": "standard vectoring protocols such as eigrp bgp dsdv or babel only route on optimal paths when the total order on path attributes that substantiates optimality is consistent with the extension operation that calculates path attributes from link attributes leaving out many optimality criteria of practical interest we present a solution to this problem and more generally to the problem of routing on multiple optimality criteria a key idea is the derivation of a partial order on path attributes that is consistent with the extension operation and respects every optimality criterion of a designated collection of such criteria we design new vectoring protocols that compute on partial orders with every node capable of electing multiple attributes per destination rather than a single attribute as in standard vectoring protocols our evaluation over publicly available network topologies and attributes shows that the proposed protocols converge 
fast and enable optimal path routing concurrently for many optimality criteria with only a few elected attributes at each node per destination we further show how predicating computations on partial orders allows incorporation of service chain constraints on optimal path routing", + "title_raw": "Routing on Multiple Optimality Criteria", + "abstract_raw": "Standard vectoring protocols, such as EIGRP, BGP, DSDV, or Babel, only route on optimal paths when the total order on path attributes that substantiates optimality is consistent with the extension operation that calculates path attributes from link attributes, leaving out many optimality criteria of practical interest. We present a solution to this problem and, more generally, to the problem of routing on multiple optimality criteria. A key idea is the derivation of a partial order on path attributes that is consistent with the extension operation and respects every optimality criterion of a designated collection of such criteria. We design new vectoring protocols that compute on partial orders, with every node capable of electing multiple attributes per destination rather than a single attribute as in standard vectoring protocols. Our evaluation over publicly available network topologies and attributes shows that the proposed protocols converge fast and enable optimal path routing concurrently for many optimality criteria with only a few elected attributes at each node per destination. 
We further show how predicating computations on partial orders allows incorporation of service chain constraints on optimal path routing.", + "link": "https://www.semanticscholar.org/paper/8ce4650878576839d61d0be35a364e019c18318b", + "scraped_abstract": null, + "citation_best": 24 + }, + { + "paper": "3105035347", + "venue": "1140684652", + "year": "2020", + "title": "controlling fairness and bias in dynamic learning to rank", + "label": [ + "100853971", + "25621077", + "86037889", + "197927960" + ], + "author": [ + "2612662677", + "2947100642", + "3029505525", + "245171893" + ], + "reference": [ + "1974360117", + "1984770088", + "1992549066", + "2023599408", + "2059001985", + "2279176662", + "2507134384", + "2604370872", + "2748058847", + "2769473018", + "2797400361", + "2905569957", + "2951607891", + "2955421345", + "2965749257", + "2971071571", + "2991634944", + "3099096815", + "3102518922", + "3104475013", + "3123374861" + ], + "abstract": "rankings are the primary interface through which many online platforms match users to items e g news products music video in these two sided markets not only the users draw utility from the rankings but the rankings also determine the utility e g exposure revenue for the item providers e g publishers sellers artists studios it has already been noted that myopically optimizing utility to the users as done by virtually all learning to rank algorithms can be unfair to the item providers we therefore present a learning to rank approach for explicitly enforcing merit based fairness guarantees to groups of items e g articles by the same publisher tracks by the same artist in particular we propose a learning algorithm that ensures notions of amortized group fairness while simultaneously learning the ranking function from implicit feedback data the algorithm takes the form of a controller that integrates unbiased estimators for both fairness and utility dynamically adapting both as more data becomes available in addition to its 
rigorous theoretical foundation and convergence guarantees we find empirically that the algorithm is highly practical and robust", + "title_raw": "Controlling Fairness and Bias in Dynamic Learning-to-Rank", + "abstract_raw": "Rankings are the primary interface through which many online platforms match users to items (e.g. news, products, music, video). In these two-sided markets, not only the users draw utility from the rankings, but the rankings also determine the utility (e.g. exposure, revenue) for the item providers (e.g. publishers, sellers, artists, studios). It has already been noted that myopically optimizing utility to the users -- as done by virtually all learning-to-rank algorithms -- can be unfair to the item providers. We, therefore, present a learning-to-rank approach for explicitly enforcing merit-based fairness guarantees to groups of items (e.g. articles by the same publisher, tracks by the same artist). In particular, we propose a learning algorithm that ensures notions of amortized group fairness, while simultaneously learning the ranking function from implicit feedback data. The algorithm takes the form of a controller that integrates unbiased estimators for both fairness and utility, dynamically adapting both as more data becomes available. 
In addition to its rigorous theoretical foundation and convergence guarantees, we find empirically that the algorithm is highly practical and robust.", + "link": "https://scholar.google.com/scholar?as_q=&num=10&btnG=Search+Scholar&as_epq=Controlling+Fairness+and+Bias+in+Dynamic+Learning-to-rank&as_oq=&as_eq=&as_occt=any&as_sauthors=Morik", + "scraped_abstract": null, + "citation_best": 60 + }, + { + "paper": "2995994861", + "venue": "1131589359", + "year": "2020", + "title": "rateless codes for near perfect load balancing in distributed matrix vector multiplication", + "label": [ + "45374587", + "139330139", + "173608175", + "2780513914", + "137529215" + ], + "author": [ + "2972350748", + "2804675016", + "2902667482", + "2995963024", + "2100886215" + ], + "reference": [ + "636382642", + "1497601926", + "1499305477", + "1672859716", + "1903497807", + "1982063824", + "1983964956", + "1994574815", + "1998066559", + "2064224609", + "2082292996", + "2083613288", + "2101124960", + "2118858186", + "2140181879", + "2153903112", + "2173213060", + "2189465200", + "2296125569", + "2305657994", + "2504767107", + "2523435939", + "2556205507", + "2560441213", + "2567951755", + "2582719439", + "2714205045", + "2742582805", + "2751625010", + "2762176262", + "2766474259", + "2781270075", + "2782956234", + "2787606899", + "2791869133", + "2806909486", + "2896903681", + "2899664803", + "2914980053", + "2951660378", + "2962850796", + "2962941774", + "2963446630", + "2963531643", + "2963706835", + "2963953036", + "2964095159", + "2964254394", + "2981449041", + "3100515187" + ], + "abstract": "large scale machine learning and data mining applications require computer systems to perform massive matrix vector and matrix matrix multiplication operations that need to be parallelized across multiple nodes the presence of straggling nodes computing nodes that unpredictably slowdown or fail is a major bottleneck in such distributed computations ideal load balancing strategies that dynamically 
allocate more tasks to faster nodes require knowledge or monitoring of node speeds as well as the ability to quickly move data recently proposed fixed rate erasure coding strategies can handle unpredictable node slowdown but they ignore partial work done by straggling nodes thus resulting in a lot of redundant computation we propose a rateless fountain coding strategy that achieves the best of both worlds we prove that its latency is asymptotically equal to ideal load balancing and it performs asymptotically zero redundant computations our idea is to create linear combinations of the m rows of the matrix and assign these encoded rows to different worker nodes the original matrix vector product can be decoded as soon as slightly more than m row vector products are collectively finished by the nodes we conduct experiments in three computing environments local parallel computing amazon ec2 and amazon lambda which show that rateless coding gives as much as 3x speed up over uncoded schemes", + "title_raw": "Rateless Codes for Near-Perfect Load Balancing in Distributed Matrix-Vector Multiplication", + "abstract_raw": "Large-scale machine learning and data mining applications require computer systems to perform massive matrix-vector and matrix-matrix multiplication operations that need to be parallelized across multiple nodes. The presence of straggling nodes -- computing nodes that unpredictably slowdown or fail -- is a major bottleneck in such distributed computations. Ideal load balancing strategies that dynamically allocate more tasks to faster nodes require knowledge or monitoring of node speeds as well as the ability to quickly move data. Recently proposed fixed-rate erasure coding strategies can handle unpredictable node slowdown, but they ignore partial work done by straggling nodes thus resulting in a lot of redundant computation. 
We propose a rateless fountain coding strategy that achieves the best of both worlds -- we prove that its latency is asymptotically equal to ideal load balancing, and it performs asymptotically zero redundant computations. Our idea is to create linear combinations of the m rows of the matrix and assign these encoded rows to different worker nodes. The original matrix-vector product can be decoded as soon as slightly more than m row-vector products are collectively finished by the nodes. We conduct experiments in three computing environments: local parallel computing, Amazon EC2, and Amazon Lambda, which show that rateless coding gives as much as 3x speed-up over uncoded schemes.", + "link": "https://www.semanticscholar.org/paper/5b35404465568b647f2c5ee132d7a8177229d242", + "scraped_abstract": null, + "citation_best": 15 + }, + { + "paper": "3030167386", + "venue": "1175089206", + "year": "2020", + "title": "shapesearch a flexible and efficient system for shape based exploration of trendlines", + "label": [ + "170130773", + "59732488", + "48044578", + "177264268", + "186644900", + "80444323", + "195324797" + ], + "author": [ + "2508159517", + "2888844955", + "2889153666", + "2307725321", + "2077695977" + ], + "reference": [ + "2009899978", + "2012372680", + "2029438113", + "2072054026", + "2097267403", + "2106595237", + "2123685385", + "2158264503", + "2269738476", + "2274505579", + "2534380090", + "2590252985", + "2598209472", + "2610760451", + "2612993235", + "2782974899", + "2888851078", + "2948115999", + "2963707382", + "3104072681", + "3147178137" + ], + "abstract": "identifying trendline visualizations with desired patterns is a common task during data exploration existing visual analytics tools offer limited flexibility expressiveness and scalability for such tasks especially when the pattern of interest is under specified and approximate we propose shapesearch an efficient and flexible pattern searching tool that enables the search for desired patterns via 
multiple mechanisms sketch natural language and visual regular expressions we develop a novel shape querying algebra with a minimal set of primitives and operators that can express a wide variety of shape search queries and design a natural language and regex based parser to translate user queries to the algebraic representation to execute these queries within interactive response times shapesearch uses a fast shape algebra execution engine with query aware optimizations and perceptually aware scoring methodologies we present a thorough evaluation of the system including a user study a case study involving genomics data analysis as well as performance experiments comparing against state of the art trendline shape matching approaches that together demonstrate the usability and scalability of shapesearch", + "title_raw": "ShapeSearch: A Flexible and Efficient System for Shape-based Exploration of Trendlines", + "abstract_raw": "Identifying trendline visualizations with desired patterns is a common task during data exploration. Existing visual analytics tools offer limited flexibility, expressiveness, and scalability for such tasks, especially when the pattern of interest is under-specified and approximate. We propose ShapeSearch, an efficient and flexible pattern-searching tool, that enables the search for desired patterns via multiple mechanisms: sketch, natural-language, and visual regular expressions. We develop a novel shape querying algebra, with a minimal set of primitives and operators that can express a wide variety of shape search queries, and design a natural- language and regex-based parser to translate user queries to the algebraic representation. To execute these queries within interactive response times, ShapeSearch uses a fast shape algebra execution engine with query-aware optimizations, and perceptually-aware scoring methodologies. 
We present a thorough evaluation of the system, including a user study, a case study involving genomics data analysis, as well as performance experiments, comparing against state-of-the-art trendline shape matching approaches-that together demonstrate the usability and scalability of ShapeSearch.", + "link": "https://scholar.google.com/scholar?as_q=&num=10&btnG=Search+Scholar&as_epq=ShapeSearch:+A+Flexible+and+Efficient+System+for+Shape-based+Exploration+of+Trendlines&as_oq=&as_eq=&as_occt=any&as_sauthors=Siddiqui", + "scraped_abstract": "Identifying trendline visualizations with desired patterns is a common task during data exploration. Existing visual analytics tools offer limited flexibility, expressiveness, and scalability for such tasks, especially when the pattern of interest is under-specified and approximate. We propose ShapeSearch, an efficient and flexible pattern-searching tool, that enables the search for desired patterns via multiple mechanisms: sketch, natural-language, and visual regular expressions. We develop a novel shape querying algebra, with a minimal set of primitives and operators that can express a wide variety of shape search queries, and design a natural- language and regex-based parser to translate user queries to the algebraic representation. To execute these queries within interactive response times, ShapeSearch uses a fast shape algebra execution engine with query-aware optimizations, and perceptually-aware scoring methodologies. 
We present a thorough evaluation of the system, including a user study, a case study involving genomics data analysis, as well as performance experiments, comparing against state-of-the-art trendline shape matching approaches-that together demonstrate the usability and scalability of ShapeSearch.", + "citation_best": 1 + }, + { + "paper": "3028661980", + "venue": "1175089206", + "year": "2020", + "title": "pump up the volume processing large data on gpus with fast interconnects", + "label": [ + "48044578", + "173608175", + "2780513914", + "2776257435", + "116259339", + "188805328", + "188045654", + "98045186" + ], + "author": [ + "2805615275", + "2302676805", + "2225168296", + "2005408220", + "1963964406" + ], + "reference": [ + "1535897043", + "1840931607", + "1970799414", + "2004772832", + "2014180767", + "2018995228", + "2050277572", + "2054468361", + "2066860750", + "2068418796", + "2072541977", + "2092283255", + "2097880677", + "2099035968", + "2100415730", + "2110975861", + "2125529470", + "2134569538", + "2136083615", + "2151224499", + "2159287299", + "2159824335", + "2188889839", + "2255163656", + "2268177516", + "2276395270", + "2282396894", + "2330672121", + "2401249903", + "2424351436", + "2426048753", + "2440477515", + "2464859674", + "2493061014", + "2534888058", + "2535724050", + "2548941637", + "2579082966", + "2604606554", + "2610614018", + "2611538725", + "2752012896", + "2752640170", + "2777078856", + "2796649226", + "2798422034", + "2811111775", + "2830736653", + "2867345499", + "2891639990", + "2903901007", + "2907106711", + "2907714110", + "2908321983", + "2910096450", + "2912050199", + "2912101967", + "2913983209", + "2914698571", + "2916281080", + "2935389012", + "2950461515", + "2951543943", + "2952269378", + "2954651520", + "2963040295", + "2970139027", + "2970229914", + "2982507465", + "3013171512", + "3101708369", + "3121313599", + "3148573243" + ], + "abstract": "gpus have long been discussed as accelerators for database query processing 
because of their high processing power and memory bandwidth however two main challenges limit the utility of gpus for large scale data processing 1 the on board memory capacity is too small to store large data sets yet 2 the interconnect bandwidth to cpu main memory is insufficient for ad hoc data transfers as a result gpu based systems and algorithms run into a transfer bottleneck and do not scale to large data sets in practice cpus process large scale data faster than gpus with current technology in this paper we investigate how a fast interconnect can resolve these scalability limitations using the example of nvlink 2 0 nvlink 2 0 is a new interconnect technology that links dedicated gpus to a cpu the high bandwidth of nvlink 2 0 enables us to overcome the transfer bottleneck and to efficiently process large data sets stored in main memory on gpus we perform an in depth analysis of nvlink 2 0 and show how we can scale a no partitioning hash join beyond the limits of gpu memory our evaluation shows speed ups of up to 18x over pci e 3 0 and up to 7 3x over an optimized cpu implementation fast gpu interconnects thus enable gpus to efficiently accelerate query processing", + "title_raw": "Pump Up the Volume: Processing Large Data on GPUs with Fast Interconnects", + "abstract_raw": "GPUs have long been discussed as accelerators for database query processing because of their high processing power and memory bandwidth. However, two main challenges limit the utility of GPUs for large-scale data processing: (1) the on-board memory capacity is too small to store large data sets, yet (2) the interconnect bandwidth to CPU main-memory is insufficient for ad hoc data transfers. As a result, GPU-based systems and algorithms run into a transfer bottleneck and do not scale to large data sets. In practice, CPUs process large-scale data faster than GPUs with current technology. 
In this paper, we investigate how a fast interconnect can resolve these scalability limitations using the example of NVLink 2.0. NVLink 2.0 is a new interconnect technology that links dedicated GPUs to a CPU@. The high bandwidth of NVLink 2.0 enables us to overcome the transfer bottleneck and to efficiently process large data sets stored in main-memory on GPUs. We perform an in-depth analysis of NVLink 2.0 and show how we can scale a no-partitioning hash join beyond the limits of GPU memory. Our evaluation shows speed-ups of up to 18x over PCI-e 3.0 and up to 7.3x over an optimized CPU implementation. Fast GPU interconnects thus enable GPUs to efficiently accelerate query processing.", + "link": "https://scholar.google.com/scholar?as_q=&num=10&btnG=Search+Scholar&as_epq=Pump+Up+the+Volume:+Processing+Large+Data+on+GPUs+with+Fast+Interconnects&as_oq=&as_eq=&as_occt=any&as_sauthors=Lutz", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "3093717762", + "venue": "1166315290", + "year": "2020", + "title": "handmorph a passive exoskeleton that miniaturizes grasp", + "label": [ + "104114177", + "146549078", + "177212765", + "107457646", + "194969405", + "152086174", + "171268870" + ], + "author": [ + "2229190849", + "2131635999", + "2954555213", + "2766349606", + "3094520591", + "2535716982", + "2128317731" + ], + "reference": [ + "1537629355", + "1590311443", + "1950864686", + "1964359788", + "1965428772", + "1965771313", + "1972977748", + "1973787747", + "1976353992", + "1978014948", + "1996011294", + "1996077504", + "2035083436", + "2035753646", + "2035992545", + "2049598990", + "2050896993", + "2054097320", + "2057687952", + "2061167110", + "2061360146", + "2070892675", + "2076420199", + "2077845711", + "2082998908", + "2097823092", + "2102230854", + "2103633624", + "2108879165", + "2111371475", + "2119233587", + "2129553141", + "2130299716", + "2135648815", + "2149891956", + "2152855580", + "2160144355", + "2164533521", + "2164870294", + 
"2169222362", + "2235313182", + "2266199239", + "2293399791", + "2306647462", + "2316221310", + "2346219675", + "2397519240", + "2414419152", + "2436253671", + "2479852468", + "2509557939", + "2527977666", + "2594831906", + "2611120433", + "2737440582", + "2745328921", + "2766099953", + "2791303701", + "2890254610", + "2896609939", + "2897414246", + "2907663980", + "2911671614", + "2941036552", + "2962826673", + "2967535517", + "2981649310", + "3031927435", + "3206489296" + ], + "abstract": "", + "title_raw": "HandMorph: a Passive Exoskeleton that Miniaturizes Grasp.", + "abstract_raw": "", + "link": "https://scholar.google.com/scholar?as_q=&num=10&btnG=Search+Scholar&as_epq=HandMorph:+A+Passive+Exoskeleton+that+Miniaturizes+Grasp&as_oq=&as_eq=&as_occt=any&as_sauthors=Nishida", + "scraped_abstract": null, + "citation_best": 29 + }, + { + "paper": "3008139924", + "venue": "1133523790", + "year": "2020", + "title": "opportunities for optimism in contended main memory multicore transactions", + "label": [ + "108744092", + "84511453", + "2778476105", + "79240183", + "113954288", + "78766204", + "120314980" + ], + "author": [ + "2439023249", + "3008898811", + "2114981089", + "2084056563", + "1967149413" + ], + "reference": [ + "1480006450", + "1532546444", + "1542975293", + "1550275036", + "1563487434", + "1828386109", + "1920727200", + "1977329921", + "1985229168", + "2000345607", + "2001738739", + "2006296837", + "2014977566", + "2039795745", + "2050195061", + "2084130915", + "2088398920", + "2091416827", + "2102333161", + "2104670257", + "2106871513", + "2129458440", + "2133386065", + "2133752438", + "2140876280", + "2141710443", + "2142349608", + "2145343598", + "2150847784", + "2152719738", + "2155070484", + "2157092502", + "2165663045", + "2240667924", + "2341778529", + "2394555589", + "2423891596", + "2425029179", + "2429213287", + "2435606122", + "2543376883", + "2571118757", + "2604961016", + "2613970404", + "2902648414", + "2933851371", + "2943698992" + ], + 
"abstract": "optimistic concurrency control or occ can achieve excellent performance on uncontended workloads for main memory transactional databases contention causes occ s performance to degrade however", + "title_raw": "Opportunities for optimism in contended main-memory multicore transactions", + "abstract_raw": "Optimistic concurrency control, or OCC, can achieve excellent performance on uncontended workloads for main-memory transactional databases. Contention causes OCC's performance to degrade, however, ...", + "link": "https://www.semanticscholar.org/paper/4be158f7b6cd74c9691306c743b2cb60373afdf6", + "scraped_abstract": null, + "citation_best": 33 + }, + { + "paper": "3023027202", + "venue": "1135342153", + "year": "2020", + "title": "open intent extraction from natural language interactions", + "label": [ + "2776230583", + "173853756", + "2779439875", + "148524875", + "16963264", + "204321447", + "195324797", + "2775852435" + ], + "author": [ + "2728007267", + "731396322", + "2562753520", + "2694791869" + ], + "reference": [ + "1972595521", + "1987971958", + "2036282699", + "2047237057", + "2064675550", + "2077302143", + "2094472029", + "2095705004", + "2097645701", + "2123442489", + "2124033848", + "2141732516", + "2142384583", + "2143612262", + "2147880316", + "2150295085", + "2156387975", + "2162833336", + "2163844356", + "2170240176", + "2250539671", + "2251687660", + "2251913848", + "2252180690", + "2253491900", + "2473007590", + "2473329891", + "2522720655", + "2575101493", + "2740765036", + "2741306347", + "2742039423", + "2742129161", + "2752813202", + "2774470364", + "2798965707", + "2803392141", + "2803609229", + "2804945011", + "2805853672", + "2892081520", + "2901336235", + "2914702779", + "2933022734", + "2954108589", + "2962715022", + "2962788148", + "2962854379", + "2962910139", + "2963207607", + "2963403868", + "2963560594", + "2963826681", + "2963834453", + "2963888305", + "2963951015", + "2963974889", + "2964232431" + ], + "abstract": 
"accurately discovering user intents from their written or spoken language plays a critical role in natural language understanding and automated dialog response most existing research models this as a classification task with a single intent label per utterance grouping user utterances into a single intent type from a set of categories known beforehand going beyond this formulation we define and investigate a new problem of open intent discovery it involves discovering one or more generic intent types from text utterances that may not have been encountered during training we propose a novel domain agnostic approach opine which formulates the problem as a sequence tagging task under an open world setting it employs a crf on top of a bidirectional lstm to extract intents in a consistent format subject to constraints among intent tag labels we apply a multi head self attention mechanism to effectively learn dependencies between distant words we further use adversarial training to improve performance and robustly adapt our model across varying domains finally we curate and plan to release an open intent annotated dataset of 25k real life utterances spanning diverse domains extensive experiments show that our approach outperforms state of the art baselines by 5 15 f1 score points we also demonstrate the efficacy of opine in recognizing multiple diverse domain intents with limited can also be zero training examples per unique domain", + "title_raw": "Open Intent Extraction from Natural Language Interactions", + "abstract_raw": "Accurately discovering user intents from their written or spoken language plays a critical role in natural language understanding and automated dialog response. Most existing research models this as a classification task with a single intent label per utterance, grouping user utterances into a single intent type from a set of categories known beforehand. 
Going beyond this formulation, we define and investigate a new problem of open intent discovery. It involves discovering one or more generic intent types from text utterances, that may not have been encountered during training. We propose a novel domain-agnostic approach, OPINE, which formulates the problem as a sequence tagging task under an open-world setting. It employs a CRF on top of a bidirectional LSTM to extract intents in a consistent format, subject to constraints among intent tag labels. We apply a multi-head self-attention mechanism to effectively learn dependencies between distant words. We further use adversarial training to improve performance and robustly adapt our model across varying domains. Finally, we curate and plan to release an open intent annotated dataset of 25K real-life utterances spanning diverse domains. Extensive experiments show that our approach outperforms state-of-the-art baselines by 5-15% F1 score points. We also demonstrate the efficacy of OPINE in recognizing multiple, diverse domain intents with limited (can also be zero) training examples per unique domain.", + "link": "https://www.semanticscholar.org/paper/5b48682554db4d5a55cce475a2e9c1f3cac81e94", + "scraped_abstract": null, + "citation_best": 30 + }, + { + "paper": "2963763772", + "venue": "1184914352", + "year": "2019", + "title": "how to combine tree search methods in reinforcement learning", + "label": [ + "46149586", + "101056560", + "97541855", + "121163568" + ], + "author": [ + "2804882695", + "2226018891", + "1735999048", + "2252608274" + ], + "reference": [], + "abstract": "finite horizon lookahead policies are abundantly used in reinforcement learning and demonstrate impressive empirical success usually the lookahead policies are implemented with specific planning methods such as monte carlo tree search e g in alphazero silver et al 2017b referring to the planning problem as tree search a reasonable practice in these implementations is to back up the value only 
at the leaves while the information obtained at the root is not leveraged other than for updating the policy here we question the potency of this approach namely the latter procedure is non contractive in general and its convergence is not guaranteed our proposed enhancement is straightforward and simple use the return from the optimal tree path to back up the values at the descendants of the root this leads to a h contracting procedure where is the discount factor and h is the tree depth to establish our results we first introduce a notion called multiple step greedy consistency we then provide convergence rates for two algorithmic instantiations of the above enhancement in the presence of noise injected to both the tree search stage and value estimation stage", + "title_raw": "How to Combine Tree-Search Methods in Reinforcement Learning", + "abstract_raw": "Finite-horizon lookahead policies are abundantly used in Reinforcement Learning and demonstrate impressive empirical success. Usually, the lookahead policies are implemented with specific planning methods such as Monte Carlo Tree Search (e.g. in AlphaZero (Silver et al. 2017b)). Referring to the planning problem as tree search, a reasonable practice in these implementations is to back up the value only at the leaves while the information obtained at the root is not leveraged other than for updating the policy. Here, we question the potency of this approach. Namely, the latter procedure is non-contractive in general, and its convergence is not guaranteed. Our proposed enhancement is straightforward and simple: use the return from the optimal tree path to back up the values at the descendants of the root. This leads to a \u03b3h-contracting procedure, where \u03b3 is the discount factor and h is the tree depth. To establish our results, we first introduce a notion called multiple-step greedy consistency. 
We then provide convergence rates for two algorithmic instantiations of the above enhancement in the presence of noise injected to both the tree search stage and value estimation stage.", + "link": "https://www.semanticscholar.org/paper/How-to-Combine-Tree-Search-Methods-in-Reinforcement-Efroni-Dalal/1cba4288144124680c23f2a3d923a1a98f9bebf1", + "scraped_abstract": "Finite-horizon lookahead policies are abundantly used in Reinforcement Learning and demonstrate impressive empirical success. Usually, the lookahead policies are implemented with specific planning methods such as Monte Carlo Tree Search (e.g. in AlphaZero (Silver et al. 2017b)). Referring to the planning problem as tree search, a reasonable practice in these implementations is to back up the value only at the leaves while the information obtained at the root is not leveraged other than for updating the policy. Here, we question the potency of this approach. Namely, the latter procedure is non-contractive in general, and its convergence is not guaranteed. Our proposed enhancement is straightforward and simple: use the return from the optimal tree path to back up the values at the descendants of the root. This leads to a \u03b3h-contracting procedure, where \u03b3 is the discount factor and h is the tree depth. To establish our results, we first introduce a notion called multiple-step greedy consistency. 
We then provide convergence rates for two algorithmic instantiations of the above enhancement in the presence of noise injected to both the tree search stage and value estimation stage.", + "citation_best": 13 + }, + { + "paper": "2964345285", + "venue": "1188739475", + "year": "2019", + "title": "bridging the gap between training and inference for neural machine translation", + "label": [ + "2776689786", + "204321447", + "2776214188", + "203005215" + ], + "author": [ + "2751747505", + "2780880600", + "2167648583", + "2949026699", + "2573745738" + ], + "reference": [ + "6908809", + "648786980", + "2101105183", + "2130942839", + "2154652894", + "2162245945", + "2268617045", + "2296701362", + "2741986820", + "2890220768", + "2904829696", + "2962784628", + "2963260202", + "2963403868", + "2963463964", + "2964265128", + "2964308564" + ], + "abstract": "neural machine translation nmt generates target words sequentially in the way of predicting the next word conditioned on the context words at training time it predicts with the ground truth words as context while at inference it has to generate the entire sequence from scratch this discrepancy of the fed context leads to error accumulation among the way furthermore word level training requires strict matching between the generated sequence and the ground truth sequence which leads to overcorrection over different but reasonable translations in this paper we address these issues by sampling context words not only from the ground truth sequence but also from the predicted sequence by the model during training where the predicted sequence is selected with a sentence level optimum experiment results on chinese english and wmt 14 english german translation tasks demonstrate that our approach can achieve significant improvements on multiple datasets", + "title_raw": "Bridging the Gap between Training and Inference for Neural Machine Translation.", + "abstract_raw": "Neural Machine Translation (NMT) generates target words 
sequentially in the way of predicting the next word conditioned on the context words. At training time, it predicts with the ground truth words as context while at inference it has to generate the entire sequence from scratch. This discrepancy of the fed context leads to error accumulation among the way. Furthermore, word-level training requires strict matching between the generated sequence and the ground truth sequence which leads to overcorrection over different but reasonable translations. In this paper, we address these issues by sampling context words not only from the ground truth sequence but also from the predicted sequence by the model during training, where the predicted sequence is selected with a sentence-level optimum. Experiment results on Chinese->English and WMT\u201914 English->German translation tasks demonstrate that our approach can achieve significant improvements on multiple datasets.", + "link": "https://scholar.google.com/scholar?as_q=&num=10&btnG=Search+Scholar&as_epq=Bridging+the+Gap+between+Training+and+Inference+for+Neural+Machine+Translation&as_oq=&as_eq=&as_occt=any&as_sauthors=Zhang", + "scraped_abstract": null, + "citation_best": 20 + }, + { + "paper": "2940636472", + "venue": "1163450153", + "year": "2019", + "title": "anchored audio sampling a seamless method for exploring children s thoughts during deployment studies", + "label": [ + "153083717", + "34127721", + "5366617", + "49774154", + "105339364" + ], + "author": [ + "2071366953", + "2157441593", + "2940998202", + "2941839069" + ], + "reference": [ + "5048401", + "98381297", + "104711285", + "614149592", + "1506299970", + "1508611703", + "1597043752", + "1608683114", + "1658908529", + "1871443204", + "1985006367", + "1987098519", + "1997523391", + "1998522059", + "2016939612", + "2017912703", + "2022317205", + "2026645894", + "2031973273", + "2033541192", + "2036625316", + "2042452443", + "2042692535", + "2050299884", + "2055694118", + "2075177804", + "2080422783", + 
"2092083194", + "2102570173", + "2102831500", + "2115903714", + "2117252912", + "2122164040", + "2126145938", + "2126437008", + "2130432866", + "2137354914", + "2142057670", + "2142556291", + "2152303583", + "2155002669", + "2155167373", + "2157436967", + "2165117198", + "2172064665", + "2185057914", + "2186343293", + "2255570509", + "2280090222", + "2499590469", + "2509156994", + "2516086211", + "2588321375", + "2611292243", + "2611822934", + "2735494119", + "2768990309", + "2786284130", + "2789513075", + "2794622338", + "2795622964", + "2796972396", + "2886023867", + "2890519628", + "2906151105", + "2913184846", + "2940965681", + "3022452870" + ], + "abstract": "many traditional hci methods such as surveys and interviews are of limited value when working with preschoolers in this paper we present anchored audio sampling aas a remote data collection technique for extracting qualitative audio samples during field deployments with young children aas offers a developmentally sensitive way of understanding how children make sense of technology and situates their use in the larger context of daily life aas is defined by an anchor event around which audio is collected a sliding window surrounding this anchor captures both antecedent and ensuing recording providing the researcher insight into the activities that led up to the event of interest as well as those that followed we present themes from three deployments that leverage this technique based on our experiences using aas we have also developed a reusable open source library for embedding aas into any android application", + "title_raw": "Anchored Audio Sampling: A Seamless Method for Exploring Children's Thoughts During Deployment Studies", + "abstract_raw": "Many traditional HCI methods, such as surveys and interviews, are of limited value when working with preschoolers. 
In this paper, we present anchored audio sampling (AAS), a remote data collection technique for extracting qualitative audio samples during field deployments with young children. AAS offers a developmentally sensitive way of understanding how children make sense of technology and situates their use in the larger context of daily life. AAS is defined by an anchor event, around which audio is collected. A sliding window surrounding this anchor captures both antecedent and ensuing recording, providing the researcher insight into the activities that led up to the event of interest as well as those that followed. We present themes from three deployments that leverage this technique. Based on our experiences using AAS, we have also developed a reusable open-source library for embedding AAS into any Android application.", + "link": "https://scholar.google.com/scholar?as_q=&num=10&btnG=Search+Scholar&as_epq=Anchored+Audio+Sampling:+A+Seamless+Method+for+Exploring+Children's+Thoughts+During+Deployment+Studies&as_oq=&as_eq=&as_occt=any&as_sauthors=Hiniker", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2935781565", + "venue": "1163450153", + "year": "2019", + "title": "unremarkable ai fitting intelligent decision support into critical clinical decision making processes", + "label": [ + "34127721", + "201025465", + "63527458", + "129364497", + "107327155", + "2522767166", + "132829578" + ], + "author": [ + "2475714580", + "932414507", + "2165294875" + ], + "reference": [ + "1548728152", + "1583168600", + "1971878643", + "1982396709", + "2000402020", + "2022401640", + "2042607745", + "2051483504", + "2061808595", + "2081722738", + "2081884196", + "2097177521", + "2097855313", + "2106299823", + "2107318542", + "2115441252", + "2126966176", + "2140319297", + "2193039967", + "2296370947", + "2338314300", + "2397156396", + "2795392050", + "2800460483" + ], + "abstract": "clinical decision support tools dst promise improved healthcare outcomes by offering 
data driven insights while effective in lab settings almost all dsts have failed in practice empirical research diagnosed poor contextual fit as the cause this paper describes the design and field evaluation of a radically new form of dst it automatically generates slides for clinicians decision meetings with subtly embedded machine prognostics this design took inspiration from the notion of unremarkable computing that by augmenting the users routines technology ai can have significant importance for the users yet remain unobtrusive our field evaluation suggests clinicians are more likely to encounter and embrace such a dst drawing on their responses we discuss the importance and intricacies of finding the right level of unremarkableness in dst design and share lessons learned in prototyping critical ai systems as a situated experience", + "title_raw": "Unremarkable AI: Fitting Intelligent Decision Support into Critical, Clinical Decision-Making Processes", + "abstract_raw": "Clinical decision support tools (DST) promise improved healthcare outcomes by offering data-driven insights. While effective in lab settings, almost all DSTs have failed in practice. Empirical research diagnosed poor contextual fit as the cause. This paper describes the design and field evaluation of a radically new form of DST. It automatically generates slides for clinicians' decision meetings with subtly embedded machine prognostics. This design took inspiration from the notion of Unremarkable Computing, that by augmenting the users' routines technology/AI can have significant importance for the users yet remain unobtrusive. Our field evaluation suggests clinicians are more likely to encounter and embrace such a DST. 
Drawing on their responses, we discuss the importance and intricacies of finding the right level of unremarkableness in DST design, and share lessons learned in prototyping critical AI systems as a situated experience.", + "link": "https://www.semanticscholar.org/paper/Unremarkable-AI%3A-Fitting-Intelligent-Decision-into-Yang-Steinfeld/50dd41a39ef20321f3f7e20ddda3cb729e0ca91e", + "scraped_abstract": "Clinical decision support tools (DST) promise improved healthcare outcomes by offering data-driven insights. While effective in lab settings, almost all DSTs have failed in practice. Empirical research diagnosed poor contextual fit as the cause. This paper describes the design and field evaluation of a radically new form of DST. It automatically generates slides for clinicians' decision meetings with subtly embedded machine prognostics. This design took inspiration from the notion of Unremarkable Computing, that by augmenting the users' routines technology/AI can have significant importance for the users yet remain unobtrusive. Our field evaluation suggests clinicians are more likely to encounter and embrace such a DST. 
Drawing on their responses, we discuss the importance and intricacies of finding the right level of unremarkableness in DST design, and share lessons learned in prototyping critical AI systems as a situated experience.", + "citation_best": 72 + }, + { + "paper": "2927052147", + "venue": "1163450153", + "year": "2019", + "title": "increasing the transparency of research papers with explorable multiverse analyses", + "label": [ + "65697883", + "2522767166" + ], + "author": [ + "89785269", + "2116787695", + "2610488991", + "2160820845", + "2123408939" + ], + "reference": [ + "73081409", + "178162822", + "1075025804", + "1522777881", + "1531752254", + "1576631921", + "1595114968", + "1767470961", + "1964473154", + "1985594972", + "1990761540", + "1999369959", + "2005534705", + "2008130329", + "2008749884", + "2034041514", + "2036242330", + "2043523210", + "2044957292", + "2060804491", + "2074782571", + "2077128897", + "2084767266", + "2093331871", + "2094567668", + "2101096043", + "2104279568", + "2106570890", + "2107412312", + "2108075477", + "2109437898", + "2112250557", + "2112287470", + "2112905686", + "2114122752", + "2117897510", + "2125068351", + "2125515636", + "2127707768", + "2127875809", + "2128213565", + "2133353349", + "2141716420", + "2143493287", + "2145825838", + "2146425614", + "2152928398", + "2153735780", + "2158700344", + "2161498332", + "2171073562", + "2171992801", + "2174191405", + "2292312835", + "2302202015", + "2342082360", + "2345474448", + "2404411786", + "2416272719", + "2468469832", + "2526501380", + "2547503455", + "2570760970", + "2574537218", + "2610312145", + "2610780629", + "2729207627", + "2736848882", + "2753303513", + "2762364541", + "2767152246", + "2793021873", + "2795620203", + "2796454083", + "2810595427", + "2888554701", + "2897426720", + "2904142705", + "3021389498" + ], + "abstract": "we present explorable multiverse analysis reports a new approach to statistical reporting where readers of research papers can explore 
alternative analysis options by interacting with the paper itself this approach draws from two recent ideas i multiverse analysis a philosophy of statistical reporting where paper authors report the outcomes of many different statistical analyses in order to show how fragile or robust their findings are and ii explorable explanations narratives that can be read as normal explanations but where the reader can also become active by dynamically changing some elements of the explanation based on five examples and a design space analysis we show how combining those two ideas can complement existing reporting approaches and constitute a step towards more transparent research papers", + "title_raw": "Increasing the Transparency of Research Papers with Explorable Multiverse Analyses", + "abstract_raw": "We present explorable multiverse analysis reports, a new approach to statistical reporting where readers of research papers can explore alternative analysis options by interacting with the paper itself. This approach draws from two recent ideas: i) multiverse analysis, a philosophy of statistical reporting where paper authors report the outcomes of many different statistical analyses in order to show how fragile or robust their findings are; and ii) explorable explanations, narratives that can be read as normal explanations but where the reader can also become active by dynamically changing some elements of the explanation. 
Based on five examples and a design space analysis, we show how combining those two ideas can complement existing reporting approaches and constitute a step towards more transparent research papers.", + "link": "https://www.semanticscholar.org/paper/Increasing-the-Transparency-of-Research-Papers-with-Dragicevic-Jansen/35514e0a87c65660725cbc7c83452c44961a3502", + "scraped_abstract": "We present explorable multiverse analysis reports, a new approach to statistical reporting where readers of research papers can explore alternative analysis options by interacting with the paper itself. This approach draws from two recent ideas: i) multiverse analysis, a philosophy of statistical reporting where paper authors report the outcomes of many different statistical analyses in order to show how fragile or robust their findings are; and ii) explorable explanations, narratives that can be read as normal explanations but where the reader can also become active by dynamically changing some elements of the explanation. 
Based on five examples and a design space analysis, we show how combining those two ideas can complement existing reporting approaches and constitute a step towards more transparent research papers.", + "citation_best": 112 + }, + { + "paper": "2941798514", + "venue": "1163450153", + "year": "2019", + "title": "project sidewalk a web based crowdsourcing tool for collecting sidewalk accessibility data at scale", + "label": [ + "48044578", + "503285160", + "118643609", + "136764020", + "105339364", + "62230096" + ], + "author": [ + "2224387070", + "2945568470", + "2767116072", + "2940910254", + "2944845530", + "2946216058", + "2945958554", + "2765884967", + "2765578659", + "2136232910", + "2157441593" + ], + "reference": [ + "194327752", + "1558189258", + "1660592114", + "1975973707", + "1992028961", + "1993049392", + "1995373050", + "1998898665", + "2008768047", + "2010361629", + "2027808287", + "2028466093", + "2040851354", + "2058646177", + "2063935068", + "2071400129", + "2072683781", + "2078883237", + "2079757104", + "2084636801", + "2091532741", + "2105344531", + "2115395090", + "2117810730", + "2127008633", + "2138996324", + "2147830352", + "2147976255", + "2168514921", + "2170989440", + "2282616409", + "2398965494", + "2416782422", + "2556336411", + "2594809813", + "2618469824", + "2755477531", + "2763135235", + "2769787880", + "2809559781", + "2896144640", + "2897017388", + "2900509361", + "2952844618" + ], + "abstract": "we introduce project sidewalk a new web based tool that enables online crowdworkers to remotely label pedestrian related accessibility problems by virtually walking through city streets in google street view to train engage and sustain users we apply basic game design principles such as interactive onboarding mission based tasks and progress dashboards in an 18 month deployment study 797 online users contributed 205 385 labels and audited 2 941 miles of washington dc streets we compare behavioral and labeling quality differences between 
paid crowdworkers and volunteers investigate the effects of label type label severity and majority vote on accuracy and analyze common labeling errors to complement these findings we report on an interview study with three key stakeholder groups n 14 soliciting reactions to our tool and methods our findings demonstrate the potential of virtually auditing urban accessibility and highlight tradeoffs between scalability and quality compared to traditional approaches", + "title_raw": "Project Sidewalk: A Web-based Crowdsourcing Tool for Collecting Sidewalk Accessibility Data At Scale", + "abstract_raw": "We introduce Project Sidewalk, a new web-based tool that enables online crowdworkers to remotely label pedestrian-related accessibility problems by virtually walking through city streets in Google Street View. To train, engage, and sustain users, we apply basic game design principles such as interactive onboarding, mission-based tasks, and progress dashboards. In an 18-month deployment study, 797 online users contributed 205,385 labels and audited 2,941 miles of Washington DC streets. We compare behavioral and labeling quality differences between paid crowdworkers and volunteers, investigate the effects of label type, label severity, and majority vote on accuracy, and analyze common labeling errors. To complement these findings, we report on an interview study with three key stakeholder groups (N=14) soliciting reactions to our tool and methods. 
Our findings demonstrate the potential of virtually auditing urban accessibility and highlight tradeoffs between scalability and quality compared to traditional approaches.", + "link": "https://www.semanticscholar.org/paper/Project-Sidewalk%3A-A-Web-based-Crowdsourcing-Tool-At-Saha-Saugstad/1a566ebe377462862fcbe56e3eed861cdb3c9bf6", + "scraped_abstract": "We introduce Project Sidewalk, a new web-based tool that enables online crowdworkers to remotely label pedestrian-related accessibility problems by virtually walking through city streets in Google Street View. To train, engage, and sustain users, we apply basic game design principles such as interactive onboarding, mission-based tasks, and progress dashboards. In an 18-month deployment study, 797 online users contributed 205,385 labels and audited 2,941 miles of Washington DC streets. We compare behavioral and labeling quality differences between paid crowdworkers and volunteers, investigate the effects of label type, label severity, and majority vote on accuracy, and analyze common labeling errors. To complement these findings, we report on an interview study with three key stakeholder groups (N=14) soliciting reactions to our tool and methods. 
Our findings demonstrate the potential of virtually auditing urban accessibility and highlight tradeoffs between scalability and quality compared to traditional approaches.", + "citation_best": 0 + }, + { + "paper": "2940996955", + "venue": "1163450153", + "year": "2019", + "title": "touchstone2 an interactive environment for exploring trade offs in hci experiment design", + "label": [ + "146206909", + "107457646", + "25621077" + ], + "author": [ + "2941979913", + "2026250989", + "2674390412", + "1925449899" + ], + "reference": [ + "1515242458", + "1571883544", + "1600593465", + "1655069027", + "1961875129", + "1976474629", + "1979290264", + "2025095868", + "2034190452", + "2050355460", + "2087484885", + "2107031757", + "2112103637", + "2114804859", + "2128555886", + "2161498332", + "2165893637", + "2237440835", + "2314144213", + "2404411786", + "2406493898", + "2416272719", + "2471282380", + "2547503455", + "2755555556", + "2793021873" + ], + "abstract": "touchstone2 offers a direct manipulation interface for generating and examining trade offs in experiment designs based on interviews with experienced researchers we developed an interactive environment for manipulating experiment design parameters revealing patterns in trial tables and estimating and comparing statistical power we also developed tsl a declarative language that precisely represents experiment designs in two studies experienced hci researchers successfully used touchstone2 to evaluate design trade offs and calculate how many participants are required for particular effect sizes we discuss touchstone2 s benefits and limitations as well as directions for future research", + "title_raw": "Touchstone2 : An Interactive Environment for Exploring Trade-offs in HCI Experiment Design", + "abstract_raw": "Touchstone2 offers a direct-manipulation interface for generating and examining trade-offs in experiment designs. 
Based on interviews with experienced researchers, we developed an interactive environment for manipulating experiment design parameters, revealing patterns in trial tables, and estimating and comparing statistical power. We also developed TSL, a declarative language that precisely represents experiment designs. In two studies, experienced HCI researchers successfully used Touchstone2 to evaluate design trade-offs and calculate how many participants are required for particular effect sizes. We discuss Touchstone2's benefits and limitations, as well as directions for future research.", + "link": "https://www.semanticscholar.org/paper/Touchstone2%3A-An-Interactive-Environment-for-in-HCI-Eiselmayer-Wacharamanotham/40c5e8400829f0de34b549c8405caa6c35927b95", + "scraped_abstract": null, + "citation_best": 5 + }, + { + "paper": "2941255702", + "venue": "1163450153", + "year": "2019", + "title": "a translational science model for hci", + "label": [ + "78646695", + "2522767166" + ], + "author": [ + "2345091217", + "2940543399", + "2248571559", + "2140253893" + ], + "reference": [ + "25454230", + "67011371", + "96550334", + "175386426", + "1550275457", + "1573874021", + "1753385428", + "1780765560", + "1968183879", + "1970044051", + "1980996290", + "1983330855", + "1990513740", + "1998065338", + "2003744519", + "2019348437", + "2022805907", + "2026645894", + "2045620376", + "2050483420", + "2055122704", + "2070009723", + "2070237036", + "2070671581", + "2082817479", + "2085529605", + "2086509058", + "2093007203", + "2103819748", + "2104123546", + "2112031352", + "2117018107", + "2132810818", + "2133443878", + "2134121541", + "2135728732", + "2137260886", + "2142149055", + "2144705353", + "2153033399", + "2163509922", + "2164656227", + "2166050198", + "2169567109", + "2171685246", + "2269596138", + "2270145139", + "2480563107", + "2496684830", + "2601976332", + "2607784312", + "2612851029", + "2623337004", + "2645880303", + "2776423041", + "2776648127", + "2795713315", + 
"2798834888", + "2891520384", + "2898715512", + "2921141528", + "3128678724" + ], + "abstract": "using scientific discoveries to inform design practice is an important but difficult objective in hci in this paper we provide an overview of translational science in hci by triangulating literature related to the research practice gap with interview data from many parties engaged or not in translating hci knowledge we propose a model for translational science in hci based on the concept of a continuum to describe how knowledge progresses or stalls through multiple steps and translations until it can influence design practice the model offers a conceptual framework that can be used by researchers and practitioners to visualize and describe the progression of hci knowledge through a sequence of translations additionally the model may facilitate a precise identification of translational barriers which allows devising more effective strategies to increase the use of scientific findings in design practice", + "title_raw": "A Translational Science Model for HCI", + "abstract_raw": "Using scientific discoveries to inform design practice is an important, but difficult, objective in HCI. In this paper, we provide an overview of Translational Science in HCI by triangulating literature related to the research-practice gap with interview data from many parties engaged (or not) in translating HCI knowledge. We propose a model for Translational Science in HCI based on the concept of a continuum to describe how knowledge progresses (or stalls) through multiple steps and translations until it can influence design practice. The model offers a conceptual framework that can be used by researchers and practitioners to visualize and describe the progression of HCI knowledge through a sequence of translations. 
Additionally, the model may facilitate a precise identification of translational barriers, which allows devising more effective strategies to increase the use of scientific findings in design practice.", + "link": "https://www.semanticscholar.org/paper/e123cebb30dfaa32aa3b2cee2d66794c588bc47b", + "scraped_abstract": null, + "citation_best": 57 + }, + { + "paper": "2942399136", + "venue": "1163450153", + "year": "2019", + "title": "street level algorithms a theory at the gaps between policy and decisions", + "label": [ + "11413529" + ], + "author": [ + "2428504479", + "1974803209" + ], + "reference": [ + "609693342", + "611611202", + "783257918", + "1524854001", + "1584578310", + "1968992937", + "1972067873", + "1989630473", + "2007018772", + "2014060834", + "2019742293", + "2028953510", + "2031981325", + "2036291768", + "2063185719", + "2063782051", + "2083106224", + "2087710120", + "2104887187", + "2110151287", + "2110747032", + "2114412976", + "2120835749", + "2124503193", + "2124994029", + "2125943921", + "2147603330", + "2157513726", + "2165853607", + "2184059664", + "2187291759", + "2290009368", + "2292070666", + "2293765620", + "2297935301", + "2308740084", + "2398514781", + "2401762417", + "2473820412", + "2551317447", + "2553906916", + "2557671501", + "2591226236", + "2610281177", + "2610281615", + "2610364475", + "2610589002", + "2611520706", + "2615212458", + "2623107000", + "2737089608", + "2773809728", + "2785106566", + "2788481061", + "2792696627", + "2794541067", + "2885659818", + "2889490278", + "2896833840", + "2898911770", + "2902116413", + "2994507840", + "3100046612", + "3121486337", + "3121522380", + "3122810052" + ], + "abstract": "errors and biases are earning algorithms increasingly malignant reputations in society a central challenge is that algorithms must bridge the gap between high level policy and on the ground decisions making inferences in novel situations where the policy or training data do not readily apply in this paper we draw on 
the theory of street level bureaucracies how human bureaucrats such as police and judges interpret policy to make on the ground decisions we present by analogy a theory of street level algorithms the algorithms that bridge the gaps between policy and decisions about people in a socio technical system we argue that unlike street level bureaucrats who reflexively refine their decision criteria as they reason through a novel situation street level algorithms at best refine their criteria only after the decision is made this loop and a half delay results in illogical decisions when handling new or extenuating circumstances this theory suggests designs for street level algorithms that draw on historical design patterns for street level bureaucracies including mechanisms for self policing and recourse in the case of error", + "title_raw": "Street-Level Algorithms: A Theory at the Gaps Between Policy and Decisions", + "abstract_raw": "Errors and biases are earning algorithms increasingly malignant reputations in society. A central challenge is that algorithms must bridge the gap between high-level policy and on-the-ground decisions, making inferences in novel situations where the policy or training data do not readily apply. In this paper, we draw on the theory of street-level bureaucracies, how human bureaucrats such as police and judges interpret policy to make on-the-ground decisions. We present by analogy a theory of street-level algorithms, the algorithms that bridge the gaps between policy and decisions about people in a socio-technical system. We argue that unlike street-level bureaucrats, who reflexively refine their decision criteria as they reason through a novel situation, street-level algorithms at best refine their criteria only after the decision is made. This loop-and-a-half delay results in illogical decisions when handling new or extenuating circumstances. 
This theory suggests designs for street-level algorithms that draw on historical design patterns for street-level bureaucracies, including mechanisms for self-policing and recourse in the case of error.", + "link": "https://www.semanticscholar.org/paper/b2a784d30fc478470dc7c27c844700d7a63b5021", + "scraped_abstract": "Errors and biases are earning algorithms increasingly malignant reputations in society. A central challenge is that algorithms must bridge the gap between high-level policy and on-the-ground decisions, making inferences in novel situations where the policy or training data do not readily apply. In this paper, we draw on the theory of street-level bureaucracies, how human bureaucrats such as police and judges interpret policy to make on-the-ground decisions. We present by analogy a theory of street-level algorithms, the algorithms that bridge the gaps between policy and decisions about people in a socio-technical system. We argue that unlike street-level bureaucrats, who reflexively refine their decision criteria as they reason through a novel situation, street-level algorithms at best refine their criteria only after the decision is made. This loop-and-a-half delay results in illogical decisions when handling new or extenuating circumstances. 
This theory suggests designs for street-level algorithms that draw on historical design patterns for street-level bureaucracies, including mechanisms for self-policing and recourse in the case of error.", + "citation_best": 0 + }, + { + "paper": "2942185243", + "venue": "1163450153", + "year": "2019", + "title": "retype quick text editing with keyboard and gaze", + "label": [ + "2779916870", + "201025465", + "107457646", + "2781209916", + "89505385" + ], + "author": [ + "2941444823", + "203114230", + "2120939216" + ], + "reference": [ + "19399978", + "77826386", + "181330260", + "1276082511", + "1492315901", + "1517003397", + "1563141555", + "1586021458", + "1600737952", + "1601345954", + "1968423085", + "1973070506", + "1979038477", + "1980720228", + "1983239853", + "1994626127", + "2008517591", + "2028712503", + "2038565058", + "2046234395", + "2048621919", + "2052057152", + "2067484085", + "2071436133", + "2077379736", + "2089669290", + "2098550303", + "2101940840", + "2110228188", + "2116957708", + "2124585590", + "2129646538", + "2130200371", + "2140986116", + "2147482375", + "2169489294", + "2175863066", + "2189182273", + "2275318929", + "2293900001", + "2511637758", + "2579504323", + "2788567290", + "3125878135" + ], + "abstract": "when a user needs to reposition the cursor during text editing this is often done using the mouse for experienced typists especially the switch between keyboard and mouse can slow down the keyboard editing workflow considerably to address this we propose retype a new gaze assisted positioning technique combining keyboard with gaze input based on a new patching metaphor retype allows users to perform some common editing operations while keeping their hands on the keyboard we present the result of two studies a free use study indicated that retype enhances the user experience of text editing retype was liked by many participants regardless of their typing skills a comparative user study showed that retype is able to match or even 
beat the speed of mouse based interaction for small text edits we conclude that the gaze augmented user interface can make common interactions more fluent especially for professional keyboard users", + "title_raw": "ReType: Quick Text Editing with Keyboard and Gaze", + "abstract_raw": "When a user needs to reposition the cursor during text editing, this is often done using the mouse. For experienced typists especially, the switch between keyboard and mouse can slow down the keyboard editing workflow considerably. To address this we propose ReType, a new gaze-assisted positioning technique combining keyboard with gaze input based on a new 'patching' metaphor. ReType allows users to perform some common editing operations while keeping their hands on the keyboard. We present the result of two studies. A free-use study indicated that ReType enhances the user experience of text editing. ReType was liked by many participants, regardless of their typing skills. A comparative user study showed that ReType is able to match or even beat the speed of mouse-based interaction for small text edits. 
We conclude that the gaze-augmented user interface can make common interactions more fluent, especially for professional keyboard users.", + "link": "https://www.semanticscholar.org/paper/abd9c62d1ee42365f3c9a8e4e25617f9b2e0785d", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2941419932", + "venue": "1163450153", + "year": "2019", + "title": "picme interactive visual guidance for taking requested photo composition", + "label": [ + "115961682", + "162307627", + "107457646", + "36464697", + "2778751112", + "126042441" + ], + "author": [ + "2425929746", + "2108752850" + ], + "reference": [ + "2653780", + "18264378", + "1826088874", + "1975649169", + "1997141290", + "2008968438", + "2019085623", + "2026996548", + "2033819227", + "2040141351", + "2045884674", + "2048710758", + "2058783551", + "2066513689", + "2081253602", + "2085948715", + "2094954972", + "2099434798", + "2100052146", + "2104915826", + "2105964081", + "2111632961", + "2120247176", + "2120860586", + "2126134498", + "2128597132", + "2145023731", + "2147994340", + "2163096731", + "2179427518", + "2292978787", + "2339625848", + "2587004249", + "2611946954" + ], + "abstract": "picme is a mobile application that provides interactive on screen guidance that helps the user take pictures of a composition that another person requires once the requester captures a picture of the desired composition and delivers it to the user photographer a 2 5d guidance system called the virtual frame guides the user in real time by showing a three dimensional composition of the target image i e size and shape in addition according to the matching accuracy rate we provide a small sized target image in an inset window as feedback and edge visualization for further alignment of the detail elements we implemented picme to work fully in mobile environments we then conducted a preliminary user study to evaluate the effectiveness of picme compared to traditional 2d guidance methods the results show that picme 
helps users reach their target images more accurately and quickly by giving participants more confidence in their tasks", + "title_raw": "PicMe: Interactive Visual Guidance for Taking Requested Photo Composition", + "abstract_raw": "PicMe is a mobile application that provides interactive on-screen guidance that helps the user take pictures of a composition that another person requires. Once the requester captures a picture of the desired composition and delivers it to the user (photographer), a 2.5D guidance system, called the virtual frame, guides the user in real-time by showing a three-dimensional composition of the target image (i.e., size and shape). In addition, according to the matching accuracy rate, we provide a small-sized target image in an inset window as feedback and edge visualization for further alignment of the detail elements. We implemented PicMe to work fully in mobile environments. We then conducted a preliminary user study to evaluate the effectiveness of PicMe compared to traditional 2D guidance methods. The results show that PicMe helps users reach their target images more accurately and quickly by giving participants more confidence in their tasks.", + "link": "https://www.semanticscholar.org/paper/5ffa0f4c693305ece148a6f399a779a39deefa1d", + "scraped_abstract": "PicMe is a mobile application that provides interactive on-screen guidance that helps the user take pictures of a composition that another person requires. Once the requester captures a picture of the desired composition and delivers it to the user (photographer), a 2.5D guidance system, called the virtual frame, guides the user in real-time by showing a three-dimensional composition of the target image (i.e., size and shape). In addition, according to the matching accuracy rate, we provide a small-sized target image in an inset window as feedback and edge visualization for further alignment of the detail elements. We implemented PicMe to work fully in mobile environments. 
We then conducted a preliminary user study to evaluate the effectiveness of PicMe compared to traditional 2D guidance methods. The results show that PicMe helps users reach their target images more accurately and quickly by giving participants more confidence in their tasks.", + "citation_best": 0 + }, + { + "paper": "2941232686", + "venue": "1163450153", + "year": "2019", + "title": "managing messes in computational notebooks", + "label": [ + "170130773", + "198140048", + "167955471", + "91071405", + "2779910809", + "199360897", + "56666940" + ], + "author": [ + "2318425904", + "2611250197", + "1989569963", + "14227721", + "2163972128" + ], + "reference": [ + "1964885079", + "1979545399", + "2001058389", + "2002530879", + "2021538299", + "2028075681", + "2044102377", + "2058364728", + "2071873073", + "2089632471", + "2099857277", + "2099858487", + "2102335485", + "2113157806", + "2125083169", + "2139627310", + "2145154883", + "2154267672", + "2158532686", + "2161661398", + "2249604275", + "2262523701", + "2400341429", + "2466173809", + "2535296513", + "2610517421", + "2768736423", + "2784241156", + "2794467911", + "2796040126", + "2796354015", + "2820738241", + "2890114759", + "2898377472", + "2913256667" + ], + "abstract": "data analysts use computational notebooks to write code for analyzing and visualizing data notebooks help analysts iteratively write analysis code by letting them interleave code with output and selectively execute cells however as analysis progresses analysts leave behind old code and outputs and overwrite important code producing cluttered and inconsistent notebooks this paper introduces code gathering tools extensions to computational notebooks that help analysts find clean recover and compare versions of code in cluttered inconsistent notebooks the tools archive all versions of code outputs allowing analysts to review these versions and recover the subsets of code that produced them these subsets can serve as succinct summaries of analysis 
activity or starting points for new analyses in a qualitative usability study 12 professional analysts found the tools useful for cleaning notebooks and writing analysis code and discovered new ways to use them like generating personal documentation and lightweight versioning", + "title_raw": "Managing Messes in Computational Notebooks", + "abstract_raw": "Data analysts use computational notebooks to write code for analyzing and visualizing data. Notebooks help analysts iteratively write analysis code by letting them interleave code with output, and selectively execute cells. However, as analysis progresses, analysts leave behind old code and outputs, and overwrite important code, producing cluttered and inconsistent notebooks. This paper introduces code gathering tools, extensions to computational notebooks that help analysts find, clean, recover, and compare versions of code in cluttered, inconsistent notebooks. The tools archive all versions of code outputs, allowing analysts to review these versions and recover the subsets of code that produced them. These subsets can serve as succinct summaries of analysis activity or starting points for new analyses. 
In a qualitative usability study, 12 professional analysts found the tools useful for cleaning notebooks and writing analysis code, and discovered new ways to use them, like generating personal documentation and lightweight versioning.", + "link": "https://www.semanticscholar.org/paper/41a270cdb0405a454a6d55e86caaff67e99abe4f", + "scraped_abstract": null, + "citation_best": 134 + }, + { + "paper": "2941123418", + "venue": "1163450153", + "year": "2019", + "title": "think secure from the beginning a survey with software developers", + "label": [ + "167955471", + "62913178", + "2777904410" + ], + "author": [ + "2343837134", + "1779080424" + ], + "reference": [ + "65833265", + "133470593", + "1507004422", + "1580288159", + "1947959002", + "1968335087", + "1971219358", + "1973601758", + "1983142587", + "1985408088", + "1989657183", + "2005962568", + "2008626182", + "2011698232", + "2023350458", + "2033019352", + "2038889476", + "2046622454", + "2046810302", + "2047345533", + "2057366964", + "2057796693", + "2059507980", + "2061604051", + "2073220979", + "2078393527", + "2085067988", + "2085925880", + "2092690462", + "2106371080", + "2130758759", + "2137548130", + "2141846678", + "2143472559", + "2157820602", + "2158297335", + "2159613309", + "2217203162", + "2249595199", + "2297419069", + "2369295637", + "2467263134", + "2471005372", + "2471086916", + "2511044583", + "2541261609", + "2585818648", + "2605067380", + "2622940715", + "2624697062", + "2628359750", + "2679033717", + "2698406033", + "2766217896", + "2766347289", + "2792247140", + "2792425537", + "2796056969", + "2888915331", + "2888959482", + "2889118403", + "2889126501", + "2911649222", + "3122921485" + ], + "abstract": "vulnerabilities persist despite existing software security initiatives and best practices this paper focuses on the human factors of software security including human behaviour and motivation we conducted an online survey to explore the interplay between developers and software security 
processes e g we looked into how developers influence and are influenced by these processes our data included responses from 123 software developers currently employed in north america who work on various types of software applications whereas developers are often held responsible for security vulnerabilities our analysis shows that the real issues frequently stem from a lack of organizational or process support to handle security throughout development tasks our participants are self motivated towards software security and the majority did not dismiss it but identified obstacles to achieving secure code our work highlights the need to look beyond the individual and take a holistic approach to investigate organizational issues influencing software security", + "title_raw": "'Think secure from the beginning' : A Survey with Software Developers", + "abstract_raw": "Vulnerabilities persist despite existing software security initiatives and best practices. This paper focuses on the human factors of software security, including human behaviour and motivation. We conducted an online survey to explore the interplay between developers and software security processes, e.g., we looked into how developers influence and are influenced by these processes. Our data included responses from 123 software developers currently employed in North America who work on various types of software applications. Whereas developers are often held responsible for security vulnerabilities, our analysis shows that the real issues frequently stem from a lack of organizational or process support to handle security throughout development tasks. Our participants are self-motivated towards software security, and the majority did not dismiss it but identified obstacles to achieving secure code. 
Our work highlights the need to look beyond the individual, and take a holistic approach to investigate organizational issues influencing software security.", + "link": "https://scholar.google.com/scholar?as_q=&num=10&btnG=Search+Scholar&as_epq='Think+secure+from+the+beginning':+A+Survey+with+Software+Developers&as_oq=&as_eq=&as_occt=any&as_sauthors=Assal", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2964608097", + "venue": "1158167855", + "year": "2019", + "title": "a theory of fermat paths for non line of sight shape reconstruction", + "label": [ + "154910267", + "154945302", + "146044194", + "11413529" + ], + "author": [ + "2965393190", + "2753664720", + "87538691", + "2077766773", + "2055799763", + "2042475543" + ], + "reference": [ + "1238092070", + "1546549503", + "1557650277", + "1965908318", + "1977235425", + "1994813338", + "2000443157", + "2008073424", + "2013125633", + "2031980170", + "2038630728", + "2045411570", + "2051317734", + "2052629386", + "2054113582", + "2076954462", + "2087957196", + "2109879161", + "2112858385", + "2156387017", + "2164693610", + "2200308833", + "2299392318", + "2547798018", + "2604237000", + "2723749992", + "2738798903", + "2752118498", + "2755257996", + "2770188800", + "2777077087", + "2793604749", + "2798602665", + "2799016823", + "2805267637", + "2889999978", + "2898171373", + "2913322182", + "2948901907", + "2951067010", + "2955172121", + "2962953926", + "2979485309", + "3102224640", + "3106536704" + ], + "abstract": "we present a novel theory of fermat paths of light between a known visible scene and an unknown object not in the line of sight of a transient camera these light paths either obey specular reflection or are reflected by the object s boundary and hence encode the shape of the hidden object we prove that fermat paths correspond to discontinuities in the transient measurements we then derive a novel constraint that relates the spatial derivatives of the path lengths at these 
discontinuities to the surface normal based on this theory we present an algorithm called fermat flow to estimate the shape of the non line of sight object our method allows for the first time accurate shape recovery of complex objects ranging from diffuse to specular that are hidden around the corner as well as hidden behind a diffuser finally our approach is agnostic to the particular technology used for transient imaging as such we demonstrate mm scale shape recovery from pico second scale transients using a spad and ultrafast laser as well as micron scale reconstruction from femto second scale transients using interferometry we believe our work is a significant advance over the state of the art in non line of sight imaging", + "title_raw": "A Theory of Fermat Paths for Non-Line-Of-Sight Shape Reconstruction", + "abstract_raw": "We present a novel theory of Fermat paths of light between a known visible scene and an unknown object not in the line of sight of a transient camera. These light paths either obey specular reflection or are reflected by the object's boundary, and hence encode the shape of the hidden object. We prove that Fermat paths correspond to discontinuities in the transient measurements. We then derive a novel constraint that relates the spatial derivatives of the path lengths at these discontinuities to the surface normal. Based on this theory, we present an algorithm, called Fermat Flow, to estimate the shape of the non-line-of-sight object. Our method allows, for the first time, accurate shape recovery of complex objects, ranging from diffuse to specular, that are hidden around the corner as well as hidden behind a diffuser. Finally, our approach is agnostic to the particular technology used for transient imaging. As such, we demonstrate mm-scale shape recovery from pico-second scale transients using a SPAD and ultrafast laser, as well as micron-scale reconstruction from femto-second scale transients using interferometry. 
We believe our work is a significant advance over the state-of-the-art in non-line-of-sight imaging.", + "link": "semanticscholar.org/paper/5809decf0a47a8a6be5c0525cd9f92e1e7d4c75f", + "scraped_abstract": null, + "citation_best": 121 + }, + { + "paper": "2967591898", + "venue": "1199533187", + "year": "2019", + "title": "empirical review of java program repair tools a large scale experiment on 2 141 bugs and 23 551 repair attempts", + "label": [ + "26713055", + "548217200", + "48103436", + "137955351", + "115903868" + ], + "author": [ + "2264761127", + "2784755993", + "2164567110", + "2129930770" + ], + "reference": [ + "841012168", + "1496637959", + "2061575154", + "2063387237", + "2076719273", + "2104107939", + "2117593603", + "2122947685", + "2142741391", + "2145124323", + "2151497118", + "2153881107", + "2156723666", + "2272835211", + "2285839903", + "2344973853", + "2400994325", + "2465133314", + "2518136680", + "2537787699", + "2582560208", + "2620986014", + "2735571786", + "2762550985", + "2767431443", + "2767766265", + "2767951593", + "2784445699", + "2791415964", + "2794443436", + "2795030435", + "2795866244", + "2801865415", + "2850616187", + "2867448323", + "2883977877", + "2908354350", + "2947219328", + "2952903800", + "2963909831", + "2969748184", + "3100600242", + "3102072242", + "3103170042", + "3115588598", + "3122945969", + "3206322347" + ], + "abstract": "in the past decade research on test suite based automatic program repair has grown significantly each year new approaches and implementations are featured in major software engineering venues however most of those approaches are evaluated on a single benchmark of bugs which are also rarely reproduced by other researchers in this paper we present a large scale experiment using 11 java test suite based repair tools and 2 141 bugs from 5 benchmarks our goal is to have a better understanding of the current state of automatic program repair tools on a large diversity of benchmarks our investigation is 
guided by the hypothesis that the repairability of repair tools might not be generalized across different benchmarks we found that the 11 tools 1 are able to generate patches for 21 of the bugs from the 5 benchmarks and 2 have better performance on defects4j compared to other benchmarks by generating patches for 47 of the bugs from defects4j compared to 10 30 of bugs from the other benchmarks our experiment comprises 23 551 repair attempts which we used to find causes of non patch generation these causes are reported in this paper which can help repair tool designers to improve their approaches and tools", + "title_raw": "Empirical review of Java program repair tools: a large-scale experiment on 2,141 bugs and 23,551 repair attempts", + "abstract_raw": "In the past decade, research on test-suite-based automatic program repair has grown significantly. Each year, new approaches and implementations are featured in major software engineering venues. However, most of those approaches are evaluated on a single benchmark of bugs, which are also rarely reproduced by other researchers. In this paper, we present a large-scale experiment using 11 Java test-suite-based repair tools and 2,141 bugs from 5 benchmarks. Our goal is to have a better understanding of the current state of automatic program repair tools on a large diversity of benchmarks. Our investigation is guided by the hypothesis that the repairability of repair tools might not be generalized across different benchmarks. We found that the 11 tools 1) are able to generate patches for 21% of the bugs from the 5 benchmarks, and 2) have better performance on Defects4J compared to other benchmarks, by generating patches for 47% of the bugs from Defects4J compared to 10-30% of bugs from the other benchmarks. Our experiment comprises 23,551 repair attempts, which we used to find causes of non-patch generation. 
These causes are reported in this paper, which can help repair tool designers to improve their approaches and tools.", + "link": "https://scholar.google.com/scholar?as_q=&num=10&btnG=Search+Scholar&as_epq=Empirical+Review+of+Java+Program+Repair+Tools:+A+Large-Scale+Experiment+on+2,141+Bugs+and+23,551+Repair+Attempts&as_oq=&as_eq=&as_occt=any&as_sauthors=Durieux", + "scraped_abstract": "In the past decade, research on test-suite-based automatic program repair has grown significantly. Each year, new approaches and implementations are featured in major software engineering venues. However, most of those approaches are evaluated on a single benchmark of bugs, which are also rarely reproduced by other researchers. In this paper, we present a large-scale experiment using 11 Java test-suite-based repair tools and 2,141 bugs from 5 benchmarks. Our goal is to have a better understanding of the current state of automatic program repair tools on a large diversity of benchmarks. Our investigation is guided by the hypothesis that the repairability of repair tools might not be generalized across different benchmarks. We found that the 11 tools 1) are able to generate patches for 21% of the bugs from the 5 benchmarks, and 2) have better performance on Defects4J compared to other benchmarks, by generating patches for 47% of the bugs from Defects4J compared to 10-30% of bugs from the other benchmarks. Our experiment comprises 23,551 repair attempts, which we used to find causes of non-patch generation. 
These causes are reported in this paper, which can help repair tool designers to improve their approaches and tools.", + "citation_best": 3 + }, + { + "paper": "2966914448", + "venue": "1199533187", + "year": "2019", + "title": "generating automated and online test oracles for simulink models with continuous and uncertain behaviors", + "label": [ + "141218545", + "179768478", + "100481476", + "184337299", + "2779843651", + "199360897", + "2776235265", + "55166926" + ], + "author": [ + "1974069242", + "2024882999", + "2922281523", + "2144557644" + ], + "reference": [ + "47554466", + "83755378", + "189973795", + "1488625119", + "1532688402", + "1533557569", + "1533710763", + "1537283821", + "1547304883", + "1654369324", + "1856380672", + "1979248560", + "1994666370", + "2004463571", + "2010069926", + "2013449241", + "2019809768", + "2026629052", + "2031378219", + "2033455284", + "2041713059", + "2049399166", + "2085838366", + "2097223288", + "2098895708", + "2100339971", + "2138361257", + "2140007725", + "2148106163", + "2152728663", + "2153803621", + "2156992042", + "2158381251", + "2165338722", + "2171441042", + "2172184261", + "2174535387", + "2187041056", + "2229094708", + "2320062858", + "2344795941", + "2521098037", + "2558424088", + "2741510978", + "2788220863", + "2791815478", + "2796327418", + "2808791761", + "2885880873", + "2902726917", + "2920995093", + "2952646558", + "3099075475", + "3185338929" + ], + "abstract": "test automation requires automated oracles to assess test outputs for cyber physical systems cps oracles in addition to be automated should ensure some key objectives i they should check test outputs in an online manner to stop expensive test executions as soon as a failure is detected ii they should handle time and magnitude continuous cps behaviors iii they should provide a quantitative degree of satisfaction or failure measure instead of binary pass fail outputs and iv they should be able to handle uncertainties due to cps interactions 
with the environment we propose an automated approach to translate cps requirements specified in a logic based language into test oracles specified in simulink a widely used development and simulation language for cps our approach achieves the objectives noted above through the identification of a fragment of signal first order logic sfol to specify requirements the definition of a quantitative semantics for this fragment and a sound translation of the fragment into simulink the results from applying our approach on 11 industrial case studies show that i our requirements language can express all the 98 requirements of our case studies ii the time and effort required by our approach are acceptable showing potentials for the adoption of our work in practice and iii for large models our approach can dramatically reduce the test execution time compared to when test outputs are checked in an offline manner", + "title_raw": "Generating automated and online test oracles for Simulink models with continuous and uncertain behaviors", + "abstract_raw": "Test automation requires automated oracles to assess test outputs. For cyber physical systems (CPS), oracles, in addition to be automated, should ensure some key objectives: (i) they should check test outputs in an online manner to stop expensive test executions as soon as a failure is detected; (ii) they should handle time- and magnitude-continuous CPS behaviors; (iii) they should provide a quantitative degree of satisfaction or failure measure instead of binary pass/fail outputs; and (iv) they should be able to handle uncertainties due to CPS interactions with the environment. We propose an automated approach to translate CPS requirements specified in a logic-based language into test oracles specified in Simulink - a widely-used development and simulation language for CPS. 
Our approach achieves the objectives noted above through the identification of a fragment of Signal First Order logic (SFOL) to specify requirements, the definition of a quantitative semantics for this fragment and a sound translation of the fragment into Simulink. The results from applying our approach on 11 industrial case studies show that: (i) our requirements language can express all the 98 requirements of our case studies; (ii) the time and effort required by our approach are acceptable, showing potentials for the adoption of our work in practice, and (iii) for large models, our approach can dramatically reduce the test execution time compared to when test outputs are checked in an offline manner.", + "link": "https://scholar.google.com/scholar?as_q=&num=10&btnG=Search+Scholar&as_epq=Generating+Automated+and+Online+Test+Oracles+for+Simulink+Models+with+Continuous+and+Uncertain+Behaviors&as_oq=&as_eq=&as_occt=any&as_sauthors=Menghi", + "scraped_abstract": null, + "citation_best": 44 + }, + { + "paper": "2967556797", + "venue": "1199533187", + "year": "2019", + "title": "the importance of accounting for real world labelling when predicting software vulnerabilities", + "label": [ + "119857082", + "553261973", + "2780615140", + "2777904410" + ], + "author": [ + "2342223544", + "2909899699", + "2170031156", + "2120263361", + "494248588", + "2150723893" + ], + "reference": [ + "38372507", + "149472151", + "154870643", + "165633843", + "1857789879", + "1997236144", + "1997646511", + "2004758929", + "2015001165", + "2022695357", + "2043837581", + "2055765785", + "2067148378", + "2070425304", + "2079753286", + "2106578314", + "2109553965", + "2118283821", + "2119327016", + "2148143831", + "2151666086", + "2154398797", + "2161336914", + "2166336492", + "2170735442", + "2244669237", + "2344072768", + "2348136092", + "2367798545", + "2471516951", + "2517094600", + "2521698904", + "2559874352", + "2753715782", + "2804743641", + "2810595416", + "2889763677", + 
"2896731104", + "2900572580", + "2900690747", + "2969620576", + "3100785508" + ], + "abstract": "previous work on vulnerability prediction assume that predictive models are trained with respect to perfect labelling information includes labels from future as yet undiscovered vulnerabilities in this paper we present results from a comprehensive empirical study of 1 898 real world vulnerabilities reported in 74 releases of three security critical open source systems linux kernel openssl and wiresark our study investigates the effectiveness of three previously proposed vulnerability prediction approaches in two settings with and without the unrealistic labelling assumption the results reveal that the unrealistic labelling assumption can profoundly mis lead the scientific conclusions drawn suggesting highly effective and deployable prediction results vanish when we fully account for realistically available labelling in the experimental methodology more precisely mcc mean values of predictive effectiveness drop from 0 77 0 65 and 0 43 to 0 08 0 22 0 10 for linux kernel openssl and wiresark respectively similar results are also obtained for precision recall and other assessments of predictive efficacy the community therefore needs to upgrade experimental and empirical methodology for vulnerability prediction evaluation and development to ensure robust and actionable scientific findings", + "title_raw": "The importance of accounting for real-world labelling when predicting software vulnerabilities", + "abstract_raw": "Previous work on vulnerability prediction assume that predictive models are trained with respect to perfect labelling information (includes labels from future, as yet undiscovered vulnerabilities). In this paper we present results from a comprehensive empirical study of 1,898 real-world vulnerabilities reported in 74 releases of three security-critical open source systems (Linux Kernel, OpenSSL and Wiresark). 
Our study investigates the effectiveness of three previously proposed vulnerability prediction approaches, in two settings: with and without the unrealistic labelling assumption. The results reveal that the unrealistic labelling assumption can profoundly mis- lead the scientific conclusions drawn; suggesting highly effective and deployable prediction results vanish when we fully account for realistically available labelling in the experimental methodology. More precisely, MCC mean values of predictive effectiveness drop from 0.77, 0.65 and 0.43 to 0.08, 0.22, 0.10 for Linux Kernel, OpenSSL and Wiresark, respectively. Similar results are also obtained for precision, recall and other assessments of predictive efficacy. The community therefore needs to upgrade experimental and empirical methodology for vulnerability prediction evaluation and development to ensure robust and actionable scientific findings.", + "link": "https://www.semanticscholar.org/paper/c636beee1a8b551d86bfb7e6b8d21c0229606d95", + "scraped_abstract": null, + "citation_best": 69 + }, + { + "paper": "2967289945", + "venue": "1199533187", + "year": "2019", + "title": "assessing the quality of the steps to reproduce in bug reports", + "label": [ + "119857082", + "138268822" + ], + "author": [ + "2786423358", + "2230368832", + "2571788724", + "2337296193", + "2146731111", + "2045589531", + "67342874", + "2165254938" + ], + "reference": [ + "1623072288", + "1940872118", + "1987604705", + "2035030692", + "2036270265", + "2074019008", + "2087248009", + "2093041713", + "2095593431", + "2100406974", + "2107142491", + "2110229593", + "2123442489", + "2124482849", + "2127577307", + "2127898555", + "2130146200", + "2158760041", + "2160517961", + "2164577291", + "2296283641", + "2338034015", + "2493916176", + "2514303331", + "2528179723", + "2727861051", + "2741600166", + "2799124508", + "2854345383", + "2909014315", + "2963186636", + "2963641259", + "3101412407", + "3102650716" + ], + "abstract": "a major 
problem with user written bug reports indicated by developers and documented by researchers is the lack of high quality of the reported steps to reproduce the bugs low quality steps to reproduce lead to excessive manual effort spent on bug triage and resolution this paper proposes euler an approach that automatically identifies and assesses the quality of the steps to reproduce in a bug report providing feedback to the reporters which they can use to improve the bug report the feedback provided by euler was assessed by external evaluators and the results indicate that euler correctly identified 98 of the existing steps to reproduce and 58 of the missing ones while 73 of its quality annotations are correct", + "title_raw": "Assessing the quality of the steps to reproduce in bug reports", + "abstract_raw": "A major problem with user-written bug reports, indicated by developers and documented by researchers, is the (lack of high) quality of the reported steps to reproduce the bugs. Low-quality steps to reproduce lead to excessive manual effort spent on bug triage and resolution. This paper proposes Euler, an approach that automatically identifies and assesses the quality of the steps to reproduce in a bug report, providing feedback to the reporters, which they can use to improve the bug report. The feedback provided by Euler was assessed by external evaluators and the results indicate that Euler correctly identified 98% of the existing steps to reproduce and 58% of the missing ones, while 73% of its quality annotations are correct.", + "link": "https://scholar.google.com/scholar?as_q=&num=10&btnG=Search+Scholar&as_epq=Assessing+the+Quality+of+the+Steps+to+Reproduce+in+Bug+Reports&as_oq=&as_eq=&as_occt=any&as_sauthors=Chaparro", + "scraped_abstract": "A major problem with user-written bug reports, indicated by developers and documented by researchers, is the (lack of high) quality of the reported steps to reproduce the bugs. 
Low-quality steps to reproduce lead to excessive manual effort spent on bug triage and resolution. This paper proposes Euler, an approach that automatically identifies and assesses the quality of the steps to reproduce in a bug report, providing feedback to the reporters, which they can use to improve the bug report. The feedback provided by Euler was assessed by external evaluators and the results indicate that Euler correctly identified 98% of the existing steps to reproduce and 58% of the missing ones, while 73% of its quality annotations are correct.", + "citation_best": 1 + }, + { + "paper": "2967162666", + "venue": "1199533187", + "year": "2019", + "title": "a framework for writing trigger action todo comments in executable format", + "label": [ + "160145156", + "51929080", + "114408938", + "135257023", + "548217200", + "61423126", + "195917429", + "115903868", + "195324797" + ], + "author": [ + "2887089935", + "2887301861", + "2885523473", + "2103685797", + "2167433806", + "1986514872" + ], + "reference": [ + "1487664366", + "1500946169", + "1527612271", + "1660714679", + "1804512197", + "1851327982", + "1860267373", + "1967995512", + "2005710769", + "2008710384", + "2018141726", + "2045336717", + "2081749632", + "2082160726", + "2090398160", + "2117228548", + "2118655104", + "2119821925", + "2126793110", + "2144777691", + "2151228786", + "2151298976", + "2152874840", + "2153887189", + "2158396456", + "2165148718", + "2172398315", + "2258358872", + "2395122565", + "2516621648", + "2561301563", + "2612705982", + "2617604339", + "2724503592", + "2724651144", + "2761280532", + "2767331170", + "2767729231", + "2767804392", + "2879105418", + "2884276923", + "2887576733", + "2887769041", + "2888020248", + "2951719623", + "2963794306", + "2963935794" + ], + "abstract": "natural language elements e g todo comments are frequently used to communicate among developers and to describe tasks that need to be performed actions when specific conditions hold on artifacts 
related to the code repository triggers e g from the apache struts project remove expectedjdk15 and if after switching to java 1 6 as projects evolve development processes change and development teams reorganize these comments because of their informal nature frequently become irrelevant or forgotten we present the first framework dubbed trigit to specify trigger action todo comments in executable format thus actions are executed automatically when triggers evaluate to true trigit specifications are written in the host language e g java and are evaluated as part of the build process the triggers are specified as query statements over abstract syntax trees abstract representation of build configuration scripts issue tracking systems and system clock time the actions are either notifications to developers or code transformation steps we implemented trigit for the java programming language and migrated 44 existing trigger action comments from several popular open source projects evaluation of trigit via a user study showed that users find trigit easy to learn and use trigit has the potential to enforce more discipline in writing and maintaining comments in large code repositories", + "title_raw": "A framework for writing trigger-action todo comments in executable format", + "abstract_raw": "Natural language elements, e.g., todo comments, are frequently used to communicate among developers and to describe tasks that need to be performed (actions) when specific conditions hold on artifacts related to the code repository (triggers), e.g., from the Apache Struts project: \u201cremove expectedJDK15 and if() after switching to Java 1.6\u201d. As projects evolve, development processes change, and development teams reorganize, these comments, because of their informal nature, frequently become irrelevant or forgotten. We present the first framework, dubbed TrigIt, to specify trigger-action todo comments in executable format. 
Thus, actions are executed automatically when triggers evaluate to true. TrigIt specifications are written in the host language (e.g., Java) and are evaluated as part of the build process. The triggers are specified as query statements over abstract syntax trees, abstract representation of build configuration scripts, issue tracking systems, and system clock time. The actions are either notifications to developers or code transformation steps. We implemented TrigIt for the Java programming language and migrated 44 existing trigger-action comments from several popular open-source projects. Evaluation of TrigIt, via a user study, showed that users find TrigIt easy to learn and use. TrigIt has the potential to enforce more discipline in writing and maintaining comments in large code repositories.", + "link": "https://scholar.google.com/scholar?as_q=&num=10&btnG=Search+Scholar&as_epq=A+Framework+for+Writing+Trigger-Action+Todo+Comments+in+Executable+Format&as_oq=&as_eq=&as_occt=any&as_sauthors=Nie", + "scraped_abstract": null, + "citation_best": 18 + }, + { + "paper": "2968631515", + "venue": "1199533187", + "year": "2019", + "title": "a statistics based performance testing methodology for cloud applications", + "label": [ + "158025753", + "113775141", + "97824396", + "137955351", + "79974875" + ], + "author": [ + "2806325392", + "2967316199", + "2968208616", + "3205719391", + "2099917311", + "686913843" + ], + "reference": [ + "35708471", + "159719520", + "638544165", + "1497005429", + "1651603302", + "1681426122", + "1965555277", + "1969938843", + "1973223562", + "1982605568", + "1983545421", + "1985229168", + "1998124519", + "2014268383", + "2014820189", + "2021231613", + "2036804147", + "2048652859", + "2052172035", + "2053773822", + "2076880263", + "2079800789", + "2096666207", + "2097879961", + "2100618668", + "2104171511", + "2112582249", + "2115380655", + "2118020555", + "2129660502", + "2141181087", + "2153657167", + "2157662360", + "2159586681", + 
"2161009936", + "2240667924", + "2247585390", + "2249475272", + "2302022389", + "2324153683", + "2391251187", + "2579246879", + "2604856537", + "2606302229", + "2734631700", + "2743912071", + "2757024591", + "2769879317", + "2805685084", + "2843154948", + "2887476267", + "2888320967", + "2899117404", + "2963642335", + "3026721701", + "3151686641" + ], + "abstract": "the low cost of resource ownership and flexibility have led users to increasingly port their applications to the clouds to fully realize the cost benefits of cloud services users usually need to reliably know the execution performance of their applications however due to the random performance fluctuations experienced by cloud applications the black box nature of public clouds and the cloud usage costs testing on clouds to acquire accurate performance results is extremely difficult in this paper we present a novel cloud performance testing methodology called pt4cloud by employing non parametric statistical approaches of likelihood theory and the bootstrap method pt4cloud provides reliable stop conditions to obtain highly accurate performance distributions with confidence bands these statistical approaches also allow users to specify intuitive accuracy goals and easily trade between accuracy and testing cost we evaluated pt4cloud with 33 benchmark configurations on amazon web service and chameleon clouds when compared with performance data obtained from extensive performance tests pt4cloud provides testing results with 95 4 accuracy on average while reducing the number of test runs by 62 we also propose two test execution reduction techniques for pt4cloud which can reduce the number of test runs by 90 1 while retaining an average accuracy of 91 we compared our technique to three other techniques and found that our results are much more accurate", + "title_raw": "A statistics-based performance testing methodology for cloud applications", + "abstract_raw": "The low cost of resource ownership and 
flexibility have led users to increasingly port their applications to the clouds. To fully realize the cost benefits of cloud services, users usually need to reliably know the execution performance of their applications. However, due to the random performance fluctuations experienced by cloud applications, the black box nature of public clouds and the cloud usage costs, testing on clouds to acquire accurate performance results is extremely difficult. In this paper, we present a novel cloud performance testing methodology called PT4Cloud. By employing non-parametric statistical approaches of likelihood theory and the bootstrap method, PT4Cloud provides reliable stop conditions to obtain highly accurate performance distributions with confidence bands. These statistical approaches also allow users to specify intuitive accuracy goals and easily trade between accuracy and testing cost. We evaluated PT4Cloud with 33 benchmark configurations on Amazon Web Service and Chameleon clouds. When compared with performance data obtained from extensive performance tests, PT4Cloud provides testing results with 95.4% accuracy on average while reducing the number of test runs by 62%. We also propose two test execution reduction techniques for PT4Cloud, which can reduce the number of test runs by 90.1% while retaining an average accuracy of 91%. 
We compared our technique to three other techniques and found that our results are much more accurate.", + "link": "https://scholar.google.com/scholar?as_q=&num=10&btnG=Search+Scholar&as_epq=A+Statistics-based+Performance+Testing+Methodology+for+Cloud+Applications&as_oq=&as_eq=&as_occt=any&as_sauthors=He", + "scraped_abstract": null, + "citation_best": 54 + }, + { + "paper": "2982041717", + "venue": "1164975091", + "year": "2019", + "title": "singan learning a generative model from a single natural image", + "label": [ + "176258234", + "99498987", + "115961682", + "167966045", + "205372480", + "69952321", + "154945302", + "79284318", + "194145944", + "178980831", + "141379421" + ], + "author": [ + "2529187827", + "2224515115", + "1969462917" + ], + "reference": [ + "125693051", + "134498629", + "233979554", + "648143168", + "1834627138", + "1976416062", + "1988918871", + "2009548700", + "2047491370", + "2062811295", + "2097117768", + "2099471712", + "2102166818", + "2108598243", + "2115273023", + "2117897510", + "2121927366", + "2134670479", + "2141155330", + "2194775991", + "2298992465", + "2326925005", + "2339754110", + "2420631737", + "2475287302", + "2508457857", + "2519536754", + "2552611751", + "2557969682", + "2560481159", + "2566832195", + "2798668132", + "2798841107", + "2801495938", + "2893749619", + "2943149015", + "2949117887", + "2962719787", + "2962760235", + "2962770929", + "2962793481", + "2962879692", + "2963073614", + "2963125871", + "2963201933", + "2963245902", + "2963330667", + "2963372104", + "2963420272", + "2963561004", + "2963704386", + "2963800363", + "2963981733", + "2964013315", + "2964060609", + "2964193438", + "2964309429", + "2984529706", + "3009014607", + "3043547428", + "3098418424", + "3171930565" + ], + "abstract": "we introduce singan an unconditional generative model that can be learned from a single natural image our model is trained to capture the internal distribution of patches within the image and is then able to generate 
high quality diverse samples that carry the same visual content as the image singan contains a pyramid of fully convolutional gans each responsible for learning the patch distribution at a different scale of the image this allows generating new samples of arbitrary size and aspect ratio that have significant variability yet maintain both the global structure and the fine textures of the training image in contrast to previous single image gan schemes our approach is not limited to texture images and is not conditional i e it generates samples from noise user studies confirm that the generated samples are commonly confused to be real images we illustrate the utility of singan in a wide range of image manipulation tasks", + "title_raw": "SinGAN: Learning a Generative Model From a Single Natural Image", + "abstract_raw": "We introduce SinGAN, an unconditional generative model that can be learned from a single natural image. Our model is trained to capture the internal distribution of patches within the image, and is then able to generate high quality, diverse samples that carry the same visual content as the image. SinGAN contains a pyramid of fully convolutional GANs, each responsible for learning the patch distribution at a different scale of the image. This allows generating new samples of arbitrary size and aspect ratio, that have significant variability, yet maintain both the global structure and the fine textures of the training image. In contrast to previous single image GAN schemes, our approach is not limited to texture images, and is not conditional (i.e. it generates samples from noise). User studies confirm that the generated samples are commonly confused to be real images. 
We illustrate the utility of SinGAN in a wide range of image manipulation tasks.", + "link": "https://www.semanticscholar.org/paper/SinGAN%3A-Learning-a-Generative-Model-from-a-Single-Shaham-Dekel/ccaf15d4ad006171061508ca0a99c73814671501", + "scraped_abstract": null, + "citation_best": 31 + }, + { + "paper": "2903538854", + "venue": "1180662882", + "year": "2019", + "title": "challenging common assumptions in the unsupervised learning of disentangled representations", + "label": [ + "119857082", + "34127721", + "8038995", + "200632571", + "2780527393" + ], + "author": [ + "2590626502", + "2416758793", + "2147033837", + "168172700", + "1169494551", + "297432538", + "345454859" + ], + "reference": [], + "abstract": "", + "title_raw": "Challenging Common Assumptions in the Unsupervised Learning of Disentangled Representations", + "abstract_raw": "", + "link": "https://www.semanticscholar.org/paper/9c5c794094fbf5da8c48df5c3242615dc0b1d245", + "scraped_abstract": null, + "citation_best": 505 + }, + { + "paper": "2951076951", + "venue": "1174403976", + "year": "2019", + "title": "detecting incorrect build rules", + "label": [ + "16311509", + "55439883", + "138673069", + "169590947", + "2777904410", + "115903868", + "166955791", + "111065885" + ], + "author": [ + "2970024448", + "2107668114" + ], + "reference": [ + "602296818", + "1495453491", + "1514026819", + "1584220698", + "1921560783", + "2022794931", + "2023056333", + "2070223459", + "2097120616", + "2103030845", + "2132251340", + "2144528247", + "2148238464", + "2150395559", + "2229273480", + "2469513535", + "2791446013", + "2914298304" + ], + "abstract": "automated build systems are routinely used by software engineers to minimize the number of objects that need to be recompiled after incremental changes to the source files of a project in order to achieve efficient and correct builds developers must provide the build tools with dependency information between the files and modules of a project usually expressed in 
a macro language specific to each build tool most build systems offer good support for well known languages and compilers but as projects grow larger engineers tend to include source files generated using custom tools in order to guarantee correctness the authors of these tools are responsible for enumerating all the files whose contents an output depends on unfortunately this is a tedious process and not all dependencies are captured in practice which leads to incorrect builds we automatically uncover such missing dependencies through a novel method that we call build fuzzing the correctness of build definitions is verified by modifying files in a project triggering incremental builds and comparing the set of changed files to the set of expected changes these sets are determined using a dependency graph inferred by tracing the system calls executed during a clean build we evaluate our method by exhaustively testing build rules of open source projects uncovering issues leading to race conditions and faulty builds in 30 of them we provide a discussion of the bugs we detect identifying anti patterns in the use of the macro languages we fix some of the issues in projects where the features of build systems allow a clean solution", + "title_raw": "Detecting incorrect build rules", + "abstract_raw": "Automated build systems are routinely used by software engineers to minimize the number of objects that need to be recompiled after incremental changes to the source files of a project. In order to achieve efficient and correct builds, developers must provide the build tools with dependency information between the files and modules of a project, usually expressed in a macro language specific to each build tool. Most build systems offer good support for well-known languages and compilers, but as projects grow larger, engineers tend to include source files generated using custom tools. 
In order to guarantee correctness, the authors of these tools are responsible for enumerating all the files whose contents an output depends on. Unfortunately, this is a tedious process and not all dependencies are captured in practice, which leads to incorrect builds. We automatically uncover such missing dependencies through a novel method that we call build fuzzing. The correctness of build definitions is verified by modifying files in a project, triggering incremental builds and comparing the set of changed files to the set of expected changes. These sets are determined using a dependency graph inferred by tracing the system calls executed during a clean build. We evaluate our method by exhaustively testing build rules of open-source projects, uncovering issues leading to race conditions and faulty builds in 30 of them. We provide a discussion of the bugs we detect, identifying anti-patterns in the use of the macro languages. We fix some of the issues in projects where the features of build systems allow a clean solution.", + "link": "https://www.semanticscholar.org/paper/fba3a44e5f4ccec3dd5708d9692f8ff17aa25ef3", + "scraped_abstract": "Automated build systems are routinely used by software engineers to minimize the number of objects that need to be recompiled after incremental changes to the source files of a project. In order to achieve efficient and correct builds, developers must provide the build tools with dependency information between the files and modules of a project, usually expressed in a macro language specific to each build tool. In order to guarantee correctness, the authors of these tools are responsible for enumerating all the files whose contents an output depends on. Unfortunately, this is a tedious process and not all dependencies are captured in practice, which leads to incorrect builds. We automatically uncover such missing dependencies through a novel method that we call build fuzzing. 
The correctness of build definitions is verified by modifying files in a project, triggering incremental builds and comparing the set of changed files to the set of expected changes. These sets are determined using a dependency graph inferred by tracing the system calls executed during a clean build. We evaluate our method by exhaustively testing build rules of open-source projects, uncovering issues leading to race conditions and faulty builds in 31 of them. We provide a discussion of the bugs we detect, identifying anti-patterns in the use of the macro languages. We fix some of the issues in projects where the features of build systems allow a clean solution.", + "citation_best": 16 + }, + { + "paper": "2955730880", + "venue": "1174403976", + "year": "2019", + "title": "distilling neural representations of data structure manipulation using fmri and fnirs", + "label": [ + "162319229", + "31601959", + "155911833" + ], + "author": [ + "2305260873", + "2953997441", + "2954303273", + "2800415762", + "2126942884", + "1983588154", + "1977991679" + ], + "reference": [ + "207357820", + "613254647", + "1607939528", + "1779898155", + "1973204205", + "1974316717", + "1976781461", + "1979342075", + "1985147801", + "1987143349", + "1988628854", + "1992210365", + "1992529560", + "1994563214", + "1996407856", + "2004414525", + "2008060633", + "2010645028", + "2014232144", + "2022244667", + "2038233718", + "2038719540", + "2039772616", + "2040710054", + "2041829367", + "2043045839", + "2043594008", + "2044590882", + "2049812766", + "2061607227", + "2065343501", + "2066113169", + "2071714163", + "2076269298", + "2077190669", + "2079450984", + "2081016707", + "2082504982", + "2088661419", + "2090801405", + "2092770659", + "2095491050", + "2099419188", + "2099637955", + "2101892923", + "2112003252", + "2114728368", + "2120938928", + "2123477708", + "2123526233", + "2124453301", + "2125383450", + "2127812042", + "2130930647", + "2133188546", + "2133320039", + "2133584444", + 
"2136435696", + "2141663826", + "2150238850", + "2150673354", + "2151969869", + "2156424501", + "2160385588", + "2161253280", + "2163118886", + "2168062284", + "2169149290", + "2187213760", + "2203376984", + "2231155388", + "2292302374", + "2560572048", + "2563587725", + "2577973227", + "2581999867", + "2616911396", + "2741371834", + "2742206045", + "2800092803", + "2810705494", + "2810767260", + "2883338005", + "2894895876", + "2895073034", + "2941714792", + "3011322780" + ], + "abstract": "data structures permeate many aspects of software engineering but their associated human cognitive processes are not thoroughly understood we leverage medical imaging and insights from the psychological notion of spatial ability to decode the neural representations of several fundamental data structures and their manipulations in a human study involving 76 participants we examine list array tree and mental rotation tasks using both functional near infrared spectroscopy fnirs and functional magnetic resonance imaging fmri we find a nuanced relationship data structure and spatial operations use the same focal regions of the brain but to different degrees they are related but distinct neural tasks in addition more difficult computer science problems induce higher cognitive load than do problems of pure spatial reasoning finally while fnirs is less expensive and more permissive there are some computing relevant brain regions that only fmri can reach", + "title_raw": "Distilling neural representations of data structure manipulation using fMRI and fNIRS", + "abstract_raw": "Data structures permeate many aspects of software engineering, but their associated human cognitive processes are not thoroughly understood. We leverage medical imaging and insights from the psychological notion of spatial ability to decode the neural representations of several fundamental data structures and their manipulations. 
In a human study involving 76 participants, we examine list, array, tree, and mental rotation tasks using both functional near-infrared spectroscopy (fNIRS) and functional magnetic resonance imaging (fMRI). We find a nuanced relationship: data structure and spatial operations use the same focal regions of the brain but to different degrees. They are related but distinct neural tasks. In addition, more difficult computer science problems induce higher cognitive load than do problems of pure spatial reasoning. Finally, while fNIRS is less expensive and more permissive, there are some computing-relevant brain regions that only fMRI can reach.", + "link": "https://www.semanticscholar.org/paper/76b1c6fb9b3eb44b75549bbc8d176106d758adf6", + "scraped_abstract": "Data structures permeate many aspects of software engineering, but their associated human cognitive processes are not thoroughly understood. We leverage medical imaging and insights from the psychological notion of spatial ability to decode the neural representations of several fundamental data structures and their manipulations. In a human study involving 76 participants, we examine list, array, tree, and mental rotation tasks using both functional near-infrared spectroscopy (fNIRS) and functional magnetic resonance imaging (fMRI). We find a nuanced relationship: data structure and spatial operations use the same focal regions of the brain but to different degrees. They are related but distinct neural tasks. In addition, more difficult computer science problems induce higher cognitive load than do problems of pure spatial reasoning. 
Finally, while fNIRS is less expensive and more permissive, there are some computing-relevant brain regions that only fMRI can reach.", + "citation_best": 30 + }, + { + "paper": "2954494850", + "venue": "1174403976", + "year": "2019", + "title": "do developers discover new tools on the toilet", + "label": [ + "529173508", + "115903868" + ], + "author": [ + "2342860276", + "2164851866", + "2165696351", + "2282611853", + "2293100435", + "2972480023", + "2170752136", + "2928359948", + "2928640897" + ], + "reference": [ + "104561821", + "1487426945", + "1674092947", + "1969785126", + "1990513740", + "2001961185", + "2005914253", + "2006235585", + "2013611238", + "2033420833", + "2057366964", + "2060601789", + "2078639378", + "2092415033", + "2097363245", + "2099334948", + "2102970299", + "2116436752", + "2138350998", + "2141682682", + "2150374398", + "2151953984", + "2159682099", + "2159958028", + "2170369178", + "2546913659", + "2547513165", + "2548846043", + "2755308101", + "2807713652", + "2889563586" + ], + "abstract": "maintaining awareness of useful tools is a substantial challenge for developers physical newsletters are a simple technique to inform developers about tools in this paper we evaluate such a technique called testing on the toilet by performing a mixed methods case study we first quantitatively evaluate how effective this technique is by applying statistical causal inference over six years of data about tools used by thousands of developers we then qualitatively contextualize these results by interviewing and surveying 382 developers from authors to editors to readers we found that the technique was generally effective at increasing software development tool use although the increase varied depending on factors such as the breadth of applicability of the tool the extent to which the tool has reached saturation and the memorability of the tool name", + "title_raw": "Do developers discover new tools on the toilet", + "abstract_raw": "Maintaining 
awareness of useful tools is a substantial challenge for developers. Physical newsletters are a simple technique to inform developers about tools. In this paper, we evaluate such a technique, called Testing on the Toilet, by performing a mixed-methods case study. We first quantitatively evaluate how effective this technique is by applying statistical causal inference over six years of data about tools used by thousands of developers. We then qualitatively contextualize these results by interviewing and surveying 382 developers, from authors to editors to readers. We found that the technique was generally effective at increasing software development tool use, although the increase varied depending on factors such as the breadth of applicability of the tool, the extent to which the tool has reached saturation, and the memorability of the tool name.", + "link": "https://www.semanticscholar.org/paper/25b82fbdabcae52dc2b11a9a8ca4ef0badd403e6", + "scraped_abstract": null, + "citation_best": 12 + }, + { + "paper": "2954024706", + "venue": "1174403976", + "year": "2019", + "title": "isense completion aware crowdtesting management", + "label": [ + "107327155" + ], + "author": [ + "2690816326", + "2100273499", + "2293907099", + "2236230523", + "2253369799" + ], + "reference": [ + "105995946", + "1575961892", + "1592442212", + "1594767943", + "1785384640", + "1902482618", + "1971618369", + "1974747370", + "1991612614", + "2009165332", + "2009786711", + "2016864600", + "2017319947", + "2022284665", + "2032421424", + "2039155169", + "2041457600", + "2041551942", + "2046241805", + "2049625105", + "2050389893", + "2053873374", + "2081503608", + "2081575303", + "2091646623", + "2116693957", + "2120232805", + "2142838174", + "2150649788", + "2159781868", + "2160958420", + "2165921013", + "2167830984", + "2171326583", + "2180620742", + "2245133560", + "2285939512", + "2292289085", + "2308859608", + "2316422287", + "2332230829", + "2514090203", + "2515531724", + "2522186013", + 
"2522973809", + "2612584574", + "2624109868", + "2725032736", + "2731935965", + "2740565296", + "2744629383", + "2753715782", + "2754054654", + "2770141444", + "2789717644", + "2794497357", + "2794695798", + "2795003837", + "2800594283", + "3010856131" + ], + "abstract": "crowdtesting has become an effective alternative to traditional testing especially for mobile applications however crowdtesting is hard to manage in nature given the complexity of mobile applications and unpredictability of distributed crowdtesting processes it is difficult to estimate a remaining number of bugs yet to be detected or b required cost to find those bugs experience based decisions may result in ineffective crowdtesting processes e g there is an average of 32 wasteful spending in current crowdtesting practices this paper aims at exploring automated decision support to effectively manage crowdtesting processes it proposes an approach named isense which applies incremental sampling technique to process crowdtesting reports arriving in chronological order organizes them into fixed size groups as dynamic inputs and predicts two test completion indicators in an incremental manner the two indicators are 1 total number of bugs predicted with capture recapture model and 2 required test cost for achieving certain test objectives predicted with autoregressive integrated moving average model the evaluation of isense is conducted on 46 434 reports of 218 crowdtesting tasks from one of the largest crowdtesting platforms in china its effectiveness is demonstrated through two application studies for automating crowdtesting management and semi automation of task closing trade off analysis the results show that isense can provide managers with greater awareness of testing progress to achieve cost effectiveness gains of crowdtesting specifically a median of 100 bugs can be detected with 30 saved cost based on the automated close prediction", + "title_raw": "iSENSE: completion-aware crowdtesting 
management", + "abstract_raw": "Crowdtesting has become an effective alternative to traditional testing, especially for mobile applications. However, crowdtesting is hard to manage in nature. Given the complexity of mobile applications and unpredictability of distributed crowdtesting processes, it is difficult to estimate (a) remaining number of bugs yet to be detected or (b) required cost to find those bugs. Experience-based decisions may result in ineffective crowdtesting processes, e.g., there is an average of 32% wasteful spending in current crowdtesting practices. This paper aims at exploring automated decision support to effectively manage crowdtesting processes. It proposes an approach named iSENSE which applies incremental sampling technique to process crowdtesting reports arriving in chronological order, organizes them into fixed-size groups as dynamic inputs, and predicts two test completion indicators in an incremental manner. The two indicators are: 1) total number of bugs predicted with Capture-ReCapture model, and 2) required test cost for achieving certain test objectives predicted with AutoRegressive Integrated Moving Average model. The evaluation of iSENSE is conducted on 46,434 reports of 218 crowdtesting tasks from one of the largest crowdtesting platforms in China. Its effectiveness is demonstrated through two application studies for automating crowdtesting management and semi-automation of task closing trade-off analysis. The results show that iSENSE can provide managers with greater awareness of testing progress to achieve cost-effectiveness gains of crowdtesting. 
Specifically, a median of 100% bugs can be detected with 30% saved cost based on the automated close prediction.", + "link": "https://www.semanticscholar.org/paper/96463d0e204f73693bcdcb0e7926d160902cc262", + "scraped_abstract": null, + "citation_best": 20 + }, + { + "paper": "2963274022", + "venue": "1174403976", + "year": "2019", + "title": "redundant loads a software inefficiency indicator", + "label": [ + "139571649", + "97686452", + "190902152", + "169590947", + "2779960059", + "162319229", + "2777904410", + "199519371", + "120314980" + ], + "author": [ + "2913863676", + "2540373368", + "2233088131", + "2089167281", + "2688987970" + ], + "reference": [ + "35708471", + "101609324", + "115766323", + "1524758670", + "1580086658", + "1604252617", + "1832436993", + "1843198456", + "1967690511", + "1988927353", + "1992748711", + "1997375049", + "2009736000", + "2010452422", + "2010557350", + "2020517863", + "2021085621", + "2068424089", + "2080046548", + "2080592089", + "2082318969", + "2087985741", + "2090058354", + "2095872092", + "2100799353", + "2101778912", + "2102890180", + "2104414437", + "2106840980", + "2111838219", + "2120209286", + "2128274900", + "2130653499", + "2131533407", + "2134633067", + "2139356353", + "2140890722", + "2144038733", + "2144334298", + "2144433126", + "2153131460", + "2153185479", + "2153843289", + "2154693467", + "2155528875", + "2157662360", + "2160394585", + "2163654949", + "2167579602", + "2170508236", + "2237415205", + "2295178416", + "2295303622", + "2561515725", + "2592618839", + "2604269348", + "2618917786", + "2619742609", + "2625691739", + "2761619692", + "2790216947", + "2949626814", + "3207110004" + ], + "abstract": "modern software packages have become increasingly complex with millions of lines of code and references to many external libraries redundant operations are a common performance limiter in these code bases missed compiler optimization opportunities inappropriate data structure and algorithm choices and 
developers inattention to performance are some common reasons for the existence of redundant operations developers mainly depend on compilers to eliminate redundant operations however compilers static analysis often misses optimization opportunities due to ambiguities and limited analysis scope automatic optimizations to algorithmic and data structural problems are out of scope we develop loadspy a whole program profiler to pinpoint redundant memory load operations which are often a symptom of many redundant operations the strength of loadspy exists in identifying and quantifying redundant load operations in programs and associating the redundancies with program execution contexts and scopes to focus developers attention on problematic code loadspy works on fully optimized binaries adopts various optimization techniques to reduce its overhead and provides a rich graphic user interface which make it a complete developer tool applying loadspy showed that a large fraction of redundant loads is common in modern software packages despite highest levels of automatic compiler optimizations guided by loadspy we optimize several well known benchmarks and real world applications yielding significant speedups", + "title_raw": "Redundant loads: a software inefficiency indicator", + "abstract_raw": "Modern software packages have become increasingly complex with millions of lines of code and references to many external libraries. Redundant operations are a common performance limiter in these code bases. Missed compiler optimization opportunities, inappropriate data structure and algorithm choices, and developers' inattention to performance are some common reasons for the existence of redundant operations. Developers mainly depend on compilers to eliminate redundant operations. 
However, compilers' static analysis often misses optimization opportunities due to ambiguities and limited analysis scope; automatic optimizations to algorithmic and data structural problems are out of scope. We develop LoadSpy, a whole-program profiler to pinpoint redundant memory load operations, which are often a symptom of many redundant operations. The strength of LoadSpy exists in identifying and quantifying redundant load operations in programs and associating the redundancies with program execution contexts and scopes to focus developers' attention on problematic code. LoadSpy works on fully optimized binaries, adopts various optimization techniques to reduce its overhead, and provides a rich graphic user interface, which make it a complete developer tool. Applying LoadSpy showed that a large fraction of redundant loads is common in modern software packages despite highest levels of automatic compiler optimizations. Guided by LoadSpy, we optimize several well-known benchmarks and real-world applications, yielding significant speedups.", + "link": "https://www.semanticscholar.org/paper/836112889f4ad78886081b9e96dd40678cd376d5", + "scraped_abstract": null, + "citation_best": 27 + }, + { + "paper": "2953487403", + "venue": "1174403976", + "year": "2019", + "title": "resource aware program analysis via online abstraction coarsening", + "label": [ + "98183937", + "113775141", + "45374587", + "137955351", + "97686452" + ], + "author": [ + "2135809656", + "2617306464", + "2125768396" + ], + "reference": [ + "205167324", + "1550698229", + "1576452626", + "1938285999", + "1971859150", + "2028284083", + "2040900804", + "2049311281", + "2068463433", + "2082000355", + "2101234009", + "2111362445", + "2111996055", + "2113964673", + "2115191221", + "2120346334", + "2121863487", + "2130005627", + "2141559645", + "2151562310", + "2153353890", + "2156981320", + "2165744911", + "2264244749", + "2295367909", + "2465429106", + "2508075332", + "2552878490", + "2619331983", + 
"2761799536", + "2762491430", + "2884243532" + ], + "abstract": "we present a new technique for developing a resource aware program analysis such an analysis is aware of constraints on available physical resources such as memory size tracks its resource use and adjusts its behaviors during fixpoint computation in order to meet the constraint and achieve high precision our resource aware analysis adjusts behaviors by coarsening program abstraction which usually makes the analysis consume less memory and time until completion it does so multiple times during the analysis under the direction of what we call a controller the controller constantly intervenes in the fixpoint computation of the analysis and decides how much the analysis should coarsen the abstraction we present an algorithm for learning a good controller automatically from benchmark programs we applied our technique to a static analysis for c programs where we control the degree of flow sensitivity to meet a constraint on peak memory consumption the experimental results with 18 real world programs show that our algorithm can learn a good controller and the analysis with this controller meets the constraint and utilizes available memory effectively", + "title_raw": "Resource-aware program analysis via online abstraction coarsening", + "abstract_raw": "We present a new technique for developing a resource-aware program analysis. Such an analysis is aware of constraints on available physical resources, such as memory size, tracks its resource use, and adjusts its behaviors during fixpoint computation in order to meet the constraint and achieve high precision. Our resource-aware analysis adjusts behaviors by coarsening program abstraction, which usually makes the analysis consume less memory and time until completion. It does so multiple times during the analysis, under the direction of what we call a controller. 
The controller constantly intervenes in the fixpoint computation of the analysis and decides how much the analysis should coarsen the abstraction. We present an algorithm for learning a good controller automatically from benchmark programs. We applied our technique to a static analysis for C programs, where we control the degree of flow-sensitivity to meet a constraint on peak memory consumption. The experimental results with 18 real-world programs show that our algorithm can learn a good controller and the analysis with this controller meets the constraint and utilizes available memory effectively.", + "link": "https://www.semanticscholar.org/paper/4b55d8f19b4c5b5cf05dcdb16ba01fb402a9f719", + "scraped_abstract": null, + "citation_best": 18 + }, + { + "paper": "2939724256", + "venue": "1174403976", + "year": "2019", + "title": "scalable approaches for test suite reduction", + "label": [ + "53942775", + "161821725", + "97824396", + "124101348", + "43126263", + "127705205" + ], + "author": [ + "2797376322", + "2620114314", + "2560516710", + "2164159618" + ], + "reference": [ + "1971137495", + "1985896931", + "1990146665", + "1991292921", + "1992786608", + "1997380619", + "2014515160", + "2037757210", + "2041836310", + "2042025208", + "2053873374", + "2056640446", + "2062692353", + "2073459066", + "2095873049", + "2098639318", + "2110068396", + "2142392209", + "2144600533", + "2156723666", + "2162739315", + "2162789495", + "2163339495", + "2514423060", + "2768483214", + "2789966497", + "2795003837", + "2795612311", + "2807006342", + "2811226876", + "2979473749" + ], + "abstract": "test suite reduction approaches aim at decreasing software regression testing costs by selecting a representative subset from large size test suites most existing techniques are too expensive for handling modern massive systems and moreover depend on artifacts such as code coverage metrics or specification models that are not commonly available at large scale we present a family of novel 
very efficient approaches for similarity based test suite reduction that apply algorithms borrowed from the big data domain together with smart heuristics for finding an evenly spread subset of test cases the approaches are very general since they only use as input the test cases themselves test source code or command line input we evaluate four approaches in a version that selects a fixed budget b of test cases and also in an adequate version that does the reduction guaranteeing some fixed coverage the results show that the approaches yield a fault detection loss comparable to state of the art techniques while providing huge gains in terms of efficiency when applied to a suite of more than 500k real world test cases the most efficient of the four approaches could select b test cases for varying b values in less than 10 seconds", + "title_raw": "Scalable approaches for test suite reduction", + "abstract_raw": "Test suite reduction approaches aim at decreasing software regression testing costs by selecting a representative subset from large-size test suites. Most existing techniques are too expensive for handling modern massive systems and moreover depend on artifacts, such as code coverage metrics or specification models, that are not commonly available at large scale. We present a family of novel very efficient approaches for similarity-based test suite reduction that apply algorithms borrowed from the big data domain together with smart heuristics for finding an evenly spread subset of test cases. The approaches are very general since they only use as input the test cases themselves (test source code or command line input). We evaluate four approaches in a version that selects a fixed budget B of test cases, and also in an adequate version that does the reduction guaranteeing some fixed coverage. The results show that the approaches yield a fault detection loss comparable to state-of-the-art techniques, while providing huge gains in terms of efficiency. 
When applied to a suite of more than 500K real world test cases, the most efficient of the four approaches could select B test cases (for varying B values) in less than 10 seconds.", + "link": "https://www.semanticscholar.org/paper/4b7f01ad1fa09ea1ca09db245e37b43abd258d65", + "scraped_abstract": null, + "citation_best": 41 + }, + { + "paper": "2954121408", + "venue": "1174403976", + "year": "2019", + "title": "smoke scalable path sensitive memory leak detection for millions of lines of code", + "label": [ + "153083717", + "113775141", + "48044578", + "156731835", + "137955351", + "136134403", + "68339613", + "199519371" + ], + "author": [ + "2799043141", + "2162345536", + "2230978251", + "2634458896", + "2223704919", + "2189948379" + ], + "reference": [ + "1480909796", + "1511982475", + "1554164734", + "1605152326", + "1710734607", + "1878544538", + "1973108383", + "1982205631", + "1991546210", + "2008626182", + "2016522855", + "2036265926", + "2043855508", + "2072385532", + "2080573945", + "2102674270", + "2121094954", + "2138369269", + "2142890871", + "2153185479", + "2156858199", + "2158767818", + "2160401437", + "2166140339", + "2170922286", + "2171240827", + "2171680292", + "2799226481", + "3114516458" + ], + "abstract": "detecting memory leak at industrial scale is still not well addressed in spite of the tremendous effort from both industry and academia in the past decades existing work suffers from an unresolved paradox a highly precise analysis limits its scalability and an imprecise one seriously hurts its precision or recall in this work we present smoke a staged approach to resolve this paradox in the first stage instead of using a uniform precise analysis for all paths we use a scalable but imprecise analysis to compute a succinct set of candidate memory leak paths in the second stage we leverage a more precise analysis to verify the feasibility of those candidates the first stage is scalable due to the design of a new sparse program representation the use 
flow graph ufg that models the problem as a polynomial time state analysis the second stage analysis is both precise and efficient due to the smaller number of candidates and the design of a dedicated constraint solver experimental results show that smoke can finish checking industrial sized projects up to 8mloc in forty minutes with an average false positive rate of 24 4 besides smoke is significantly faster than the state of the art research techniques as well as the industrial tools with the speedup ranging from 5 2x to 22 8x in the twenty nine mature and extensively checked benchmark projects smoke has discovered thirty previously unknown memory leaks which were confirmed by developers and one even assigned a cve id", + "title_raw": "Smoke: scalable path-sensitive memory leak detection for millions of lines of code", + "abstract_raw": "Detecting memory leak at industrial scale is still not well addressed, in spite of the tremendous effort from both industry and academia in the past decades. Existing work suffers from an unresolved paradox - a highly precise analysis limits its scalability and an imprecise one seriously hurts its precision or recall. In this work, we present Smoke, a staged approach to resolve this paradox. In the first stage, instead of using a uniform precise analysis for all paths, we use a scalable but imprecise analysis to compute a succinct set of candidate memory leak paths. In the second stage, we leverage a more precise analysis to verify the feasibility of those candidates. The first stage is scalable, due to the design of a new sparse program representation, the use-flow graph (UFG), that models the problem as a polynomial-time state analysis. The second stage analysis is both precise and efficient, due to the smaller number of candidates and the design of a dedicated constraint solver. 
Experimental results show that Smoke can finish checking industrial-sized projects, up to 8MLoC, in forty minutes with an average false positive rate of 24.4%. Besides, Smoke is significantly faster than the state-of-the-art research techniques as well as the industrial tools, with the speedup ranging from 5.2X to 22.8X. In the twenty-nine mature and extensively checked benchmark projects, Smoke has discovered thirty previously-unknown memory leaks which were confirmed by developers, and one even assigned a CVE ID.", + "link": "https://www.semanticscholar.org/paper/d0e23baf655713857e7eef0cf9b6320a8cad5c55", + "scraped_abstract": "Detecting memory leak at industrial scale is still not well addressed, in spite of the tremendous effort from both industry and academia in the past decades. Existing work suffers from an unresolved paradox - a highly precise analysis limits its scalability and an imprecise one seriously hurts its precision or recall. In this work, we present SMOKE, a staged approach to resolve this paradox. In the first stage, instead of using a uniform precise analysis for all paths, we use a scalable but imprecise analysis to compute a succinct set of candidate memory leak paths. In the second stage, we leverage a more precise analysis to verify the feasibility of those candidates. The first stage is scalable, due to the design of a new sparse program representation, the use-flow graph (UFG), that models the problem as a polynomial-time state analysis. The second stage analysis is both precise and efficient, due to the smaller number of candidates and the design of a dedicated constraint solver. Experimental results show that SMOKE can finish checking industrial-sized projects, up to 8MLoC, in forty minutes with an average false positive rate of 24.4%. Besides, SMOKE is significantly faster than the state-of-the-art research techniques as well as the industrial tools, with the speedup ranging from 5.2X to 22.8X. 
In the twenty-nine mature and extensively checked benchmark projects, SMOKE has discovered thirty previously unknown memory leaks which were confirmed by developers, and one even assigned a CVE ID.", + "citation_best": 58 + }, + { + "paper": "2955656327", + "venue": "1174403976", + "year": "2019", + "title": "the seven sins security smells in infrastructure as code scripts", + "label": [ + "9903902", + "38652104", + "109297577", + "61423126", + "93996380", + "97686452" + ], + "author": [ + "2492390558", + "347693595", + "2175749388" + ], + "reference": [ + "42836620", + "1500946169", + "1656502710", + "1803273808", + "1963598945", + "1986222079", + "1990393403", + "2008810193", + "2060561050", + "2064653370", + "2082314767", + "2095881341", + "2103239853", + "2103370348", + "2104556041", + "2140504739", + "2152874840", + "2153575752", + "2153887189", + "2154398797", + "2157353183", + "2160958420", + "2164777277", + "2172433705", + "2267596186", + "2295782180", + "2402800985", + "2407292968", + "2559885217", + "2618689158", + "2622273324", + "2698406033", + "2711432360", + "2729314777", + "2759023773", + "2763107276", + "2765648970", + "2766217896", + "2767331170", + "2789825598", + "2796047065", + "2807298173", + "2809773602", + "2883411629", + "3101131928", + "3106158252", + "3106534413" + ], + "abstract": "practitioners use infrastructure as code iac scripts to provision servers and development environments while developing iac scripts practitioners may inadvertently introduce security smells security smells are recurring coding patterns that are indicative of security weakness and can potentially lead to security breaches the goal of this paper is to help practitioners avoid insecure coding practices while developing infrastructure as code iac scripts through an empirical study of security smells in iac scripts we apply qualitative analysis on 1 726 iac scripts to identify seven security smells next we implement and validate a static analysis tool called security 
linter for infrastructure as code scripts slic to identify the occurrence of each smell in 15 232 iac scripts collected from 293 open source repositories we identify 21 201 occurrences of security smells that include 1 326 occurrences of hard coded passwords we submitted bug reports for 1 000 randomly selected security smell occurrences we obtain 212 responses to these bug reports of which 148 occurrences were accepted by the development teams to be fixed we observe security smells can have a long lifetime e g a hard coded secret can persist for as long as 98 months with a median lifetime of 20 months", + "title_raw": "The seven sins: security smells in infrastructure as code scripts", + "abstract_raw": "Practitioners use infrastructure as code (IaC) scripts to provision servers and development environments. While developing IaC scripts, practitioners may inadvertently introduce security smells. Security smells are recurring coding patterns that are indicative of security weakness and can potentially lead to security breaches. The goal of this paper is to help practitioners avoid insecure coding practices while developing infrastructure as code (IaC) scripts through an empirical study of security smells in IaC scripts. We apply qualitative analysis on 1,726 IaC scripts to identify seven security smells. Next, we implement and validate a static analysis tool called Security Linter for Infrastructure as Code scripts (SLIC) to identify the occurrence of each smell in 15,232 IaC scripts collected from 293 open source repositories. We identify 21,201 occurrences of security smells that include 1,326 occurrences of hard-coded passwords. We submitted bug reports for 1,000 randomly-selected security smell occurrences. We obtain 212 responses to these bug reports, of which 148 occurrences were accepted by the development teams to be fixed. 
We observe security smells can have a long lifetime, e.g., a hard-coded secret can persist for as long as 98 months, with a median lifetime of 20 months.", + "link": "https://www.semanticscholar.org/paper/1cc47e5aea8d6d71817c438e4e790972c04722e2", + "scraped_abstract": "Practitioners use infrastructure as code (IaC) scripts to provision servers and development environments. While developing IaC scripts, practitioners may inadvertently introduce security smells. Security smells are recurring coding patterns that are indicative of security weakness and can potentially lead to security breaches. The goal of this paper is to help practitioners avoid insecure coding practices while developing infrastructure as code (IaC) scripts through an empirical study of security smells in IaC scripts. We apply qualitative analysis on 1,726 IaC scripts to identify seven security smells. Next, we implement and validate a static analysis tool called Security Linter for Infrastructure as Code scripts (SLIC) to identify the occurrence of each smell in 15,232 IaC scripts collected from 293 open source repositories. We identify 21,201 occurrences of security smells that include 1,326 occurrences of hard-coded passwords. We submitted bug reports for 1,000 randomly-selected security smell occurrences. We obtain 212 responses to these bug reports, of which 148 occurrences were accepted by the development teams to be fixed. 
We observe security smells can have a long lifetime, e.g., a hard-coded secret can persist for as long as 98 months, with a median lifetime of 20 months.", + "citation_best": 159 + }, + { + "paper": "2954691065", + "venue": "1174403976", + "year": "2019", + "title": "view centric performance optimization for database backed web applications", + "label": [ + "167955471", + "81639021", + "77088390", + "21959979", + "118643609", + "72615636" + ], + "author": [ + "2767363908", + "2278437819", + "2983108302", + "2111557135", + "2124896226" + ], + "reference": [ + "1646696449", + "2056659466", + "2062666593", + "2069171862", + "2087515886", + "2096666207", + "2101000001", + "2113000322", + "2132231921", + "2133802223", + "2144344516", + "2145458045", + "2157662360", + "2243423430", + "2340417905", + "2364861723", + "2418123271", + "2767894601", + "2785559831", + "2794933251", + "2799144475", + "2898912109" + ], + "abstract": "web developers face the stringent task of designing informative web pages while keeping the page load time low this task has become increasingly challenging as most web contents are now generated by processing ever growing amount of user data stored in back end databases it is difficult for developers to understand the cost of generating every web page element not to mention explore and pick the web design with the best trade off between performance and functionality in this paper we present panorama a view centric and database aware development environment for web developers using database aware program analysis and novel ide design panorama provides developers with intuitive information about the cost and the performance enhancing opportunities behind every html element as well as suggesting various global code refactorings that enable developers to easily explore a wide spectrum of performance and functionality trade offs", + "title_raw": "View-centric performance optimization for database-backed web applications", + "abstract_raw": "Web 
developers face the stringent task of designing informative web pages while keeping the page-load time low. This task has become increasingly challenging as most web contents are now generated by processing ever-growing amount of user data stored in back-end databases. It is difficult for developers to understand the cost of generating every web-page element, not to mention explore and pick the web design with the best trade-off between performance and functionality. In this paper, we present Panorama, a view-centric and database-aware development environment for web developers. Using database-aware program analysis and novel IDE design, Panorama provides developers with intuitive information about the cost and the performance-enhancing opportunities behind every HTML element, as well as suggesting various global code refactorings that enable developers to easily explore a wide spectrum of performance and functionality trade-offs.", + "link": "https://www.semanticscholar.org/paper/de7f9e96615d0a38371b8f9bdb96eb2425b50068", + "scraped_abstract": "Web developers face the stringent task of designing informative web pages while keeping the page-load time low. This task has become increasingly challenging as most web contents are now generated by processing ever-growing amount of user data stored in back-end databases. It is difficult for developers to understand the cost of generating every web-page element, not to mention explore and pick the web design with the best trade-off between performance and functionality. In this paper, we present Panorama, a view-centric and database-aware development environment for web developers. 
Using database-aware program analysis and novel IDE design, Panorama provides developers with intuitive information about the cost and the performance-enhancing opportunities behind every HTML element, as well as suggesting various global code refactorings that enable developers to easily explore a wide spectrum of performance and functionality trade-offs.", + "citation_best": 18 + }, + { + "paper": "2965266021", + "venue": "1203999783", + "year": "2019", + "title": "boosting for comparison based learning", + "label": [ + "11413529", + "46686674" + ], + "author": [ + "2303265816", + "251023228" + ], + "reference": [], + "abstract": "", + "title_raw": "Boosting for Comparison-Based Learning", + "abstract_raw": "", + "link": "https://www.semanticscholar.org/paper/46c4b5f572599a50e1524c9e1eab638d3bca56d4", + "scraped_abstract": null, + "citation_best": 4 + }, + { + "paper": "2949380784", + "venue": "1130985203", + "year": "2019", + "title": "optimizing impression counts for outdoor advertising", + "label": [ + "311688", + "2776684213", + "62764039" + ], + "author": [ + "2786579825", + "2303117182", + "2098147036", + "2990372460", + "2528181257" + ], + "reference": [ + "34666602", + "1509863433", + "1973462280", + "1975988657", + "1977948180", + "1984314602", + "1995472412", + "2024626449", + "2028939941", + "2050175694", + "2080379754", + "2080480222", + "2085479788", + "2114477897", + "2116725038", + "2129838006", + "2134374287", + "2137442203", + "2160152607", + "2161363814", + "2174722713", + "2328820819", + "2495571292", + "2606197134", + "2775511139", + "2799689005", + "2799934046", + "3102286757", + "3193700697" + ], + "abstract": "in this paper we propose and study the problem of optimizing the influence of outdoor advertising ad when impression counts are taken into consideration given a database u of billboards each of which has a location and a non uniform cost a trajectory database t and a budget b it aims to find a set of billboards that has the maximum 
influence under the budget in line with the advertising consumer behavior studies we adopt the logistic function to take into account the impression counts of an ad placed at different billboards to a user trajectory when defining the influence measurement however this poses two challenges 1 our problem is np hard to approximate within a factor of o t 1 e for any e 0 in polynomial time 2 the influence measurement is non submodular which means a straightforward greedy approach is not applicable therefore we propose a tangent line based algorithm to compute a submodular function to estimate the upper bound of influence henceforth we introduce a branch and bound framework with a termination condition achieving 2 1 1 e approximation ratio however this framework is time consuming when u is huge thus we further optimize it with a progressive pruning upper bound estimation approach which achieves 2 1 1 e e approximation ratio and significantly decreases the running time we conduct the experiments on real world billboard and trajectory datasets and show that the proposed approaches outperform the baselines by 95 in effectiveness moreover the optimized approach is around two orders of magnitude faster than the original framework", + "title_raw": "Optimizing Impression Counts for Outdoor Advertising", + "abstract_raw": "In this paper we propose and study the problem of optimizing the influence of outdoor advertising (ad) when impression counts are taken into consideration. Given a database U of billboards, each of which has a location and a non-uniform cost, a trajectory database T and a budget B, it aims to find a set of billboards that has the maximum influence under the budget. In line with the advertising consumer behavior studies, we adopt the logistic function to take into account the impression counts of an ad (placed at different billboards) to a user trajectory when defining the influence measurement. 
However, this poses two challenges: (1) our problem is NP-hard to approximate within a factor of O(|T|1-e) for any e>0 in polynomial time; (2) the influence measurement is non-submodular, which means a straightforward greedy approach is not applicable. Therefore, we propose a tangent line based algorithm to compute a submodular function to estimate the upper bound of influence. Henceforth, we introduce a branch-and-bound framework with a \u03b8-termination condition, achieving \u03b82/(1 - 1/e) approximation ratio. However, this framework is time-consuming when |U| is huge. Thus, we further optimize it with a progressive pruning upper bound estimation approach which achieves \u03b82/(1 - 1/e - e) approximation ratio and significantly decreases the running-time. We conduct the experiments on real-world billboard and trajectory datasets, and show that the proposed approaches outperform the baselines by 95% in effectiveness. Moreover, the optimized approach is around two orders of magnitude faster than the original framework.", + "link": "https://www.semanticscholar.org/paper/Optimizing-Impression-Counts-for-Outdoor-Zhang-Li/6e2560e99438f561314aa4a7bc10e1e339f31cfb", + "scraped_abstract": null, + "citation_best": 33 + }, + { + "paper": "2979514732", + "venue": "1123349196", + "year": "2019", + "title": "ebp a wearable system for frequent and comfortable blood pressure monitoring from user s ear", + "label": [ + "150594956" + ], + "author": [ + "2627644035", + "2952635412", + "2986155415", + "2707198646", + "2150596555", + "2682525658", + "2223959141", + "2002199957", + "2478990042", + "2115614380", + "2213905386", + "1982323184", + "2113796895" + ], + "reference": [ + "35725784", + "85997425", + "189407019", + "944977885", + "1484251943", + "1502129941", + "1515135833", + "1517530811", + "1530344025", + "1542573427", + "1582627271", + "1591045726", + "1594461330", + "1597986375", + "1666455735", + "1856769726", + "1904618591", + "1922741048", + "1965376531", + 
"1966159268", + "1967548446", + "1968291490", + "1976093437", + "1977110021", + "1977471369", + "1984170510", + "1984702242", + "1987077480", + "1991810038", + "1993700698", + "1998639500", + "1999965589", + "2003899531", + "2006359998", + "2007260315", + "2013423849", + "2020130607", + "2021915906", + "2029473249", + "2030081396", + "2041420156", + "2041502303", + "2048090345", + "2050189573", + "2050612188", + "2062510362", + "2066227468", + "2077522448", + "2080146198", + "2088819196", + "2088956449", + "2093471614", + "2098347923", + "2107065092", + "2107936142", + "2122791124", + "2125710888", + "2125901565", + "2131984157", + "2135651613", + "2142738495", + "2148129229", + "2149159105", + "2151407466", + "2156447271", + "2156628018", + "2163369409", + "2163533329", + "2164485302", + "2171214492", + "2230698079", + "2250138717", + "2287743491", + "2339637820", + "2399174931", + "2476326651", + "2525373437", + "2538561053", + "2547717408", + "2548326739", + "2593247599", + "2615219528", + "2729624487", + "2754779781", + "2758465322", + "2763382388", + "2790680409", + "2795778794", + "2851538888", + "2893389807", + "2896436581", + "2899652619", + "2931601292", + "2961301265", + "3100387911", + "3204970117" + ], + "abstract": "frequent blood pressure bp assessment is key to the diagnosis and treatment of many severe diseases such as heart failure kidney failure hypertension and hemodialysis current gold standard bp measurement techniques require the complete blockage of blood flow which causes discomfort and disruption to normal activity when the assessment is done repetitively and frequently unfortunately patients with hypertension or hemodialysis often have to get their bp measured every 15 minutes for a duration of 4 5 hours or more the discomfort of wearing a cumbersome and limited mobility device affects their normal activities in this work we propose a device called ebp to measure bp from inside the user s ear aiming to minimize the measurement s impact on 
users normal activities while maximizing its comfort level ebp has 3 key components 1 a light based pulse sensor attached on an inflatable pipe that goes inside the ear 2 a digital air pump with a fine controller and 3 a bp estimation algorithm in contrast to existing devices ebp introduces a novel technique that eliminates the need to block the blood flow inside the ear which alleviates the user s discomfort we prototyped ebp custom hardware and software and evaluated the system through a comparative study on 35 subjects the study shows that ebp obtains the average error of 1 8 mmhg and 3 1 mmhg and a standard deviation error of 7 2 mmhg and 7 9 mmhg for systolic high pressure value and diastolic low pressure value respectively these errors are around the acceptable margins regulated by the fda s aami protocol which allows mean errors of up to 5 mmhg and a standard deviation of up to 8 mmhg", + "title_raw": "eBP: A Wearable System For Frequent and Comfortable Blood Pressure Monitoring From User's Ear", + "abstract_raw": "Frequent blood pressure (BP) assessment is key to the diagnosis and treatment of many severe diseases, such as heart failure, kidney failure, hypertension, and hemodialysis. Current \"gold-standard'' BP measurement techniques require the complete blockage of blood flow, which causes discomfort and disruption to normal activity when the assessment is done repetitively and frequently. Unfortunately, patients with hypertension or hemodialysis often have to get their BP measured every 15 minutes for a duration of 4-5 hours or more. The discomfort of wearing a cumbersome and limited mobility device affects their normal activities. In this work, we propose a device called eBP to measure BP from inside the user's ear aiming to minimize the measurement's impact on users' normal activities while maximizing its comfort level. 
eBP has 3 key components: (1) a light-based pulse sensor attached on an inflatable pipe that goes inside the ear, (2) a digital air pump with a fine controller, and (3) a BP estimation algorithm. In contrast to existing devices, eBP introduces a novel technique that eliminates the need to block the blood flow inside the ear, which alleviates the user's discomfort. We prototyped eBP custom hardware and software and evaluated the system through a comparative study on 35 subjects. The study shows that eBP obtains the average error of 1.8 mmHg and -3.1 mmHg and a standard deviation error of 7.2 mmHg and 7.9 mmHg for systolic (high-pressure value) and diastolic (low-pressure value), respectively. These errors are around the acceptable margins regulated by the FDA's AAMI protocol, which allows mean errors of up to 5 mmHg and a standard deviation of up to 8 mmHg.", + "link": "https://scholar.google.com/scholar?as_q=&num=10&btnG=Search+Scholar&as_epq=eBP:+A+Wearable+System+For+Frequent+and+Comfortable+Blood+Pressure+Monitoring+From+User's+Ear&as_oq=&as_eq=&as_occt=any&as_sauthors=Bui", + "scraped_abstract": "Frequent blood pressure (BP) assessment is key to the diagnosis and treatment of many severe diseases, such as heart failure, kidney failure, hypertension, and hemodialysis. Current \"gold-standard'' BP measurement techniques require the complete blockage of blood flow, which causes discomfort and disruption to normal activity when the assessment is done repetitively and frequently. Unfortunately, patients with hypertension or hemodialysis often have to get their BP measured every 15 minutes for a duration of 4-5 hours or more. The discomfort of wearing a cumbersome and limited mobility device affects their normal activities. In this work, we propose a device called eBP to measure BP from inside the user's ear aiming to minimize the measurement's impact on users' normal activities while maximizing its comfort level. 
eBP has 3 key components: (1) a light-based pulse sensor attached on an inflatable pipe that goes inside the ear, (2) a digital air pump with a fine controller, and (3) a BP estimation algorithm. In contrast to existing devices, eBP introduces a novel technique that eliminates the need to block the blood flow inside the ear, which alleviates the user's discomfort. We prototyped eBP custom hardware and software and evaluated the system through a comparative study on 35 subjects. The study shows that eBP obtains the average error of 1.8 mmHg and -3.1 mmHg and a standard deviation error of 7.2 mmHg and 7.9 mmHg for systolic (high-pressure value) and diastolic (low-pressure value), respectively. These errors are around the acceptable margins regulated by the FDA's AAMI protocol, which allows mean errors of up to 5 mmHg and a standard deviation of up to 8 mmHg.", + "citation_best": 0 + }, + { + "paper": "2979925109", + "venue": "1123349196", + "year": "2019", + "title": "fluid flexible user interface distribution for ubiquitous multi device interaction", + "label": [ + "2778044989", + "2777911697", + "557433098", + "107457646", + "162932704", + "105446022", + "89505385" + ], + "author": [ + "2560109847", + "2979919074", + "2979932995", + "2227254770", + "2626817758", + "2096722238", + "2098336271" + ], + "reference": [ + "114285977", + "152436860", + "338991206", + "1539689205", + "1995362840", + "2011565812", + "2023380813", + "2049817002", + "2056103304", + "2074011868", + "2083977441", + "2098055897", + "2101788345", + "2114879705", + "2148865036", + "2228917334", + "2294471273", + "2295332283", + "2342091124", + "2395525566", + "2406854312", + "2494693620", + "2588403709", + "2612268779", + "2626808184", + "2761305481", + "2940876996", + "2951691662", + "2952763851", + "2976071174", + "3021091026", + "3095711147", + "3103005787" + ], + "abstract": "the growing trend of multi device ownerships creates a need and an opportunity to use applications across multiple 
devices however in general the current app development and usage still remain within the single device paradigm falling far short of user expectations for example it is currently not possible for a user to dynamically partition an existing live streaming app with chatting capabilities across different devices such that she watches her favorite broadcast on her smart tv while real time chatting on her smartphone in this paper we present fluid a new android based multi device platform that enables innovative ways of using multiple devices fluid aims to i allow users to migrate or replicate individual user interfaces uis of a single app on multiple devices high flexibility ii require no additional development effort to support unmodified legacy applications ease of development and iii support a wide range of apps that follow the trend of using custom made uis wide applicability previous approaches such as screen mirroring app migration and customized apps utilizing multiple devices do not satisfy those goals altogether fluid on the other hand meets the goals by carefully analyzing which ui states are necessary to correctly render ui objects deploying only those states on different devices supporting cross device function calls transparently and synchronizing the ui states of replicated ui objects across multiple devices our evaluation with 20 unmodified real world android apps shows that fluid can transparently support a wide range of apps and is fast enough for interactive use", + "title_raw": "FLUID: Flexible User Interface Distribution for Ubiquitous Multi-device Interaction", + "abstract_raw": "The growing trend of multi-device ownerships creates a need and an opportunity to use applications across multiple devices. However, in general, the current app development and usage still remain within the single-device paradigm, falling far short of user expectations. 
For example, it is currently not possible for a user to dynamically partition an existing live streaming app with chatting capabilities across different devices, such that she watches her favorite broadcast on her smart TV while real-time chatting on her smartphone. In this paper, we present FLUID, a new Android-based multi-device platform that enables innovative ways of using multiple devices. FLUID aims to i) allow users to migrate or replicate individual user interfaces (UIs) of a single app on multiple devices (high flexibility), ii) require no additional development effort to support unmodified, legacy applications (ease of development), and iii) support a wide range of apps that follow the trend of using custom-made UIs (wide applicability). Previous approaches, such as screen mirroring, app migration, and customized apps utilizing multiple devices, do not satisfy those goals altogether. FLUID, on the other hand, meets the goals by carefully analyzing which UI states are necessary to correctly render UI objects, deploying only those states on different devices, supporting cross-device function calls transparently, and synchronizing the UI states of replicated UI objects across multiple devices. Our evaluation with 20 unmodified, real-world Android apps shows that FLUID can transparently support a wide range of apps and is fast enough for interactive use.", + "link": "https://www.semanticscholar.org/paper/52b83bab9251e7cf02eea1251e3b1b2c386ea02e", + "scraped_abstract": "The growing trend of multi-device ownerships creates a need and an opportunity to use applications across multiple devices. However, in general, the current app development and usage still remain within the single-device paradigm, falling far short of user expectations. 
For example, it is currently not possible for a user to dynamically partition an existing live streaming app with chatting capabilities across different devices, such that she watches her favorite broadcast on her smart TV while real-time chatting on her smartphone. In this paper, we present FLUID, a new Android-based multi-device platform that enables innovative ways of using multiple devices. FLUID aims to i) allow users to migrate or replicate individual user interfaces (UIs) of a single app on multiple devices (high flexibility), ii) require no additional development effort to support unmodified, legacy applications (ease of development), and iii) support a wide range of apps that follow the trend of using custom-made UIs (wide applicability). Previous approaches, such as screen mirroring, app migration, and customized apps utilizing multiple devices, do not satisfy those goals altogether. FLUID, on the other hand, meets the goals by carefully analyzing which UI states are necessary to correctly render UI objects, deploying only those states on different devices, supporting cross-device function calls transparently, and synchronizing the UI states of replicated UI objects across multiple devices. 
Our evaluation with 20 unmodified, real-world Android apps shows that FLUID can transparently support a wide range of apps and is fast enough for interactive use.", + "citation_best": 4 + }, + { + "paper": "2806709843", + "venue": "1158363782", + "year": "2019", + "title": "datacenter rpcs can be general and fast", + "label": [ + "31258907", + "172173386", + "48044578", + "172086080", + "49265948", + "78766204", + "43126263", + "54108766", + "130795937" + ], + "author": [ + "2100143089", + "2151237659", + "2130397481" + ], + "reference": [ + "192446467", + "982826035", + "1497100682", + "1532546444", + "1559685971", + "1963656762", + "1977329921", + "1984788566", + "2003597767", + "2007888220", + "2074881976", + "2080584411", + "2087178199", + "2091776255", + "2093584083", + "2106035061", + "2117884704", + "2125901106", + "2129554014", + "2140101134", + "2154565572", + "2156580773", + "2158512617", + "2165663045", + "2169414316", + "2303620077", + "2338370098", + "2415017588", + "2416075414", + "2520381032", + "2528120841", + "2557879050", + "2574582689", + "2576670572", + "2579461576", + "2606393112", + "2606496125", + "2620820275", + "2743636099", + "2753147542", + "2762470888", + "2769038759", + "2798641544", + "2809353470", + "2844752995", + "2964109949" + ], + "abstract": "it is commonly believed that datacenter networking software must sacrifice generality to attain high performance the popularity of specialized distributed systems designed specifically for niche technologies such as rdma lossless networks fpgas and programmable switches testifies to this belief in this paper we show that such specialization is not necessary erpc is a new general purpose remote procedure call rpc library that offers performance comparable to specialized systems while running on commodity cpus in traditional datacenter networks based on either lossy ethernet or lossless fabrics erpc performs well in three key metrics message rate for small messages bandwidth for large messages 
and scalability to a large number of nodes and cpu cores it handles packet loss congestion and background request execution in microbenchmarks one cpu core can handle up to 10 million small rpcs per second or send large messages at 75 gbps we port a production grade implementation of raft state machine replication to erpc without modifying the core raft source code we achieve 5 5 s of replication latency on lossy ethernet which is faster than or comparable to specialized replication systems that use programmable switches fpgas or rdma", + "title_raw": "Datacenter RPCs can be general and fast", + "abstract_raw": "It is commonly believed that datacenter networking software must sacrifice generality to attain high performance. The popularity of specialized distributed systems designed specifically for niche technologies such as RDMA, lossless networks, FPGAs, and programmable switches testifies to this belief. In this paper, we show that such specialization is not necessary. eRPC is a new general-purpose remote procedure call (RPC) library that offers performance comparable to specialized systems, while running on commodity CPUs in traditional datacenter networks based on either lossy Ethernet or lossless fabrics. eRPC performs well in three key metrics: message rate for small messages; bandwidth for large messages; and scalability to a large number of nodes and CPU cores. It handles packet loss, congestion, and background request execution. In microbenchmarks, one CPU core can handle up to 10 million small RPCs per second, or send large messages at 75 Gbps. We port a production-grade implementation of Raft state machine replication to eRPC without modifying the core Raft source code. 
We achieve 5.5 \u00b5s of replication latency on lossy Ethernet, which is faster than or comparable to specialized replication systems that use programmable switches, FPGAs, or RDMA.", + "link": "https://www.semanticscholar.org/paper/fa5f622c43e42fe4d09295d63325bdf252b9c1f1", + "scraped_abstract": null, + "citation_best": 40 + }, + { + "paper": "2918488807", + "venue": "1158363782", + "year": "2019", + "title": "understanding lifecycle management complexity of datacenter topologies", + "label": [ + "199845137", + "30452754" + ], + "author": [ + "2120425630", + "1979506528", + "115183126", + "2102043890" + ], + "reference": [ + "1534157463", + "1543942291", + "1998471240", + "2007089612", + "2070232376", + "2096313289", + "2119638333", + "2126210439", + "2129437643", + "2130531694", + "2143065961", + "2143217585", + "2157420744", + "2164309468", + "2480346368", + "2486960733", + "2559629050", + "2625782120", + "2743429249", + "2744462874", + "2902262232" + ], + "abstract": "", + "title_raw": "Understanding Lifecycle Management Complexity of Datacenter Topologies", + "abstract_raw": "", + "link": "https://www.semanticscholar.org/paper/fa5f622c43e42fe4d09295d63325bdf252b9c1f1", + "scraped_abstract": null, + "citation_best": 15 + }, + { + "paper": "2963212338", + "venue": "1127352206", + "year": "2019", + "title": "low latency graph streaming using compressed purely functional trees", + "label": [ + "43364308", + "191558696", + "173608175", + "162319229", + "207024777", + "78766204" + ], + "author": [ + "2227580508", + "149058271", + "2018101967" + ], + "reference": [ + "72241736", + "145712316", + "164704629", + "973024310", + "1475732121", + "1480006450", + "1482680420", + "1506342804", + "1507039213", + "1512845581", + "1543897857", + "1593868476", + "1807272711", + "1881841366", + "1916385248", + "1956410125", + "1973276585", + "1988144253", + "1994257154", + "1994727615", + "2000041758", + "2008159385", + "2017972843", + "2024042592", + "2034102265", + 
"2045107949", + "2055385473", + "2078210269", + "2080098453", + "2084738937", + "2097805736", + "2098522119", + "2101196063", + "2105045857", + "2120490947", + "2130747448", + "2154875162", + "2160459668", + "2170616854", + "2218076943", + "2262656054", + "2429966330", + "2492613899", + "2521098462", + "2521218765", + "2523378841", + "2534918852", + "2539828367", + "2557279889", + "2562123954", + "2564843855", + "2574229471", + "2604668004", + "2611081592", + "2613261966", + "2613361247", + "2621280964", + "2739409954", + "2765486566", + "2767135145", + "2783778195", + "2798525482", + "2896032594", + "2901608006", + "2922946586", + "2963806858", + "3098121261", + "3105322242", + "3124031623" + ], + "abstract": "there has been a growing interest in the graph streaming setting where a continuous stream of graph updates is mixed with graph queries in principle purely functional trees are an ideal fit for this setting as they enable safe parallelism lightweight snapshots and strict serializability for queries however directly using them for graph processing leads to significant space overhead and poor cache locality this paper presents c trees a compressed purely functional search tree data structure that significantly improves on the space usage and locality of purely functional trees we design theoretically efficient and practical algorithms for performing batch updates to c trees and also show that we can store massive dynamic real world graphs using only a few bytes per edge thereby achieving space usage close to that of the best static graph processing frameworks to study the efficiency and applicability of our data structure we designed aspen a graph streaming framework that extends the interface of ligra with operations for updating graphs we show that aspen is faster than two state of the art graph streaming systems stinger and llama while requiring less memory and is competitive in performance with the state of the art static graph frameworks galois gap and 
ligra with aspen we are able to efficiently process the largest publicly available graph with over two hundred billion edges in the graph streaming setting using a single commodity multicore server with 1tb of memory", + "title_raw": "Low-latency graph streaming using compressed purely-functional trees", + "abstract_raw": "There has been a growing interest in the graph-streaming setting where a continuous stream of graph updates is mixed with graph queries. In principle, purely-functional trees are an ideal fit for this setting as they enable safe parallelism, lightweight snapshots, and strict serializability for queries. However, directly using them for graph processing leads to significant space overhead and poor cache locality. This paper presents C-trees, a compressed purely-functional search tree data structure that significantly improves on the space usage and locality of purely-functional trees. We design theoretically-efficient and practical algorithms for performing batch updates to C-trees, and also show that we can store massive dynamic real-world graphs using only a few bytes per edge, thereby achieving space usage close to that of the best static graph processing frameworks. To study the efficiency and applicability of our data structure, we designed Aspen, a graph-streaming framework that extends the interface of Ligra with operations for updating graphs. We show that Aspen is faster than two state-of-the-art graph-streaming systems, Stinger and LLAMA, while requiring less memory, and is competitive in performance with the state-of-the-art static graph frameworks, Galois, GAP, and Ligra+. 
With Aspen, we are able to efficiently process the largest publicly-available graph with over two hundred billion edges in the graph-streaming setting using a single commodity multicore server with 1TB of memory.", + "link": "https://scholar.google.com/scholar?as_q=&num=10&btnG=Search+Scholar&as_epq=Low-Latency+Graph+Streaming+using+Compressed+Purely-Functional+Trees&as_oq=&as_eq=&as_occt=any&as_sauthors=Dhulipala", + "scraped_abstract": "There has been a growing interest in the graph-streaming setting where a continuous stream of graph updates is mixed with graph queries. In principle, purely-functional trees are an ideal fit for this setting as they enable safe parallelism, lightweight snapshots, and strict serializability for queries. However, directly using them for graph processing leads to significant space overhead and poor cache locality. This paper presents C-trees, a compressed purely-functional search tree data structure that significantly improves on the space usage and locality of purely-functional trees. We design theoretically-efficient and practical algorithms for performing batch updates to C-trees, and also show that we can store massive dynamic real-world graphs using only a few bytes per edge, thereby achieving space usage close to that of the best static graph processing frameworks. To study the efficiency and applicability of our data structure, we designed Aspen, a graph-streaming framework that extends the interface of Ligra with operations for updating graphs. We show that Aspen is faster than two state-of-the-art graph-streaming systems, Stinger and LLAMA, while requiring less memory, and is competitive in performance with the state-of-the-art static graph frameworks, Galois, GAP, and Ligra+. 
With Aspen, we are able to efficiently process the largest publicly-available graph with over two hundred billion edges in the graph-streaming setting using a single commodity multicore server with 1TB of memory.", + "citation_best": 1 + }, + { + "paper": "2954738632", + "venue": "1127352206", + "year": "2019", + "title": "continuously reasoning about programs using differential bayesian inference", + "label": [ + "119857082", + "97686452", + "160234255", + "98183937", + "97364631", + "136134403", + "202105479", + "199519371" + ], + "author": [ + "2135809656", + "309535120", + "2736300542", + "2005763208" + ], + "reference": [ + "26752331", + "57185801", + "1511986666", + "1545038039", + "1595443289", + "1750827721", + "1912598576", + "1971859150", + "1986453394", + "2008626182", + "2043100293", + "2074888021", + "2082000355", + "2106108278", + "2108395261", + "2111996055", + "2113709047", + "2119648923", + "2128932642", + "2130308071", + "2134401695", + "2138224468", + "2140021378", + "2147130904", + "2170736936", + "2293092623", + "2297774820", + "2493901872", + "2619331983", + "2624094989", + "2784876765", + "2798352717", + "2798573268", + "2799226481", + "2810768857", + "2871369288", + "3101554748" + ], + "abstract": "programs often evolve by continuously integrating changes from multiple programmers the effective adoption of program analysis tools in this continuous integration setting is hindered by the need to only report alarms relevant to a particular program change we present a probabilistic framework drake to apply program analyses to continuously evolving programs drake is applicable to a broad range of analyses that are based on deductive reasoning the key insight underlying drake is to compute a graph that concisely and precisely captures differences between the derivations of alarms produced by the given analysis on the program before and after the change performing bayesian inference on the graph thereby enables to rank alarms by likelihood of 
relevance to the change we evaluate drake using sparrow a static analyzer that targets buffer overrun format string and integer overflow errors on a suite of ten widely used c programs each comprising 13k 112k lines of code drake enables to discover all true bugs by inspecting only 30 alarms per benchmark on average compared to 85 3 more alarms by the same ranking approach in batch mode and 118 4 more alarms by a differential approach based on syntactic masking of alarms which also misses 4 of the 26 bugs overall", + "title_raw": "Continuously reasoning about programs using differential Bayesian inference", + "abstract_raw": "Programs often evolve by continuously integrating changes from multiple programmers. The effective adoption of program analysis tools in this continuous integration setting is hindered by the need to only report alarms relevant to a particular program change. We present a probabilistic framework, Drake, to apply program analyses to continuously evolving programs. Drake is applicable to a broad range of analyses that are based on deductive reasoning. The key insight underlying Drake is to compute a graph that concisely and precisely captures differences between the derivations of alarms produced by the given analysis on the program before and after the change. Performing Bayesian inference on the graph thereby enables to rank alarms by likelihood of relevance to the change. We evaluate Drake using Sparrow\u2014a static analyzer that targets buffer-overrun, format-string, and integer-overflow errors\u2014on a suite of ten widely-used C programs each comprising 13k\u2013112k lines of code. 
Drake enables to discover all true bugs by inspecting only 30 alarms per benchmark on average, compared to 85 (3\u00d7 more) alarms by the same ranking approach in batch mode, and 118 (4\u00d7 more) alarms by a differential approach based on syntactic masking of alarms which also misses 4 of the 26 bugs overall.", + "link": "https://scholar.google.com/scholar?as_q=&num=10&btnG=Search+Scholar&as_epq=Continuously+Reasoning+about+Programs+using+Differential+Bayesian+Inference&as_oq=&as_eq=&as_occt=any&as_sauthors=Heo", + "scraped_abstract": null, + "citation_best": 20 + }, + { + "paper": "2954518791", + "venue": "1127352206", + "year": "2019", + "title": "towards certified separate compilation for concurrent programs", + "label": [ + "26713055", + "55439883", + "10784920", + "169590947", + "184337299", + "193702766", + "199360897", + "200632571" + ], + "author": [ + "2897293184", + "2155860889", + "2897691209", + "2896036444", + "2152751103" + ], + "reference": [ + "59640247", + "188282463", + "1509000726", + "1521087854", + "1976194690", + "1981439955", + "1983572666", + "1990191223", + "1995065158", + "2023035194", + "2023785576", + "2062177228", + "2064390891", + "2074630311", + "2075373350", + "2085050643", + "2085773946", + "2090551028", + "2129059083", + "2136739578", + "2148662736", + "2152621318", + "2152885346", + "2157178219", + "2157995936", + "2167029843", + "2294599249", + "2295752850", + "2405985696", + "2729732978", + "2798365728", + "2895933549", + "3004631746", + "3004681883", + "3014241878", + "3023751354", + "3122287721" + ], + "abstract": "certified separate compilation is important for establishing end to end guarantees for certified systems consisting of multiple program modules there has been much work building certified compilers for sequential programs in this paper we propose a language independent framework consisting of the key semantics components and lemmas that bridge the verification gap between the compilers for sequential programs and 
those for race free concurrent programs so that the existing verification work for the former can be reused one of the key contributions of the framework is a novel footprint preserving compositional simulation as the compilation correctness criterion the framework also provides a new mechanism to support confined benign races which are usually found in efficient implementations of synchronization primitives with our framework we develop cascompcert which extends compcert for certified separate compilation of race free concurrent clight programs it also allows linking of concurrent clight modules with x86 tso implementations of synchronization primitives containing benign races all our work has been implemented in the coq proof assistant", + "title_raw": "Towards certified separate compilation for concurrent programs", + "abstract_raw": "Certified separate compilation is important for establishing end-to-end guarantees for certified systems consisting of multiple program modules. There has been much work building certified compilers for sequential programs. In this paper, we propose a language-independent framework consisting of the key semantics components and lemmas that bridge the verification gap between the compilers for sequential programs and those for (race-free) concurrent programs, so that the existing verification work for the former can be reused. One of the key contributions of the framework is a novel footprint-preserving compositional simulation as the compilation correctness criterion. The framework also provides a new mechanism to support confined benign races which are usually found in efficient implementations of synchronization primitives. With our framework, we develop CASCompCert, which extends CompCert for certified separate compilation of race-free concurrent Clight programs. It also allows linking of concurrent Clight modules with x86-TSO implementations of synchronization primitives containing benign races. 
All our work has been implemented in the Coq proof assistant.", + "link": "https://scholar.google.com/scholar?as_q=&num=10&btnG=Search+Scholar&as_epq=Towards+Certified+Separate+Compilation+for+Concurrent+Programs&as_oq=&as_eq=&as_occt=any&as_sauthors=Jiang", + "scraped_abstract": "Certified separate compilation is important for establishing end-to-end guarantees for certified systems consisting of multiple program modules. There has been much work building certified compilers for sequential programs. In this paper, we propose a language-independent framework consisting of the key semantics components and lemmas that bridge the verification gap between the compilers for sequential programs and those for (race-free) concurrent programs, so that the existing verification work for the former can be reused. One of the key contributions of the framework is a novel footprint-preserving compositional simulation as the compilation correctness criterion. The framework also provides a new mechanism to support confined benign races which are usually found in efficient implementations of synchronization primitives. With our framework, we develop CASCompCert, which extends CompCert for certified separate compilation of race-free concurrent Clight programs. It also allows linking of concurrent Clight modules with x86-TSO implementations of synchronization primitives containing benign races. 
All our work has been implemented in the Coq proof assistant.", + "citation_best": 17 + }, + { + "paper": "3104371626", + "venue": "1127352206", + "year": "2019", + "title": "an inductive synthesis framework for verifiable reinforcement learning", + "label": [ + "50644808", + "2779167558", + "111498074", + "97541855", + "149091818", + "2776937632", + "120314980", + "2776350369" + ], + "author": [ + "2305185851", + "2955337413", + "3181993941", + "2141982898" + ], + "reference": [ + "1523211942", + "1590649805", + "1896363143", + "1963790880", + "1977797609", + "1985815049", + "2053572490", + "2098022036", + "2149479912", + "2276356546", + "2296356821", + "2395317528", + "2427917354", + "2528726414", + "2543296129", + "2580175322", + "2594877703", + "2735485745", + "2794609696", + "2796284132", + "2798957601", + "2803974723", + "2883239200", + "2912131246", + "2952645808", + "2952720101", + "2952905979", + "2962749646", + "2962957031", + "2963208512", + "2963575966", + "2963864421", + "2964040381" + ], + "abstract": "despite the tremendous advances that have been made in the last decade on developing useful machine learning applications their wider adoption has been hindered by the lack of strong assurance guarantees that can be made about their behavior in this paper we consider how formal verification techniques developed for traditional software systems can be repurposed for verification of reinforcement learning enabled ones a particularly important class of machine learning systems rather than enforcing safety by examining and altering the structure of a complex neural network implementation our technique uses blackbox methods to synthesizes deterministic programs simpler more interpretable approximations of the network that can nonetheless guarantee desired safety properties are preserved even when the network is deployed in unanticipated or previously unobserved environments our methodology frames the problem of neural network verification in terms of a 
counterexample and syntax guided inductive synthesis procedure over these programs the synthesis procedure searches for both a deterministic program and an inductive invariant over an infinite state transition system that represents a specification of an application s control logic additional specifications defining environment based constraints can also be provided to further refine the search space synthesized programs deployed in conjunction with a neural network implementation dynamically enforce safety conditions by monitoring and preventing potentially unsafe actions proposed by neural policies experimental results over a wide range of cyber physical applications demonstrate that software inspired formal verification techniques can be used to realize trustworthy reinforcement learning systems with low overhead", + "title_raw": "An inductive synthesis framework for verifiable reinforcement learning", + "abstract_raw": "Despite the tremendous advances that have been made in the last decade on developing useful machine-learning applications, their wider adoption has been hindered by the lack of strong assurance guarantees that can be made about their behavior. In this paper, we consider how formal verification techniques developed for traditional software systems can be repurposed for verification of reinforcement learning-enabled ones, a particularly important class of machine learning systems. Rather than enforcing safety by examining and altering the structure of a complex neural network implementation, our technique uses blackbox methods to synthesizes deterministic programs, simpler, more interpretable, approximations of the network that can nonetheless guarantee desired safety properties are preserved, even when the network is deployed in unanticipated or previously unobserved environments. Our methodology frames the problem of neural network verification in terms of a counterexample and syntax-guided inductive synthesis procedure over these programs. 
The synthesis procedure searches for both a deterministic program and an inductive invariant over an infinite state transition system that represents a specification of an application's control logic. Additional specifications defining environment-based constraints can also be provided to further refine the search space. Synthesized programs deployed in conjunction with a neural network implementation dynamically enforce safety conditions by monitoring and preventing potentially unsafe actions proposed by neural policies. Experimental results over a wide range of cyber-physical applications demonstrate that software-inspired formal verification techniques can be used to realize trustworthy reinforcement learning systems with low overhead.", + "link": "https://scholar.google.com/scholar?as_q=&num=10&btnG=Search+Scholar&as_epq=An+Inductive+Synthesis+Framework+for+Verifiable+Reinforcement+Learning&as_oq=&as_eq=&as_occt=any&as_sauthors=Zhu", + "scraped_abstract": null, + "citation_best": 72 + }, + { + "paper": "2955231257", + "venue": "1127352206", + "year": "2019", + "title": "a typed algebraic approach to parsing", + "label": [ + "198838072", + "77735154", + "118364021", + "199360897", + "186644900", + "156884757", + "207648694" + ], + "author": [ + "238187693", + "1971600047" + ], + "reference": [ + "168212000", + "1562942180", + "1573333946", + "1582705003", + "1762192966", + "1776029027", + "1808317016", + "1968315635", + "1969167499", + "1973817937", + "1987823535", + "1988177568", + "2008070495", + "2048808408", + "2064490449", + "2072607050", + "2111229114", + "2116813111", + "2117161590", + "2135641942", + "2139695229", + "2149092147", + "2154464718", + "2156429182", + "2160249850", + "2166030873", + "2171433297", + "2247694024", + "2616790502", + "2905603442", + "2914032487", + "2978341312" + ], + "abstract": "in this paper we recall the definition of the context free expressions or regular expressions an algebraic presentation of the context free languages 
then we define a core type system for the context free expressions which gives a compositional criterion for identifying those context free expressions which can be parsed unambiguously by predictive algorithms in the style of recursive descent or ll 1 next we show how these typed grammar expressions can be used to derive a parser combinator library which both guarantees linear time parsing with no backtracking and single token lookahead and which respects the natural denotational semantics of context free expressions finally we show how to exploit the type information to write a staged version of this library which produces dramatic increases in performance even outperforming code generated by the standard parser generator tool ocamlyacc", + "title_raw": "A typed, algebraic approach to parsing", + "abstract_raw": "In this paper, we recall the definition of the context-free expressions (or \u00b5-regular expressions), an algebraic presentation of the context-free languages. Then, we define a core type system for the context-free expressions which gives a compositional criterion for identifying those context-free expressions which can be parsed unambiguously by predictive algorithms in the style of recursive descent or LL(1). Next, we show how these typed grammar expressions can be used to derive a parser combinator library which both guarantees linear-time parsing with no backtracking and single-token lookahead, and which respects the natural denotational semantics of context-free expressions. 
Finally, we show how to exploit the type information to write a staged version of this library, which produces dramatic increases in performance, even outperforming code generated by the standard parser generator tool ocamlyacc.", + "link": "https://scholar.google.com/scholar?as_q=&num=10&btnG=Search+Scholar&as_epq=A+Typed,+Algebraic+Approach+to+Parsing&as_oq=&as_eq=&as_occt=any&as_sauthors=Krishnaswami", + "scraped_abstract": "In this paper, we recall the definition of the context-free expressions (or \u00b5-regular expressions), an algebraic presentation of the context-free languages. Then, we define a core type system for the context-free expressions which gives a compositional criterion for identifying those context-free expressions which can be parsed unambiguously by predictive algorithms in the style of recursive descent or LL(1). Next, we show how these typed grammar expressions can be used to derive a parser combinator library which both guarantees linear-time parsing with no backtracking and single-token lookahead, and which respects the natural denotational semantics of context-free expressions. 
Finally, we show how to exploit the type information to write a staged version of this library, which produces dramatic increases in performance, even outperforming code generated by the standard parser generator tool ocamlyacc.", + "citation_best": 13 + }, + { + "paper": "3105155462", + "venue": "1127352206", + "year": "2019", + "title": "optimization and abstraction a synergistic approach for analyzing neural network robustness", + "label": [ + "50644808", + "97970142", + "2780654840", + "11413529" + ], + "author": [ + "2797943931", + "2939800113", + "61468710", + "2153285360" + ], + "reference": [ + "145069693", + "178079818", + "1496681274", + "1575350781", + "1603872376", + "1673923490", + "1746819321", + "1932198206", + "2028284083", + "2043100293", + "2099201756", + "2110889728", + "2112796928", + "2136213019", + "2136535855", + "2165073069", + "2179402106", + "2194775991", + "2280703106", + "2313513770", + "2342840547", + "2395317528", + "2395707743", + "2408141691", + "2432142698", + "2508075332", + "2525778437", + "2552767274", + "2565186948", + "2566919497", + "2581082771", + "2594877703", + "2616028256", + "2618530766", + "2721006554", + "2744095836", + "2760733685", + "2761709036", + "2768915615", + "2775273147", + "2777449390", + "2791251367", + "2794609696", + "2798356176", + "2952345740", + "2963003451", + "2963207607", + "2963341057", + "2963689459", + "2964253222", + "3118608800" + ], + "abstract": "in recent years the notion of local robustness or robustness for short has emerged as a desirable property of deep neural networks intuitively robustness means that small perturbations to an input do not cause the network to perform misclassifications in this paper we present a novel algorithm for verifying robustness properties of neural networks our method synergistically combines gradient based optimization methods for counterexample search with abstraction based proof search to obtain a sound and complete decision procedure our method also employs 
a data driven approach to learn a verification policy that guides abstract interpretation during proof search we have implemented the proposed approach in a tool called charon and experimentally evaluated it on hundreds of benchmarks our experiments show that the proposed approach significantly outperforms three state of the art tools namely ai 2 reluplex and reluval", + "title_raw": "Optimization and abstraction: a synergistic approach for analyzing neural network robustness", + "abstract_raw": "In recent years, the notion of local robustness (or robustness for short) has emerged as a desirable property of deep neural networks. Intuitively, robustness means that small perturbations to an input do not cause the network to perform misclassifications. In this paper, we present a novel algorithm for verifying robustness properties of neural networks. Our method synergistically combines gradient-based optimization methods for counterexample search with abstraction-based proof search to obtain a sound and (\u03b4 -)complete decision procedure. Our method also employs a data-driven approach to learn a verification policy that guides abstract interpretation during proof search. We have implemented the proposed approach in a tool called Charon and experimentally evaluated it on hundreds of benchmarks. Our experiments show that the proposed approach significantly outperforms three state-of-the-art tools, namely AI^2, Reluplex, and Reluval.", + "link": "https://scholar.google.com/scholar?as_q=&num=10&btnG=Search+Scholar&as_epq=Optimization+and+Abstraction:+A+Synergistic+Approach+for+Analyzing+Neural+Network+Robustness&as_oq=&as_eq=&as_occt=any&as_sauthors=Anderson", + "scraped_abstract": "In recent years, the notion of local robustness (or robustness for short) has emerged as a desirable property of deep neural networks. Intuitively, robustness means that small perturbations to an input do not cause the network to perform misclassifications. 
In this paper, we present a novel algorithm for verifying robustness properties of neural networks. Our method synergistically combines gradient-based optimization methods for counterexample search with abstraction-based proof search to obtain a sound and (\u03b4 -)complete decision procedure. Our method also employs a data-driven approach to learn a verification policy that guides abstract interpretation during proof search. We have implemented the proposed approach in a tool called Charon and experimentally evaluated it on hundreds of benchmarks. Our experiments show that the proposed approach significantly outperforms three state-of-the-art tools, namely AI^2, Reluplex, and Reluval.", + "citation_best": 49 + }, + { + "paper": "2948130259", + "venue": "1175089206", + "year": "2019", + "title": "interventional fairness causal database repair for algorithmic fairness", + "label": [ + "49585438", + "77088390", + "48103436" + ], + "author": [ + "2049010007", + "2231996890", + "2095906382", + "1986159374" + ], + "reference": [ + "1558832481", + "1961345416", + "1963921006", + "1974042790", + "1985239372", + "1988368118", + "2014352947", + "2059141064", + "2097246321", + "2098952447", + "2100960835", + "2114508388", + "2116666691", + "2124172487", + "2143117649", + "2146521249", + "2157928966", + "2159080219", + "2162670686", + "2169157836", + "2343513962", + "2540757487", + "2584805976", + "2588229802", + "2594166818", + "2606013986", + "2730550703", + "2750585749", + "2753845591", + "2798682670", + "2809628680", + "2809878087", + "2886452416", + "2908766589", + "2950538796", + "2962959301", + "2962977061", + "2963453196", + "2964031043", + "2964116855", + "3023126635", + "3023309920", + "3100046612", + "3120740533", + "3121683006", + "3125789530" + ], + "abstract": "fairness is increasingly recognized as a critical component of machine learning systems however it is the underlying data on which these systems are trained that often reflect discrimination suggesting a 
database repair problem existing treatments of fairness rely on statistical correlations that can be fooled by statistical anomalies such as simpson s paradox proposals for causality based definitions of fairness can correctly model some of these situations but they require specification of the underlying causal models in this paper we formalize the situation as a database repair problem proving sufficient conditions for fair classifiers in terms of admissible variables as opposed to a complete causal model we show that these conditions correctly capture subtle fairness violations we then use these conditions as the basis for database repair algorithms that provide provable fairness guarantees about classifiers trained on their training labels we evaluate our algorithms on real data demonstrating improvement over the state of the art on multiple fairness metrics proposed in the literature while retaining high utility", + "title_raw": "Interventional Fairness: Causal Database Repair for Algorithmic Fairness", + "abstract_raw": "Fairness is increasingly recognized as a critical component of machine learning systems. However, it is the underlying data on which these systems are trained that often reflect discrimination, suggesting a database repair problem. Existing treatments of fairness rely on statistical correlations that can be fooled by statistical anomalies, such as Simpson's paradox. Proposals for causality-based definitions of fairness can correctly model some of these situations, but they require specification of the underlying causal models. In this paper, we formalize the situation as a database repair problem, proving sufficient conditions for fair classifiers in terms of admissible variables as opposed to a complete causal model. We show that these conditions correctly capture subtle fairness violations. 
We then use these conditions as the basis for database repair algorithms that provide provable fairness guarantees about classifiers trained on their training labels. We evaluate our algorithms on real data, demonstrating improvement over the state of the art on multiple fairness metrics proposed in the literature while retaining high utility.", + "link": "https://www.semanticscholar.org/paper/9977fa93c2451831edf30f98156ada39a19fcecb", + "scraped_abstract": "Fairness is increasingly recognized as a critical component of machine learning systems. However, it is the underlying data on which these systems are trained that often reflect discrimination, suggesting a database repair problem. Existing treatments of fairness rely on statistical correlations that can be fooled by statistical anomalies, such as Simpson's paradox. Proposals for causality-based definitions of fairness can correctly model some of these situations, but they require specification of the underlying causal models. In this paper, we formalize the situation as a database repair problem, proving sufficient conditions for fair classifiers in terms of admissible variables as opposed to a complete causal model. We show that these conditions correctly capture subtle fairness violations. We then use these conditions as the basis for database repair algorithms that provide provable fairness guarantees about classifiers trained on their training labels. 
We evaluate our algorithms on real data, demonstrating improvement over the state of the art on multiple fairness metrics proposed in the literature while retaining high utility.", + "citation_best": 0 + }, + { + "paper": "2963311060", + "venue": "1163618098", + "year": "2019", + "title": "spectre attacks exploiting speculative execution", + "label": [ + "26713055", + "202491316", + "49154492", + "38652104", + "141331961", + "168522837", + "62913178", + "49289754", + "107598950", + "115537543", + "98045186" + ], + "author": [ + "3131425006", + "2883088217", + "2955752193", + "2037979295", + "1461862240", + "2782170360", + "2658528294", + "2496857916", + "339490103", + "2781904831", + "2405324185", + "2412269837" + ], + "reference": [ + "1427174644", + "1488058190", + "1499791368", + "1503814339", + "1507889006", + "1555558540", + "1557855942", + "1613874182", + "1724890242", + "1824405704", + "1934458198", + "1992291252", + "2053832511", + "2061354941", + "2119028650", + "2126132644", + "2131019288", + "2138381338", + "2146573211", + "2154909745", + "2157116240", + "2162800072", + "2163563130", + "2172060328", + "2173730676", + "2337480911", + "2394822940", + "2397246570", + "2484027757", + "2532499458", + "2533043266", + "2550858797", + "2562036180", + "2564856904", + "2593994116", + "2594560662", + "2612687770", + "2664885055", + "2744774028", + "2747709512", + "2750990141", + "2751989915", + "2775990858", + "2795111205", + "2795231660", + "2883613460", + "2964118667", + "2964206587", + "2978757628" + ], + "abstract": "modern processors use branch prediction and speculative execution to maximize performance for example if the destination of a branch depends on a memory value that is in the process of being read cpus will try to guess the destination and attempt to execute ahead when the memory value finally arrives the cpu either discards or commits the speculative computation speculative logic is unfaithful in how it executes can access the victim s memory and 
registers and can perform operations with measurable side effects spectre attacks involve inducing a victim to speculatively perform operations that would not occur during correct program execution and which leak the victim s confidential information via a side channel to the adversary this paper describes practical attacks that combine methodology from side channel attacks fault attacks and return oriented programming that can read arbitrary memory from the victim s process more broadly the paper shows that speculative execution implementations violate the security assumptions underpinning numerous software security mechanisms including operating system process separation containerization just in time jit compilation and countermeasures to cache timing and side channel attacks these attacks represent a serious threat to actual systems since vulnerable speculative execution capabilities are found in microprocessors from intel amd and arm that are used in billions of devices while makeshift processor specific countermeasures are possible in some cases sound solutions will require fixes to processor designs as well as updates to instruction set architectures isas to give hardware architects and software developers a common understanding as to what computation state cpu implementations are and are not permitted to leak", + "title_raw": "Spectre Attacks: Exploiting Speculative Execution", + "abstract_raw": "Modern processors use branch prediction and speculative execution to maximize performance. For example, if the destination of a branch depends on a memory value that is in the process of being read, CPUs will try to guess the destination and attempt to execute ahead. When the memory value finally arrives, the CPU either discards or commits the speculative computation. Speculative logic is unfaithful in how it executes, can access the victim's memory and registers, and can perform operations with measurable side effects. 
Spectre attacks involve inducing a victim to speculatively perform operations that would not occur during correct program execution and which leak the victim's confidential information via a side channel to the adversary. This paper describes practical attacks that combine methodology from side channel attacks, fault attacks, and return-oriented programming that can read arbitrary memory from the victim's process. More broadly, the paper shows that speculative execution implementations violate the security assumptions underpinning numerous software security mechanisms, including operating system process separation, containerization, just-in-time (JIT) compilation, and countermeasures to cache timing and side-channel attacks. These attacks represent a serious threat to actual systems since vulnerable speculative execution capabilities are found in microprocessors from Intel, AMD, and ARM that are used in billions of devices. While makeshift processor-specific countermeasures are possible in some cases, sound solutions will require fixes to processor designs as well as updates to instruction set architectures (ISAs) to give hardware architects and software developers a common understanding as to what computation state CPU implementations are (and are not) permitted to leak.", + "link": "https://scholar.google.com/scholar?as_q=&num=10&btnG=Search+Scholar&as_epq=Spectre+Attacks:+Exploiting+Speculative+Execution&as_oq=&as_eq=&as_occt=any&as_sauthors=Kocher", + "scraped_abstract": "Modern processors use branch prediction and speculative execution to maximize performance. For example, if the destination of a branch depends on a memory value that is in the process of being read, CPUs will try to guess the destination and attempt to execute ahead. When the memory value finally arrives, the CPU either discards or commits the speculative computation. 
Speculative logic is unfaithful in how it executes, can access the victim's memory and registers, and can perform operations with measurable side effects. Spectre attacks involve inducing a victim to speculatively perform operations that would not occur during correct program execution and which leak the victim's confidential information via a side channel to the adversary. This paper describes practical attacks that combine methodology from side channel attacks, fault attacks, and return-oriented programming that can read arbitrary memory from the victim's process. More broadly, the paper shows that speculative execution implementations violate the security assumptions underpinning numerous software security mechanisms, including operating system process separation, containerization, just-in-time (JIT) compilation, and countermeasures to cache timing and side-channel attacks. These attacks represent a serious threat to actual systems since vulnerable speculative execution capabilities are found in microprocessors from Intel, AMD, and ARM that are used in billions of devices. 
While makeshift processor-specific countermeasures are possible in some cases, sound solutions will require fixes to processor designs as well as updates to instruction set architectures (ISAs) to give hardware architects and software developers a common understanding as to what computation state CPU implementations are (and are not) permitted to leak.", + "citation_best": 1495 + }, + { + "paper": "2968103728", + "venue": "1152462849", + "year": "2019", + "title": "underwater backscatter networking", + "label": [ + "169111936", + "555944384" + ], + "author": [ + "2967348811", + "2085731619" + ], + "reference": [ + "114297782", + "576506256", + "1500716034", + "1516241739", + "1534848476", + "1561171787", + "1579862611", + "1806000254", + "1981752360", + "1997834106", + "1998647644", + "2034927591", + "2037195613", + "2040471651", + "2050824703", + "2067632594", + "2067670802", + "2079405396", + "2085327061", + "2088585322", + "2094769033", + "2099461393", + "2099651019", + "2100602452", + "2104933959", + "2105144556", + "2107052066", + "2110052350", + "2113346827", + "2129879516", + "2133509944", + "2134142056", + "2145213600", + "2146756896", + "2150103071", + "2151163992", + "2152269371", + "2168821867", + "2170984917", + "2174065014", + "2187255755", + "2212501963", + "2290418131", + "2308002575", + "2430388750", + "2434017230", + "2522085611", + "2547263323", + "2566010097", + "2603155052", + "2606701507", + "2608608853", + "2625905314", + "2722392453", + "2761238645", + "2804112984", + "2810692777", + "2830147196", + "2846127207", + "2855857260", + "2869395566", + "2896839916", + "2897498375", + "2901676314", + "3009623176", + "3136666733" + ], + "abstract": "we present piezo acoustic backscatter pab the first technology that enables backscatter networking in underwater environments pab relies on the piezoelectric effect to enable underwater communication and sensing at near zero power its architecture is inspired by radio backscatter which works well in air 
but cannot work well underwater due to the exponential attenuation of radio signals in water pab nodes harvest energy from underwater acoustic signals using piezoelectric interfaces and communicate by modulating the piezoelectric impedance our design introduces innovations that enable concurrent multiple access through circuit based frequency tuning of backscatter modulation and a mac that exploits the properties of pab nodes to deliver higher network throughput and decode network collisions we built a prototype of our design using custom designed mechanically fabricated transducers and an end to end battery free hardware implementation we tested our nodes in large experimental water tanks at the mit sea grant our results demonstrate single link throughputs up to 3 kbps and power up ranges up to 10 m finally we show how our design can be used to measure acidity temperature and pressure looking ahead the system can be used in ocean exploration marine life sensing and underwater climate change monitoring", + "title_raw": "Underwater backscatter networking", + "abstract_raw": "We present Piezo-Acoustic Backscatter (PAB), the first technology that enables backscatter networking in underwater environments. PAB relies on the piezoelectric effect to enable underwater communication and sensing at near-zero power. Its architecture is inspired by radio backscatter which works well in air but cannot work well underwater due to the exponential attenuation of radio signals in water. PAB nodes harvest energy from underwater acoustic signals using piezoelectric interfaces and communicate by modulating the piezoelectric impedance. Our design introduces innovations that enable concurrent multiple access through circuit-based frequency tuning of backscatter modulation and a MAC that exploits the properties of PAB nodes to deliver higher network throughput and decode network collisions. 
We built a prototype of our design using custom-designed, mechanically fabricated transducers and an end-to-end battery-free hardware implementation. We tested our nodes in large experimental water tanks at the MIT Sea Grant. Our results demonstrate single-link throughputs up to 3 kbps and power-up ranges up to 10 m. Finally, we show how our design can be used to measure acidity, temperature, and pressure. Looking ahead, the system can be used in ocean exploration, marine life sensing, and underwater climate change monitoring.", + "link": "https://scholar.google.com/scholar?as_q=&num=10&btnG=Search+Scholar&as_epq=Underwater+Backscatter+Networking&as_oq=&as_eq=&as_occt=any&as_sauthors=Jang", + "scraped_abstract": "We present Piezo-Acoustic Backscatter (PAB), the first technology that enables backscatter networking in underwater environments. PAB relies on the piezoelectric effect to enable underwater communication and sensing at near-zero power. Its architecture is inspired by radio backscatter which works well in air but cannot work well underwater due to the exponential attenuation of radio signals in water. PAB nodes harvest energy from underwater acoustic signals using piezoelectric interfaces and communicate by modulating the piezoelectric impedance. Our design introduces innovations that enable concurrent multiple access through circuit-based frequency tuning of backscatter modulation and a MAC that exploits the properties of PAB nodes to deliver higher network throughput and decode network collisions. We built a prototype of our design using custom-designed, mechanically fabricated transducers and an end-to-end battery-free hardware implementation. We tested our nodes in large experimental water tanks at the MIT Sea Grant. Our results demonstrate single-link throughputs up to 3 kbps and power-up ranges up to 10 m. Finally, we show how our design can be used to measure acidity, temperature, and pressure. 
Looking ahead, the system can be used in ocean exploration, marine life sensing, and underwater climate change monitoring.", + "citation_best": 99 + }, + { + "paper": "2948795993", + "venue": "1140684652", + "year": "2019", + "title": "variance reduction in gradient exploration for online learning to rank", + "label": [ + "189430467", + "97824396", + "163175372", + "83665646", + "11413529", + "50817715" + ], + "author": [ + "2556947837", + "2798325040", + "2798563959", + "2479883750", + "2157880984" + ], + "reference": [ + "135190683", + "1530210183", + "1992549066", + "2003473029", + "2004001705", + "2009979684", + "2011285051", + "2044493620", + "2094790959", + "2099213975", + "2115584760", + "2115711489", + "2129245267", + "2147892741", + "2149427297", + "2152314154", + "2156541733", + "2279385734", + "2340526403", + "2471222571", + "2535047536", + "2884475480", + "2890291106", + "2997842202", + "3103733168", + "3104894801" + ], + "abstract": "online learning to rank ol2r algorithms learn from implicit user feedback on the fly the key to such algorithms is an unbiased estimate of gradients which is often trivially achieved by uniformly sampling from the entire parameter space unfortunately this leads to high variance in gradient estimation resulting in high regret during model updates especially when the dimension of the parameter space is large in this work we aim at reducing the variance of gradient estimation in ol2r algorithms we project the selected updating direction i e the winning direction into a space spanned by the feature vectors from examined documents under the current query termed the document space for short after an interleaved test our key insight is that the result of an interleaved test is solely governed by a user s relevance evaluation over the examined documents hence the true gradient introduced by this test is only reflected in the constructed document space and components of the proposed gradient which are orthogonal to the document 
space can be safely removed for variance reduction purpose we prove that this projected gradient is still an unbiased estimation of the true gradient and show that this lower variance gradient estimation results in significant regret reduction our proposed method is compatible with all existing ol2r algorithms which rank documents using a linear model extensive experimental comparisons with several state of the art ol2r algorithms have confirmed the effectiveness of our proposed method in reducing the variance of gradient estimation and improving overall ranking performance", + "title_raw": "Variance Reduction in Gradient Exploration for Online Learning to Rank", + "abstract_raw": "Online Learning to Rank (OL2R) algorithms learn from implicit user feedback on the fly. The key to such algorithms is an unbiased estimate of gradients, which is often (trivially) achieved by uniformly sampling from the entire parameter space. Unfortunately, this leads to high-variance in gradient estimation, resulting in high regret during model updates, especially when the dimension of the parameter space is large. In this work, we aim at reducing the variance of gradient estimation in OL2R algorithms. We project the selected updating direction (i.e., the winning direction) into a space spanned by the feature vectors from examined documents under the current query (termed the \"document space\" for short), after an interleaved test. Our key insight is that the result of an interleaved test is solely governed by a user's relevance evaluation over the examined documents. Hence, the true gradient introduced by this test is only reflected in the constructed document space, and components of the proposed gradient which are orthogonal to the document space can be safely removed, for variance reduction purpose. 
We prove that this projected gradient is still an unbiased estimation of the true gradient, and show that this lower-variance gradient estimation results in significant regret reduction. Our proposed method is compatible with all existing OL2R algorithms which rank documents using a linear model. Extensive experimental comparisons with several state-of-the-art OL2R algorithms have confirmed the effectiveness of our proposed method in reducing the variance of gradient estimation and improving overall ranking performance.", + "link": "https://www.semanticscholar.org/paper/538ce5a49afb614255aa669f45b60ec0fc89cce6", + "scraped_abstract": null, + "citation_best": 15 + }, + { + "paper": "2964022882", + "venue": "1131589359", + "year": "2019", + "title": "computationally efficient estimation of the spectral gap of a markov chain", + "label": [ + "111350023", + "162443888", + "98763669" + ], + "author": [ + "2098430289", + "2131380413" + ], + "reference": [ + "1600266325", + "1743780845", + "1980032585", + "2041990574", + "2063986634", + "2103012681", + "2135194391", + "2566505556", + "2750001921", + "2750579340", + "2952867986", + "2963211500", + "3023788998" + ], + "abstract": "we consider the problem of estimating from sample paths the absolute spectral gap 1 of a reversible irreducible and aperiodic markov chain xt t n over a finite state space we propose the ucpi upper confidence power iteration algorithm for this problem a low complexity algorithm which estimates the spectral gap in time o n and memory space o ln n 2 given n samples this is in stark contrast with most known methods which require at least memory space o so that they cannot be applied to large state spaces furthermore ucpi is amenable to parallel implementation", + "title_raw": "Computationally Efficient Estimation of the Spectral Gap of a Markov Chain", + "abstract_raw": "We consider the problem of estimating from sample paths the absolute spectral gap 1-\u03bb\u22c6 of a reversible, irreducible and 
aperiodic Markov chain (Xt)t\u2208N over a finite state space \u03a9. We propose the UCPI (Upper Confidence Power Iteration) algorithm for this problem, a low-complexity algorithm which estimates the spectral gap in time O(n) and memory space O((ln n)2 given n samples. This is in stark contrast with most known methods which require at least memory space O(|\u03a9|), so that they cannot be applied to large state spaces. Furthermore, UCPI is amenable to parallel implementation.", + "link": "https://www.semanticscholar.org/paper/153ca59230f13c3b599787565258b2770d025907", + "scraped_abstract": null, + "citation_best": 1 + }, + { + "paper": "2948130259", + "venue": "1175089206", + "year": "2019", + "title": "interventional fairness causal database repair for algorithmic fairness", + "label": [ + "49585438", + "77088390", + "48103436" + ], + "author": [ + "2049010007", + "2231996890", + "2095906382", + "1986159374" + ], + "reference": [ + "1558832481", + "1961345416", + "1963921006", + "1974042790", + "1985239372", + "1988368118", + "2014352947", + "2059141064", + "2097246321", + "2098952447", + "2100960835", + "2114508388", + "2116666691", + "2124172487", + "2143117649", + "2146521249", + "2157928966", + "2159080219", + "2162670686", + "2169157836", + "2343513962", + "2540757487", + "2584805976", + "2588229802", + "2594166818", + "2606013986", + "2730550703", + "2750585749", + "2753845591", + "2798682670", + "2809628680", + "2809878087", + "2886452416", + "2908766589", + "2950538796", + "2962959301", + "2962977061", + "2963453196", + "2964031043", + "2964116855", + "3023126635", + "3023309920", + "3100046612", + "3120740533", + "3121683006", + "3125789530" + ], + "abstract": "fairness is increasingly recognized as a critical component of machine learning systems however it is the underlying data on which these systems are trained that often reflect discrimination suggesting a database repair problem existing treatments of fairness rely on statistical correlations that 
can be fooled by statistical anomalies such as simpson s paradox proposals for causality based definitions of fairness can correctly model some of these situations but they require specification of the underlying causal models in this paper we formalize the situation as a database repair problem proving sufficient conditions for fair classifiers in terms of admissible variables as opposed to a complete causal model we show that these conditions correctly capture subtle fairness violations we then use these conditions as the basis for database repair algorithms that provide provable fairness guarantees about classifiers trained on their training labels we evaluate our algorithms on real data demonstrating improvement over the state of the art on multiple fairness metrics proposed in the literature while retaining high utility", + "title_raw": "Interventional Fairness: Causal Database Repair for Algorithmic Fairness", + "abstract_raw": "Fairness is increasingly recognized as a critical component of machine learning systems. However, it is the underlying data on which these systems are trained that often reflect discrimination, suggesting a database repair problem. Existing treatments of fairness rely on statistical correlations that can be fooled by statistical anomalies, such as Simpson's paradox. Proposals for causality-based definitions of fairness can correctly model some of these situations, but they require specification of the underlying causal models. In this paper, we formalize the situation as a database repair problem, proving sufficient conditions for fair classifiers in terms of admissible variables as opposed to a complete causal model. We show that these conditions correctly capture subtle fairness violations. We then use these conditions as the basis for database repair algorithms that provide provable fairness guarantees about classifiers trained on their training labels. 
We evaluate our algorithms on real data, demonstrating improvement over the state of the art on multiple fairness metrics proposed in the literature while retaining high utility.", + "link": "https://www.semanticscholar.org/paper/9977fa93c2451831edf30f98156ada39a19fcecb", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2974073952", + "venue": "1171178643", + "year": "2019", + "title": "scaling symbolic evaluation for automated verification of systems code with serval", + "label": [ + "202491316", + "553261973", + "169590947", + "122783720", + "187191949", + "199360897", + "2777904410", + "32833848" + ], + "author": [ + "2761286422", + "2026265091", + "2892555077", + "2150586784", + "2344136120", + "2303948204" + ], + "reference": [ + "69090831", + "183305829", + "563467911", + "944802673", + "1253485236", + "1434079718", + "1436965661", + "1480909796", + "1493060511", + "1556604985", + "1606177908", + "1710734607", + "1787074469", + "1835682006", + "1930200668", + "1963833875", + "1977764760", + "1978364288", + "1979693894", + "1981810578", + "1988079752", + "1989310296", + "2000178662", + "2018325363", + "2019404692", + "2023035194", + "2029224396", + "2034314861", + "2042033151", + "2043501224", + "2057156093", + "2073742357", + "2084175640", + "2091776255", + "2093852121", + "2095762545", + "2095770127", + "2098660395", + "2101512909", + "2106192381", + "2107147876", + "2109276114", + "2110904621", + "2112088099", + "2120034731", + "2126823808", + "2130427425", + "2132897303", + "2136310957", + "2137530017", + "2139151574", + "2139799388", + "2141729404", + "2153185479", + "2155216527", + "2163347957", + "2163671349", + "2186104740", + "2195119025", + "2415236938", + "2430362266", + "2463516579", + "2484015479", + "2493296585", + "2576393274", + "2578546025", + "2751239020", + "2751455878", + "2761236351", + "2761718075", + "2761953162", + "2762625979", + "2787976344", + "2883613460", + "2884325678", + "2888798936", + "2898543817", + 
"2898893133", + "2899599233", + "2963247769", + "2963311060", + "2963723316", + "3088268709" + ], + "abstract": "this paper presents serval a framework for developing automated verifiers for systems software serval provides an extensible infrastructure for creating verifiers by lifting interpreters under symbolic evaluation and a systematic approach to identifying and repairing verification performance bottlenecks using symbolic profiling and optimizations using serval we build automated verifiers for the risc v x86 32 llvm and bpf instruction sets we report our experience of retrofitting certikos and komodo two systems previously verified using coq and dafny respectively for automated verification using serval and discuss trade offs of different verification methodologies in addition we apply serval to the keystone security monitor and the bpf compilers in the linux kernel and uncover 18 new bugs through verification all confirmed and fixed by developers", + "title_raw": "Scaling symbolic evaluation for automated verification of systems code with Serval", + "abstract_raw": "This paper presents Serval, a framework for developing automated verifiers for systems software. Serval provides an extensible infrastructure for creating verifiers by lifting interpreters under symbolic evaluation, and a systematic approach to identifying and repairing verification performance bottlenecks using symbolic profiling and optimizations. Using Serval, we build automated verifiers for the RISC-V, x86--32, LLVM, and BPF instruction sets. We report our experience of retrofitting CertiKOS and Komodo, two systems previously verified using Coq and Dafny, respectively, for automated verification using Serval, and discuss trade-offs of different verification methodologies. 
In addition, we apply Serval to the Keystone security monitor and the BPF compilers in the Linux kernel, and uncover 18 new bugs through verification, all confirmed and fixed by developers.", + "link": "https://www.semanticscholar.org/paper/52bb999d9f8754f28c2f717880ac767868599a34", + "scraped_abstract": null, + "citation_best": 63 + }, + { + "paper": "2963620995", + "venue": "1190910084", + "year": "2019", + "title": "the reachability problem for petri nets is not elementary", + "label": [ + "161771561", + "311688", + "2777669093", + "136643341", + "193702766", + "38677869" + ], + "author": [ + "2294587838", + "2133353816", + "2119621042", + "2096889439", + "309224405" + ], + "reference": [ + "166893320", + "1506102917", + "1545252802", + "1577053809", + "1591023364", + "1625359640", + "1862276852", + "1932636156", + "1966307150", + "1975261774", + "1989788631", + "2000116086", + "2010265885", + "2013911710", + "2017315323", + "2031575609", + "2036526834", + "2036564813", + "2044541206", + "2049741213", + "2055081868", + "2073614038", + "2075742699", + "2078057071", + "2078521778", + "2078846861", + "2091676842", + "2106001218", + "2110012716", + "2164003033", + "2166452446", + "2188682159", + "2271542264", + "2344518945", + "2424307510", + "2563122255", + "2568513581", + "2922024459", + "2963633665", + "2964347059", + "3098342602", + "3099076450", + "3105059340", + "3124194344" + ], + "abstract": "petri nets also known as vector addition systems are a long established model of concurrency with extensive applications in modelling and analysis of hardware software and database systems as well as chemical biological and business processes the central algorithmic problem for petri nets is reachability whether from the given initial configuration there exists a sequence of valid execution steps that reaches the given final configuration the complexity of the problem has remained unsettled since the 1960s and it is one of the most prominent open questions in the theory 
of verification decidability was proved by mayr in his seminal stoc 1981 work and the currently best published upper bound is non primitive recursive ackermannian of leroux and schmitz from lics 2019 we establish a non elementary lower bound i e that the reachability problem needs a tower of exponentials of time and space until this work the best lower bound has been exponential space due to lipton in 1976 the new lower bound is a major breakthrough for several reasons firstly it shows that the reachability problem is much harder than the coverability i e state reachability problem which is also ubiquitous but has been known to be complete for exponential space since the late 1970s secondly it implies that a plethora of problems from formal languages logic concurrent systems process calculi and other areas that are known to admit reductions from the petri nets reachability problem are also not elementary thirdly it makes obsolete the currently best lower bounds for the reachability problems for two key extensions of petri nets with branching and with a pushdown stack at the heart of our proof is a novel gadget so called the factorial amplifier that assuming availability of counters that are zero testable and bounded by k guarantees to produce arbitrarily large pairs of values whose ratio is exactly the factorial of k we also develop a novel construction that uses arbitrarily large pairs of values with ratio r to provide zero testable counters that are bounded by r repeatedly composing the factorial amplifier with itself by means of the construction then enables us to compute in linear time petri nets that simulate minsky machines whose counters are bounded by a tower of exponentials which yields the non elementary lower bound by refining this scheme further we in fact establish hardness for h exponential space already for petri nets with h 13 counters", + "title_raw": "The reachability problem for Petri nets is not elementary", + "abstract_raw": "Petri nets, also 
known as vector addition systems, are a long established model of concurrency with extensive applications in modelling and analysis of hardware, software and database systems, as well as chemical, biological and business processes. The central algorithmic problem for Petri nets is reachability: whether from the given initial configuration there exists a sequence of valid execution steps that reaches the given final configuration. The complexity of the problem has remained unsettled since the 1960s, and it is one of the most prominent open questions in the theory of verification. Decidability was proved by Mayr in his seminal STOC 1981 work, and the currently best published upper bound is non-primitive recursive Ackermannian of Leroux and Schmitz from LICS 2019. We establish a non-elementary lower bound, i.e. that the reachability problem needs a tower of exponentials of time and space. Until this work, the best lower bound has been exponential space, due to Lipton in 1976. The new lower bound is a major breakthrough for several reasons. Firstly, it shows that the reachability problem is much harder than the coverability (i.e., state reachability) problem, which is also ubiquitous but has been known to be complete for exponential space since the late 1970s. Secondly, it implies that a plethora of problems from formal languages, logic, concurrent systems, process calculi and other areas, that are known to admit reductions from the Petri nets reachability problem, are also not elementary. Thirdly, it makes obsolete the currently best lower bounds for the reachability problems for two key extensions of Petri nets: with branching and with a pushdown stack. At the heart of our proof is a novel gadget so called the factorial amplifier that, assuming availability of counters that are zero testable and bounded by\u00a0k, guarantees to produce arbitrarily large pairs of values whose ratio is exactly the factorial of\u00a0k. 
We also develop a novel construction that uses arbitrarily large pairs of values with ratio R to provide zero testable counters that are bounded by\u00a0R. Repeatedly composing the factorial amplifier with itself by means of the construction then enables us to compute in linear time Petri nets that simulate Minsky machines whose counters are bounded by a tower of exponentials, which yields the non-elementary lower bound. By refining this scheme further, we in fact establish hardness for h-exponential space already for Petri nets with h + 13 counters.", + "link": "https://scholar.google.com/scholar?as_q=&num=10&btnG=Search+Scholar&as_epq=The+Reachability+Problem+for+Petri+Nets+is+Not+Elementary&as_oq=&as_eq=&as_occt=any&as_sauthors=Czerwinski", + "scraped_abstract": null, + "citation_best": 4 + }, + { + "paper": "2980899416", + "venue": "1166315290", + "year": "2019", + "title": "tiptext eyes free text entry on a fingertip keyboard", + "label": [ + "2777873542", + "64754055", + "207347870", + "31972630", + "200632571", + "150594956" + ], + "author": [ + "2980395523", + "2563715590", + "2532468807", + "2228792564", + "2535287249", + "2279506386", + "2098177638", + "2171700048", + "2158065556", + "2125695339" + ], + "reference": [ + "1839487816", + "1987934251", + "2012241589", + "2012865772", + "2022422334", + "2035675819", + "2054731618", + "2058444391", + "2063264538", + "2066765108", + "2069657084", + "2069874365", + "2090073010", + "2098924240", + "2099800354", + "2100019621", + "2102413118", + "2106931201", + "2108518773", + "2109438710", + "2109481294", + "2112261049", + "2129036414", + "2129160770", + "2133990837", + "2152528000", + "2163097095", + "2163444123", + "2167854962", + "2169709590", + "2209204668", + "2231369798", + "2252227770", + "2395465087", + "2397512475", + "2402790201", + "2408252211", + "2409088501", + "2508112722", + "2510194348", + "2510746639", + "2514097384", + "2516043700", + "2538172027", + "2538734362", + "2581706031", + "2602861616", 
+ "2610979204", + "2611784450", + "2611884327", + "2731419753", + "2732902709", + "2754253996", + "2763209402", + "2766813539", + "2795433822", + "2795466100", + "2795885990", + "2798933574", + "2896752950", + "2901482039", + "2901817221", + "2920852065", + "2941364481", + "2941851345", + "2941878792" + ], + "abstract": "in this paper we propose and investigate a new text entry technique using micro thumb tip gestures our technique features a miniature qwerty keyboard residing invisibly on the first segment of the user s index finger text entry can be carried out using the thumb tip to tap the tip of the index finger the keyboard layout was optimized for eyes free input by utilizing a spatial model reflecting the users natural spatial awareness of key locations on the index finger we present our approach of designing and optimizing the keyboard layout through a series of user studies and computer simulated text entry tests over 1 146 484 possibilities in the design space the outcome is a 2 3 grid with the letters highly confining to the alphabetic and spatial arrangement of qwerty our user evaluation showed that participants achieved an average text entry speed of 11 9 wpm and were able to type as fast as 13 3 wpm towards the end of the experiment", + "title_raw": "TipText: Eyes-Free Text Entry on a Fingertip Keyboard", + "abstract_raw": "In this paper, we propose and investigate a new text entry technique using micro thumb-tip gestures. Our technique features a miniature QWERTY keyboard residing invisibly on the first segment of the user's index finger. Text entry can be carried out using the thumb-tip to tap the tip of the index finger. The keyboard layout was optimized for eyes-free input by utilizing a spatial model reflecting the users' natural spatial awareness of key locations on the index finger. 
We present our approach of designing and optimizing the keyboard layout through a series of user studies and computer simulated text entry tests over 1,146,484 possibilities in the design space. The outcome is a 2\u00d73 grid with the letters highly confining to the alphabetic and spatial arrangement of QWERTY. Our user evaluation showed that participants achieved an average text entry speed of 11.9 WPM and were able to type as fast as 13.3 WPM towards the end of the experiment.", + "link": "https://www.semanticscholar.org/paper/50b2e5adc0af45f9e22711267899383dc781c367", + "scraped_abstract": "In this paper, we propose and investigate a new text entry technique using micro thumb-tip gestures. Our technique features a miniature QWERTY keyboard residing invisibly on the first segment of the user's index finger. Text entry can be carried out using the thumb-tip to tap the tip of the index finger. The keyboard layout was optimized for eyes-free input by utilizing a spatial model reflecting the users' natural spatial awareness of key locations on the index finger. We present our approach of designing and optimizing the keyboard layout through a series of user studies and computer simulated text entry tests over 1,146,484 possibilities in the design space. The outcome is a 2\u00d73 grid with the letters highly confining to the alphabetic and spatial arrangement of QWERTY. 
Our user evaluation showed that participants achieved an average text entry speed of 11.9 WPM and were able to type as fast as 13.3 WPM towards the end of the experiment.", + "citation_best": 70 + }, + { + "paper": "2954124074", + "venue": "1133523790", + "year": "2019", + "title": "fine grained secure and efficient data provenance for blockchain", + "label": [ + "77088390", + "2779687700" + ], + "author": [ + "2594080405", + "2608476590", + "2231413248", + "2291095767", + "2037466936", + "2150446338" + ], + "reference": [], + "abstract": "", + "title_raw": "Fine-Grained, Secure and Efficient Data Provenance for Blockchain.", + "abstract_raw": "", + "link": "https://www.semanticscholar.org/paper/af7ffa3f1dd86a4b3b72280f5a00abb1bba40cfe", + "scraped_abstract": "The success of Bitcoin and other cryptocurrencies bring enormous interest to blockchains. A blockchain system implements a tamper-evident ledger for recording transactions that modify some global states. The system captures entire evolution history of the states. The management of that history, also known as data provenance or lineage, has been studied extensively in database systems. However, querying data history in existing blockchains can only be done by replaying all transactions. This approach is applicable to large-scale, offline analysis, but is not suitable for online transaction processing.\n \n We present\n LineageChain\n , a fine-grained, secure and efficient provenance system for blockchains.\n LineageChain\n exposes provenance information to smart contracts via simple and elegant interfaces, thereby enabling a new class of blockchain applications whose execution logics depend on provenance information at runtime.\n LineageChain\n captures provenance during contract execution, and efficiently stores it in a Merkle tree.\n LineageChain\n provides a novel skip list index designed for supporting efficient provenance query processing. 
We have implemented\n LineageChain\n on top of Hyperledger and a blockchain-optimized storage system called ForkBase. Our extensive evaluation of\n LineageChain\n demonstrates its benefits to the new class of blockchain applications, its efficient query, and its small storage overhead.\n", + "citation_best": 4 + }, + { + "paper": "2962947218", + "venue": "1135342153", + "year": "2019", + "title": "emoji powered representation learning for cross lingual sentiment classification", + "label": [ + "45746551", + "137955351", + "59404180", + "2779247141", + "204321447", + "203005215" + ], + "author": [ + "2705645650", + "2972596061", + "2892286306", + "2119511691", + "2166036605", + "2126677274" + ], + "reference": [ + "1495762646", + "1522301498", + "1614298861", + "1815076433", + "1853947067", + "1983286042", + "1985643839", + "1994966918", + "2012348783", + "2021097538", + "2028904519", + "2041587709", + "2064675550", + "2069143585", + "2089065004", + "2097891230", + "2108646579", + "2112251034", + "2122369144", + "2133564696", + "2143326696", + "2156876426", + "2158199200", + "2159505618", + "2166706824", + "2167277498", + "2171068337", + "2226734577", + "2250629460", + "2250904672", + "2274912527", + "2462290672", + "2510632587", + "2514567832", + "2518630504", + "2526960150", + "2554861503", + "2568148245", + "2574189006", + "2582154088", + "2604944277", + "2621240141", + "2735926531", + "2740582239", + "2741115544", + "2755222014", + "2767917127", + "2789190634", + "2791662055", + "2794941713", + "2953384591", + "2964236337", + "3105262041" + ], + "abstract": "sentiment classification typically relies on a large amount of labeled data in practice the availability of labels is highly imbalanced among different languages e g more english texts are labeled than texts in any other languages which creates a considerable inequality in the quality of related information services received by users speaking different languages to tackle this problem cross lingual sentiment 
classification approaches aim to transfer knowledge learned from one language that has abundant labeled examples i e the source language usually english to another language with fewer labels i e the target language the source and the target languages are usually bridged through off the shelf machine translation tools through such a channel cross language sentiment patterns can be successfully learned from english and transferred into the target languages this approach however often fails to capture sentiment knowledge specific to the target language and thus compromises the accuracy of the downstream classification task in this paper we employ emojis which are widely available in many languages as a new channel to learn both the cross language and the language specific sentiment patterns we propose a novel representation learning method that uses emoji prediction as an instrument to learn respective sentiment aware representations for each language the learned representations are then integrated to facilitate cross lingual sentiment classification the proposed method demonstrates state of the art performance on benchmark datasets which is sustained even when sentiment labels are scarce", + "title_raw": "Emoji-Powered Representation Learning for Cross-Lingual Sentiment Classification", + "abstract_raw": "Sentiment classification typically relies on a large amount of labeled data. In practice, the availability of labels is highly imbalanced among different languages, e.g., more English texts are labeled than texts in any other languages, which creates a considerable inequality in the quality of related information services received by users speaking different languages. To tackle this problem, cross-lingual sentiment classification approaches aim to transfer knowledge learned from one language that has abundant labeled examples (i.e., the source language, usually English) to another language with fewer labels (i.e., the target language). 
The source and the target languages are usually bridged through off-the-shelf machine translation tools. Through such a channel, cross-language sentiment patterns can be successfully learned from English and transferred into the target languages. This approach, however, often fails to capture sentiment knowledge specific to the target language, and thus compromises the accuracy of the downstream classification task. In this paper, we employ emojis, which are widely available in many languages, as a new channel to learn both the cross-language and the language-specific sentiment patterns. We propose a novel representation learning method that uses emoji prediction as an instrument to learn respective sentiment-aware representations for each language. The learned representations are then integrated to facilitate cross-lingual sentiment classification. The proposed method demonstrates state-of-the-art performance on benchmark datasets, which is sustained even when sentiment labels are scarce.", + "link": "https://www.semanticscholar.org/paper/4575a1a01e81ad8015b9a1ecde0b10432dede8f8", + "scraped_abstract": null, + "citation_best": 1 + }, + { + "paper": "2911975451", + "venue": "1135342153", + "year": "2019", + "title": "outguard detecting in browser covert cryptocurrency mining in the wild", + "label": [ + "22111027", + "184895639", + "2780801425", + "136764020", + "180706569" + ], + "author": [ + "2275563875", + "2604428088", + "2889486534", + "2232414673", + "2144144832", + "2118081206", + "2156971739", + "321442596", + "2300615266" + ], + "reference": [ + "22566950", + "1506738637", + "1650881334", + "1973697585", + "1985247771", + "1985683032", + "1993370323", + "2012286502", + "2025827699", + "2077375749", + "2101234009", + "2119359024", + "2152060369", + "2228075399", + "2460170719", + "2461373307", + "2474411373", + "2476865836", + "2513529237", + "2601591992", + "2616029431", + "2794855529", + "2885525054", + "2889349357", + "2890228473", + "2897385569", + 
"2963603877", + "3100545544" + ], + "abstract": "in browser cryptojacking is a form of resource abuse that leverages end users machines to mine cryptocurrency without obtaining the users consent in this paper we design implement and evaluate outguard an automated cryptojacking detection system we construct a large ground truth dataset extract several features using an instrumented web browser and ultimately select seven distinctive features that are used to build an svm classification model outguardachieves a 97 9 tpr and 1 1 fpr and is reasonably tolerant to adversarial evasions we utilized outguardin the wild by deploying it across the alexa top 1m websites and found 6 302 cryptojacking sites of which 3 600 are new detections that were absent from the training data these cryptojacking sites paint a broad picture of the cryptojacking ecosystem with particular emphasis on the prevalence of cryptojacking websites and the shared infrastructure that provides clues to the operators behind the cryptojacking phenomenon", + "title_raw": "Outguard: Detecting In-Browser Covert Cryptocurrency Mining in the Wild", + "abstract_raw": "In-browser cryptojacking is a form of resource abuse that leverages end-users' machines to mine cryptocurrency without obtaining the users' consent. In this paper, we design, implement, and evaluate Outguard, an automated cryptojacking detection system. We construct a large ground-truth dataset, extract several features using an instrumented web browser, and ultimately select seven distinctive features that are used to build an SVM classification model. Outguardachieves a 97.9% TPR and 1.1% FPR and is reasonably tolerant to adversarial evasions. We utilized Outguardin the wild by deploying it across the Alexa Top 1M websites and found 6,302 cryptojacking sites, of which 3,600 are new detections that were absent from the training data. 
These cryptojacking sites paint a broad picture of the cryptojacking ecosystem, with particular emphasis on the prevalence of cryptojacking websites and the shared infrastructure that provides clues to the operators behind the cryptojacking phenomenon.", + "link": "https://www.semanticscholar.org/paper/c2dfaf2b331dfa6cc84f8ef5b7ea30efa039a0f7", + "scraped_abstract": null, + "citation_best": 84 + }, + { + "paper": "2788603415", + "venue": "1184914352", + "year": "2018", + "title": "memory augmented monte carlo tree search", + "label": [ + "46149586", + "11413529" + ], + "author": [ + "2598392123", + "2124670113", + "2157217378" + ], + "reference": [ + "64088143", + "568673721", + "1489792465", + "1625390266", + "1714211023", + "1997840820", + "2012833704", + "2070996757", + "2116123331", + "2134264591", + "2140365369", + "2153039919", + "2184714326", + "2257979135", + "2530887700", + "2594103415", + "2594466397", + "2963284097", + "2963341924" + ], + "abstract": "", + "title_raw": "Memory-Augmented Monte Carlo Tree Search.", + "abstract_raw": "", + "link": "https://www.semanticscholar.org/paper/2d7fb18fa63f718866055ab494e1b0707b0bd56a", + "scraped_abstract": null, + "citation_best": 20 + }, + { + "paper": "2962941914", + "venue": "1188739475", + "year": "2018", + "title": "finding syntax in human encephalography with beam search", + "label": [ + "137293760", + "39890363", + "50644808", + "53893814", + "19889080", + "1462715", + "147168706", + "60048249", + "186644900", + "204321447" + ], + "author": [ + "2143953440", + "2119216958", + "2509308542", + "2115890144" + ], + "reference": [ + "2789868", + "1566344874", + "1964817481", + "1968218156", + "1972172159", + "1981617416", + "1999048673", + "2022192320", + "2064675550", + "2067575282", + "2097606805", + "2111417482", + "2119728020", + "2135894974", + "2144499799", + "2160580906", + "2164645230", + "2170167574", + "2407291067", + "2512700785", + "2547026914", + "2549835527", + "2554915555", + "2561299349", + 
"2564486991", + "2603982613", + "2767771229", + "2775102574", + "2786354489", + "2794525651", + "2909386406", + "2949350555", + "2949952998", + "2953256171", + "2962733492", + "2963069010", + "2963073938", + "2963462075", + "3184707605" + ], + "abstract": "recurrent neural network grammars rnngs are generative models of tree string pairs that rely on neural networks to evaluate derivational choices parsing with them using beam search yields a variety of incremental complexity metrics such as word surprisal and parser action count when used as regressors against human electrophysiological responses to naturalistic text they derive two amplitude effects an early peak and a p600 like later peak by contrast a non syntactic neural language model yields no reliable effects model comparisons attribute the early peak to syntactic composition within the rnng this pattern of results recommends the rnng beam search combination as a mechanistic model of the syntactic processing that occurs during normal human language comprehension", + "title_raw": "Finding syntax in human encephalography with beam search", + "abstract_raw": "Recurrent neural network grammars (RNNGs) are generative models of (tree , string ) pairs that rely on neural networks to evaluate derivational choices. Parsing with them using beam search yields a variety of incremental complexity metrics such as word surprisal and parser action count. When used as regressors against human electrophysiological responses to naturalistic text, they derive two amplitude effects: an early peak and a P600-like later peak. By contrast, a non-syntactic neural language model yields no reliable effects. Model comparisons attribute the early peak to syntactic composition within the RNNG. 
This pattern of results recommends the RNNG+beam search combination as a mechanistic model of the syntactic processing that occurs during normal human language comprehension.", + "link": "https://www.semanticscholar.org/paper/8981a71bb7dc6d0e9a25e24fc742e9cd5a511135", + "scraped_abstract": null, + "citation_best": 16 + }, + { + "paper": "2963033005", + "venue": "1188739475", + "year": "2018", + "title": "learning to ask good questions ranking clarification questions using neural expected value of perfect information", + "label": [ + "124885418", + "189430467", + "2522767166" + ], + "author": [ + "2519961061", + "1204061966" + ], + "reference": [ + "48039281", + "88661437", + "1533917153", + "1784132254", + "1983405110", + "2064675550", + "2086159275", + "2096968458", + "2118508845", + "2130942839", + "2169676805", + "2250425483", + "2250539671", + "2250674021", + "2252047747", + "2256784804", + "2275056699", + "2294661159", + "2328886022", + "2466071179", + "2915240437", + "2962854379", + "2962883855", + "2963351776", + "2963546833", + "2964183327" + ], + "abstract": "inquiry is fundamental to communication and machines cannot effectively collaborate with humans unless they can ask questions in this work we build a neural network model for the task of ranking clarification questions our model is inspired by the idea of expected value of perfect information a good question is one whose expected answer will be useful we study this problem using data from stackexchange a plentiful online resource in which people routinely ask clarifying questions to posts so that they can better offer assistance to the original poster we create a dataset of clarification questions consisting of 77k posts paired with a clarification question and answer from three domains of stackexchange askubuntu unix and superuser we evaluate our model on 500 samples of this dataset against expert human judgments and demonstrate significant improvements over controlled baselines", + "title_raw": 
"Learning to Ask Good Questions: Ranking Clarification Questions using Neural Expected Value of Perfect Information", + "abstract_raw": "Inquiry is fundamental to communication, and machines cannot effectively collaborate with humans unless they can ask questions. In this work, we build a neural network model for the task of ranking clarification questions. Our model is inspired by the idea of expected value of perfect information: a good question is one whose expected answer will be useful. We study this problem using data from StackExchange, a plentiful online resource in which people routinely ask clarifying questions to posts so that they can better offer assistance to the original poster. We create a dataset of clarification questions consisting of 77K posts paired with a clarification question (and answer) from three domains of StackExchange: askubuntu, unix and superuser. We evaluate our model on 500 samples of this dataset against expert human judgments and demonstrate significant improvements over controlled baselines.", + "link": "https://scholar.google.com/scholar?as_q=&num=10&btnG=Search+Scholar&as_epq=Learning+to+Ask+Good+Questions:+Ranking+Clarification+Questions+using+Neural+Expected+Value+of+Perfect+Information&as_oq=&as_eq=&as_occt=any&as_sauthors=Rao", + "scraped_abstract": null, + "citation_best": 9 + }, + { + "paper": "2963686541", + "venue": "1188739475", + "year": "2018", + "title": "let s do it again a first computational approach to detecting adverbial presupposition triggers", + "label": [ + "206134035", + "2779357210", + "2776187449", + "204321447", + "170858558" + ], + "author": [ + "2748714594", + "3014364443", + "2797771873", + "2148676791" + ], + "reference": [ + "216153039", + "1564411030", + "1601018530", + "1632114991", + "1793121960", + "1810943226", + "1815076433", + "1832693441", + "1890830999", + "2033047024", + "2064675550", + "2095705004", + "2110915701", + "2123442489", + "2129882630", + "2153579005", + "2160745555", + 
"2270070752", + "2323143533", + "2550628494", + "2575224319", + "2963403868", + "2963748792", + "2963768805", + "2964121744", + "2964267515", + "2964308564" + ], + "abstract": "we introduce the novel task of predicting adverbial presupposition triggers which is useful for natural language generation tasks such as summarization and dialogue systems we introduce two new corpora derived from the penn treebank and the annotated english gigaword dataset and investigate the use of a novel attention mechanism tailored to this task our attention mechanism augments a baseline recurrent neural network without the need for additional trainable parameters minimizing the added computational cost of our mechanism we demonstrate that this model statistically outperforms our baselines", + "title_raw": "Let\u2019s do it \u201cagain\u201d: A First Computational Approach to Detecting Adverbial Presupposition Triggers", + "abstract_raw": "We introduce the novel task of predicting adverbial presupposition triggers, which is useful for natural language generation tasks such as summarization and dialogue systems. We introduce two new corpora, derived from the Penn Treebank and the Annotated English Gigaword dataset and investigate the use of a novel attention mechanism tailored to this task. Our attention mechanism augments a baseline recurrent neural network without the need for additional trainable parameters, minimizing the added computational cost of our mechanism. 
We demonstrate that this model statistically outperforms our baselines.", + "link": "https://scholar.google.com/scholar?as_q=&num=10&btnG=Search+Scholar&as_epq=Let's+do+it+'again':+A+First+Computational+Approach+to+Detecting+Adverbial+Presupposition+Triggers&as_oq=&as_eq=&as_occt=any&as_sauthors=Cianflone", + "scraped_abstract": null, + "citation_best": 10 + }, + { + "paper": "2796382388", + "venue": "1163450153", + "year": "2018", + "title": "agile 3d sketching with air scaffolding", + "label": [ + "89429830", + "14185376" + ], + "author": [ + "2251897260", + "2760802256", + "2142864399", + "2151341577" + ], + "reference": [ + "10311529", + "48630142", + "1022448614", + "1963545996", + "1981963587", + "2019288999", + "2026687385", + "2033974677", + "2044656118", + "2064403794", + "2068775275", + "2081808639", + "2088096262", + "2089991557", + "2104623069", + "2111869537", + "2120255133", + "2127738431", + "2130338154", + "2135700807", + "2136217726", + "2145615502", + "2145930572", + "2329911166", + "2469784314", + "2471653388", + "2492893897", + "2536588878", + "2611561499" + ], + "abstract": "hand motion and pen drawing can be intuitive and expressive inputs for professional digital 3d authoring however their inherent limitations have hampered wider adoption 3d sketching using hand motion is rapid but rough and 3d sketching using pen drawing is delicate but tedious our new 3d sketching workflow combines these two in a complementary manner the user makes quick hand motions in the air to generate approximate 3d shapes and uses them as scaffolds on which to add details via pen based 3d sketching on a tablet device our air scaffolding technique and corresponding algorithm extract only the intended shapes from unconstrained hand motions then the user sketches 3d ideas by defining sketching planes on these scaffolds while appending new scaffolds as needed a user study shows that our progressive and iterative workflow enables more agile 3d sketching compared to ones 
using either hand motion or pen drawing alone", + "title_raw": "Agile 3D Sketching with Air Scaffolding", + "abstract_raw": "Hand motion and pen drawing can be intuitive and expressive inputs for professional digital 3D authoring. However, their inherent limitations have hampered wider adoption. 3D sketching using hand motion is rapid but rough, and 3D sketching using pen drawing is delicate but tedious. Our new 3D sketching workflow combines these two in a complementary manner. The user makes quick hand motions in the air to generate approximate 3D shapes, and uses them as scaffolds on which to add details via pen-based 3D sketching on a tablet device. Our air scaffolding technique and corresponding algorithm extract only the intended shapes from unconstrained hand motions. Then, the user sketches 3D ideas by defining sketching planes on these scaffolds while appending new scaffolds, as needed. A user study shows that our progressive and iterative workflow enables more agile 3D sketching compared to ones using either hand motion or pen drawing alone.", + "link": "https://www.semanticscholar.org/paper/51eca42a98965a919fc06aae55605d92a8b2fde2", + "scraped_abstract": null, + "citation_best": 18 + }, + { + "paper": "2796228338", + "venue": "1163450153", + "year": "2018", + "title": "pinpointing precise head and eye based target selection for augmented reality", + "label": [ + "104114177", + "2779569201", + "150594956", + "56461940", + "31972630", + "153050134", + "153715457", + "122027848", + "81917197" + ], + "author": [ + "2137036312", + "2114569290", + "400723261", + "2147236103", + "2069749540" + ], + "reference": [ + "169235709", + "1536474500", + "1549257996", + "1561256858", + "1590806791", + "1601345954", + "1753362005", + "1977909099", + "1979038477", + "1981925954", + "1983239853", + "1986291329", + "1995055145", + "2000537113", + "2016877042", + "2021580208", + "2035567870", + "2049046173", + "2073364464", + "2076873289", + "2078869203", + "2080401675", + 
"2081167820", + "2092993982", + "2105238517", + "2110576487", + "2113923829", + "2127406376", + "2129989585", + "2132869195", + "2133229061", + "2135282300", + "2144359515", + "2157289187", + "2159055426", + "2160951686", + "2169489294", + "2173674698", + "2272545788", + "2293649818", + "2293970415", + "2412889296", + "2484393319", + "2487998920", + "2511637758", + "2527335612", + "2536455836", + "2536487173", + "2537664091", + "2558712111", + "2582663318", + "2604150046", + "2604494650", + "2760735355", + "2761409209", + "2762185336", + "2765986156" + ], + "abstract": "head and eye movement can be leveraged to improve the user s interaction repertoire for wearable displays head movements are deliberate and accurate and provide the current state of the art pointing technique eye gaze can potentially be faster and more ergonomic but suffers from low accuracy due to calibration errors and drift of wearable eye tracking sensors this work investigates precise multimodal selection techniques using head motion and eye gaze a comparison of speed and pointing accuracy reveals the relative merits of each method including the achievable target size for robust selection we demonstrate and discuss example applications for augmented reality including compact menus with deep structure and a proof of concept method for on line correction of calibration drift", + "title_raw": "Pinpointing: Precise Head- and Eye-Based Target Selection for Augmented Reality", + "abstract_raw": "Head and eye movement can be leveraged to improve the user's interaction repertoire for wearable displays. Head movements are deliberate and accurate, and provide the current state-of-the-art pointing technique. Eye gaze can potentially be faster and more ergonomic, but suffers from low accuracy due to calibration errors and drift of wearable eye-tracking sensors. This work investigates precise, multimodal selection techniques using head motion and eye gaze. 
A comparison of speed and pointing accuracy reveals the relative merits of each method, including the achievable target size for robust selection. We demonstrate and discuss example applications for augmented reality, including compact menus with deep structure, and a proof-of-concept method for on-line correction of calibration drift.", + "link": "https://www.semanticscholar.org/paper/f1da7257f5b90b17130487c912d56bf7f2ad2806", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2795857247", + "venue": "1163450153", + "year": "2018", + "title": "data illustrator augmenting vector design tools with lazy data binding for expressive visualization authoring", + "label": [ + "37789001", + "2776803701", + "107457646", + "104257957", + "15724806", + "36464697", + "21442007", + "172367668", + "59662460" + ], + "author": [ + "2110342502", + "2158888593", + "2304871011", + "3191578582", + "2796449728", + "2795412079", + "2613866513", + "2087252583" + ], + "reference": [ + "146699588", + "330700155", + "1516293359", + "1549080094", + "1568439938", + "1587026990", + "1591017069", + "1709805170", + "1964048414", + "2035324138", + "2048905912", + "2053690843", + "2067022444", + "2073796368", + "2081617138", + "2098417174", + "2112974919", + "2119828234", + "2135415614", + "2148329479", + "2158711339", + "2160382748", + "2165741325", + "2170836630", + "2275889908", + "2401208927", + "2512402302", + "2516678343", + "2517256332", + "2573978326", + "2747835641" + ], + "abstract": "building graphical user interfaces for visualization authoring is challenging as one must reconcile the tension between flexible graphics manipulation and procedural visualization generation based on a graphical grammar or declarative languages to better support designers workflows and practices we propose data illustrator a novel visualization framework in our approach all visualizations are initially vector graphics data binding is applied when necessary and only constrains interactive 
manipulation to that data bound property the framework augments graphic design tools with new concepts and operators and describes the structure and generation of a variety of visualizations based on the framework we design and implement a visualization authoring system the system extends interaction techniques in modern vector design tools for direct manipulation of visualization configurations and parameters we demonstrate the expressive power of our approach through a variety of examples a qualitative study shows that designers can use our framework to compose visualizations", + "title_raw": "Data Illustrator: Augmenting Vector Design Tools with Lazy Data Binding for Expressive Visualization Authoring", + "abstract_raw": "Building graphical user interfaces for visualization authoring is challenging as one must reconcile the tension between flexible graphics manipulation and procedural visualization generation based on a graphical grammar or declarative languages. To better support designers' workflows and practices, we propose Data Illustrator, a novel visualization framework. In our approach, all visualizations are initially vector graphics; data binding is applied when necessary and only constrains interactive manipulation to that data bound property. The framework augments graphic design tools with new concepts and operators, and describes the structure and generation of a variety of visualizations. Based on the framework, we design and implement a visualization authoring system. The system extends interaction techniques in modern vector design tools for direct manipulation of visualization configurations and parameters. We demonstrate the expressive power of our approach through a variety of examples. 
A qualitative study shows that designers can use our framework to compose visualizations.", + "link": "https://www.semanticscholar.org/paper/26d45d8bc58bfb43d3c4603321b8201219a08e32", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2795566406", + "venue": "1163450153", + "year": "2018", + "title": "a stalker s paradise how intimate partner abusers exploit technology", + "label": [ + "41065033", + "165696696", + "108827166", + "140547941", + "89505385", + "37736160", + "171289174" + ], + "author": [ + "2107432055", + "2774833558", + "2775673761", + "2561946600", + "197246800", + "2108085267" + ], + "reference": [ + "62194644", + "79275384", + "1485200701", + "1503444668", + "1526630818", + "1554028473", + "1574063504", + "1609446656", + "1844767618", + "1911952547", + "1966256426", + "1977953443", + "1983693435", + "2012380135", + "2044173330", + "2056278989", + "2071539281", + "2118291130", + "2124952808", + "2125034970", + "2136553381", + "2136781735", + "2144211824", + "2145963088", + "2145985203", + "2147201525", + "2153635880", + "2156146541", + "2294119943", + "2315891070", + "2343060339", + "2378984127", + "2398063731", + "2408204361", + "2464632266", + "2487453645", + "2559802746", + "2588107045", + "2598000026", + "2610906739", + "2715343780", + "2764059454", + "2772213089" + ], + "abstract": "this paper describes a qualitative study with 89 participants that details how abusers in intimate partner violence ipv contexts exploit technologies to intimidate threaten monitor impersonate harass or otherwise harm their victims we show that at their core many of the attacks in ipv contexts are technologically unsophisticated from the perspective of a security practitioner or researcher for example they are often carried out by a ui bound adversary an adversarial but authenticated user that interacts with a victim s device or account via standard user interfaces or by downloading and installing a ready made application that enables spying on a 
victim nevertheless we show how the sociotechnical and relational factors that characterize ipv make such attacks both extremely damaging to victims and challenging to counteract in part because they undermine the predominant threat models under which systems have been designed we discuss the nature of these new ipv threat models and outline opportunities for hci research and design to mitigate these attacks", + "title_raw": "\u201cA Stalker's Paradise\u201d: How Intimate Partner Abusers Exploit Technology", + "abstract_raw": "This paper describes a qualitative study with 89 participants that details how abusers in intimate partner violence (IPV) contexts exploit technologies to intimidate, threaten, monitor, impersonate, harass, or otherwise harm their victims. We show that, at their core, many of the attacks in IPV contexts are technologically unsophisticated from the perspective of a security practitioner or researcher. For example, they are often carried out by a UI-bound adversary - an adversarial but authenticated user that interacts with a victim\u00bbs device or account via standard user interfaces - or by downloading and installing a ready-made application that enables spying on a victim. Nevertheless, we show how the sociotechnical and relational factors that characterize IPV make such attacks both extremely damaging to victims and challenging to counteract, in part because they undermine the predominant threat models under which systems have been designed. 
We discuss the nature of these new IPV threat models and outline opportunities for HCI research and design to mitigate these attacks.", + "link": "https://scholar.google.com/scholar?as_q=&num=10&btnG=Search+Scholar&as_epq='A+Stalker's+Paradise':+How+Intimate+Partner+Abusers+Exploit+Technology&as_oq=&as_eq=&as_occt=any&as_sauthors=Freed", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2787712888", + "venue": "1163450153", + "year": "2018", + "title": "voice interfaces in everyday life", + "label": [ + "190954187", + "107457646", + "89505385" + ], + "author": [ + "2052499517", + "2143204226", + "2099767421", + "2038821706" + ], + "reference": [ + "9150632", + "42461611", + "241803415", + "644273409", + "1543678897", + "1595768576", + "1600071179", + "1765875054", + "1796112755", + "1892502720", + "1973120253", + "1999041299", + "2040886108", + "2093480241", + "2098117747", + "2109636054", + "2110537656", + "2117488952", + "2130607791", + "2141373701", + "2153190547", + "2162634167", + "2170183598", + "2217452037", + "2331515936", + "2399063318", + "2404769241", + "2405187948", + "2408002127", + "2523602859", + "2567237851", + "2581904482", + "2588723544", + "2589063696", + "2625696364", + "2789513075" + ], + "abstract": "voice user interfaces vuis are becoming ubiquitously available being embedded both into everyday mobility via smartphones and into the life of the home via assistant devices yet exactly how users of such devices practically thread that use into their everyday social interactions remains underexplored by collecting and studying audio data from month long deployments of the amazon echo in participants homes informed by ethnomethodology and conversation analysis our study documents the methodical practices of vui users and how that use is accomplished in the complex social life of the home data we present shows how the device is made accountable to and embedded into conversational settings like family dinners where various 
simultaneous activities are being achieved we discuss how the vui is finely coordinated with the sequential organisation of talk finally we locate implications for the accountability of vui interaction request and response design and raise conceptual challenges to the notion of designing conversational interfaces", + "title_raw": "Voice Interfaces in Everyday Life", + "abstract_raw": "Voice User Interfaces (VUIs) are becoming ubiquitously available, being embedded both into everyday mobility via smartphones, and into the life of the home via 'assistant' devices. Yet, exactly how users of such devices practically thread that use into their everyday social interactions remains underexplored. By collecting and studying audio data from month-long deployments of the Amazon Echo in participants' homes-informed by ethnomethodology and conversation analysis-our study documents the methodical practices of VUI users, and how that use is accomplished in the complex social life of the home. Data we present shows how the device is made accountable to and embedded into conversational settings like family dinners where various simultaneous activities are being achieved. We discuss how the VUI is finely coordinated with the sequential organisation of talk. 
Finally, we locate implications for the accountability of VUI interaction, request and response design, and raise conceptual challenges to the notion of designing 'conversational' interfaces.", + "link": "https://www.semanticscholar.org/paper/4ace92c22a895d5e23e58de8d738df8e500d8d79", + "scraped_abstract": null, + "citation_best": 552 + }, + { + "paper": "2795389852", + "venue": "1163450153", + "year": "2018", + "title": "let s talk about race identity chatbots and ai", + "label": [ + "60048249", + "108583219" + ], + "author": [ + "2611908861", + "2044174557", + "2163777400" + ], + "reference": [ + "7542544", + "57668938", + "182831726", + "1498112155", + "1500693574", + "1579838312", + "1614298861", + "1651525653", + "1977135436", + "1978451524", + "1991164020", + "1996796871", + "2001771035", + "2009685682", + "2042882066", + "2047185874", + "2054021874", + "2055261030", + "2056879345", + "2066904448", + "2079462199", + "2084624217", + "2101761627", + "2123774149", + "2129657639", + "2138857742", + "2145482038", + "2147603330", + "2155987657", + "2159097316", + "2160285203", + "2161466446", + "2213735822", + "2248761606", + "2266294403", + "2292070666", + "2293714037", + "2318723339", + "2322516740", + "2327521297", + "2337002970", + "2345380342", + "2399394062", + "2405187948", + "2410628241", + "2510955516", + "2510976782", + "2531099434", + "2550981489", + "2562803530", + "2563390065", + "2564034046", + "2574785060", + "2575439561", + "2584530815", + "2606321545", + "2610364475", + "2610743056", + "2735886174", + "2737089608", + "2942733065", + "2948770719", + "2962807820", + "2963233086", + "3000406623", + "3021909058", + "3049051127" + ], + "abstract": "why is it so hard for chatbots to talk about race this work explores how the biased contents of databases the syntactic focus of natural language processing and the opaque nature of deep learning algorithms cause chatbots difficulty in handling race talk in each of these areas the tensions between race and 
chatbots create new opportunities for people and machines by making the abstract and disparate qualities of this problem space tangible we can develop chatbots that are more capable of handling race talk in its many forms our goal is to provide the hci community with ways to begin addressing the question how can chatbots handle race talk in new and improved ways", + "title_raw": "Let's Talk About Race: Identity, Chatbots, and AI", + "abstract_raw": "Why is it so hard for chatbots to talk about race? This work explores how the biased contents of databases, the syntactic focus of natural language processing, and the opaque nature of deep learning algorithms cause chatbots difficulty in handling race-talk. In each of these areas, the tensions between race and chatbots create new opportunities for people and machines. By making the abstract and disparate qualities of this problem space tangible, we can develop chatbots that are more capable of handling race-talk in its many forms. Our goal is to provide the HCI community with ways to begin addressing the question, how can chatbots handle race-talk in new and improved ways?", + "link": "https://www.semanticscholar.org/paper/f34c73c75a640f59c11472bf6c9786aeb774856a", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2795783386", + "venue": "1163450153", + "year": "2018", + "title": "semi automated coding for qualitative research a user centered inquiry and initial prototypes", + "label": [ + "127759330", + "161772715", + "106516650", + "23123220" + ], + "author": [ + "2486242049", + "2114684898" + ], + "reference": [ + "8550301", + "8870360", + "567101338", + "1099652466", + "1515587369", + "1524824878", + "1532325895", + "1605824561", + "1684809301", + "1803273808", + "1808244176", + "1826129262", + "1938990613", + "1975469999", + "1979290264", + "1981425990", + "1994495527", + "2003238113", + "2004184632", + "2026993398", + "2033832618", + "2033952602", + "2038572603", + "2040150143", + 
"2048534968", + "2071827190", + "2084551014", + "2102611807", + "2115312643", + "2138679593", + "2142776520", + "2147152072", + "2150104072", + "2157821464", + "2186654215", + "2330516491", + "2342014309", + "2565297696", + "2588117145", + "2604314461", + "2944568404", + "2971035958", + "3013219582", + "3103319922" + ], + "abstract": "qualitative researchers perform an important and painstaking data annotation process known as coding however much of the process can be tedious and repetitive becoming prohibitive for large datasets could coding be partially automated and should it be to answer this question we interviewed researchers and observed them code interview transcripts we found that across disciplines researchers follow several coding practices well suited to automation further researchers desire automation after having developed a codebook and coded a subset of data particularly in extending their coding to unseen data researchers also require any assistive tool to be transparent about its recommendations based on our findings we built prototypes to partially automate coding using simple natural language processing techniques our top performing system generates coding that matches human coders on inter rater reliability measures we discuss implications for interface and algorithm design meta issues around automating qualitative research and suggestions for future work", + "title_raw": "Semi-Automated Coding for Qualitative Research: A User-Centered Inquiry and Initial Prototypes", + "abstract_raw": "Qualitative researchers perform an important and painstaking data annotation process known as coding. However, much of the process can be tedious and repetitive, becoming prohibitive for large datasets. Could coding be partially automated, and should it be? To answer this question, we interviewed researchers and observed them code interview transcripts. We found that across disciplines, researchers follow several coding practices well-suited to automation. 
Further, researchers desire automation after having developed a codebook and coded a subset of data, particularly in extending their coding to unseen data. Researchers also require any assistive tool to be transparent about its recommendations. Based on our findings, we built prototypes to partially automate coding using simple natural language processing techniques. Our top-performing system generates coding that matches human coders on inter-rater reliability measures. We discuss implications for interface and algorithm design, meta-issues around automating qualitative research, and suggestions for future work.", + "link": "https://www.semanticscholar.org/paper/db2b16c3f1c094dfed8b309e668018dd14b36c7b", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2796076588", + "venue": "1163450153", + "year": "2018", + "title": "wall room scale interactive and context aware sensing", + "label": [ + "107457646", + "207347870", + "144430266", + "52102323" + ], + "author": [ + "2305394960", + "2343655495", + "2171298838", + "2123491528", + "2040045115" + ], + "reference": [ + "195148789", + "197864537", + "1021847362", + "1525878913", + "1918749672", + "1926324105", + "1963735412", + "1970334548", + "1974939676", + "1991937516", + "2005198142", + "2006036128", + "2016549506", + "2028192593", + "2036471813", + "2045590778", + "2050552147", + "2050735177", + "2055483942", + "2063812706", + "2065012543", + "2067040941", + "2070924230", + "2073408573", + "2076566073", + "2082930097", + "2100989187", + "2103793365", + "2104066809", + "2107146394", + "2111310561", + "2112151495", + "2123910460", + "2124917042", + "2127957107", + "2131776077", + "2133990480", + "2135450947", + "2141336889", + "2144299314", + "2145881665", + "2146711757", + "2151034334", + "2152361837", + "2153604804", + "2154224206", + "2155207172", + "2161708246", + "2162286072", + "2164692160", + "2170465625", + "2185872672", + "2346308358", + "2346389740", + "2346915687", + "2398098909", + 
"2416228074", + "2478003042", + "2514390409", + "2538172027", + "2559085405", + "2592368662", + "2610935612", + "2611427051", + "2906710874", + "2999214452", + "3149875751" + ], + "abstract": "human environments are typified by walls homes offices schools museums hospitals and pretty much every indoor context one can imagine has walls in many cases they make up a majority of readily accessible indoor surface area and yet they are static their primary function is to be a wall separating spaces and hiding infrastructure we present wall a low cost sensing approach that allows walls to become a smart infrastructure instead of merely separating spaces walls can now enhance rooms with sensing and interactivity our wall treatment and sensing hardware can track users touch and gestures as well as estimate body pose if they are close by capturing airborne electromagnetic noise we can also detect what appliances are active and where they are located through a series of evaluations we demonstrate wall can enable robust room scale interactive and context aware applications", + "title_raw": "Wall++: Room-Scale Interactive and Context-Aware Sensing", + "abstract_raw": "Human environments are typified by walls, homes, offices, schools, museums, hospitals and pretty much every indoor context one can imagine has walls. In many cases, they make up a majority of readily accessible indoor surface area, and yet they are static their primary function is to be a wall, separating spaces and hiding infrastructure. We present Wall++, a low-cost sensing approach that allows walls to become a smart infrastructure. Instead of merely separating spaces, walls can now enhance rooms with sensing and interactivity. Our wall treatment and sensing hardware can track users' touch and gestures, as well as estimate body pose if they are close. By capturing airborne electromagnetic noise, we can also detect what appliances are active and where they are located. 
Through a series of evaluations, we demonstrate Wall++ can enable robust room-scale interactive and context-aware applications.", + "link": "https://www.semanticscholar.org/paper/39dde3d0e00b2820e52e5badb52cb70691c3423d", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2795442664", + "venue": "1163450153", + "year": "2018", + "title": "expressive time series querying with hand drawn scale free sketches", + "label": [ + "121329065", + "58489278", + "61455927", + "2780719617", + "136197465", + "23123220" + ], + "author": [ + "2795958524", + "2097080516" + ], + "reference": [ + "116902681", + "1487098297", + "1509578651", + "1510964588", + "1520352740", + "1528027857", + "1548778934", + "1598265492", + "1777389131", + "1792912906", + "1972420097", + "2006783944", + "2009899978", + "2015217008", + "2017528563", + "2024466965", + "2039761202", + "2042661054", + "2042997238", + "2057427057", + "2066834853", + "2067288508", + "2069870183", + "2081028405", + "2085321555", + "2097267403", + "2097983034", + "2098759488", + "2099302229", + "2100900426", + "2109399414", + "2111736285", + "2129330015", + "2139440613", + "2148212498", + "2161783015", + "2162756694", + "2162800060", + "2400311702", + "2483430316", + "2516815969" + ], + "abstract": "we present qetch a tool where users freely sketch patterns on a scale less canvas to query time series data without specifying query length or amplitude we study how humans sketch time series patterns humans preserve visually salient perceptual features but often non uniformly scale and locally distort a pattern and we develop a novel matching algorithm that accounts for human sketching errors qetch enables the easy construction of complex and expressive queries with two key features regular expressions over sketches and relative positioning of sketches to query multiple time aligned series through user studies we demonstrate the effectiveness of qetch s different interaction features we also demonstrate the 
effectiveness of qetch s matching algorithm compared to popular algorithms on targeted and exploratory query by sketch search tasks on a variety of data sets", + "title_raw": "Expressive Time Series Querying with Hand-Drawn Scale-Free Sketches", + "abstract_raw": "We present Qetch, a tool where users freely sketch patterns on a scale-less canvas to query time series data without specifying query length or amplitude. We study how humans sketch time series patterns --- humans preserve visually salient perceptual features but often non-uniformly scale and locally distort a pattern --- and we develop a novel matching algorithm that accounts for human sketching errors. Qetch enables the easy construction of complex and expressive queries with two key features: regular expressions over sketches and relative positioning of sketches to query multiple time-aligned series. Through user studies, we demonstrate the effectiveness of Qetch's different interaction features. We also demonstrate the effectiveness of Qetch's matching algorithm compared to popular algorithms on targeted, and exploratory query-by-sketch search tasks on a variety of data sets.", + "link": "https://www.semanticscholar.org/paper/b145f72d9abc4b2c48a46fd1bdc4476a4b5fa4f8", + "scraped_abstract": null, + "citation_best": 51 + }, + { + "paper": "2795619128", + "venue": "1163450153", + "year": "2018", + "title": "project zanzibar a portable and flexible tangible interaction platform", + "label": [ + "63000827", + "107457646", + "207347870", + "146054899", + "202474056", + "41826821" + ], + "author": [ + "2165003359", + "2610476763", + "2103102540", + "2208258931", + "2136306449", + "2345481512", + "2084788496", + "2539088722", + "2796465372", + "2099635113" + ], + "reference": [ + "1815090327", + "1970854835", + "1975234921", + "1987114563", + "1997556709", + "1998297311", + "2001716033", + "2005198142", + "2010070161", + "2010353889", + "2016621390", + "2023412552", + "2029055514", + "2031757018", + 
"2037703416", + "2042899456", + "2050896993", + "2052194215", + "2059403519", + "2068389981", + "2068893873", + "2070924230", + "2097163846", + "2103131022", + "2107460475", + "2107820924", + "2114296568", + "2122928453", + "2127177812", + "2130733969", + "2130877787", + "2132854028", + "2136617671", + "2137917711", + "2138613430", + "2138727017", + "2139967937", + "2148819007", + "2149891956", + "2167686873", + "2177404794", + "2249453717", + "2346915687", + "2397886250", + "2592368662" + ], + "abstract": "we present project zanzibar a flexible mat that can locate uniquely identify and communicate with tangible objects placed on its surface as well as sense a user s touch and hover hand gestures we describe the underlying technical contributions efficient and localised near field communication nfc over a large surface area object tracking combining nfc signal strength and capacitive footprint detection and manufacturing techniques for a rollable device form factor that enables portability while providing a sizable interaction area when unrolled in addition we detail design patterns for tangibles of varying complexity and interactive capabilities including the ability to sense orientation on the mat harvest power provide additional input and output stack or extend sensing outside the bounds of the mat capabilities and interaction modalities are illustrated with self generated applications finally we report on the experience of professional game developers building novel physical digital experiences using the platform", + "title_raw": "Project Zanzibar: A Portable and Flexible Tangible Interaction Platform", + "abstract_raw": "We present Project Zanzibar: a flexible mat that can locate, uniquely identify and communicate with tangible objects placed on its surface, as well as sense a user's touch and hover hand gestures. 
We describe the underlying technical contributions: efficient and localised Near Field Communication (NFC) over a large surface area; object tracking combining NFC signal strength and capacitive footprint detection, and manufacturing techniques for a rollable device form-factor that enables portability, while providing a sizable interaction area when unrolled. In addition, we detail design patterns for tangibles of varying complexity and interactive capabilities, including the ability to sense orientation on the mat, harvest power, provide additional input and output, stack, or extend sensing outside the bounds of the mat. Capabilities and interaction modalities are illustrated with self-generated applications. Finally, we report on the experience of professional game developers building novel physical/digital experiences using the platform.", + "link": "https://www.semanticscholar.org/paper/4ea0495460b38b31ab00601567dfe47ec9ea2c84", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2796058046", + "venue": "1163450153", + "year": "2018", + "title": "extending manual drawing practices with artist centric programming tools", + "label": [ + "153083717", + "34165917", + "107457646" + ], + "author": [ + "2222602092", + "2121327176", + "2064952311", + "2131840961" + ], + "reference": [ + "159010171", + "1490090861", + "1516886930", + "1540823594", + "1597187241", + "1815090327", + "1985161107", + "1987581944", + "1995067288", + "1996138408", + "1996455866", + "2019701196", + "2027847765", + "2030514725", + "2037703416", + "2053925648", + "2071873073", + "2084944215", + "2090351970", + "2092059964", + "2102661934", + "2114877339", + "2117769378", + "2121624795", + "2122023692", + "2123929355", + "2134816385", + "2135415614", + "2139374478", + "2140205964", + "2141385588", + "2149252982", + "2404175483", + "2489343363", + "2507500182", + "2611393743", + "2612337108", + "2799138774", + "2919176151", + "2981401584" + ], + "abstract": "procedural art or 
art made with programming suggests opportunities to extend traditional arts like painting and drawing however this potential is limited by tools that conflict with manual practices programming languages present learning barriers and manual drawing input is not a first class primitive in common programming models we hypothesize that by developing programming languages and environments that align with how manual artists work we can build procedural systems that enhance rather than displace manual art to explore this we developed dynamic brushes a programming and drawing environment motivated by interviews with artists dynamic brushes enables the creation of ad hoc drawing tools that transform stylus inputs to procedural patterns applications range from transforming individual strokes to behaviors that draw multiple strokes simultaneously respond to temporal events and leverage external data results from an extended evaluation with artists provide guidelines for learnable expressive systems that blend manual and procedural creation", + "title_raw": "Extending Manual Drawing Practices with Artist-Centric Programming Tools", + "abstract_raw": "Procedural art, or art made with programming, suggests opportunities to extend traditional arts like painting and drawing; however, this potential is limited by tools that conflict with manual practices. Programming languages present learning barriers and manual drawing input is not a first class primitive in common programming models. We hypothesize that by developing programming languages and environments that align with how manual artists work, we can build procedural systems that enhance, rather than displace, manual art. To explore this, we developed Dynamic Brushes, a programming and drawing environment motivated by interviews with artists. Dynamic Brushes enables the creation of ad-hoc drawing tools that transform stylus inputs to procedural patterns. 
Applications range from transforming individual strokes to behaviors that draw multiple strokes simultaneously, respond to temporal events, and leverage external data. Results from an extended evaluation with artists provide guidelines for learnable, expressive systems that blend manual and procedural creation.", + "link": "https://www.semanticscholar.org/paper/4068b28a0568fc090c2b943057a89461e8f4450e", + "scraped_abstract": null, + "citation_best": 42 + }, + { + "paper": "2964185501", + "venue": "1158167855", + "year": "2018", + "title": "taskonomy disentangling task transfer learning", + "label": [ + "150899416", + "119857082", + "154945302", + "2778770139" + ], + "author": [ + "2074020674", + "2798539662", + "2769415616", + "356043702", + "2136556746", + "1618661958" + ], + "reference": [ + "93016980", + "99485931", + "219040644", + "343636949", + "1501500081", + "1519152752", + "1520997877", + "1571401318", + "1682403713", + "1722318740", + "1771410628", + "1773179558", + "1821462560", + "1861492603", + "1959608418", + "1988348003", + "1996777517", + "2005295545", + "2046078150", + "2047161559", + "2053186076", + "2060277733", + "2062118960", + "2089685866", + "2090923791", + "2095705004", + "2098676252", + "2104068492", + "2106837051", + "2114760689", + "2115733720", + "2120501001", + "2123470622", + "2126725946", + "2128053425", + "2128152674", + "2134670479", + "2134845968", + "2143915663", + "2145482038", + "2150145411", + "2155541015", + "2157904933", + "2162188269", + "2162708558", + "2163605009", + "2163922914", + "2179146407", + "2194321275", + "2206998438", + "2286353276", + "2290104316", + "2312252795", + "2321533354", + "2326925005", + "2412320034", + "2520377600", + "2555182955", + "2565989828", + "2566971417", + "2586114507", + "2592170186", + "2611035488", + "2750549109", + "2950527759", + "2953127211", + "2962824366", + "2963305465", + "2963420272", + "2963591054", + "2963634205", + "2963749571", + "2964047820", + "2964056935", + "2964118262", + 
"2964121744", + "2964153729", + "2964163480", + "2964294881", + "2964339842", + "3137695714" + ], + "abstract": "do visual tasks have a relationship or are they unrelated for instance could having surface normals simplify estimating the depth of an image intuition answers these questions positively implying existence of a structure among visual tasks knowing this structure has notable values it is the concept underlying transfer learning and provides a principled way for identifying redundancies across tasks e g to seamlessly reuse supervision among related tasks or solve many tasks in one system without piling up the complexity we proposes a fully computational approach for modeling the structure of space of visual tasks this is done via finding first and higher order transfer learning dependencies across a dictionary of twenty six 2d 2 5d 3d and semantic tasks in a latent space the product is a computational taxonomic map for task transfer learning we study the consequences of this structure e g nontrivial emerged relationships and exploit them to reduce the demand for labeled data we provide a set of tools for computing and probing this taxonomical structure including a solver users can employ to find supervision policies for their use cases", + "title_raw": "Taskonomy: Disentangling Task Transfer Learning", + "abstract_raw": "Do visual tasks have a relationship, or are they unrelated? For instance, could having surface normals simplify estimating the depth of an image? Intuition answers these questions positively, implying existence of a structure among visual tasks. Knowing this structure has notable values; it is the concept underlying transfer learning and provides a principled way for identifying redundancies across tasks, e.g., to seamlessly reuse supervision among related tasks or solve many tasks in one system without piling up the complexity. We proposes a fully computational approach for modeling the structure of space of visual tasks. 
This is done via finding (first and higher-order) transfer learning dependencies across a dictionary of twenty six 2D, 2.5D, 3D, and semantic tasks in a latent space. The product is a computational taxonomic map for task transfer learning. We study the consequences of this structure, e.g. nontrivial emerged relationships, and exploit them to reduce the demand for labeled data. We provide a set of tools for computing and probing this taxonomical structure including a solver users can employ to find supervision policies for their use cases.", + "link": "https://www.semanticscholar.org/paper/2fe2cfd98e232f1396f01881853ed6b3d5e37d65", + "scraped_abstract": null, + "citation_best": 82 + }, + { + "paper": "2899462170", + "venue": "1199533187", + "year": "2018", + "title": "the impact of regular expression denial of service redos in practice an empirical study at the ecosystem scale", + "label": [ + "64869954", + "38652104", + "544833334", + "121329065", + "38822068", + "82815560", + "186644900", + "127705205" + ], + "author": [ + "2606103259", + "2898730559", + "2222066022", + "2110410315" + ], + "reference": [ + "87129872", + "170279777", + "593150012", + "1563402047", + "1601674470", + "1813069714", + "1994584977", + "2093709900", + "2122041620", + "2343468826", + "2460699391", + "2480448195", + "2506796853", + "2542877701", + "2740279154", + "2760683747", + "2767914643", + "2783641411", + "2806253293", + "2888047193", + "2889480272", + "2898674000", + "2952007931", + "3106010854", + "3146438627", + "3150132563" + ], + "abstract": "regular expressions regexes are a popular and powerful means of automatically manipulating text regexes are also an understudied denial of service vector redos if a regex has super linear worst case complexity an attacker may be able to trigger this complexity exhausting the victim s cpu resources and causing denial of service existing research has shown how to detect these superlinear regexes and practitioners have identified super linear 
regex anti pattern heuristics that may lead to such complexity in this paper we empirically study three major aspects of redos that have hitherto been unexplored the incidence of super linear regexes in practice how they can be prevented and how they can be repaired in the ecosystems of two of the most popular programming languages javascript and python we detected thousands of super linear regexes affecting over 10 000 modules across diverse application domains we also found that the conventional wisdom for super linear regex anti patterns has few false negatives but many false positives these anti patterns appear to be necessary but not sufficient signals of super linear behavior finally we found that when faced with a super linear regex developers favor revising it over truncating input or developing a custom parser regardless of whether they had been shown examples of all three fix strategies these findings motivate further research into redos since many modules are vulnerable to it and existing mechanisms to avoid it are insufficient we believe that redos vulnerabilities are a larger threat in practice than might have been guessed", + "title_raw": "The impact of regular expression denial of service (ReDoS) in practice: an empirical study at the ecosystem scale", + "abstract_raw": "Regular expressions (regexes) are a popular and powerful means of automatically manipulating text. Regexes are also an understudied denial of service vector (ReDoS). If a regex has super-linear worst-case complexity, an attacker may be able to trigger this complexity, exhausting the victim\u2019s CPU resources and causing denial of service. Existing research has shown how to detect these superlinear regexes, and practitioners have identified super-linear regex anti-pattern heuristics that may lead to such complexity. 
In this paper, we empirically study three major aspects of ReDoS that have hitherto been unexplored: the incidence of super-linear regexes in practice, how they can be prevented, and how they can be repaired. In the ecosystems of two of the most popular programming languages \u2014 JavaScript and Python \u2013 we detected thousands of super-linear regexes affecting over 10,000 modules across diverse application domains. We also found that the conventional wisdom for super-linear regex anti-patterns has few false negatives but many false positives; these anti-patterns appear to be necessary, but not sufficient, signals of super-linear behavior. Finally, we found that when faced with a super-linear regex, developers favor revising it over truncating input or developing a custom parser, regardless of whether they had been shown examples of all three fix strategies. These findings motivate further research into ReDoS, since many modules are vulnerable to it and existing mechanisms to avoid it are insufficient. 
We believe that ReDoS vulnerabilities are a larger threat in practice than might have been guessed.", + "link": "https://scholar.google.com/scholar?as_q=&num=10&btnG=Search+Scholar&as_epq=The+Impact+of+Regular+Expression+Denial+of+Service+(ReDoS)+in+Practice:+An+Empirical+Study+at+the+Ecosystem+Scale&as_oq=&as_eq=&as_occt=any&as_sauthors=Davis", + "scraped_abstract": null, + "citation_best": 86 + }, + { + "paper": "2964177714", + "venue": "1199533187", + "year": "2018", + "title": "adversarial symbolic execution for detecting concurrency related cache timing leaks", + "label": [ + "149635348", + "164155591", + "167955471", + "2779639559", + "177264268", + "49289754", + "193702766", + "87551280", + "115537543" + ], + "author": [ + "2223396976", + "2480381493", + "2854535502" + ], + "reference": [ + "52769613", + "72918221", + "116061528", + "147129824", + "150190951", + "200116028", + "1511843316", + "1549714654", + "1598102953", + "1607006990", + "1613874182", + "1710734607", + "1724890242", + "1811355673", + "1824405704", + "1850312962", + "1969074822", + "2003299277", + "2024875714", + "2059987590", + "2077732325", + "2095551048", + "2107971662", + "2115595690", + "2116207513", + "2116998907", + "2117290716", + "2153185479", + "2154909745", + "2169815249", + "2169870841", + "2171278566", + "2293441536", + "2343729057", + "2418260908", + "2488210329", + "2515614845", + "2517404595", + "2538823313", + "2546922927", + "2580207986", + "2603394416", + "2605815030", + "2620701188", + "2626217303", + "2740486794", + "2741390934", + "2744175342", + "2753573171", + "2758227629", + "2761823169", + "2766845335", + "2766853874", + "2767446656", + "2772336084", + "2778798134", + "2791544367", + "2795180100", + "2809301752", + "2858117630", + "2883707793", + "2910752585", + "2951734705", + "2963047853" + ], + "abstract": "the timing characteristics of cache a high speed storage between the fast cpu and the slow memory may reveal sensitive information of a program thus allowing 
an adversary to conduct side channel attacks existing methods for detecting timing leaks either ignore cache all together or focus only on passive leaks generated by the program itself without considering leaks that are made possible by concurrently running some other threads in this work we show that timing leak freedom is not a compositional property a program that is not leaky when running alone may become leaky when interleaved with other threads thus we develop a new method named adversarial symbolic execution to detect such leaks it systematically explores both the feasible program paths and their interleavings while modeling the cache and leverages an smt solver to decide if there are timing leaks we have implemented our method in llvm and evaluated it on a set of real world ciphers with 14 455 lines of c code in total our experiments demonstrate both the efficiency of our method and its effectiveness in detecting side channel leaks", + "title_raw": "Adversarial symbolic execution for detecting concurrency-related cache timing leaks", + "abstract_raw": "The timing characteristics of cache, a high-speed storage between the fast CPU and the slow memory, may reveal sensitive information of a program, thus allowing an adversary to conduct side-channel attacks. Existing methods for detecting timing leaks either ignore cache all together or focus only on passive leaks generated by the program itself, without considering leaks that are made possible by concurrently running some other threads. In this work, we show that timing-leak-freedom is not a compositional property: a program that is not leaky when running alone may become leaky when interleaved with other threads. Thus, we develop a new method, named adversarial symbolic execution, to detect such leaks. It systematically explores both the feasible program paths and their interleavings while modeling the cache, and leverages an SMT solver to decide if there are timing leaks. 
We have implemented our method in LLVM and evaluated it on a set of real-world ciphers with 14,455 lines of C code in total. Our experiments demonstrate both the efficiency of our method and its effectiveness in detecting side-channel leaks.", + "link": "https://www.semanticscholar.org/paper/ecd8fab5cfa2754f1c819c421aee715d31b19619", + "scraped_abstract": null, + "citation_best": 44 + }, + { + "paper": "2884340427", + "venue": "1199533187", + "year": "2018", + "title": "data race detection on compressed traces", + "label": [ + "140006998", + "162478608", + "50341643", + "53893814", + "1462715", + "11413529", + "25343380" + ], + "author": [ + "1848275081", + "2651860200", + "2161931206" + ], + "reference": [ + "190006957", + "1020474857", + "1487375638", + "1512849744", + "1522334395", + "1534988187", + "1652742168", + "1887412317", + "1972544179", + "1982538209", + "1990653637", + "2002179840", + "2007541742", + "2014589371", + "2016323637", + "2025871578", + "2043003055", + "2045238089", + "2046368282", + "2065939997", + "2066952803", + "2079114230", + "2090590416", + "2091939412", + "2105686649", + "2105794846", + "2109033563", + "2109875364", + "2116746874", + "2116815344", + "2120476011", + "2121696621", + "2130433404", + "2130956967", + "2132613390", + "2133662847", + "2135395375", + "2144871685", + "2149984854", + "2153174073", + "2154557525", + "2156428492", + "2159765571", + "2164726441", + "2166091242", + "2166615267", + "2167744164", + "2171818384", + "2187526581", + "2471554138", + "2523223668", + "2536074297", + "2547439180", + "2548714034", + "2602771387", + "2606910946", + "2950582171", + "3003570873", + "3137220996" + ], + "abstract": "we consider the problem of detecting data races in program traces that have been compressed using straight line programs slp which are special context free grammars that generate exactly one string namely the trace that they represent we consider two classical approaches to race detection using the happens before 
relation and the lockset discipline we present algorithms for both these methods that run in time that is linear in the size of the compressed slp representation typical program executions almost always exhibit patterns that lead to significant compression thus our algorithms are expected to result in large speedups when compared with analyzing the uncompressed trace our experimental evaluation of these new algorithms on standard benchmarks confirms this observation", + "title_raw": "Data race detection on compressed traces", + "abstract_raw": "We consider the problem of detecting data races in program traces that have been compressed using straight line programs (SLP), which are special context-free grammars that generate exactly one string, namely the trace that they represent. We consider two classical approaches to race detection --- using the happens-before relation and the lockset discipline. We present algorithms for both these methods that run in time that is linear in the size of the compressed, SLP representation. Typical program executions almost always exhibit patterns that lead to significant compression. Thus, our algorithms are expected to result in large speedups when compared with analyzing the uncompressed trace. 
Our experimental evaluation of these new algorithms on standard benchmarks confirms this observation.", + "link": "https://www.semanticscholar.org/paper/5e689f10362516a96a024c47fc6e6b602bbb3a87", + "scraped_abstract": null, + "citation_best": 11 + }, + { + "paper": "3102504333", + "venue": "1199533187", + "year": "2018", + "title": "do android taint analysis tools keep their promises", + "label": [ + "557433098", + "63116202", + "146849305", + "2522767166" + ], + "author": [ + "2553029247", + "1994811108", + "2062320239" + ], + "reference": [ + "1915915253", + "1963971515", + "2004333568", + "2017025011", + "2027538101", + "2047421912", + "2047764386", + "2077202047", + "2078197322", + "2094716892", + "2113115074", + "2140095007", + "2153542583", + "2166743230", + "2343325785", + "2385667943", + "2529069893", + "2529696250", + "2535386169", + "2600871181", + "2603044584", + "2620061339", + "2768047339", + "2853432192", + "2947231160" + ], + "abstract": "in recent years researchers have developed a number of tools to conduct taint analysis of android applications while all the respective papers aim at providing a thorough empirical evaluation comparability is hindered by varying or unclear evaluation targets sometimes the apps used for evaluation are not precisely described in other cases authors use an established benchmark but cover it only partially in yet other cases the evaluations differ in terms of the data leaks searched for or lack a ground truth to compare against all those limitations make it impossible to truly compare the tools based on those published evaluations we thus present reprodroid a framework allowing the accurate comparison of android taint analysis tools reprodroid supports researchers in inferring the ground truth for data leaks in apps in automatically applying tools to benchmarks and in evaluating the obtained results we use reprodroid to comparatively evaluate on equal grounds the six prominent taint analysis tools amandroid dialdroid 
didfail droidsafe flowdroid and iccta the results are largely positive although four tools violate some promises concerning features and accuracy finally we contribute to the area of unbiased benchmarking with a new and improved version of the open test suite droidbench", + "title_raw": "Do Android taint analysis tools keep their promises", + "abstract_raw": "In recent years, researchers have developed a number of tools to conduct taint analysis of Android applications. While all the respective papers aim at providing a thorough empirical evaluation, comparability is hindered by varying or unclear evaluation targets. Sometimes, the apps used for evaluation are not precisely described. In other cases, authors use an established benchmark but cover it only partially. In yet other cases, the evaluations differ in terms of the data leaks searched for, or lack a ground truth to compare against. All those limitations make it impossible to truly compare the tools based on those published evaluations. We thus present ReproDroid, a framework allowing the accurate comparison of Android taint analysis tools. ReproDroid supports researchers in inferring the ground truth for data leaks in apps, in automatically applying tools to benchmarks, and in evaluating the obtained results. We use ReproDroid to comparatively evaluate on equal grounds the six prominent taint analysis tools Amandroid, DIALDroid, DidFail, DroidSafe, FlowDroid and IccTA. The results are largely positive although four tools violate some promises concerning features and accuracy. 
Finally, we contribute to the area of unbiased benchmarking with a new and improved version of the open test suite DroidBench.", + "link": "https://www.semanticscholar.org/paper/701b5d86e9626dd1f5f80194009c7a30356bdbd7", + "scraped_abstract": null, + "citation_best": 86 + }, + { + "paper": "2899510293", + "venue": "1199533187", + "year": "2018", + "title": "an empirical study on crash recovery bugs in large scale distributed systems", + "label": [ + "26713055", + "46135064", + "183469790", + "120314980", + "113324615" + ], + "author": [ + "2768062674", + "2230178826", + "2345606310", + "2231415079", + "2898641810", + "2133385333", + "2164843234", + "2899212111", + "2899037683" + ], + "reference": [ + "192446467", + "1412006679", + "1423003888", + "1455804204", + "1511806592", + "1566086941", + "1587989274", + "1834470623", + "1847696060", + "1865515598", + "1981420413", + "1985229168", + "1992479210", + "2006307164", + "2023718005", + "2039157918", + "2044819105", + "2073742357", + "2091776255", + "2105947650", + "2106468386", + "2119565742", + "2122465391", + "2124877509", + "2147891468", + "2153704625", + "2163961697", + "2167814583", + "2169870841", + "2170224888", + "2288327952", + "2291146106", + "2303724510", + "2327265941", + "2527771912", + "2604985896", + "2605276142", + "2606939423", + "2752311492", + "2792833030", + "2804035958", + "2899083685", + "2913205103" + ], + "abstract": "in large scale distributed systems node crashes are inevitable and can happen at any time as such distributed systems are usually designed to be resilient to these node crashes via various crash recovery mechanisms such as write ahead logging in hbase and hinted handoffs in cassandra however faults in crash recovery mechanisms and their implementations can introduce intricate crash recovery bugs and lead to severe consequences in this paper we present creb the most comprehensive study on 103 crash recovery bugs from four popular open source distributed systems including 
zookeeper hadoop mapreduce cassandra and hbase for all the studied bugs we analyze their root causes triggering conditions bug impacts and fixing through this study we obtain many interesting findings that can open up new research directions for combating crash recovery bugs", + "title_raw": "An empirical study on crash recovery bugs in large-scale distributed systems", + "abstract_raw": "In large-scale distributed systems, node crashes are inevitable, and can happen at any time. As such, distributed systems are usually designed to be resilient to these node crashes via various crash recovery mechanisms, such as write-ahead logging in HBase and hinted handoffs in Cassandra. However, faults in crash recovery mechanisms and their implementations can introduce intricate crash recovery bugs, and lead to severe consequences. In this paper, we present CREB, the most comprehensive study on 103 Crash REcovery Bugs from four popular open-source distributed systems, including ZooKeeper, Hadoop MapReduce, Cassandra and HBase. For all the studied bugs, we analyze their root causes, triggering conditions, bug impacts and fixing. 
Through this study, we obtain many interesting findings that can open up new research directions for combating crash recovery bugs.", + "link": "https://www.semanticscholar.org/paper/c6fe902952241174c6b951b1302c3b2ecc90c3b5", + "scraped_abstract": null, + "citation_best": 45 + }, + { + "paper": "3105535951", + "venue": "1199533187", + "year": "2018", + "title": "oreo detection of clones in the twilight zone", + "label": [ + "103278499", + "81669768", + "2778739878", + "178980831", + "43126263" + ], + "author": [ + "2131896179", + "2808441088", + "2808311960", + "2060797211", + "2132640881" + ], + "reference": [ + "266424129", + "1490878083", + "1500382227", + "1512285202", + "1537851244", + "1569487506", + "1593203335", + "1677182931", + "1698439592", + "1965555277", + "1971959540", + "1998874662", + "2036295879", + "2037537863", + "2076063813", + "2088479623", + "2096491586", + "2101832700", + "2104609444", + "2107697055", + "2108101947", + "2109759383", + "2109943392", + "2115791615", + "2118024368", + "2125260159", + "2128698639", + "2128782367", + "2128888088", + "2129624210", + "2131477050", + "2136099030", + "2138756793", + "2141535436", + "2146483864", + "2156387975", + "2157532207", + "2158439356", + "2164370980", + "2165739648", + "2166278331", + "2172244770", + "2286236884", + "2298313545", + "2316073290", + "2334699878", + "2511803001", + "2578208870", + "2584966780", + "2618530766", + "2620636222", + "2741705590", + "2767717989" + ], + "abstract": "source code clones are categorized into four types of increasing difficulty of detection ranging from purely textual type 1 to purely semantic type 4 most clone detectors reported in the literature work well up to type 3 which accounts for syntactic differences in between type 3 and type 4 however there lies a spectrum of clones that although still exhibiting some syntactic similarities are extremely hard to detect the twilight zone most clone detectors reported in the literature fail to operate in this zone 
we present oreo a novel approach to source code clone detection that not only detects type 1 to type 3 clones accurately but is also capable of detecting harder to detect clones in the twilight zone oreo is built using a combination of machine learning information retrieval and software metrics we evaluate the recall of oreo on bigclonebench and perform manual evaluation for precision oreo has both high recall and precision more importantly it pushes the boundary in detection of clones with moderate to weak syntactic similarity in a scalable manner", + "title_raw": "Oreo: detection of clones in the twilight zone", + "abstract_raw": "Source code clones are categorized into four types of increasing difficulty of detection, ranging from purely textual (Type-1) to purely semantic (Type-4). Most clone detectors reported in the literature work well up to Type-3, which accounts for syntactic differences. In between Type-3 and Type-4, however, there lies a spectrum of clones that, although still exhibiting some syntactic similarities, are extremely hard to detect \u2013 the Twilight Zone. Most clone detectors reported in the literature fail to operate in this zone. We present Oreo, a novel approach to source code clone detection that not only detects Type-1 to Type-3 clones accurately, but is also capable of detecting harder-to-detect clones in the Twilight Zone. Oreo is built using a combination of machine learning, information retrieval, and software metrics. We evaluate the recall of Oreo on BigCloneBench, and perform manual evaluation for precision. Oreo has both high recall and precision. 
More importantly, it pushes the boundary in detection of clones with moderate to weak syntactic similarity in a scalable manner", + "link": "https://www.semanticscholar.org/paper/cb580b86ed801b9d8c2749a78d397c78f9697c09", + "scraped_abstract": null, + "citation_best": 145 + }, + { + "paper": "2963143631", + "venue": "1180662882", + "year": "2018", + "title": "obfuscated gradients give a false sense of security circumventing defenses to adversarial examples", + "label": [ + "38652104", + "140547941", + "51777371", + "37736160" + ], + "author": [ + "2738073824", + "1606335232", + "2109123731" + ], + "reference": [], + "abstract": "", + "title_raw": "Obfuscated Gradients Give a False Sense of Security: Circumventing Defenses to Adversarial Examples", + "abstract_raw": "", + "link": "https://www.semanticscholar.org/paper/651adaa058f821a890f2c5d1053d69eb481a8352", + "scraped_abstract": null, + "citation_best": 1077 + }, + { + "paper": "2965749257", + "venue": "1203999783", + "year": "2019", + "title": "delayed impact of fair machine learning", + "label": [ + "119857082", + "181321632", + "194145944" + ], + "author": [ + "2794695878", + "2763007676", + "2766067247", + "2285015872", + "2301637896" + ], + "reference": [], + "abstract": "", + "title_raw": "Delayed Impact of Fair Machine Learning.", + "abstract_raw": "", + "link": "https://www.semanticscholar.org/paper/4f2baff3195b6fc43a38e3e869496dab9fe9dbc3", + "scraped_abstract": null, + "citation_best": 11 + }, + { + "paper": "3105413283", + "venue": "1174403976", + "year": "2018", + "title": "large scale analysis of framework specific exceptions in android apps", + "label": [ + "198824145", + "1009929", + "55439883", + "557433098", + "97686452", + "2522767166" + ], + "author": [ + "2560017178", + "2228396298", + "2497389582", + "2226695957", + "2201441985", + "3161285258", + "2100529836", + "2102704429" + ], + "reference": [ + "81879861", + "176206521", + "1514703713", + "1812630525", + "1966024199", + "1973702655", + 
"1976652907", + "1976672509", + "2004921952", + "2013856010", + "2015748725", + "2022429945", + "2025786423", + "2027999475", + "2039127983", + "2055477391", + "2055703785", + "2071751474", + "2080696000", + "2087248009", + "2088749975", + "2091932246", + "2095839586", + "2101800210", + "2130343490", + "2143860600", + "2146511370", + "2161963160", + "2164170598", + "2246822044", + "2344208265", + "2406976408", + "2463553622", + "2481985212", + "2513201734", + "2535143650", + "2548846043", + "2557674136", + "2588125269", + "2596289355", + "2598353968", + "2611266457", + "2735250639", + "2739751633", + "2740742367", + "2767357513", + "2767668103", + "2767816545", + "2770368305", + "3101412407", + "3102650716" + ], + "abstract": "mobile apps have become ubiquitous for app developers it is a key priority to ensure their apps correctness and reliability however many apps still suffer from occasional to frequent crashes weakening their competitive edge large scale deep analyses of the characteristics of real world app crashes can provide useful insights to guide developers or help improve testing and analysis tools however such studies do not exist this paper fills this gap over a four month long effort we have collected 16 245 unique exception traces from 2 486 open source android apps and observed that framework specific exceptions account for the majority of these crashes we then extensively investigated the 8 243 framework specific exceptions which took six person months 1 identifying their characteristics e g manifestation locations common fault categories 2 evaluating their manifestation via state of the art bug detection techniques and 3 reviewing their fixes besides the insights they provide these findings motivate and enable follow up research on mobile apps such as bug detection fault localization and patch generation in addition to demonstrate the utility of our findings we have optimized stoat a dynamic testing tool and implemented exlocator an exception 
localization tool for android apps stoat is able to quickly uncover three previously unknown confirmed fixed crashes in gmail and google exlocator is capable of precisely locating the root causes of identified exceptions in real world apps our substantial dataset is made publicly available to share with and benefit the community", + "title_raw": "Large-scale analysis of framework-specific exceptions in Android apps", + "abstract_raw": "Mobile apps have become ubiquitous. For app developers, it is a key priority to ensure their apps' correctness and reliability. However, many apps still suffer from occasional to frequent crashes, weakening their competitive edge. Large-scale, deep analyses of the characteristics of real-world app crashes can provide useful insights to guide developers, or help improve testing and analysis tools. However, such studies do not exist --- this paper fills this gap. Over a four-month long effort, we have collected 16,245 unique exception traces from 2,486 open-source Android apps, and observed that framework-specific exceptions account for the majority of these crashes. We then extensively investigated the 8,243 framework-specific exceptions (which took six person-months): (1) identifying their characteristics (e.g., manifestation locations, common fault categories), (2) evaluating their manifestation via state-of-the-art bug detection techniques, and (3) reviewing their fixes. Besides the insights they provide, these findings motivate and enable follow-up research on mobile apps, such as bug detection, fault localization and patch generation. In addition, to demonstrate the utility of our findings, we have optimized Stoat, a dynamic testing tool, and implemented ExLocator, an exception localization tool, for Android apps. Stoat is able to quickly uncover three previously-unknown, confirmed/fixed crashes in Gmail and Google+; ExLocator is capable of precisely locating the root causes of identified exceptions in real-world apps. 
Our substantial dataset is made publicly available to share with and benefit the community.", + "link": "https://www.semanticscholar.org/paper/fccad6bc2c2a5bac53adaef3874710e00871b69c", + "scraped_abstract": null, + "citation_best": 57 + }, + { + "paper": "2794889478", + "venue": "1174403976", + "year": "2018", + "title": "spatio temporal context reduction a pointer analysis based static approach for detecting use after free vulnerabilities", + "label": [ + "1009929", + "98183937", + "110251889", + "124101348", + "7263679", + "2777904410", + "97686452", + "43126263" + ], + "author": [ + "2763419528", + "2170939854", + "2165184226", + "2144824710" + ], + "reference": [ + "46012774", + "327452528", + "951859702", + "1480909796", + "1546625432", + "1577404745", + "1657354101", + "1826158585", + "1878544538", + "1966634194", + "1971782746", + "1984471991", + "1991546210", + "1996567876", + "1997394198", + "1999517284", + "2008106620", + "2017842497", + "2050320220", + "2056331235", + "2059278087", + "2072102701", + "2072385532", + "2080205938", + "2080573945", + "2082000355", + "2102983272", + "2112936713", + "2117703831", + "2122757982", + "2124377830", + "2126508237", + "2133497528", + "2134569009", + "2136271630", + "2136938453", + "2140809377", + "2144706305", + "2146649139", + "2149237601", + "2154795299", + "2156858199", + "2167029843", + "2170922286", + "2289498116", + "2297774820", + "2511015845", + "2547862110", + "2553375745", + "2606752733", + "2735214320", + "2735704859" + ], + "abstract": "zero day use after free uaf vulnerabilities are increasingly popular and highly dangerous but few mitigations exist we introduce a new pointer analysis based static analysis cr ed for finding uaf bugs in multi mloc c source code efficiently and effectively cr ed achieves this by making three advances i a spatio temporal context reduction technique for scaling down soundly and precisely the exponential number of contexts that would otherwise be considered at a pair of 
free and use sites ii a multi stage analysis for filtering out false alarms efficiently and iii a path sensitive demand driven approach for finding the points to information required we have implemented cr ed in llvm 3 8 0 and compared it with four different state of the art static tools cbmc model checking c lang abstract interpretation c occinelle pattern matching and s upa pointer analysis using all the c test cases in juliet test suite jts and 10 open source c applications for the ground truth validated with jts cr ed detects all the 138 known uaf bugs as cbmc and s upa do while c lang and c occinelle miss some bugs with no false alarms from any tool for practicality validated with the 10 applications totaling 3 mloc cr ed reports 132 warnings including 85 bugs in 7 6 hours while the existing tools are either unscalable by terminating within 3 days only for one application cbmc or impractical by finding virtually no bugs c lang and c occinelle or issuing an excessive number of false alarms s upa", + "title_raw": "Spatio-temporal context reduction: a pointer-analysis-based static approach for detecting use-after-free vulnerabilities", + "abstract_raw": "Zero-day Use-After-Free (UAF) vulnerabilities are increasingly popular and highly dangerous, but few mitigations exist. We introduce a new pointer-analysis-based static analysis, CR ed , for finding UAF bugs in multi-MLOC C source code efficiently and effectively. CR ed achieves this by making three advances: (i) a spatio-temporal context reduction technique for scaling down soundly and precisely the exponential number of contexts that would otherwise be considered at a pair of free and use sites, (ii) a multi-stage analysis for filtering out false alarms efficiently, and (iii) a path-sensitive demand-driven approach for finding the points-to information required. 
We have implemented CR ed in LLVM-3.8.0 and compared it with four different state-of-the-art static tools: CBMC (model checking), C lang (abstract interpretation), C occinelle (pattern matching), and S upa (pointer analysis) using all the C test cases in Juliet Test Suite (JTS) and 10 open-source C applications. For the ground-truth validated with JTS, CR ed detects all the 138 known UAF bugs as CBMC and S upa do while C lang and C occinelle miss some bugs, with no false alarms from any tool. For practicality validated with the 10 applications (totaling 3+ MLOC), CR ed reports 132 warnings including 85 bugs in 7.6 hours while the existing tools are either unscalable by terminating within 3 days only for one application (CBMC) or impractical by finding virtually no bugs (C lang and C occinelle ) or issuing an excessive number of false alarms (S upa ).", + "link": "https://scholar.google.com/scholar?as_q=&num=10&btnG=Search+Scholar&as_epq=Spatio-Temporal+Context+Reduction:+A+Pointer-Analysis-Based+Static+Approach+for+Detecting+Use-After-Free+Vulnerabilities&as_oq=&as_eq=&as_occt=any&as_sauthors=Yan", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2794694213", + "venue": "1174403976", + "year": "2018", + "title": "identifying design problems in the source code a grounded theory", + "label": [ + "78646695", + "52913732", + "2522767166", + "43126263", + "149091818", + "56666940", + "2777904410" + ], + "author": [ + "2346238452", + "2750739235", + "2060289530", + "2114483206", + "2148604163", + "2152210060", + "2305394353", + "2150135511", + "2265460666", + "2567045488", + "2182731144", + "1980600362" + ], + "reference": [ + "2622881", + "3261618", + "85043938", + "119177261", + "201388407", + "617329964", + "1533574204", + "1589676330", + "1649645444", + "1658908529", + "1730782591", + "1833911422", + "1933119144", + "1996503548", + "2001730430", + "2018415007", + "2025380177", + "2028950226", + "2037698798", + "2044556410", + "2048847754", + 
"2050754458", + "2072267206", + "2099535882", + "2102049362", + "2108086273", + "2116805092", + "2118023438", + "2122626138", + "2126161037", + "2137659640", + "2140504739", + "2140819045", + "2147242382", + "2147810104", + "2148802839", + "2151295763", + "2153887189", + "2154196314", + "2160559612", + "2161676644", + "2171994855", + "2240406150", + "2290314319", + "2353739181", + "2357064193", + "2359516361", + "2532677196", + "2565862629", + "2755137195", + "2963443047" + ], + "abstract": "the prevalence of design problems may cause re engineering or even discontinuation of the system due to missing informal or outdated design documentation developers often have to rely on the source code to identify design problems therefore developers have to analyze different symptoms that manifest in several code elements which may quickly turn into a complex task although researchers have been investigating techniques to help developers in identifying design problems there is little knowledge on how developers actually proceed to identify design problems in order to tackle this problem we conducted a multi trial industrial experiment with professionals from 5 software companies to build a grounded theory the resulting theory offers explanations on how developers identify design problems in practice for instance it reveals the characteristics of symptoms that developers consider helpful moreover developers often combine different types of symptoms to identify a single design problem this knowledge serves as a basis to further understand the phenomena and advance towards more effective identification techniques", + "title_raw": "Identifying design problems in the source code: a grounded theory", + "abstract_raw": "The prevalence of design problems may cause re-engineering or even discontinuation of the system. Due to missing, informal or outdated design documentation, developers often have to rely on the source code to identify design problems. 
Therefore, developers have to analyze different symptoms that manifest in several code elements, which may quickly turn into a complex task. Although researchers have been investigating techniques to help developers in identifying design problems, there is little knowledge on how developers actually proceed to identify design problems. In order to tackle this problem, we conducted a multi-trial industrial experiment with professionals from 5 software companies to build a grounded theory. The resulting theory offers explanations on how developers identify design problems in practice. For instance, it reveals the characteristics of symptoms that developers consider helpful. Moreover, developers often combine different types of symptoms to identify a single design problem. This knowledge serves as a basis to further understand the phenomena and advance towards more effective identification techniques.", + "link": "https://www.semanticscholar.org/paper/3a5b60e2584e3b21fe8a3d8760732dc9a0a85d1e", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2795338679", + "venue": "1174403976", + "year": "2018", + "title": "static automated program repair for heap properties", + "label": [ + "137287247", + "1009929", + "55439883", + "161969638", + "156731835", + "2777904410", + "115903868", + "97686452", + "117447612" + ], + "author": [ + "2500063185", + "2569774467" + ], + "reference": [ + "777621473", + "989845945", + "1475493299", + "1495368328", + "1508811155", + "1514171102", + "1560374668", + "1561946714", + "1562865234", + "1566987880", + "1567479568", + "1592154101", + "1608869910", + "1872701416", + "1973468708", + "1977696633", + "1986544324", + "1989657183", + "1992012690", + "2008626182", + "2016027000", + "2019730517", + "2053154567", + "2061575154", + "2063387237", + "2078393527", + "2099866050", + "2104220745", + "2121898351", + "2130162474", + "2132761501", + "2140021378", + "2141670850", + "2144540268", + "2145124323", + "2153418968", + 
"2153881107", + "2156723666", + "2156841542", + "2160443286", + "2165747537", + "2170612356", + "2171685273", + "2274071363", + "2288083000", + "2344973853", + "2373227884", + "2400994325", + "2486590439", + "2514084604", + "2572051352", + "2964011134" + ], + "abstract": "static analysis tools have demonstrated effectiveness at finding bugs in real world code such tools are increasingly widely adopted to improve software quality in practice automated program repair apr has the potential to further cut down on the cost of improving software quality however there is a disconnect between these effective bug finding tools and apr recent advances in apr rely on test cases making them inapplicable to newly discovered bugs or bugs difficult to test for deterministically like memory leaks additionally the quality of patches generated to satisfy a test suite is a key challenge we address these challenges by adapting advances in practical static analysis and verification techniques to enable a new technique that finds and then accurately fixes real bugs without test cases we present a new automated program repair technique using separation logic at a high level our technique reasons over semantic effects of existing program fragments to fix faults related to general pointer safety properties resource leaks memory leaks and null dereferences the procedure automatically translates identified fragments into source level patches and verifies patch correctness with respect to reported faults in this work we conduct the largest study of automatically fixing undiscovered bugs in real world code to date we demonstrate our approach by correctly fixing 55 bugs including 11 previously undiscovered bugs in 11 real world projects", + "title_raw": "Static automated program repair for heap properties", + "abstract_raw": "Static analysis tools have demonstrated effectiveness at finding bugs in real world code. 
Such tools are increasingly widely adopted to improve software quality in practice. Automated Program Repair (APR) has the potential to further cut down on the cost of improving software quality. However, there is a disconnect between these effective bug-finding tools and APR. Recent advances in APR rely on test cases, making them inapplicable to newly discovered bugs or bugs difficult to test for deterministically (like memory leaks). Additionally, the quality of patches generated to satisfy a test suite is a key challenge. We address these challenges by adapting advances in practical static analysis and verification techniques to enable a new technique that finds and then accurately fixes real bugs without test cases. We present a new automated program repair technique using Separation Logic. At a high-level, our technique reasons over semantic effects of existing program fragments to fix faults related to general pointer safety properties: resource leaks, memory leaks, and null dereferences. The procedure automatically translates identified fragments into source-level patches, and verifies patch correctness with respect to reported faults. In this work we conduct the largest study of automatically fixing undiscovered bugs in real-world code to date. 
We demonstrate our approach by correctly fixing 55 bugs, including 11 previously undiscovered bugs, in 11 real-world projects.", + "link": "https://www.semanticscholar.org/paper/87c1cb1764d257d2060ed316cb86e5641ed6afea", + "scraped_abstract": null, + "citation_best": 71 + }, + { + "paper": "2962804757", + "venue": "1174403976", + "year": "2018", + "title": "automated localization for unreproducible builds", + "label": [ + "2778012447", + "49585438", + "61423126", + "101317890", + "2777904410", + "43126263", + "23123220" + ], + "author": [ + "2111859474", + "2165436201", + "2137955303", + "2265437558" + ], + "reference": [ + "1973435495", + "1985947101", + "1993167880", + "1993932111", + "1995224147", + "2009077327", + "2012380206", + "2014030636", + "2018663431", + "2040577374", + "2043481571", + "2049238280", + "2058230372", + "2058824677", + "2081771230", + "2082805143", + "2091601801", + "2111978054", + "2116737258", + "2129476175", + "2131346202", + "2165803716", + "2170267084", + "2249980257", + "2742100187", + "3102650716" + ], + "abstract": "reproducibility is the ability of recreating identical binaries under pre defined build environments due to the need of quality assurance and the benefit of better detecting attacks against build environments the practice of reproducible builds has gained popularity in many open source software repositories such as debian and bitcoin however identifying the unreproducible issues remains a labour intensive and time consuming challenge because of the lacking of information to guide the search and the diversity of the causes that may lead to the unreproducible binaries in this paper we propose an automated framework called reploc to localize the problematic files for unreproducible builds reploc features a query augmentation component that utilizes the information extracted from the build logs and a heuristic rule based filtering component that narrows the search scope by integrating the two components with a weighted file 
ranking module reploc is able to automatically produce a ranked list of files that are helpful in locating the problematic files for the unreproducible builds we have implemented a prototype and conducted extensive experiments over 671 real world unreproducible debian packages in four different categories by considering the topmost ranked file only reploc achieves an accuracy rate of 47 09 if we expand our examination to the top ten ranked files in the list produced by reploc the accuracy rate becomes 79 28 considering that there are hundreds of source code scripts makefiles etc in a package reploc significantly reduces the scope of localizing problematic files moreover with the help of reploc we successfully identified and fixed six new unreproducible packages from debian and guix", + "title_raw": "Automated localization for unreproducible builds", + "abstract_raw": "Reproducibility is the ability of recreating identical binaries under pre-defined build environments. Due to the need of quality assurance and the benefit of better detecting attacks against build environments, the practice of reproducible builds has gained popularity in many open-source software repositories such as Debian and Bitcoin. However, identifying the unreproducible issues remains a labour intensive and time consuming challenge, because of the lacking of information to guide the search and the diversity of the causes that may lead to the unreproducible binaries. In this paper we propose an automated framework called RepLoc to localize the problematic files for unreproducible builds. RepLoc features a query augmentation component that utilizes the information extracted from the build logs, and a heuristic rule-based filtering component that narrows the search scope. By integrating the two components with a weighted file ranking module, RepLoc is able to automatically produce a ranked list of files that are helpful in locating the problematic files for the unreproducible builds. 
We have implemented a prototype and conducted extensive experiments over 671 real-world unreproducible Debian packages in four different categories. By considering the topmost ranked file only, RepLoc achieves an accuracy rate of 47.09%. If we expand our examination to the top ten ranked files in the list produced by RepLoc, the accuracy rate becomes 79.28%. Considering that there are hundreds of source code, scripts, Makefiles, etc., in a package, RepLoc significantly reduces the scope of localizing problematic files. Moreover, with the help of RepLoc, we successfully identified and fixed six new unreproducible packages from Debian and Guix.", + "link": "https://www.semanticscholar.org/paper/7d39ec1b78811f959ea7acb9e52967a6b5935f73", + "scraped_abstract": null, + "citation_best": 32 + }, + { + "paper": "2794901250", + "venue": "1174403976", + "year": "2018", + "title": "generalized data structure synthesis", + "label": [ + "2777394884", + "48103436", + "2778514511", + "2780632077", + "162319229", + "2776937632", + "16963264", + "2777884278", + "80444323" + ], + "author": [ + "2243994318", + "2235702021", + "2344136120" + ], + "reference": [ + "1480909796", + "1851390469", + "1974514467", + "1976978933", + "2006875594", + "2033901233", + "2040073555", + "2074489032", + "2087987137", + "2093666661", + "2094878426", + "2098575846", + "2102101008", + "2109647823", + "2122816893", + "2129466151", + "2135275954", + "2136650506", + "2137824953", + "2149236697", + "2160985005", + "2161159055", + "2276356546", + "2340086043", + "2417132441", + "2545572223", + "3008793775" + ], + "abstract": "data structure synthesis is the task of generating data structure implementations from high level specifications recent work in this area has shown potential to save programmer time and reduce the risk of defects existing techniques focus on data structures for manipulating subsets of a single collection but real world programs often track multiple related collections and aggregate 
properties such as sums counts minimums and maximums this paper shows how to synthesize data structures that track subsets and aggregations of multiple related collections our technique decomposes the synthesis task into alternating steps of query synthesis and incrementalization the query synthesis step implements pure operations over the data structure state by leveraging existing enumerative synthesis techniques specialized to the data structures domain the incrementalization step implements imperative state modifications by re framing them as fresh queries that determine what to change coupled with a small amount of code to apply the change as an added benefit of this approach over previous work the synthesized data structure is optimized for not only the queries in the specification but also the required update operations we have evaluated our approach in four large case studies demonstrating that these extensions are broadly applicable", + "title_raw": "Generalized data structure synthesis", + "abstract_raw": "Data structure synthesis is the task of generating data structure implementations from high-level specifications. Recent work in this area has shown potential to save programmer time and reduce the risk of defects. Existing techniques focus on data structures for manipulating subsets of a single collection, but real-world programs often track multiple related collections and aggregate properties such as sums, counts, minimums, and maximums. This paper shows how to synthesize data structures that track subsets and aggregations of multiple related collections. Our technique decomposes the synthesis task into alternating steps of query synthesis and incrementalization. The query synthesis step implements pure operations over the data structure state by leveraging existing enumerative synthesis techniques, specialized to the data structures domain. 
The incrementalization step implements imperative state modifications by re-framing them as fresh queries that determine what to change, coupled with a small amount of code to apply the change. As an added benefit of this approach over previous work, the synthesized data structure is optimized for not only the queries in the specification but also the required update operations. We have evaluated our approach in four large case studies, demonstrating that these extensions are broadly applicable.", + "link": "https://www.semanticscholar.org/paper/8d15bfc861c2baaaaa13ac3f1aa95d62eaed476d", + "scraped_abstract": null, + "citation_best": 31 + }, + { + "paper": "2964064835", + "venue": "1174403976", + "year": "2018", + "title": "traceability in the wild automatically augmenting incomplete trace links", + "label": [ + "1009929", + "529173508", + "153180980", + "101317890", + "2777904410", + "43126263", + "153876917", + "95623464", + "23123220" + ], + "author": [ + "2703065901", + "2794734593", + "2223580030", + "231137837", + "2052978553" + ], + "reference": [ + "87753372", + "1554055400", + "1570448133", + "1608320902", + "1656916165", + "1881107579", + "1906861448", + "1959797663", + "1967577593", + "1968882522", + "1972978214", + "1975879668", + "1984925551", + "1991334006", + "1998519879", + "2000241667", + "2005350125", + "2005569040", + "2031400820", + "2033020528", + "2038090270", + "2045895677", + "2060756319", + "2079317829", + "2085421499", + "2085435546", + "2085996072", + "2102936662", + "2103699027", + "2113947629", + "2118202700", + "2118761313", + "2119070456", + "2125603046", + "2125682153", + "2128581098", + "2129559874", + "2133990480", + "2140264852", + "2146338950", + "2146781646", + "2149072143", + "2149398387", + "2150775529", + "2155829706", + "2172232422", + "2395560004", + "2532915684", + "2538276272", + "2540556128", + "2572316797", + "2589508708", + "2593373254", + "2598621189", + "2602568545", + "2795266985", + "2911964244", + "3098598077", + 
"3103913776" + ], + "abstract": "software and systems traceability is widely accepted as an essential element for supporting many software development tasks today s version control systems provide inbuilt features that allow developers to tag each commit with one or more issue id thereby providing the building blocks from which project wide traceability can be established between feature requests bug fixes commits source code and specific developers however our analysis of six open source projects showed that on average only 60 of the commits were linked to specific issues without these fundamental links the entire set of project wide links will be incomplete and therefore not trustworthy in this paper we address the fundamental problem of missing links between commits and issues our approach leverages a combination of process and text related features characterizing issues and code changes to train a classifier to identify missing issue tags in commit messages thereby generating the missing links we conducted a series of experiments to evaluate our approach against six open source projects and showed that it was able to effectively recommend links for tagging issues at an average of 96 recall and 33 precision in a related task for augmenting a set of existing trace links the classifier returned precision at levels greater than 89 in all projects and recall of 50", + "title_raw": "Traceability in the wild: automatically augmenting incomplete trace links", + "abstract_raw": "Software and systems traceability is widely accepted as an essential element for supporting many software development tasks. Today's version control systems provide inbuilt features that allow developers to tag each commit with one or more issue ID, thereby providing the building blocks from which project-wide traceability can be established between feature requests, bug fixes, commits, source code, and specific developers. 
However, our analysis of six open source projects showed that on average only 60% of the commits were linked to specific issues. Without these fundamental links the entire set of project-wide links will be incomplete, and therefore not trustworthy. In this paper we address the fundamental problem of missing links between commits and issues. Our approach leverages a combination of process and text-related features characterizing issues and code changes to train a classifier to identify missing issue tags in commit messages, thereby generating the missing links. We conducted a series of experiments to evaluate our approach against six open source projects and showed that it was able to effectively recommend links for tagging issues at an average of 96% recall and 33% precision. In a related task for augmenting a set of existing trace links, the classifier returned precision at levels greater than 89% in all projects and recall of 50%.", + "link": "https://www.semanticscholar.org/paper/78ef2e17f5b136c15a66a4bcd64875d252d5924b", + "scraped_abstract": null, + "citation_best": 18 + }, + { + "paper": "2795283266", + "venue": "1174403976", + "year": "2018", + "title": "towards optimal concolic testing", + "label": [ + "110251889", + "49937458", + "2779639559", + "11219265", + "51823790", + "127705205" + ], + "author": [ + "2114282303", + "2302978205", + "2228957528", + "2794553365", + "2773187611", + "2144095093" + ], + "reference": [ + "23932775", + "83761634", + "125598877", + "157156687", + "1497028280", + "1498432697", + "1588454361", + "1710734607", + "1738817570", + "1965194038", + "1971719033", + "1975191777", + "1982487869", + "1992485833", + "2002758617", + "2009489720", + "2011367000", + "2022873981", + "2053577778", + "2060573639", + "2061548807", + "2063918473", + "2096449544", + "2104993088", + "2109276114", + "2110311336", + "2114345296", + "2117189826", + "2121650870", + "2134875273", + "2134991157", + "2159899121", + "2163499368", + "2169063148", + 
"2171469152", + "2197338197", + "2202286026", + "2340281863", + "2501678966", + "2509636684", + "2547093895", + "2574017551", + "2733978128", + "3006211908" + ], + "abstract": "concolic testing integrates concrete execution e g random testing and symbolic execution for test case generation it is shown to be more cost effective than random testing or symbolic execution sometimes a concolic testing strategy is a function which decides when to apply random testing or symbolic execution and if it is the latter case which program path to symbolically execute many heuristics based strategies have been proposed it is still an open problem what is the optimal concolic testing strategy in this work we make two contributions towards solving this problem first we show the optimal strategy can be defined based on the probability of program paths and the cost of constraint solving the problem of identifying the optimal strategy is then reduced to a model checking problem of markov decision processes with costs secondly in view of the complexity in identifying the optimal strategy we design a greedy algorithm for approximating the optimal strategy we conduct two sets of experiments one is based on randomly generated models and the other is based on a set of c programs the results show that existing heuristics have much room to improve and our greedy algorithm often outperforms existing heuristics", + "title_raw": "Towards optimal concolic testing", + "abstract_raw": "Concolic testing integrates concrete execution (e.g., random testing) and symbolic execution for test case generation. It is shown to be more cost-effective than random testing or symbolic execution sometimes. A concolic testing strategy is a function which decides when to apply random testing or symbolic execution, and if it is the latter case, which program path to symbolically execute. Many heuristics-based strategies have been proposed. It is still an open problem what is the optimal concolic testing strategy. 
In this work, we make two contributions towards solving this problem. First, we show the optimal strategy can be defined based on the probability of program paths and the cost of constraint solving. The problem of identifying the optimal strategy is then reduced to a model checking problem of Markov Decision Processes with Costs. Secondly, in view of the complexity in identifying the optimal strategy, we design a greedy algorithm for approximating the optimal strategy. We conduct two sets of experiments. One is based on randomly generated models and the other is based on a set of C programs. The results show that existing heuristics have much room to improve and our greedy algorithm often outperforms existing heuristics.", + "link": "https://www.semanticscholar.org/paper/79f08cd41b8d67a9301ecad456da10e1912a516b", + "scraped_abstract": null, + "citation_best": 64 + }, + { + "paper": "2808437126", + "venue": "1203999783", + "year": "2018", + "title": "sentigan generating sentimental texts via mixture adversarial networks", + "label": [ + "37736160", + "154945302" + ], + "author": [ + "2234806591", + "2146508076" + ], + "reference": [ + "648786980", + "1959608418", + "2136891251", + "2160660844", + "2251939518", + "2311110368", + "2523469089", + "2565378226", + "2740167620", + "2757836268", + "2786744843", + "2951523806", + "2963188990", + "2963373786", + "2963857374" + ], + "abstract": "", + "title_raw": "SentiGAN: Generating Sentimental Texts via Mixture Adversarial Networks", + "abstract_raw": "", + "link": "https://www.semanticscholar.org/paper/55caf5154cd558c355d7191daa565ebebb8336e1", + "scraped_abstract": null, + "citation_best": 166 + }, + { + "paper": "2807936987", + "venue": "1203999783", + "year": "2018", + "title": "r svm robust learning with privileged information", + "label": [ + "119857082", + "12267149" + ], + "author": [ + "2646297023", + "2157883762", + "2115321199", + "2807781723", + "2104063197", + "2104129307" + ], + "reference": [ + "134960717", 
+ "1533517359", + "1998899775", + "2028592945", + "2090001453", + "2108424265", + "2112796928", + "2126942721", + "2138580951", + "2173379916", + "2542226129", + "2580802682", + "2757028014" + ], + "abstract": "", + "title_raw": "R-SVM+: Robust Learning with Privileged Information", + "abstract_raw": "", + "link": "https://www.semanticscholar.org/paper/8b4048325bfe14a2dc11dea88626e35b7cb5e59f", + "scraped_abstract": null, + "citation_best": 11 + }, + { + "paper": "3092809319", + "venue": "1203999783", + "year": "2018", + "title": "from conjunctive queries to instance queries in ontology mediated querying", + "label": [ + "2777530160", + "166724064", + "65647387", + "154690210", + "102993220", + "80444323" + ], + "author": [ + "1986491566", + "2394551344", + "2076471859" + ], + "reference": [], + "abstract": "", + "title_raw": "From Conjunctive Queries to Instance Queries in Ontology-Mediated Querying.", + "abstract_raw": "", + "link": "https://www.semanticscholar.org/paper/e36440509511e82d646407ec94a8b859ccdf3494", + "scraped_abstract": null, + "citation_best": 1 + }, + { + "paper": "2962756936", + "venue": "1203999783", + "year": "2018", + "title": "what game are we playing end to end learning in normal and extensive form games", + "label": [ + "155032097", + "2778770139", + "2777884278", + "80444323" + ], + "author": [ + "2801815769", + "2346526278", + "2219978048" + ], + "reference": [], + "abstract": "", + "title_raw": "What Game Are We Playing? 
End-to-end Learning in Normal and Extensive Form Games", + "abstract_raw": "", + "link": "https://www.semanticscholar.org/paper/8d859af05b1f50708ff83aa8a5623c4cbc45d490", + "scraped_abstract": null, + "citation_best": 56 + }, + { + "paper": "2807873315", + "venue": "1203999783", + "year": "2018", + "title": "commonsense knowledge aware conversation generation with graph attention", + "label": [ + "136134403", + "204321447", + "30542707" + ], + "author": [ + "2574238730", + "2607330899", + "2162268045", + "2737723157", + "2737004204", + "2147746173" + ], + "reference": [ + "10957333", + "1518951372", + "1592970009", + "1757716044", + "1825507529", + "2127795553", + "2130942839", + "2250770256", + "2296712013", + "2467963359", + "2583741591", + "2586847566", + "2740702290", + "2754194354", + "2757177109", + "2963206148", + "2963963856", + "2964165364", + "2964308564" + ], + "abstract": "", + "title_raw": "Commonsense Knowledge Aware Conversation Generation with Graph Attention", + "abstract_raw": "", + "link": "https://www.semanticscholar.org/paper/c2ae7c861d4b8b310d1318e1fdb6135b1739801f", + "scraped_abstract": null, + "citation_best": 517 + }, + { + "paper": "3098276446", + "venue": "1130985203", + "year": "2018", + "title": "adversarial attacks on neural networks for graph data", + "label": [ + "2778403875", + "58973888", + "50644808", + "97970142", + "45374587", + "37736160", + "108583219", + "80444323" + ], + "author": [ + "2785324054", + "2803600188", + "316694267" + ], + "reference": [ + "131956760", + "2000042664", + "2144906988", + "2152284345", + "2153959628", + "2162630660", + "2174402890", + "2180612164", + "2293844262", + "2403788960", + "2468907370", + "2501871465", + "2558460151", + "2584187726", + "2612637113", + "2624431344", + "2744095836", + "2745001864", + "2788579977", + "2788963265", + "2952254971", + "2962756421", + "2963207607", + "2963224980", + "2963903822", + "2963920355", + "2964015378", + "2964153729", + "2964282455", + "3099608705", + 
"3104097132", + "3111818035" + ], + "abstract": "deep learning models for graphs have achieved strong performance for the task of node classification despite their proliferation currently there is no study of their robustness to adversarial attacks yet in domains where they are likely to be used e g the web adversaries are common can deep learning models for graphs be easily fooled in this work we introduce the first study of adversarial attacks on attributed graphs specifically focusing on models exploiting ideas of graph convolutions in addition to attacks at test time we tackle the more challenging class of poisoning causative attacks which focus on the training phase of a machine learning model we generate adversarial perturbations targeting the node s features and the graph structure thus taking the dependencies between instances in account moreover we ensure that the perturbations remain unnoticeable by preserving important data characteristics to cope with the underlying discrete domain we propose an efficient algorithm nettack exploiting incremental computations our experimental study shows that accuracy of node classification significantly drops even when performing only few perturbations even more our attacks are transferable the learned attacks generalize to other state of the art node classification models and unsupervised approaches and likewise are successful even when only limited knowledge about the graph is given", + "title_raw": "Adversarial Attacks on Neural Networks for Graph Data", + "abstract_raw": "Deep learning models for graphs have achieved strong performance for the task of node classification. Despite their proliferation, currently there is no study of their robustness to adversarial attacks. Yet, in domains where they are likely to be used, e.g. the web, adversaries are common. Can deep learning models for graphs be easily fooled? 
In this work, we introduce the first study of adversarial attacks on attributed graphs, specifically focusing on models exploiting ideas of graph convolutions. In addition to attacks at test time, we tackle the more challenging class of poisoning/causative attacks, which focus on the training phase of a machine learning model.We generate adversarial perturbations targeting the node's features and the graph structure, thus, taking the dependencies between instances in account. Moreover, we ensure that the perturbations remain unnoticeable by preserving important data characteristics. To cope with the underlying discrete domain we propose an efficient algorithm Nettack exploiting incremental computations. Our experimental study shows that accuracy of node classification significantly drops even when performing only few perturbations. Even more, our attacks are transferable: the learned attacks generalize to other state-of-the-art node classification models and unsupervised approaches, and likewise are successful even when only limited knowledge about the graph is given.", + "link": "https://www.semanticscholar.org/paper/6c44f8e62d824bcda4f291c679a5518bbd4225f6", + "scraped_abstract": null, + "citation_best": 350 + }, + { + "paper": "2896992572", + "venue": "1123349196", + "year": "2018", + "title": "skycore moving core to the edge for untethered and reliable uav based lte networks", + "label": [ + "31258907", + "68649174", + "158379750", + "2776174695", + "25730044", + "555944384", + "5038329", + "2778555145", + "10597312" + ], + "author": [ + "2228709945", + "2024846517", + "2780047193", + "2171138797", + "2159635634" + ], + "reference": [ + "1593857093", + "1608999459", + "1980959920", + "1994926493", + "1997691221", + "2017606936", + "2022768928", + "2074616737", + "2089194229", + "2102090846", + "2106335692", + "2107035076", + "2118428193", + "2126822952", + "2164336170", + "2469021143", + "2512468212", + "2521222097", + "2526598189", + "2559121813", + 
"2594783833", + "2744954493", + "2763776149", + "2785607814", + "2791136298", + "2792975008", + "2798915702", + "2893608272", + "2895675539", + "2963061782", + "2964315738", + "2998113761" + ], + "abstract": "the advances in unmanned aerial vehicle uav technology have empowered mobile operators to deploy lte base stations bss on uavs and provide on demand adaptive connectivity to hotspot venues as well as emergency scenarios however today s evolved packet core epc that orchestrates the lte ran faces fundamental limitations in catering to such a challenging wireless and mobile uav environment particularly in the presence of multiple bss uavs in this work we argue for and propose an alternate radical edge epc design called skycore that pushes the epc functionality to the extreme edge of the core network collapses the epc into a single light weight self contained entity that is co located with each of the uav bs skycore incorporates elements that are designed to address the unique challenges facing such a distributed design in the uav environment namely the resource constraints of uav platforms and the distributed management of pronounced uav and ue mobility we build and deploy a fully functional version of skycore on a two uav lte network and showcase its i ability to interoperate with commercial lte bss as well as smartphones ii support for both hotspot and standalone multi uav deployments and iii superior control and data plane performance compared to other epc variants in this environment", + "title_raw": "SkyCore: Moving Core to the Edge for Untethered and Reliable UAV-based LTE Networks", + "abstract_raw": "The advances in unmanned aerial vehicle (UAV) technology have empowered mobile operators to deploy LTE base stations (BSs) on UAVs, and provide on-demand, adaptive connectivity to hotspot venues as well as emergency scenarios. 
However, today's evolved packet core (EPC) that orchestrates the LTE RAN faces fundamental limitations in catering to such a challenging, wireless and mobile UAV environment, particularly in the presence of multiple BSs (UAVs). In this work, we argue for and propose an alternate, radical edge EPC design, called SkyCore that pushes the EPC functionality to the extreme edge of the core network - collapses the EPC into a single, light-weight, self-contained entity that is co-located with each of the UAV BS. SkyCore incorporates elements that are designed to address the unique challenges facing such a distributed design in the UAV environment, namely the resource-constraints of UAV platforms, and the distributed management of pronounced UAV and UE mobility. We build and deploy a fully functional version of SkyCore on a two-UAV LTE network and showcase its (i) ability to interoperate with commercial LTE BSs as well as smartphones, (ii) support for both hotspot and standalone multi-UAV deployments, and (iii) superior control and data plane performance compared to other EPC variants in this environment.", + "link": "https://www.semanticscholar.org/paper/b53f12ccd91534b4083ea3d7357f98a6c102c3ed", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2890192685", + "venue": "1127325140", + "year": "2018", + "title": "non delusional q learning and value iteration", + "label": [ + "2780945871", + "37404715", + "188116033", + "121163568", + "91873725", + "127705205" + ], + "author": [ + "2160099514", + "1817936516", + "2060802895" + ], + "reference": [ + "13294968", + "196871588", + "1547105496", + "1576452626", + "1646707810", + "1971361630", + "2012547817", + "2020913456", + "2102847620", + "2103626435", + "2107977784", + "2115008305", + "2115211925", + "2121863487", + "2125074935", + "2130304665", + "2139418546", + "2150468603", + "2169071224", + "2593044849", + "2761873684", + "2788953735", + "2800415562", + "2962847657", + "2963325394", + "2963423916", + 
"3011120880" + ], + "abstract": "we identify a fundamental source of error in q learning and other forms of dynamic programming with function approximation delusional bias arises when the approximation architecture limits the class of expressible greedy policies since standard q updates make globally uncoordinated action choices with respect to the expressible policy class inconsistent or even conflicting q value estimates can result leading to pathological behaviour such as over under estimation instability and even divergence to solve this problem we introduce a new notion of policy consistency and define a local backup process that ensures global consistency through the use of information sets sets that record constraints on policies consistent with backed up q values we prove that both the model based and model free algorithms using this backup remove delusional bias yielding the first known algorithms that guarantee optimal results under general conditions these algorithms furthermore only require polynomially many information sets from a potentially exponential support finally we suggest other practical heuristics for value iteration and q learning that attempt to reduce delusional bias", + "title_raw": "Non-delusional Q-learning and value-iteration", + "abstract_raw": "We identify a fundamental source of error in Q-learning and other forms of dynamic programming with function approximation. Delusional bias arises when the approximation architecture limits the class of expressible greedy policies. Since standard Q-updates make globally uncoordinated action choices with respect to the expressible policy class, inconsistent or even conflicting Q-value estimates can result, leading to pathological behaviour such as over/under-estimation, instability and even divergence. 
To solve this problem, we introduce a new notion of policy consistency and define a local backup process that ensures global consistency through the use of information sets---sets that record constraints on policies consistent with backed-up Q-values. We prove that both the model-based and model-free algorithms using this backup remove delusional bias, yielding the first known algorithms that guarantee optimal results under general conditions. These algorithms furthermore only require polynomially many information sets (from a potentially exponential support). Finally, we suggest other practical heuristics for value-iteration and Q-learning that attempt to reduce delusional bias.", + "link": "https://scholar.google.com/scholar?as_q=&num=10&btnG=Search+Scholar&as_epq=Non-delusional+Q-learning+and+Value-iteration&as_oq=&as_eq=&as_occt=any&as_sauthors=Lu", + "scraped_abstract": null, + "citation_best": 27 + }, + { + "paper": "2963755523", + "venue": "1127325140", + "year": "2018", + "title": "neural ordinary differential equations", + "label": [ + "155512373", + "167966045", + "50644808", + "11413529", + "2778770139", + "51167844" + ], + "author": [ + "2895496966", + "2735727796", + "2809473988", + "2059490951" + ], + "reference": [ + "9124626", + "299440670", + "1909320841", + "1959608418", + "1998583908", + "2030276156", + "2057235791", + "2112796928", + "2167503371", + "2194775991", + "2295821368", + "2435809071", + "2442089939", + "2509830164", + "2550341318", + "2559626456", + "2569260160", + "2587284713", + "2592457170", + "2605147767", + "2605331588", + "2625178737", + "2765872171", + "2766207105", + "2767008699", + "2786384882", + "2788823228", + "2791004381", + "2795344262", + "2950452665", + "2951668940", + "2952922798", + "2962727772", + "2963112935", + "2963684275", + "2964010366", + "2964253689", + "2985962305", + "3098011980", + "3206777520" + ], + "abstract": "we introduce a new family of deep neural network models instead of specifying a discrete 
sequence of hidden layers we parameterize the derivative of the hidden state using a neural network the output of the network is computed using a black box differential equation solver these continuous depth models have constant memory cost adapt their evaluation strategy to each input and can explicitly trade numerical precision for speed we demonstrate these properties in continuous depth residual networks and continuous time latent variable models we also construct continuous normalizing flows a generative model that can train by maximum likelihood without partitioning or ordering the data dimensions for training we show how to scalably backpropagate through any ode solver without access to its internal operations this allows end to end training of odes within larger models", + "title_raw": "Neural ordinary differential equations", + "abstract_raw": "We introduce a new family of deep neural network models. Instead of specifying a discrete sequence of hidden layers, we parameterize the derivative of the hidden state using a neural network. The output of the network is computed using a black-box differential equation solver. These continuous-depth models have constant memory cost, adapt their evaluation strategy to each input, and can explicitly trade numerical precision for speed. We demonstrate these properties in continuous-depth residual networks and continuous-time latent variable models. We also construct continuous normalizing flows, a generative model that can train by maximum likelihood, without partitioning or ordering the data dimensions. For training, we show how to scalably backpropagate through any ODE solver, without access to its internal operations. 
This allows end-to-end training of ODEs within larger models.", + "link": "https://scholar.google.com/scholar?as_q=&num=10&btnG=Search+Scholar&as_epq=Neural+Ordinary+Differential+Equations&as_oq=&as_eq=&as_occt=any&as_sauthors=Chen", + "scraped_abstract": null, + "citation_best": 2108 + }, + { + "paper": "2798641544", + "venue": "1158363782", + "year": "2018", + "title": "netchain scale free sub rtt coordination", + "label": [ + "120314980" + ], + "author": [ + "2598385656", + "2232371090", + "2556930796", + "2102579274", + "2118067685", + "2147749062", + "2101044554", + "2161479384" + ], + "reference": [ + "1524103123", + "1601925768", + "1642392512", + "1801241948", + "1814601622", + "1963656762", + "1981780124", + "1992479210", + "1994926493", + "1997269120", + "2000832815", + "2007888220", + "2020765652", + "2036527145", + "2060440895", + "2072811945", + "2075854425", + "2118726720", + "2133394135", + "2139391817", + "2150676586", + "2153704625", + "2156580773", + "2209943643", + "2254505267", + "2303620077", + "2319809716", + "2576670572", + "2579461576", + "2604553456", + "2743093301", + "2761338514" + ], + "abstract": "", + "title_raw": "Netchain: scale-free sub-RTT coordination", + "abstract_raw": "", + "link": "https://www.semanticscholar.org/paper/5fafd836d3f2a2691bc012eb25e06cf80a8abe4d", + "scraped_abstract": null, + "citation_best": 164 + }, + { + "paper": "2897044322", + "venue": "1185109434", + "year": "2018", + "title": "rept reverse debugging of failures in deployed software", + "label": [ + "111919701", + "138673069", + "168065819", + "489000", + "113954288", + "508378895", + "160191386", + "2777904410" + ], + "author": [ + "2107517118", + "2158582854", + "1488309829", + "2225192588", + "2896079737", + "2154150918", + "2488966664" + ], + "reference": [ + "1505468428", + "1619695433", + "1710734607", + "2033472898", + "2049381173", + "2097576663", + "2100189461", + "2101819268", + "2102967730", + "2105614525", + "2105691657", + "2114800225", + 
"2115855199", + "2116998907", + "2117324184", + "2118315969", + "2121766388", + "2122532513", + "2128049346", + "2128345666", + "2129663982", + "2130473288", + "2149222015", + "2152795747", + "2156515608", + "2170224888", + "2383417445", + "2604695547", + "2605106683", + "2742016905", + "2752802732", + "2761662547" + ], + "abstract": "debugging software failures in deployed systems is important because they impact real users and customers however debugging such failures is notoriously hard in practice because developers have to rely on limited information such as memory dumps the execution history is usually unavailable because high fidelity program tracing is not affordable in deployed systems in this paper we present rept a practical system that enables reverse debugging of software failures in deployed systems rept reconstructs the execution history with high fidelity by combining online lightweight hardware tracing of a program s control flow with offline binary analysis that recovers its data flow it is seemingly impossible to recover data values thousands of instructions before the failure due to information loss and concurrent execution rept tackles these challenges by constructing a partial execution order based on timestamps logged by hardware and iteratively performing forward and backward execution with error correction we design and implement rept deploy it on microsoft windows and integrate it into windbg we evaluate rept on 16 real world bugs and show that it can recover data values accurately 92 on average and efficiently in less than 20 seconds for these bugs we also show that it enables effective reverse debugging for 14 bugs", + "title_raw": "REPT: reverse debugging of failures in deployed software", + "abstract_raw": "Debugging software failures in deployed systems is important because they impact real users and customers. 
However, debugging such failures is notoriously hard in practice because developers have to rely on limited information such as memory dumps. The execution history is usually unavailable because high-fidelity program tracing is not affordable in deployed systems.\r\n\r\nIn this paper, we present REPT, a practical system that enables reverse debugging of software failures in deployed systems. REPT reconstructs the execution history with high fidelity by combining online lightweight hardware tracing of a program's control flow with offline binary analysis that recovers its data flow. It is seemingly impossible to recover data values thousands of instructions before the failure due to information loss and concurrent execution. REPT tackles these challenges by constructing a partial execution order based on timestamps logged by hardware and iteratively performing forward and backward execution with error correction.\r\n\r\nWe design and implement REPT, deploy it on Microsoft Windows, and integrate it into WinDbg. We evaluate REPT on 16 real-world bugs and show that it can recover data values accurately (92% on average) and efficiently (in less than 20 seconds) for these bugs. 
We also show that it enables effective reverse debugging for 14 bugs.", + "link": "https://www.semanticscholar.org/paper/3a4ab3ad63eb65622c53d74952acea87968ea59d", + "scraped_abstract": null, + "citation_best": 37 + }, + { + "paper": "2899396876", + "venue": "1185109434", + "year": "2018", + "title": "legoos a disseminated distributed os for hardware resource disaggregation", + "label": [ + "2776191121", + "9390403", + "149091818", + "105339364", + "93996380" + ], + "author": [ + "2757159308", + "2757207333", + "2759487486", + "2109045567" + ], + "reference": [ + "78077100", + "1457268004", + "1474183241", + "1532546444", + "1744795482", + "1970466368", + "1981943579", + "2003597767", + "2007567551", + "2010324635", + "2012426216", + "2028069406", + "2086112773", + "2095687239", + "2100272538", + "2101668240", + "2104644701", + "2114139799", + "2114153586", + "2114667497", + "2117271294", + "2117590013", + "2122465391", + "2122960384", + "2124288146", + "2128336546", + "2134539478", + "2134807578", + "2135875530", + "2136545991", + "2141992894", + "2153005994", + "2156094106", + "2157733805", + "2160121678", + "2161047342", + "2161234420", + "2161573014", + "2169875292", + "2175085668", + "2199253235", + "2294693415", + "2335806889", + "2337699331", + "2345506493", + "2514934935", + "2604260473", + "2604701668", + "2604759068", + "2758102451", + "2762796393", + "2783840310", + "2798688636", + "2885736510", + "2889116347", + "2957907788" + ], + "abstract": "the monolithic server model where a server is the unit of deployment operation and failure is meeting its limits in the face of several recent hardware and application trends to improve resource utilization elasticity heterogeneity and failure handling in datacenters we believe that datacenters should break monolithic servers into disaggregated network attached hardware components despite the promising benefits of hardware resource disaggregation no existing oses or software systems can properly manage it we 
propose a new os model called the splitkernel to manage disaggregated systems splitkernel disseminates traditional os functionalities into loosely coupled monitors each of which runs on and manages a hardware component a splitkernel also performs resource allocation and failure handling of a distributed set of hardware components using the splitkernel model we built legoos a new os designed for hardware resource disaggregation legoos appears to users as a set of distributed servers internally a user application can span multiple processor memory and storage hardware components we implemented legoos on x86 64 and evaluated it by emulating hardware components using commodity servers our evaluation results show that legoos performance is comparable to monolithic linux servers while largely improving resource packing and reducing failure rate over monolithic clusters", + "title_raw": "LegoOS: a disseminated, distributed OS for hardware resource disaggregation", + "abstract_raw": "The monolithic server model where a server is the unit of deployment, operation, and failure is meeting its limits in the face of several recent hardware and application trends. To improve resource utilization, elasticity, heterogeneity, and failure handling in datacenters, we believe that datacenters should break monolithic servers into disaggregated, network-attached hardware components. Despite the promising benefits of hardware resource disaggregation, no existing OSes or software systems can properly manage it.\r\n\r\nWe propose a new OS model called the splitkernel to manage disaggregated systems. Splitkernel disseminates traditional OS functionalities into loosely-coupled monitors, each of which runs on and manages a hardware component. A splitkernel also performs resource allocation and failure handling of a distributed set of hardware components. Using the splitkernel model, we built LegoOS, a new OS designed for hardware resource disaggregation. 
LegoOS appears to users as a set of distributed servers. Internally, a user application can span multiple processor, memory, and storage hardware components. We implemented LegoOS on x86-64 and evaluated it by emulating hardware components using commodity servers. Our evaluation results show that LegoOS' performance is comparable to monolithic Linux servers, while largely improving resource packing and reducing failure rate over monolithic clusters.", + "link": "https://www.semanticscholar.org/paper/7945684818786fcb32cf92bace2566d7d6bc8945", + "scraped_abstract": null, + "citation_best": 140 + }, + { + "paper": "2963823348", + "venue": "2534597628", + "year": "2019", + "title": "orca differential bug localization in large scale services", + "label": [ + "137287247", + "167955471", + "37836645", + "153180980", + "105339364", + "2777904410", + "115903868" + ], + "author": [ + "1843571100", + "2949671386", + "2899027025", + "2899365365" + ], + "reference": [], + "abstract": "", + "title_raw": "Orca: Differential Bug Localization in Large-Scale Services", + "abstract_raw": "", + "link": "https://www.semanticscholar.org/paper/79e0629cba38895331a9a5ced9082912a5a85d84", + "scraped_abstract": null, + "citation_best": 8 + }, + { + "paper": "2964240296", + "venue": "1127352206", + "year": "2018", + "title": "program synthesis using conflict driven learning", + "label": [ + "195344581", + "42383842", + "2776937632", + "80444323" + ], + "author": [ + "2481272730", + "2120549242", + "2776748778", + "61468710" + ], + "reference": [ + "5815757", + "128869332", + "1480909796", + "1489527380", + "1590649805", + "1614061774", + "1858945639", + "1905591175", + "2013596093", + "2038123353", + "2044560939", + "2093535699", + "2094878426", + "2096307462", + "2132525863", + "2134734244", + "2143861926", + "2149236697", + "2153943889", + "2157976942", + "2158615804", + "2161159055", + "2162960800", + "2238673293", + "2276356546", + "2293101314", + "2409856616", + "2416325154", + 
"2496170334", + "2550100435", + "2550471858", + "2561055248", + "2626990892", + "2731197199", + "2735416437", + "2762513422", + "2899212285", + "2949800357", + "2949888546", + "2963929497" + ], + "abstract": "we propose a new conflict driven program synthesis technique that is capable of learning from past mistakes given a spurious program that violates the desired specification our synthesis algorithm identifies the root cause of the conflict and learns new lemmas that can prevent similar mistakes in the future specifically we introduce the notion of equivalence modulo conflict and show how this idea can be used to learn useful lemmas that allow the synthesizer to prune large parts of the search space we have implemented a general purpose cdcl style program synthesizer called neo and evaluate it in two different application domains namely data wrangling in r and functional programming over lists our experiments demonstrate the substantial benefits of conflict driven learning and show that neo outperforms two state of the art synthesis tools morpheus and deepcoder that target these respective domains", + "title_raw": "Program synthesis using conflict-driven learning", + "abstract_raw": "We propose a new conflict-driven program synthesis technique that is capable of learning from past mistakes. Given a spurious program that violates the desired specification, our synthesis algorithm identifies the root cause of the conflict and learns new lemmas that can prevent similar mistakes in the future. Specifically, we introduce the notion of equivalence modulo conflict and show how this idea can be used to learn useful lemmas that allow the synthesizer to prune large parts of the search space. We have implemented a general-purpose CDCL-style program synthesizer called Neo and evaluate it in two different application domains, namely data wrangling in R and functional programming over lists. 
Our experiments demonstrate the substantial benefits of conflict-driven learning and show that Neo outperforms two state-of-the-art synthesis tools, Morpheus and Deepcoder, that target these respective domains.", + "link": "https://www.semanticscholar.org/paper/bbe832982b47a6b39904d5abc608a8c2fc10c5ee", + "scraped_abstract": null, + "citation_best": 101 + }, + { + "paper": "2798609500", + "venue": "1127352206", + "year": "2018", + "title": "a data driven chc solver", + "label": [ + "106663253", + "189790780", + "40738166", + "194989596", + "2780440489", + "11413529", + "2778770139" + ], + "author": [ + "2305185851", + "3181993941", + "2141982898" + ], + "reference": [ + "81626549", + "145069693", + "967948971", + "1268528572", + "1510368738", + "1549166962", + "1819209966", + "1879823367", + "1925669739", + "1928820179", + "1970168990", + "1978277032", + "1979711143", + "2041404167", + "2108776051", + "2125055259", + "2134770942", + "2148177688", + "2153635508", + "2158493209", + "2170322991", + "2185676247", + "2247845964", + "2280703106", + "2414616261", + "2416392025", + "2466715688", + "2483771058", + "2514073783", + "2522890436", + "2741361950", + "2798232457", + "2964054648" + ], + "abstract": "we present a data driven technique to solve constrained horn clauses chcs that encode verification conditions of programs containing unconstrained loops and recursions our chc solver neither constrains the search space from which a predicate s components are inferred e g by constraining the number of variables or the values of coefficients used to specify an invariant nor fixes the shape of the predicate itself e g by bounding the number and kind of logical connectives instead our approach is based on a novel machine learning inspired tool chain that synthesizes chc solutions in terms of arbitrary boolean combinations of unrestricted atomic predicates a cegar based verification loop inside the solver progressively samples representative positive and negative data from 
recursive chcs which is fed to the machine learning tool chain our solver is implemented as an llvm pass in the seahorn verification framework and has been used to successfully verify a large number of nontrivial and challenging c programs from the literature and well known benchmark suites e g sv comp", + "title_raw": "A data-driven CHC solver", + "abstract_raw": "We present a data-driven technique to solve Constrained Horn Clauses (CHCs) that encode verification conditions of programs containing unconstrained loops and recursions. Our CHC solver neither constrains the search space from which a predicate's components are inferred (e.g., by constraining the number of variables or the values of coefficients used to specify an invariant), nor fixes the shape of the predicate itself (e.g., by bounding the number and kind of logical connectives). Instead, our approach is based on a novel machine learning-inspired tool chain that synthesizes CHC solutions in terms of arbitrary Boolean combinations of unrestricted atomic predicates. A CEGAR-based verification loop inside the solver progressively samples representative positive and negative data from recursive CHCs, which is fed to the machine learning tool chain. 
Our solver is implemented as an LLVM pass in the SeaHorn verification framework and has been used to successfully verify a large number of nontrivial and challenging C programs from the literature and well-known benchmark suites (e.g., SV-COMP).", + "link": "https://www.semanticscholar.org/paper/32230fc3c06679474528fc74fd6cc5e189019bd3", + "scraped_abstract": null, + "citation_best": 60 + }, + { + "paper": "3099159815", + "venue": "1127352206", + "year": "2018", + "title": "the semantics of transactions and weak memory in x86 power arm and c", + "label": [ + "12096594", + "63000827", + "27738082", + "134277064", + "184337299", + "199360897", + "2776937632", + "200632571", + "170723468" + ], + "author": [ + "2148831107", + "2558643503", + "131095925" + ], + "reference": [ + "867008410", + "1525350307", + "1545792627", + "1589458582", + "1594228127", + "1808398105", + "1895387792", + "1945229733", + "2017700354", + "2029089965", + "2032945467", + "2039509099", + "2054739713", + "2058943117", + "2075626884", + "2099661831", + "2100091242", + "2105789303", + "2108140316", + "2111858071", + "2113751407", + "2117502039", + "2120473403", + "2129240242", + "2140318192", + "2152885346", + "2159582805", + "2172064476", + "2186104740", + "2280574045", + "2293215590", + "2533070090", + "2535050116", + "2553522901", + "2605308338", + "2625764229", + "2626631502", + "2738891045", + "2898628115", + "2898839952", + "2899402023" + ], + "abstract": "weak memory models provide a complex system centric semantics for concurrent programs while transactional memory tm provides a simpler programmer centric semantics both have been studied in detail but their combined semantics is not well understood this is problematic because such widely used architectures and languages as x86 power and c all support tm and all have weak memory models our work aims to clarify the interplay between weak memory and tm by extending existing axiomatic weak memory models x86 power armv8 and c with new rules 
for tm our formal models are backed by automated tooling that enables 1 the synthesis of tests for validating our models against existing implementations and 2 the model checking of tm related transformations such as lock elision and compiling c transactions to hardware a key finding is that a proposed tm extension to armv8 currently being considered within arm research is incompatible with lock elision without sacrificing portability or performance", + "title_raw": "The semantics of transactions and weak memory in x86, Power, ARM, and C++", + "abstract_raw": "Weak memory models provide a complex, system-centric semantics for concurrent programs, while transactional memory (TM) provides a simpler, programmer-centric semantics. Both have been studied in detail, but their combined semantics is not well understood. This is problematic because such widely-used architectures and languages as x86, Power, and C++ all support TM, and all have weak memory models. Our work aims to clarify the interplay between weak memory and TM by extending existing axiomatic weak memory models (x86, Power, ARMv8, and C++) with new rules for TM. Our formal models are backed by automated tooling that enables (1) the synthesis of tests for validating our models against existing implementations and (2) the model-checking of TM-related transformations, such as lock elision and compiling C++ transactions to hardware. 
A key finding is that a proposed TM extension to ARMv8 currently being considered within ARM Research is incompatible with lock elision without sacrificing portability or performance.", + "link": "https://scholar.google.com/scholar?as_q=&num=10&btnG=Search+Scholar&as_epq=The+Semantics+of+Transactions+and+Weak+Memory+in+x86,+Power,+ARM,+and+C++&as_oq=&as_eq=&as_occt=any&as_sauthors=Chong", + "scraped_abstract": null, + "citation_best": 21 + }, + { + "paper": "2798832443", + "venue": "1184151122", + "year": "2018", + "title": "entity matching with active monotone classification", + "label": [ + "55166926", + "95623464" + ], + "author": [ + "2131082813" + ], + "reference": [ + "51908901", + "107039962", + "1507695921", + "1528361845", + "1981590391", + "2043772275", + "2056707879", + "2067566391", + "2070162726", + "2095708598", + "2104511295", + "2107966677", + "2114232233", + "2115404432", + "2117756453", + "2119320829", + "2143521632", + "2167595980", + "2244198747", + "2295428206", + "2492590231", + "2542998387", + "2559870814", + "2903158431" + ], + "abstract": "given two sets of entities x and y entity matching aims to decide whether x and y represent the same entity for each pair x y in x x y as the last resort human experts can be called upon to inspect every x y but this is expensive because the correct verdict could not be determined without investigation efforts dedicated specifically to the two entities x and y involved it is therefore important to design an algorithm that asks humans to look at only some pairs and renders the verdicts on the other pairs automatically with good accuracy at the core of most if not all existing approaches is the following classification problem the input is a set p of points in rd each of which carries a binary label 0 or 1 a classifier f is a function from rd to 0 1 the objective is to find a classifier that captures the labels of a large number of points in p in this paper we cast the problem as an instance of active 
learning where the goal is to learn a monotone classifier f namely f p f q holds whenever the coordinate of p is at least that of q on all dimensions in our formulation the labels of all points in p are hidden at the beginning an algorithm a can invoke an oracle which discloses the label of a point p in p chosen by a the algorithm may do so repetitively until it has garnered enough information to produce f the cost of a is the number of times that the oracle is called the challenge is to strike a good balance between the cost and the accuracy of the classifier produced we describe algorithms with non trivial guarantees on the cost and accuracy simultaneously we also prove lower bounds that establish the asymptotic optimality of our solutions for a wide range of parameters", + "title_raw": "Entity Matching with Active Monotone Classification", + "abstract_raw": "Given two sets of entities X and Y, entity matching aims to decide whether x and y represent the same entity for each pair (x, y) in X x Y. As the last resort, human experts can be called upon to inspect every (x, y), but this is expensive because the correct verdict could not be determined without investigation efforts dedicated specifically to the two entities x and y involved. It is therefore important to design an algorithm that asks humans to look at only some pairs, and renders the verdicts on the other pairs automatically with good accuracy. At the core of most (if not all) existing approaches is the following classification problem. The input is a set P of points in Rd, each of which carries a binary label: 0 or 1. A classifier F is a function from Rd to (0, 1). The objective is to find a classifier that captures the labels of a large number of points in P. In this paper, we cast the problem as an instance of active learning where the goal is to learn a monotone classifier F, namely, F(p) \u2265 F(q) holds whenever the coordinate of p is at least that of q on all dimensions. 
In our formulation, the labels of all points in P are hidden at the beginning. An algorithm A can invoke an oracle, which discloses the label of a point p in P chosen by A. The algorithm may do so repetitively, until it has garnered enough information to produce F. The cost of A is the number of times that the oracle is called. The challenge is to strike a good balance between the cost and the accuracy of the classifier produced. We describe algorithms with non-trivial guarantees on the cost and accuracy simultaneously. We also prove lower bounds that establish the asymptotic optimality of our solutions for a wide range of parameters.", + "link": "https://www.semanticscholar.org/paper/8aa30805d0a1fe9b5e8c5a26d8c925cb49acbbff", + "scraped_abstract": null, + "citation_best": 16 + }, + { + "paper": "2794473399", + "venue": "1163618098", + "year": "2018", + "title": "on enforcing the digital immunity of a large humanitarian organization", + "label": [ + "69360830", + "108964555", + "123201435", + "2776041517" + ], + "author": [ + "2559524632", + "2795207120", + "1982423839", + "2292701633", + "2347146009", + "2954918469" + ], + "reference": [ + "409344806", + "755860182", + "1447429957", + "1635361314", + "1655958391", + "1919636199", + "1975016298", + "1987686668", + "2021732467", + "2103299932", + "2105731959", + "2116428199", + "2125624394", + "2129660502", + "2141420453", + "2167882086", + "2184387311", + "2278038329", + "2397066487", + "2460441129", + "2490317843", + "2585287165", + "2599930814", + "2604691034", + "2613209986", + "2613584285", + "2717184009", + "2726282394", + "2729599932", + "2733812526", + "2752073028", + "2773868624", + "2794533297", + "2911978475", + "2962933337", + "2963517786", + "2963997141" + ], + "abstract": "humanitarian action the process of aiding individuals in situations of crises poses unique information security challenges due to natural or manmade disasters the adverse environments in which it takes place and the scale and multi 
disciplinary nature of the problems despite these challenges humanitarian organizations are transitioning towards a strong reliance on the digitization of collected data and digital tools which improves their effectiveness but also exposes them to computer security threats in this paper we conduct a qualitative analysis of the computer security challenges of the international committee of the red cross icrc a large humanitarian organization with over sixteen thousand employees an international legal personality which involves privileges and immunities and over 150 years of experience with armed conflicts and other situations of violence worldwide to investigate the computer security needs and practices of the icrc from an operational technical legal and managerial standpoint by considering individual organizational and governmental levels we interviewed 27 field workers it staff lawyers and managers our results provide a first look at the unique security and privacy challenges that humanitarian organizations face when collecting processing transferring and sharing data to enable humanitarian action for a multitude of sensitive activities these results highlight among other challenges the trade offs between operational security and requirements stemming from all stakeholders the legal barriers for data sharing among jurisdictions especially the need to complement privileges and immunities with robust technological safeguards in order to avoid any leakages that might hinder access and potentially compromise the neutrality impartiality and independence of humanitarian action", + "title_raw": "On Enforcing the Digital Immunity of a Large Humanitarian Organization", + "abstract_raw": "Humanitarian action, the process of aiding individuals in situations of crises, poses unique information-security challenges due to natural or manmade disasters, the adverse environments in which it takes place, and the scale and multi-disciplinary nature of the problems. 
Despite these challenges, humanitarian organizations are transitioning towards a strong reliance on the digitization of collected data and digital tools, which improves their effectiveness but also exposes them to computer security threats. In this paper, we conduct a qualitative analysis of the computer-security challenges of the International Committee of the Red Cross (ICRC), a large humanitarian organization with over sixteen thousand employees, an international legal personality, which involves privileges and immunities, and over 150 years of experience with armed conflicts and other situations of violence worldwide. To investigate the computer security needs and practices of the ICRC from an operational, technical, legal, and managerial standpoint by considering individual, organizational, and governmental levels, we interviewed 27 field workers, IT staff, lawyers, and managers. Our results provide a first look at the unique security and privacy challenges that humanitarian organizations face when collecting, processing, transferring, and sharing data to enable humanitarian action for a multitude of sensitive activities. 
These results highlight, among other challenges, the trade offs between operational security and requirements stemming from all stakeholders, the legal barriers for data sharing among jurisdictions; especially, the need to complement privileges and immunities with robust technological safeguards in order to avoid any leakages that might hinder access and potentially compromise the neutrality, impartiality, and independence of humanitarian action.", + "link": "https://www.semanticscholar.org/paper/a158c05ef5d55a6cce7c47cb805630412a1e2deb", + "scraped_abstract": null, + "citation_best": 18 + }, + { + "paper": "2787222284", + "venue": "1163618098", + "year": "2018", + "title": "deepsec deciding equivalence properties in security protocols theory and practice", + "label": [ + "136643341", + "33884865", + "178489894", + "80444323", + "148730421" + ], + "author": [ + "2779932898", + "2112142368", + "2786527262" + ], + "reference": [ + "35034951", + "40134741", + "146244851", + "171472358", + "598363404", + "1565697634", + "1570621944", + "1571378841", + "1588028307", + "1603878273", + "1965890562", + "1973054120", + "1979418580", + "1994676271", + "1996497395", + "2001214919", + "2001874371", + "2005880313", + "2009426964", + "2039875296", + "2042609207", + "2045673642", + "2049091002", + "2057936571", + "2060349224", + "2102565724", + "2114497629", + "2117064875", + "2128932399", + "2152744480", + "2167816957", + "2172018206", + "2336945419", + "2508881716", + "2520834878", + "2528654130", + "2592309220", + "2728389594", + "2765112575", + "2914971001", + "2963407533" + ], + "abstract": "automated verification has become an essential part in the security evaluation of cryptographic protocols recently there has been a considerable effort to lift the theory and tool support that existed for reachability properties to the more complex case of equivalence properties in this paper we contribute both to the theory and practice of this verification problem we establish new 
complexity results for static equivalence trace equivalence and labelled bisimilarity and provide a decision procedure for these equivalences in the case of a bounded number of sessions our procedure is the first to decide trace equivalence and labelled bisimilarity exactly for a large variety of cryptographic primitives those that can be represented by a subterm convergent destructor rewrite system we implemented the procedure in a new tool deepsec we showed through extensive experiments that it is significantly more efficient than other similar tools while at the same time raises the scope of the protocols that can be analysed", + "title_raw": "DEEPSEC: Deciding Equivalence Properties in Security Protocols Theory and Practice", + "abstract_raw": "Automated verification has become an essential part in the security evaluation of cryptographic protocols. Recently, there has been a considerable effort to lift the theory and tool support that existed for reachability properties to the more complex case of equivalence properties. In this paper we contribute both to the theory and practice of this verification problem. We establish new complexity results for static equivalence, trace equivalence and labelled bisimilarity and provide a decision procedure for these equivalences in the case of a bounded number of sessions. Our procedure is the first to decide trace equivalence and labelled bisimilarity exactly for a large variety of cryptographic primitives\u2014those that can be represented by a subterm convergent destructor rewrite system. We implemented the procedure in a new tool, DEEPSEC. 
We showed through extensive experiments that it is significantly more efficient than other similar tools, while at the same time raises the scope of the protocols that can be analysed.", + "link": "https://www.semanticscholar.org/paper/f1e11d12ab50408ef5e7c06e7a8f8998b31bc7f4", + "scraped_abstract": null, + "citation_best": 54 + }, + { + "paper": "2876796450", + "venue": "1152462849", + "year": "2018", + "title": "inferring persistent interdomain congestion", + "label": [ + "31258907", + "99237066", + "123745756", + "110875604", + "509933004" + ], + "author": [ + "2236811232", + "2135387429", + "2398812458", + "2117137529", + "2007884129", + "2875795067", + "2816123424", + "2134690889", + "2112598219", + "2152871422" + ], + "reference": [ + "4085598", + "78853231", + "198328412", + "211079837", + "330195052", + "1974561310", + "1995973276", + "2040534455", + "2044744030", + "2048565188", + "2049603274", + "2050353626", + "2054479920", + "2092643753", + "2100752355", + "2103117727", + "2104471584", + "2107754621", + "2115789261", + "2141708177", + "2157525327", + "2162266953", + "2164066487", + "2167719412", + "2398680174", + "2489822048", + "2531575454", + "2554361663", + "2555294241", + "2559093073", + "2744660401", + "2762038374", + "2765242363", + "2769547553", + "2770108145", + "2791869653", + "2951350170", + "2963017388", + "2969224021", + "3121797256" + ], + "abstract": "there is significant interest in the technical and policy communities regarding the extent scope and consumer harm of persistent interdomain congestion we provide empirical grounding for discussions of interdomain congestion by developing a system and method to measure congestion on thousands of interdomain links without direct access to them we implement a system based on the time series latency probes tslp technique that identifies links with evidence of recurring congestion suggestive of an under provisioned link we deploy our system at 86 vantage points worldwide and show that congestion 
inferred using our lightweight tslp method correlates with other metrics of interconnection performance impairment we use our method to study interdomain links of eight large u s broadband access providers from march 2016 to december 2017 and validate our inferences against ground truth traffic statistics from two of the providers for the period of time over which we gathered measurements we did not find evidence of widespread endemic congestion on interdomain links between access isps and directly connected transit and content providers although some such links exhibited recurring congestion patterns we describe limitations open challenges and a path toward the use of this method for large scale third party monitoring of the internet interconnection ecosystem", + "title_raw": "Inferring persistent interdomain congestion", + "abstract_raw": "There is significant interest in the technical and policy communities regarding the extent, scope, and consumer harm of persistent interdomain congestion. We provide empirical grounding for discussions of interdomain congestion by developing a system and method to measure congestion on thousands of interdomain links without direct access to them. We implement a system based on the Time Series Latency Probes (TSLP) technique that identifies links with evidence of recurring congestion suggestive of an under-provisioned link. We deploy our system at 86 vantage points worldwide and show that congestion inferred using our lightweight TSLP method correlates with other metrics of interconnection performance impairment. We use our method to study interdomain links of eight large U.S. broadband access providers from March 2016 to December 2017, and validate our inferences against ground-truth traffic statistics from two of the providers. 
For the period of time over which we gathered measurements, we did not find evidence of widespread endemic congestion on interdomain links between access ISPs and directly connected transit and content providers, although some such links exhibited recurring congestion patterns. We describe limitations, open challenges, and a path toward the use of this method for large-scale third-party monitoring of the Internet interconnection ecosystem.", + "link": "https://www.semanticscholar.org/paper/7aa75a0c2f1af69c57fc0cda64ce3263785e58f9", + "scraped_abstract": null, + "citation_best": 88 + }, + { + "paper": "2799048248", + "venue": "1140684652", + "year": "2018", + "title": "should i follow the crowd a probabilistic analysis of the effectiveness of popularity in recommender systems", + "label": [ + "119857082", + "21569690", + "24404364", + "557471498", + "87546605", + "200632571" + ], + "author": [ + "2339744729", + "2008802352" + ], + "reference": [ + "1224564842", + "1550621568", + "1752113488", + "1967810983", + "1972594981", + "1992665562", + "2009718036", + "2011700584", + "2019976298", + "2020631728", + "2041161771", + "2044758663", + "2046974451", + "2048045485", + "2091087160", + "2096563449", + "2101409192", + "2109603058", + "2118978333", + "2147453867", + "2148117599", + "2150886314", + "2159094788", + "2169038197", + "2171960770", + "2219888463", + "2286205751", + "2295912067", + "2476960546", + "2739587313", + "2739958520", + "2740253077", + "3122000667", + "3125944471" + ], + "abstract": "the use of ir methodology in the evaluation of recommender systems has become common practice in recent years ir metrics have been found however to be strongly biased towards rewarding algorithms that recommend popular items the same bias that state of the art recommendation algorithms display recent research has confirmed and measured such biases and proposed methods to avoid them the fundamental question remains open though whether popularity is really a bias we should 
avoid or not whether it could be a useful and reliable signal in recommendation or it may be unfairly rewarded by the experimental biases we address this question at a formal level by identifying and modeling the conditions that can determine the answer in terms of dependencies between key random variables involving item rating discovery and relevance we find conditions that guarantee popularity to be effective or quite the opposite and for the measured metric values to reflect a true effectiveness or qualitatively deviate from it we exemplify and confirm the theoretical findings with empirical results we build a crowdsourced dataset devoid of the usual biases displayed by common publicly available data in which we illustrate contradictions between the accuracy that would be measured in a common biased offline experimental setting and the actual accuracy that can be measured with unbiased observations", + "title_raw": "Should I Follow the Crowd?: A Probabilistic Analysis of the Effectiveness of Popularity in Recommender Systems", + "abstract_raw": "The use of IR methodology in the evaluation of recommender systems has become common practice in recent years. IR metrics have been found however to be strongly biased towards rewarding algorithms that recommend popular items \"the same bias that state of the art recommendation algorithms display. Recent research has confirmed and measured such biases, and proposed methods to avoid them. The fundamental question remains open though whether popularity is really a bias we should avoid or not; whether it could be a useful and reliable signal in recommendation, or it may be unfairly rewarded by the experimental biases. We address this question at a formal level by identifying and modeling the conditions that can determine the answer, in terms of dependencies between key random variables, involving item rating, discovery and relevance. 
We find conditions that guarantee popularity to be effective or quite the opposite, and for the measured metric values to reflect a true effectiveness, or qualitatively deviate from it. We exemplify and confirm the theoretical findings with empirical results. We build a crowdsourced dataset devoid of the usual biases displayed by common publicly available data, in which we illustrate contradictions between the accuracy that would be measured in a common biased offline experimental setting, and the actual accuracy that can be measured with unbiased observations.", + "link": "https://www.semanticscholar.org/paper/dc6f76cb0036c26560de4e8512042d2576f84b38", + "scraped_abstract": null, + "citation_best": 13 + }, + { + "paper": "2771541561", + "venue": "1131589359", + "year": "2017", + "title": "a refined mean field approximation", + "label": [ + "22684755" + ], + "author": [ + "1968213470", + "1508961335" + ], + "reference": [ + "1596150805", + "1968517161", + "2006537960", + "2010859647", + "2029804570", + "2053513378", + "2062259223", + "2062573385", + "2065597356", + "2071490795", + "2087056310", + "2088748973", + "2096829378", + "2112157291", + "2117702591", + "2117966169", + "2128604165", + "2149633325", + "2170048175", + "2172051303", + "2414210137", + "2480611454", + "2731312897", + "2963917527", + "2964136258", + "3005347330", + "3011357933", + "3023662267", + "3104659353", + "3128062393" + ], + "abstract": "stochastic models have been used to assess the performance of computer and other systems for many decades as a direct analysis of large and complex stochastic models is often prohibitive approximations methods to study their behavior have been devised one very popular approximation method relies on mean field theory its widespread use can be explained by the relative ease involved to define and solve a mean field model in combination with its high accuracy for large systems", + "title_raw": "A Refined Mean Field Approximation", + "abstract_raw": "Stochastic 
models have been used to assess the performance of computer (and other) systems for many decades. As a direct analysis of large and complex stochastic models is often prohibitive, approximations methods to study their behavior have been devised. One very popular approximation method relies on mean field theory. Its widespread use can be explained by the relative ease involved to define and solve a mean field model in combination with its high accuracy for large systems.", + "link": "https://www.semanticscholar.org/paper/2272d22546e77ca98364351e75e5dad1ff582699", + "scraped_abstract": null, + "citation_best": 32 + }, + { + "paper": "2798891709", + "venue": "1175089206", + "year": "2018", + "title": "surf practical range query filtering with fast succinct tries", + "label": [ + "110432227", + "46135064", + "147224247", + "90790829", + "190290938", + "162319229", + "2776901988", + "11413529", + "106131492" + ], + "author": [ + "2483299331", + "2168649741", + "2069156272", + "2130397481", + "2151237659", + "1894617025", + "1916729783" + ], + "reference": [ + "17958277", + "102693926", + "202508549", + "1507759507", + "1533567700", + "1551498417", + "1554285337", + "1580997674", + "1798412263", + "1823694584", + "1946402190", + "1974033543", + "1985229168", + "2000568570", + "2013291893", + "2014977566", + "2030062409", + "2035735180", + "2068739275", + "2087946700", + "2093788424", + "2114790712", + "2122011234", + "2123845384", + "2125770033", + "2145269645", + "2148113067", + "2149243190", + "2152589529", + "2157740020", + "2157963663", + "2161715626", + "2170899819", + "2176659866", + "2259014610", + "2310385842", + "2429518132", + "2584004798", + "2605800201", + "2612368883", + "3136682179" + ], + "abstract": "we present the succinct range filter surf a fast and compact data structure for approximate membership tests unlike traditional bloom filters surf supports both single key lookups and common range queries open range queries closed range queries and range 
counts surf is based on a new data structure called the fast succinct trie fst that matches the point and range query performance of state of the art order preserving indexes while consuming only 10 bits per trie node the false positive rates in surf for both point and range queries are tunable to satisfy different application needs we evaluate surf in rocksdb as a replacement for its bloom filters to reduce i o by filtering requests before they access on disk data structures our experiments on a 100 gb dataset show that replacing rocksdb s bloom filters with surfs speeds up open seek without upper bound and closed seek with upper bound queries by up to 1 5 and 5 with a modest cost on the worst case all missing point query throughput due to slightly higher false positive rate", + "title_raw": "SuRF: Practical Range Query Filtering with Fast Succinct Tries", + "abstract_raw": "We present the Succinct Range Filter (SuRF), a fast and compact data structure for approximate membership tests. Unlike traditional Bloom filters, SuRF supports both single-key lookups and common range queries: open-range queries, closed-range queries, and range counts. SuRF is based on a new data structure called the Fast Succinct Trie (FST) that matches the point and range query performance of state-of-the-art order-preserving indexes, while consuming only 10 bits per trie node. The false positive rates in SuRF for both point and range queries are tunable to satisfy different application needs. We evaluate SuRF in RocksDB as a replacement for its Bloom filters to reduce I/O by filtering requests before they access on-disk data structures. 
Our experiments on a 100 GB dataset show that replacing RocksDB's Bloom filters with SuRFs speeds up open-seek (without upper-bound) and closed-seek (with upper-bound) queries by up to 1.5\u00d7 and 5\u00d7 with a modest cost on the worst-case (all-missing) point query throughput due to slightly higher false positive rate.", + "link": "https://scholar.google.com/scholar?as_q=&num=10&btnG=Search+Scholar&as_epq=SuRF:+Practical+Range+Query+Filtering+with+Fast+Succinct+Tries&as_oq=&as_eq=&as_occt=any&as_sauthors=Zhang", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2896381102", + "venue": "1166315290", + "year": "2018", + "title": "authoring and verifying human robot interactions", + "label": [ + "145460709", + "98183937", + "107457646", + "13854087", + "162947575", + "128644962", + "90509273" + ], + "author": [ + "2897086169", + "229636765", + "1272946479", + "2149956271" + ], + "reference": [ + "15191017", + "125598877", + "966787950", + "1504975347", + "1505483226", + "1512912754", + "1517943492", + "1560374668", + "1574182216", + "1607414258", + "1862398452", + "1966988118", + "1969784320", + "1973356946", + "1997969970", + "2000818954", + "2001006503", + "2011652428", + "2012844124", + "2023808162", + "2056257735", + "2058919032", + "2060981264", + "2062160780", + "2063688338", + "2067589702", + "2083150459", + "2103530214", + "2108979926", + "2109525524", + "2110727992", + "2124623089", + "2125320384", + "2142294821", + "2153190547", + "2157289187", + "2157837575", + "2161082723", + "2169549255", + "2169574110", + "2170486381", + "2186442710", + "2295204826", + "2314161171", + "2529170714", + "2599509318", + "2605161467" + ], + "abstract": "as social agents robots designed for human interaction must adhere to human social norms how can we enable designers engineers and roboticists to design robot behaviors that adhere to human social norms and do not result in interaction breakdowns in this paper we use automated formal verification 
methods to facilitate the encoding of appropriate social norms into the interaction design of social robots and the detection of breakdowns and norm violations in order to prevent them we have developed an authoring environment that utilizes these methods to provide developers of social robot applications with feedback at design time and evaluated the benefits of their use in reducing such breakdowns and violations in human robot interactions our evaluation with application developers n 9 shows that the use of formal verification methods increases designers ability to identify and contextualize social norm violations we discuss the implications of our approach for the future development of tools for effective design of social robot applications", + "title_raw": "Authoring and Verifying Human-Robot Interactions", + "abstract_raw": "As social agents, robots designed for human interaction must adhere to human social norms. How can we enable designers, engineers, and roboticists to design robot behaviors that adhere to human social norms and do not result in interaction breakdowns? In this paper, we use automated formal-verification methods to facilitate the encoding of appropriate social norms into the interaction design of social robots and the detection of breakdowns and norm violations in order to prevent them. We have developed an authoring environment that utilizes these methods to provide developers of social-robot applications with feedback at design time and evaluated the benefits of their use in reducing such breakdowns and violations in human-robot interactions. Our evaluation with application developers (N=9) shows that the use of formal-verification methods increases designers' ability to identify and contextualize social-norm violations. 
We discuss the implications of our approach for the future development of tools for effective design of social-robot applications.", + "link": "https://www.semanticscholar.org/paper/cc2ca2851136ed851b4b77da7bc49fee2090e7bd", + "scraped_abstract": null, + "citation_best": 52 + }, + { + "paper": "2896806675", + "venue": "1166315290", + "year": "2018", + "title": "porta profiling software tutorials using operating system wide activity tracing", + "label": [ + "138673069", + "21959979", + "48044578", + "169590947", + "49774154", + "187191949", + "25688753", + "2777904410", + "93996380" + ], + "author": [ + "2344266429", + "2285416364" + ], + "reference": [ + "148956775", + "1748815599", + "2016681195", + "2047226031", + "2051074879", + "2051277183", + "2111009050", + "2121867457", + "2127184512", + "2129896776", + "2142094977", + "2142389575", + "2144433126", + "2149074597", + "2158532686", + "2168189845", + "2533771361", + "2765429991", + "2766094787", + "2767109660" + ], + "abstract": "it can be hard for tutorial creators to get fine grained feedback about how learners are actually stepping through their tutorials and which parts lead to the most struggle to provide such feedback for technical software tutorials we introduce the idea of tutorial profiling which is inspired by software code profiling we prototyped this idea in a system called porta that automatically tracks how users navigate through a tutorial webpage and what actions they take on their computer such as running shell commands invoking compilers and logging into remote servers porta surfaces this trace data in the form of profiling visualizations that augment the tutorial with heatmaps of activity hotspots and markers that expand to show event details error messages and embedded screencast videos of user actions we found through a user study of 3 tutorial creators and 12 students who followed their tutorials that porta enabled both the tutorial creators and the students to provide more specific 
targeted and actionable feedback about how to improve these tutorials porta opens up possibilities for performing user testing of technical documentation in a more systematic and scalable way", + "title_raw": "Porta: Profiling Software Tutorials Using Operating-System-Wide Activity Tracing", + "abstract_raw": "It can be hard for tutorial creators to get fine-grained feedback about how learners are actually stepping through their tutorials and which parts lead to the most struggle. To provide such feedback for technical software tutorials, we introduce the idea of tutorial profiling, which is inspired by software code profiling. We prototyped this idea in a system called Porta that automatically tracks how users navigate through a tutorial webpage and what actions they take on their computer such as running shell commands, invoking compilers, and logging into remote servers. Porta surfaces this trace data in the form of profiling visualizations that augment the tutorial with heatmaps of activity hotspots and markers that expand to show event details, error messages, and embedded screencast videos of user actions. We found through a user study of 3 tutorial creators and 12 students who followed their tutorials that Porta enabled both the tutorial creators and the students to provide more specific, targeted, and actionable feedback about how to improve these tutorials. 
Porta opens up possibilities for performing user testing of technical documentation in a more systematic and scalable way.", + "link": "https://www.semanticscholar.org/paper/4a88bbcac684033d9c3de35866d9c150d174e53c", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2897456565", + "venue": "1166315290", + "year": "2018", + "title": "resi a highly flexible pressure sensitive imperceptible textile interface based on resistive yarns", + "label": [ + "6899612", + "108265739" + ], + "author": [ + "2223409512", + "1238674649", + "2151520490", + "1793298736", + "2343259542", + "2899294867", + "2282489031", + "2431725070", + "1989514917", + "2237584407", + "2948658362" + ], + "reference": [ + "434451455", + "1493041716", + "1964708859", + "1968016787", + "1981018544", + "1982828750", + "2009840913", + "2028781180", + "2031901555", + "2040126927", + "2040565638", + "2058286452", + "2066005892", + "2068506577", + "2073688524", + "2100212585", + "2103793365", + "2111424004", + "2120062115", + "2139924932", + "2147376995", + "2149389784", + "2153255031", + "2159338847", + "2171760908", + "2237285750", + "2332866451", + "2336951101", + "2345907455", + "2347171008", + "2405564622", + "2518198411", + "2539017927", + "2611140757", + "2766122570", + "2766835435", + "2793708510", + "2795676776", + "2955471183" + ], + "abstract": "we present resi resistive textile sensor interfaces a novel sensing approach enabling a new kind of yarn based resistive pressure sensing the core of resi builds on a newly designed yarn which features conductive and resistive properties we run a technical study to characterize the behaviour of the yarn and to determine the sensing principle we demonstrate how the yarn can be used as a pressure sensor and discuss how specific issues such as connecting the soft textile sensor with the rigid electronics can be solved in addition we present a platform independent api that allows rapid prototyping to show its versatility we present 
applications developed with different textile manufacturing techniques including hand sewing machine sewing and weaving resi is a novel technology enabling textile pressure sensing to augment everyday objects with interactive capabilities", + "title_raw": "RESi: A Highly Flexible, Pressure-Sensitive, Imperceptible Textile Interface Based on Resistive Yarns", + "abstract_raw": "We present RESi (Resistive tExtile Sensor Interfaces), a novel sensing approach enabling a new kind of yarn-based, resistive pressure sensing. The core of RESi builds on a newly designed yarn, which features conductive and resistive properties. We run a technical study to characterize the behaviour of the yarn and to determine the sensing principle. We demonstrate how the yarn can be used as a pressure sensor and discuss how specific issues, such as connecting the soft textile sensor with the rigid electronics can be solved. In addition, we present a platform-independent API that allows rapid prototyping. To show its versatility, we present applications developed with different textile manufacturing techniques, including hand sewing, machine sewing, and weaving. 
RESi is a novel technology, enabling textile pressure sensing to augment everyday objects with interactive capabilities.", + "link": "https://www.semanticscholar.org/paper/8d66011e47ddad59388be792559a120b2d33a03b", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "3105298605", + "venue": "1133523790", + "year": "2020", + "title": "the ubiquity of large graphs and surprising challenges of graph processing extended survey", + "label": [ + "48044578", + "36464697", + "2777904410", + "72634772", + "2522767166" + ], + "author": [ + "2613897873", + "2754405838", + "2093871940", + "2163619555", + "360895095" + ], + "reference": [ + "169140825", + "631140850", + "1458652123", + "1564275181", + "1810256132", + "1893177189", + "1971132461", + "2026743036", + "2050516909", + "2067783443", + "2124967403", + "2147468287", + "2170616854", + "2219764230", + "2281349958", + "2296407087", + "2404907442", + "2468929674", + "2493178615", + "2520469311", + "2546973305", + "2594719925", + "2668736619", + "2771362234", + "2889320527", + "2897893551", + "2963636116", + "3099706321", + "3100284210" + ], + "abstract": "graph processing is becoming increasingly prevalent across many application domains in spite of this prevalence there is little research about how graphs are actually used in practice we performed an extensive study that consisted of an online survey of 89 users a review of the mailing lists source repositories and white papers of a large suite of graph software products and in person interviews with 6 users and 2 developers of these products our online survey aimed at understanding i the types of graphs users have ii the graph computations users run iii the types of graph software users use and iv the major challenges users face when processing their graphs we describe the participants responses to our questions highlighting common patterns and challenges based on our interviews and survey of the rest of our sources we were able to answer some new 
questions that were raised by participants responses to our online survey and understand the specific applications that use graph data and software our study revealed surprising facts about graph processing in practice in particular real world graphs represent a very diverse range of entities and are often very large scalability and visualization are undeniably the most pressing challenges faced by participants and data integration recommendations and fraud detection are very popular applications supported by existing graph software we hope these findings can guide future research", + "title_raw": "The ubiquity of large graphs and surprising challenges of graph processing: extended survey", + "abstract_raw": "Graph processing is becoming increasingly prevalent across many application domains. In spite of this prevalence, there is little research about how graphs are actually used in practice. We performed an extensive study that consisted of an online survey of 89 users, a review of the mailing lists, source repositories, and white papers of a large suite of graph software products, and in-person interviews with 6 users and 2 developers of these products. Our online survey aimed at understanding: (i) the types of graphs users have; (ii) the graph computations users run; (iii) the types of graph software users use; and (iv) the major challenges users face when processing their graphs. We describe the participants\u2019 responses to our questions highlighting common patterns and challenges. Based on our interviews and survey of the rest of our sources, we were able to answer some new questions that were raised by participants\u2019 responses to our online survey and understand the specific applications that use graph data and software. Our study revealed surprising facts about graph processing in practice. 
In particular, real-world graphs represent a very diverse range of entities and are often very large, scalability and visualization are undeniably the most pressing challenges faced by participants, and data integration, recommendations, and fraud detection are very popular applications supported by existing graph software. We hope these findings can guide future research.", + "link": "https://scholar.google.com/scholar?as_q=&num=10&btnG=Search+Scholar&as_epq=The+Ubiquity+of+Large+Graphs+and+Surprising+Challenges+of+Graph+Processing:+Extended+Survey&as_oq=&as_eq=&as_occt=any&as_sauthors=Sahu", + "scraped_abstract": null, + "citation_best": 78 + }, + { + "paper": "2788150400", + "venue": "1135342153", + "year": "2018", + "title": "highlife higher arity fact harvesting", + "label": [ + "120567893", + "2777530160", + "204321447" + ], + "author": [ + "2106695673", + "2167055687", + "514836396" + ], + "reference": [ + "11298561", + "224064951", + "1489949474", + "1515300998", + "1529731474", + "1538085078", + "1561787084", + "1984179398", + "1996894838", + "2004763266", + "2006587457", + "2007269563", + "2021682442", + "2031966872", + "2038324640", + "2038742116", + "2041140023", + "2045495924", + "2051547811", + "2052947212", + "2064299012", + "2080133951", + "2088800331", + "2101816065", + "2103330371", + "2106921868", + "2107598941", + "2119325477", + "2122865749", + "2129842875", + "2132655161", + "2133973199", + "2138627627", + "2141312052", + "2143672210", + "2149997683", + "2151170651", + "2151718942", + "2155069392", + "2158847908", + "2160920407", + "2167571757", + "2169022614", + "2250225327", + "2251896332", + "2305972650", + "2407338347", + "2503980970", + "2515658387", + "2573225424", + "2963020213" + ], + "abstract": "text based knowledge extraction methods for populating knowledge bases have focused on binary facts relationships between two entities however in advanced domains such as health it is often crucial to consider ternary and higher arity 
relations an example is to capture which drug is used for which disease at which dosage e g 2 5 mg day for which kinds of patients e g children vs adults in this work we present an approach to harvest higher arity facts from textual sources our method is distantly supervised by seed facts and uses the fact pattern duality principle to gather fact candidates with high recall for high precision we devise a constraint based reasoning method to eliminate false candidates a major novelty is in coping with the difficulty that higher arity facts are often expressed only partially in texts and strewn across multiple sources for example one sentence may refer to a drug a disease and a group of patients whereas another sentence talks about the drug its dosage and the target group without mentioning the disease our methods cope well with such partially observed facts at both pattern learning and constraint reasoning stages experiments with health related documents and with news articles demonstrate the viability of our method", + "title_raw": "HighLife: Higher-arity Fact Harvesting", + "abstract_raw": "Text-based knowledge extraction methods for populating knowledge bases have focused on binary facts: relationships between two entities. However, in advanced domains such as health, it is often crucial to consider ternary and higher-arity relations. An example is to capture which drug is used for which disease at which dosage (e.g. 2.5 mg/day) for which kinds of patients (e.g., children vs. adults). In this work, we present an approach to harvest higher-arity facts from textual sources. Our method is distantly supervised by seed facts, and uses the fact-pattern duality principle to gather fact candidates with high recall. For high precision, we devise a constraint-based reasoning method to eliminate false candidates. A major novelty is in coping with the difficulty that higher-arity facts are often expressed only partially in texts and strewn across multiple sources. 
For example, one sentence may refer to a drug, a disease and a group of patients, whereas another sentence talks about the drug, its dosage and the target group without mentioning the disease. Our methods cope well with such partially observed facts, at both pattern-learning and constraint-reasoning stages. Experiments with health-related documents and with news articles demonstrate the viability of our method.", + "link": "https://www.semanticscholar.org/paper/eb09d07d828153c22f1abf0eef8ce59ce63c08e1", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2963019788", + "venue": "1184914352", + "year": "2017", + "title": "label free supervision of neural networks with physics and domain knowledge", + "label": [ + "119857082", + "81363708", + "207685749", + "50644808", + "8038995", + "125411270" + ], + "author": [ + "2611252502", + "248646318" + ], + "reference": [], + "abstract": "in many machine learning applications labeled data is scarce and obtaining more labels is expensive we introduce a new approach to supervising neural networks by specifying constraints that should hold over the output space rather than direct examples of input output pairs these constraints are derived from prior domain knowledge e g from known laws of physics we demonstrate the effectiveness of this approach on real world and simulated computer vision tasks we are able to train a convolutional neural network to detect and track objects without any labeled examples our approach can significantly reduce the need for labeled training data but introduces new challenges for encoding prior knowledge into appropriate loss functions", + "title_raw": "Label-Free Supervision of Neural Networks with Physics and Domain Knowledge", + "abstract_raw": "In many machine learning applications, labeled data is scarce and obtaining more labels is expensive. 
We introduce a new approach to supervising neural networks by specifying constraints that should hold over the output space, rather than direct examples of input-output pairs. These constraints are derived from prior domain knowledge, e.g., from known laws of physics. We demonstrate the effectiveness of this approach on real world and simulated computer vision tasks. We are able to train a convolutional neural network to detect and track objects without any labeled examples. Our approach can significantly reduce the need for labeled training data, but introduces new challenges for encoding prior knowledge into appropriate loss functions.", + "link": "https://www.semanticscholar.org/paper/2ee629820b95f311927d24570d7719bd2843f66d", + "scraped_abstract": null, + "citation_best": 279 + }, + { + "paper": "2963069394", + "venue": "1188739475", + "year": "2017", + "title": "probabilistic typology deep generative models of vowel inventories", + "label": [ + "49937458", + "39890363", + "60509570", + "2779581591", + "204321447" + ], + "author": [ + "2148165152", + "2023211357" + ], + "reference": [ + "107988705", + "335420304", + "1514520293", + "1547224907", + "1586236210", + "1875231349", + "1949981797", + "1976587578", + "1980452149", + "1985093013", + "1992153276", + "2000982178", + "2006672084", + "2020999234", + "2042492924", + "2051434435", + "2064774787", + "2068674173", + "2119056510", + "2134145060", + "2138556193", + "2251164799", + "2251523416", + "2270070752", + "2408128689", + "2964105469", + "2991416376", + "3103014337", + "3104324249", + "3129013190" + ], + "abstract": "linguistic typology studies the range of structures present in human language the main goal of the field is to discover which sets of possible phenomena are universal and which are merely frequent for example all languages have vowels while most but not all languages have an u sound in this paper we present the first probabilistic treatment of a basic question in phonological typology what 
makes a natural vowel inventory we introduce a series of deep stochastic point processes and contrast them with previous computational simulation based approaches we provide a comprehensive suite of experiments on over 200 distinct languages", + "title_raw": "Probabilistic Typology: Deep Generative Models of Vowel Inventories", + "abstract_raw": "Linguistic typology studies the range of structures present in human language. The main goal of the field is to discover which sets of possible phenomena are universal, and which are merely frequent. For example, all languages have vowels, while most\u2014but not all\u2014languages have an /u/ sound. In this paper we present the first probabilistic treatment of a basic question in phonological typology: What makes a natural vowel inventory? We introduce a series of deep stochastic point processes, and contrast them with previous computational, simulation-based approaches. We provide a comprehensive suite of experiments on over 200 distinct languages.", + "link": "https://www.semanticscholar.org/paper/6fad97c4fe0cfb92478d8a17a4e6aaa8637d8222", + "scraped_abstract": null, + "citation_best": 7 + }, + { + "paper": "2591131751", + "venue": "1163450153", + "year": "2017", + "title": "designing gamified applications that make safe driving more engaging", + "label": [ + "313442", + "2780689630", + "107457646", + "40140605" + ], + "author": [ + "1992394875", + "1974215767", + "2158710748", + "2915492427" + ], + "reference": [ + "17855702", + "49015900", + "116278252", + "162195864", + "204634304", + "240785110", + "377384883", + "587751979", + "1153871568", + "1445294979", + "1563185315", + "1575647990", + "1592821266", + "1622751954", + "1629766301", + "1647397793", + "1919094054", + "1945788957", + "1968621287", + "1973076323", + "1974370022", + "1974975426", + "1980996830", + "1990698646", + "1993744789", + "1998933811", + "2005643425", + "2008370323", + "2011655389", + "2015476236", + "2017427280", + "2017560722", + 
"2018030857", + "2022832873", + "2023718959", + "2032191478", + "2036087188", + "2040180567", + "2043963681", + "2048949537", + "2066735888", + "2066813875", + "2067515554", + "2078569098", + "2082425367", + "2091449145", + "2106866030", + "2118531620", + "2118615835", + "2119927950", + "2127090229", + "2127693141", + "2133443878", + "2156565444", + "2160353804", + "2162514427", + "2164682760", + "2164912151", + "2206121576", + "2212143498", + "2295109534", + "2317102837", + "2342091124", + "2343175937", + "2399583267", + "2408887383", + "2416323622", + "2478554489", + "2479485957", + "2528638295", + "2532949291", + "2561740679", + "2574066460", + "3023580939" + ], + "abstract": "low levels of engagement while driving can pose road safety risks e g inattention during low traffic or routine trips interactive technologies that increase task engagement could therefore offer safety benefits e g through performance feedback increased challenge and incentives as a means to build upon these notions we chose to explore gamification of the driving task the research aim was to study how to design gamified applications that make safe driving more engaging we present six design lenses which bring into focus considerations most relevant to creating engaging car applications a user study enhanced our understanding of design requirements and revealed user personas to support the development of such applications these lenses and personas informed two prototypes which we evaluated in driving simulator studies our results indicate that the gamified conditions increased driver engagement and reduced driving speeds as such our work contributes towards the design of engaging applications that are both appropriate to the safety critical driving context and compelling to users", + "title_raw": "Designing Gamified Applications that Make Safe Driving More Engaging", + "abstract_raw": "Low levels of engagement while driving can pose road safety risks, e.g., inattention during low traffic or 
routine trips. Interactive technologies that increase task engagement could therefore offer safety benefits, e.g., through performance feedback, increased challenge, and incentives. As a means to build upon these notions, we chose to explore gamification of the driving task. The research aim was to study how to design gamified applications that make safe driving more engaging. We present six design lenses which bring into focus considerations most relevant to creating engaging car applications. A user study enhanced our understanding of design requirements and revealed user personas to support the development of such applications. These lenses and personas informed two prototypes, which we evaluated in driving simulator studies. Our results indicate that the gamified conditions increased driver engagement and reduced driving speeds. As such, our work contributes towards the design of engaging applications that are both appropriate to the safety-critical driving context and compelling to users.", + "link": "https://www.semanticscholar.org/paper/b5e8c1f00292f67219f3bc74c07769a3ce29d028", + "scraped_abstract": null, + "citation_best": 37 + }, + { + "paper": "2611348597", + "venue": "1163450153", + "year": "2017", + "title": "illumination aesthetics light as a creative material within computational design", + "label": [ + "136197465", + "98045186", + "121684516" + ], + "author": [ + "2161195778", + "2489695760", + "2657026555", + "2811401379" + ], + "reference": [ + "566000239", + "1506373916", + "2006457055", + "2018515743", + "2021326179", + "2032373937", + "2039072549", + "2039496801", + "2042719556", + "2070639429", + "2076612622", + "2099908542", + "2105667279", + "2147326417", + "2249198671", + "2347171008", + "2409088501", + "2912046550" + ], + "abstract": "recent digital fabrication tools have enabled new form giving using a wide range of physical materials however light as a first class creative material has been largely ignored within the design of our 
electronic objects our work expands the illumination design space by treating light as a physical material we introduce a digital design tool that simulates and visualizes physical light interactions with a variety of materials for creating custom luminaires we further develop a computational design and fabrication process for creating custom secondary optics elements soes which provides additional handles for users to physically shape and redirect light to compose fill and evenly diffuse planar and volumetric geometries through a workshop study with novice electronic designers we show how incorporating physical techniques to shape light alters how users view the role and function of leds and electronics we produce example pieces that showcase how our approach expands the electronics aesthetic and discuss how viewing light as material can engender novel expressive artifacts", + "title_raw": "Illumination Aesthetics: Light as a Creative Material within Computational Design", + "abstract_raw": "Recent digital fabrication tools have enabled new form-giving using a wide range of physical materials. However, light as a first class creative material has been largely ignored within the design of our electronic objects. Our work expands the illumination design space by treating light as a physical material. We introduce a digital design tool that simulates and visualizes physical light interactions with a variety of materials for creating custom luminaires. We further develop a computational design and fabrication process for creating custom secondary optics elements (SOEs), which provides additional handles for users to physically shape and redirect light to compose, fill, and evenly diffuse planar and volumetric geometries. Through a workshop study with novice electronic designers, we show how incorporating physical techniques to shape light alters how users view the role and function of LEDs and electronics. 
We produce example pieces that showcase how our approach expands the electronics aesthetic and discuss how viewing light as material can engender novel, expressive artifacts.", + "link": "https://www.semanticscholar.org/paper/a818f94643e8aab91ccca8b3efbca159f9dab49d", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2611579020", + "venue": "1163450153", + "year": "2017", + "title": "sharevr enabling co located experiences for virtual reality between hmd and non hmd users", + "label": [ + "153715457", + "134202134", + "107457646", + "49774154", + "194969405", + "35173682", + "206776904" + ], + "author": [ + "2069135899", + "2610957422", + "2007558240", + "74286286" + ], + "reference": [ + "119734730", + "1481981364", + "1586856071", + "1598048033", + "1719697449", + "1802304736", + "1967319761", + "1976520638", + "1995760065", + "2024558911", + "2026948010", + "2041812258", + "2057636109", + "2063821783", + "2082930097", + "2084397606", + "2090206147", + "2114448792", + "2116483339", + "2122591915", + "2128213565", + "2132527746", + "2142834377", + "2145327874", + "2152495088", + "2154073950", + "2155883373", + "2157063405", + "2157171220", + "2157961634", + "2160188431", + "2163397906", + "2166713718", + "2167557160", + "2167648906", + "2169124776", + "2169702589", + "2172146914", + "2207874332", + "2235715729", + "2295756453", + "2399226551", + "2399477691", + "2475969299", + "2532372538", + "2534086317", + "2536605168", + "2538305559", + "2582699884" + ], + "abstract": "virtual reality vr head mounted displays hmd allow for a highly immersive experience and are currently becoming part of the living room entertainment current vr systems focus mainly on increasing the immersion and enjoyment for the user wearing the hmd hmd user resulting in all the bystanders non hmd users being excluded from the experience we propose sharevr a proof of concept prototype using floor projection and mobile displays in combination with positional tracking to 
visualize the virtual world for the non hmd user enabling them to interact with the hmd user and become part of the vr experience we designed and implemented sharevr based on the insights of an initial online survey n 48 with early adopters of vr hmds we ran a user study n 16 comparing sharevrto a baseline condition showing how the interaction using sharevr led to an increase of enjoyment presence and social interaction in a last step we implemented several experiences for sharevr exploring its design space and giving insights for designers of co located asymmetric vr experiences", + "title_raw": "ShareVR: Enabling Co-Located Experiences for Virtual Reality between HMD and Non-HMD Users", + "abstract_raw": "Virtual reality (VR) head-mounted displays (HMD) allow for a highly immersive experience and are currently becoming part of the living room entertainment. Current VR systems focus mainly on increasing the immersion and enjoyment for the user wearing the HMD (HMD user), resulting in all the bystanders (Non-HMD users) being excluded from the experience. We propose ShareVR, a proof-of-concept prototype using floor projection and mobile displays in combination with positional tracking to visualize the virtual world for the Non-HMD user, enabling them to interact with the HMD user and become part of the VR experience. We designed and implemented ShareVR based on the insights of an initial online survey (n=48) with early adopters of VR HMDs. We ran a user study (n=16) comparing ShareVRto a baseline condition showing how the interaction using ShareVR led to an increase of enjoyment, presence and social interaction. 
In a last step we implemented several experiences for ShareVR, exploring its design space and giving insights for designers of co-located asymmetric VR experiences.", + "link": "https://www.semanticscholar.org/paper/4c18b646863d47336a871af57fd8b75a32617116", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2611586694", + "venue": "1163450153", + "year": "2017", + "title": "explaining the gap visualizing one s predictions improves recall and comprehension of data", + "label": [ + "119857082", + "511192102", + "2775924081", + "107457646", + "36464697", + "185578843", + "144430266" + ], + "author": [ + "2653329838", + "2136495853", + "1993670098" + ], + "reference": [ + "169182603", + "1487445742", + "1530426295", + "1963515284", + "1972170794", + "1985235331", + "1989017865", + "1990100773", + "1991082842", + "2012487898", + "2013792988", + "2017586516", + "2023421220", + "2026507183", + "2027525087", + "2036496650", + "2048923076", + "2060374126", + "2061173121", + "2071270727", + "2081525910", + "2084895013", + "2095738647", + "2102995898", + "2104882621", + "2118389537", + "2124371667", + "2124868162", + "2129815685", + "2132904076", + "2140995456", + "2143963523", + "2156645509", + "2158241055", + "2160269795", + "2181630170", + "2182489632", + "2339954278", + "2495926494", + "3010383937", + "3159934092" + ], + "abstract": "information visualizations use interactivity to enable user driven querying of visualized data however users interactions with their internal representations including their expectations about data are also critical for a visualization to support learning we present multiple graphically based techniques for eliciting and incorporating a user s prior knowledge about data into visualization interaction we use controlled experiments to evaluate how graphically eliciting forms of prior knowledge and presenting feedback on the gap between prior knowledge and the observed data impacts a user s ability to recall and understand 
the data we find that participants who are prompted to reflect on their prior knowledge by predicting and self explaining data outperform a control group in recall and comprehension these effects persist when participants have moderate or little prior knowledge on the datasets we discuss how the effects differ based on text versus visual presentations of data we characterize the design space of graphical prediction and feedback techniques and describe design recommendations", + "title_raw": "Explaining the Gap: Visualizing One's Predictions Improves Recall and Comprehension of Data", + "abstract_raw": "Information visualizations use interactivity to enable user-driven querying of visualized data. However, users' interactions with their internal representations, including their expectations about data, are also critical for a visualization to support learning. We present multiple graphically-based techniques for eliciting and incorporating a user's prior knowledge about data into visualization interaction. We use controlled experiments to evaluate how graphically eliciting forms of prior knowledge and presenting feedback on the gap between prior knowledge and the observed data impacts a user's ability to recall and understand the data. We find that participants who are prompted to reflect on their prior knowledge by predicting and self-explaining data outperform a control group in recall and comprehension. These effects persist when participants have moderate or little prior knowledge on the datasets. We discuss how the effects differ based on text versus visual presentations of data. 
We characterize the design space of graphical prediction and feedback techniques and describe design recommendations.", + "link": "https://www.semanticscholar.org/paper/eea27124377394792d42e4b3c573c5356eba01cd", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2611474773", + "venue": "1163450153", + "year": "2017", + "title": "what is interaction", + "label": [ + "107457646" + ], + "author": [ + "162880119", + "1968824034" + ], + "reference": [ + "3292517", + "609276591", + "744307030", + "1492027863", + "1514890265", + "1557496348", + "1604608156", + "1605604909", + "1651701101", + "1666229076", + "1759394199", + "1892018222", + "1895273801", + "1955705676", + "1963984981", + "1969878909", + "1971690354", + "1971781829", + "1974329625", + "1975692152", + "1981258610", + "1985512177", + "1987970830", + "1997447598", + "1998065338", + "1998515403", + "2004166090", + "2005471662", + "2014190538", + "2015956967", + "2017398389", + "2020639717", + "2026645894", + "2027222744", + "2029544898", + "2041277401", + "2049124404", + "2050896993", + "2056926626", + "2056939196", + "2059216172", + "2059635092", + "2062299969", + "2065070540", + "2072296808", + "2075208666", + "2097104268", + "2097248932", + "2098113358", + "2099001635", + "2099794055", + "2101623580", + "2106427454", + "2115647291", + "2122544819", + "2124696116", + "2126512988", + "2133353349", + "2134803429", + "2135029100", + "2146626009", + "2147654878", + "2149315699", + "2158318240", + "2158364723", + "2158390034", + "2160858448", + "2161304134", + "2163509922", + "2164912151", + "2165145573", + "2168443748", + "2179427518", + "2294694821", + "2329821500", + "2346451802", + "2351491323", + "2396575601", + "2469070852", + "2490606569", + "2513705288", + "2515089797", + "2524186836", + "2610685016", + "2611369375", + "2911537016", + "3145637508" + ], + "abstract": "the term interaction is field defining yet surprisingly confused this essay discusses what interaction is we first argue 
that only few attempts to directly define interaction exist nevertheless we extract from the literature distinct and highly developed concepts for instance viewing interaction as dialogue transmission optimal behavior embodiment and tool use importantly these concepts are associated with different scopes and ways of construing the causal relationships between the human and the computer this affects their ability to inform empirical studies and design based on this discussion we list desiderata for future work on interaction emphasizing the need to improve scope and specificity to better account for the effects and agency that computers have in interaction and to generate strong propositions about interaction", + "title_raw": "What Is Interaction", + "abstract_raw": "The term interaction is field-defining, yet surprisingly confused. This essay discusses what interaction is. We first argue that only few attempts to directly define interaction exist. Nevertheless, we extract from the literature distinct and highly developed concepts, for instance viewing interaction as dialogue, transmission, optimal behavior, embodiment, and tool use. Importantly, these concepts are associated with different scopes and ways of construing the causal relationships between the human and the computer. This affects their ability to inform empirical studies and design. 
Based on this discussion, we list desiderata for future work on interaction, emphasizing the need to improve scope and specificity, to better account for the effects and agency that computers have in interaction, and to generate strong propositions about interaction.", + "link": "https://www.semanticscholar.org/paper/1b9e912a20c4ad9632ce3c5a90ab9b6e70c531ff", + "scraped_abstract": null, + "citation_best": 137 + }, + { + "paper": "2610874624", + "venue": "1163450153", + "year": "2017", + "title": "bignav bayesian information gain for guiding multiscale navigation", + "label": [ + "119857082", + "152139883", + "99173435", + "43472768", + "107673813" + ], + "author": [ + "2343375443", + "2211385082", + "2674390412", + "665044766" + ], + "reference": [ + "53214508", + "1488749665", + "1606635698", + "1928061009", + "1970823786", + "1974450273", + "1975684025", + "1995571060", + "1995875735", + "2006070420", + "2013985061", + "2024851719", + "2028770149", + "2036272629", + "2041404167", + "2045282674", + "2059216172", + "2073261288", + "2076009665", + "2076580309", + "2085435394", + "2103497666", + "2112824399", + "2124262094", + "2126730365", + "2141293686", + "2149406594", + "2158685692", + "2160049071", + "2179427518", + "2232034389", + "2258582381", + "2993383518" + ], + "abstract": "this paper introduces bignav a new multiscale navigation technique based on bayesian experimental design where the criterion is to maximize the information theoretic concept of mutual information also known as information gain rather than simply executing user navigation commands bignav interprets user input to update its knowledge about the user s intended target then it navigates to a new view that maximizes the information gain provided by the user s expected subsequent input we conducted a controlled experiment demonstrating that bignav is significantly faster than conventional pan and zoom and requires fewer commands for distant targets especially in non uniform information spaces 
we also applied bignav to a realistic application and showed that users can navigate to highly probable points of interest on a map with only a few steps we then discuss the tradeoffs of bignav including efficiency vs increased cognitive load and its application to other interaction tasks", + "title_raw": "BIGnav: Bayesian Information Gain for Guiding Multiscale Navigation", + "abstract_raw": "This paper introduces BIGnav, a new multiscale navigation technique based on Bayesian Experimental Design where the criterion is to maximize the information-theoretic concept of mutual information, also known as information gain. Rather than simply executing user navigation commands, BIGnav interprets user input to update its knowledge about the user's intended target. Then it navigates to a new view that maximizes the information gain provided by the user's expected subsequent input. We conducted a controlled experiment demonstrating that BIGnav is significantly faster than conventional pan and zoom and requires fewer commands for distant targets, especially in non-uniform information spaces. We also applied BIGnav to a realistic application and showed that users can navigate to highly probable points of interest on a map with only a few steps. We then discuss the tradeoffs of BIGnav--including efficiency vs. 
increased cognitive load--and its application to other interaction tasks.", + "link": "https://www.semanticscholar.org/paper/c805b234ef25ff06ddc250c3652a53e5454716d8", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2611632447", + "venue": "1163450153", + "year": "2017", + "title": "modelling learning of new keyboard layouts", + "label": [ + "119857082", + "158495155", + "2781209916" + ], + "author": [ + "1966787236", + "300811454", + "1968824034", + "280756850", + "2521492292", + "2144164611" + ], + "reference": [ + "94044162", + "114148772", + "160989634", + "1495886451", + "1525083342", + "1550321890", + "1708874574", + "1798703415", + "1867289061", + "1912975988", + "1928882148", + "1951724000", + "1969822014", + "1973437133", + "1974587651", + "1986841213", + "1990242729", + "1991691398", + "1992440621", + "1999555653", + "2000919228", + "2014448832", + "2021898394", + "2030597443", + "2035080477", + "2046781328", + "2047001392", + "2050188448", + "2052570369", + "2060291419", + "2061753879", + "2068573633", + "2074854029", + "2075458572", + "2085781384", + "2088563966", + "2094163178", + "2096707172", + "2102162397", + "2104069532", + "2109168899", + "2109488581", + "2113725822", + "2117160827", + "2123221019", + "2125904902", + "2127519553", + "2128536124", + "2128901702", + "2134549953", + "2139096448", + "2153076044", + "2156058996", + "2160559232", + "2160951757", + "2165645720", + "2397597494", + "2411087957", + "2413358622", + "2786157445", + "3098920173", + "3122947385" + ], + "abstract": "predicting how users learn new or changed interfaces is a long standing objective in hci research this paper contributes to understanding of visual search and learning in text entry with a goal of explaining variance in novices typing performance that is attributable to visual search a model was designed to predict how users learn to locate keys on a keyboard initially relying on visual short term memory but then transitioning to recall based 
search this allows predicting search times and visual search patterns for completely and partially new layouts the model complements models of motor performance and learning in text entry by predicting change in visual search patterns over time practitioners can use it for estimating how long it takes to reach the desired level of performance with a given layout", + "title_raw": "Modelling Learning of New Keyboard Layouts", + "abstract_raw": "Predicting how users learn new or changed interfaces is a long-standing objective in HCI research. This paper contributes to understanding of visual search and learning in text entry. With a goal of explaining variance in novices' typing performance that is attributable to visual search, a model was designed to predict how users learn to locate keys on a keyboard: initially relying on visual short-term memory but then transitioning to recall-based search. This allows predicting search times and visual search patterns for completely and partially new layouts. The model complements models of motor performance and learning in text entry by predicting change in visual search patterns over time. 
Practitioners can use it for estimating how long it takes to reach the desired level of performance with a given layout.", + "link": "https://www.semanticscholar.org/paper/96567f78cb93e573a706b94b60b8429ddae7bdec", + "scraped_abstract": null, + "citation_best": 47 + }, + { + "paper": "2610281177", + "venue": "1163450153", + "year": "2017", + "title": "flash organizations crowdsourcing complex work by structuring crowds as organizations", + "label": [ + "177212765", + "2777852691", + "89187990", + "56739046", + "62230096" + ], + "author": [ + "2498833676", + "2226569051", + "2343034608", + "2120209517", + "1983424165", + "1974803209" + ], + "reference": [ + "607150617", + "783257918", + "1213740548", + "1523117923", + "1554858132", + "1578902217", + "1590411898", + "1600296154", + "1896125358", + "1911845565", + "1929954474", + "1966852018", + "1968326021", + "1976306346", + "1986814607", + "1994734668", + "1996293917", + "2007018772", + "2007851141", + "2009603815", + "2010793828", + "2023664726", + "2028953510", + "2032843080", + "2036625304", + "2042061650", + "2049308095", + "2055699460", + "2056621186", + "2059105030", + "2080317642", + "2083534399", + "2086434545", + "2089628081", + "2096848877", + "2098914049", + "2099358840", + "2108598243", + "2110151287", + "2112021024", + "2112193291", + "2116915306", + "2120396827", + "2120953715", + "2121668868", + "2127008633", + "2127031179", + "2128677857", + "2132445595", + "2133485007", + "2135406940", + "2136240258", + "2137834155", + "2138847321", + "2139864466", + "2140256961", + "2147603330", + "2148479118", + "2150404203", + "2151785234", + "2153656501", + "2155735325", + "2163986367", + "2166145477", + "2168290613", + "2170699450", + "2182782998", + "2276569499", + "2277895402", + "2290652328", + "2293765620", + "2329312544", + "2337430557", + "2396078445", + "2396268257", + "2404427926", + "2406037548", + "2485327215", + "2536952838", + "2539397184", + "2544952174", + "2553944973", + "2911311425", + 
"3121257585", + "3125574497" + ], + "abstract": "this paper introduces flash organizations crowds structured like organizations to achieve complex and open ended goals microtask workflows the dominant crowdsourcing structures today only enable goals that are so simple and modular that their path can be entirely pre defined we present a system that organizes crowd workers into computationally represented structures inspired by those used in organizations roles teams and hierarchies which support emergent and adaptive coordination toward open ended goals our system introduces two technical contributions 1 encoding the crowd s division of labor into de individualized roles much as movie crews or disaster response teams use roles to support coordination between on demand workers who have not worked together before and 2 reconfiguring these structures through a model inspired by version control enabling continuous adaptation of the work and the division of labor we report a deployment in which flash organizations successfully carried out open ended and complex goals previously out of reach for crowdsourcing including product design software development and game production this research demonstrates digitally networked organizations that flexibly assemble and reassemble themselves from a globally distributed online workforce to accomplish complex work", + "title_raw": "Flash Organizations: Crowdsourcing Complex Work by Structuring Crowds As Organizations", + "abstract_raw": "This paper introduces flash organizations: crowds structured like organizations to achieve complex and open-ended goals. Microtask workflows, the dominant crowdsourcing structures today, only enable goals that are so simple and modular that their path can be entirely pre-defined. 
We present a system that organizes crowd workers into computationally-represented structures inspired by those used in organizations - roles, teams, and hierarchies - which support emergent and adaptive coordination toward open-ended goals. Our system introduces two technical contributions: 1) encoding the crowd's division of labor into de-individualized roles, much as movie crews or disaster response teams use roles to support coordination between on-demand workers who have not worked together before; and 2) reconfiguring these structures through a model inspired by version control, enabling continuous adaptation of the work and the division of labor. We report a deployment in which flash organizations successfully carried out open-ended and complex goals previously out of reach for crowdsourcing, including product design, software development, and game production. This research demonstrates digitally networked organizations that flexibly assemble and reassemble themselves from a globally distributed online workforce to accomplish complex work.", + "link": "https://www.semanticscholar.org/paper/873f19f1218931c64bf34c1ef8222bf81c0a1bf0", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2610874523", + "venue": "1163450153", + "year": "2017", + "title": "reflective practicum a framework of sensitising concepts to design for transformative reflection", + "label": [ + "119957404", + "56739046" + ], + "author": [ + "1874413994", + "2645034926", + "2196340233" + ], + "reference": [ + "357505205", + "655213282", + "1210709148", + "1533746441", + "1979024597", + "1980560842", + "1986119901", + "2011495949", + "2013392359", + "2024262300", + "2041333115", + "2043940045", + "2048431936", + "2051493061", + "2051696144", + "2052485731", + "2065191385", + "2073142503", + "2081408547", + "2081446842", + "2086034674", + "2088994900", + "2093262046", + "2128689084", + "2142477234", + "2143512323", + "2144199035", + "2145421863", + "2146882979", + "2153146691", 
+ "2163950204", + "2166626639", + "2167299310", + "2170644967", + "2237209076", + "2294753915", + "2295996570", + "2296463440", + "2307219365", + "2469340992", + "2568476927" + ], + "abstract": "designing for reflection is becoming an increasingly important part of many hci systems in a wide range of application domains however there is a gap in our understanding of how the process of reflection can be supported through technology in fact an implicit assumption in the majority of existing work is that just by providing access to well selected data in depth reflection can and will occur to counter this view we draw on schon s notion of reflective practicum and apply it as a sensitising concept to identify the complex interplay of factors that support transformative reflection in the context of two social emotional learning sel studies the results highlight the need to carefully scaffold the process of reflection rather than simply assume that the capability to reflect is a broadly available trait to be triggered through data building on this analysis we develop a conceptual framework that extends the concept of the reflective practicum towards identifying appropriate roles of technology to support transformative reflection while our case is within the context of sel we argue that a deeper understanding of these opportunities can also benefit designing for reflection in other areas", + "title_raw": "Reflective Practicum: A Framework of Sensitising Concepts to Design for Transformative Reflection", + "abstract_raw": "Designing for reflection is becoming an increasingly important part of many HCI systems in a wide range of application domains. However, there is a gap in our understanding of how the process of reflection can be supported through technology. In fact, an implicit assumption in the majority of existing work is that, just by providing access to well-selected data, in-depth reflection can and will occur. 
To counter this view, we draw on Schon's notion of reflective practicum and apply it as a sensitising concept to identify the complex interplay of factors that support transformative reflection in the context of two social-emotional learning (SEL) studies. The results highlight the need to carefully scaffold the process of reflection, rather than simply assume that the capability to reflect is a broadly available trait to be 'triggered' through data. Building on this analysis, we develop a conceptual framework that extends the concept of the reflective practicum towards identifying appropriate roles of technology to support transformative reflection. While our case is within the context of SEL, we argue that a deeper understanding of these opportunities can also benefit designing for reflection in other areas.", + "link": "https://www.semanticscholar.org/paper/d2597fc897ea36740c899da73237e2b69ea71079", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2610929834", + "venue": "1163450153", + "year": "2017", + "title": "fingertip tactile devices for virtual object manipulation and exploration", + "label": [ + "171268870", + "2780575108", + "64729616", + "51970089", + "194969405", + "31972630", + "152086174" + ], + "author": [ + "2118224854", + "1217925295" + ], + "reference": [ + "1526820983", + "1535896842", + "1561772885", + "1922760316", + "1969090956", + "1984736745", + "2008169038", + "2014446122", + "2030898082", + "2075583668", + "2092511662", + "2102585815", + "2108498278", + "2114987226", + "2116189780", + "2123917150", + "2142098163", + "2154225652", + "2161314913", + "2162604092", + "2182767761", + "2241992751", + "2337591881", + "2345768504", + "2397519240", + "2409458253", + "2412923296", + "2890169396" + ], + "abstract": "one of the main barriers to immersivity during object manipulation in virtual reality is the lack of realistic haptic feedback our goal is to convey compelling interactions with virtual objects such as grasping 
squeezing pressing lifting and stroking without requiring a bulky world grounded kinesthetic feedback device traditional haptics or the use of predetermined passive objects haptic retargeting to achieve this we use a pair of finger mounted haptic feedback devices that deform the skin on the fingertips to convey cutaneous force information from object manipulation we show that users can perceive differences in virtual object weight and that they apply increasing grasp forces when lifting virtual objects as rendered mass is increased moreover we show how naive users perceive changes of a virtual object s physical properties when we use skin deformation to render objects with varying mass friction and stiffness these studies demonstrate that fingertip skin deformation devices can provide a compelling haptic experience appropriate for virtual reality scenarios involving object manipulation", + "title_raw": "Fingertip Tactile Devices for Virtual Object Manipulation and Exploration", + "abstract_raw": "One of the main barriers to immersivity during object manipulation in virtual reality is the lack of realistic haptic feedback. Our goal is to convey compelling interactions with virtual objects, such as grasping, squeezing, pressing, lifting, and stroking, without requiring a bulky, world-grounded kinesthetic feedback device (traditional haptics) or the use of predetermined passive objects (haptic retargeting). To achieve this, we use a pair of finger-mounted haptic feedback devices that deform the skin on the fingertips to convey cutaneous force information from object manipulation. We show that users can perceive differences in virtual object weight and that they apply increasing grasp forces when lifting virtual objects as rendered mass is increased. Moreover, we show how naive users perceive changes of a virtual object's physical properties when we use skin deformation to render objects with varying mass, friction, and stiffness. 
These studies demonstrate that fingertip skin deformation devices can provide a compelling haptic experience appropriate for virtual reality scenarios involving object manipulation.", + "link": "https://scholar.google.com/scholar?as_q=&num=10&btnG=Search+Scholar&as_epq=Fingertip+Tactile+Devices+for+Virtual+Object+Manipulation+and+Exploration&as_oq=&as_eq=&as_occt=any&as_sauthors=Schorr", + "scraped_abstract": null, + "citation_best": 134 + }, + { + "paper": "2558581619", + "venue": "1163450153", + "year": "2017", + "title": "what can be predicted from six seconds of driver glances", + "label": [ + "119857082", + "2779916870", + "31972630", + "136389625", + "23224414" + ], + "author": [ + "2107690011", + "2481437860", + "2131447314", + "2634837644", + "2020429386", + "2147285394", + "2918489498", + "2025090343" + ], + "reference": [ + "14563246", + "116278252", + "173206510", + "207182081", + "262571353", + "638298592", + "1510036995", + "1579568887", + "1726694533", + "1982629367", + "2029772767", + "2041775463", + "2060555212", + "2064044806", + "2076063813", + "2076618452", + "2078949139", + "2092728879", + "2093449404", + "2095589680", + "2106220766", + "2117190240", + "2131076267", + "2132991226", + "2133658999", + "2146948159", + "2148143831", + "2150463764", + "2160936727", + "2165065922", + "2387582548", + "2498730194", + "2537200477", + "2588730826", + "2963905015", + "3162874039" + ], + "abstract": "we consider a large dataset of real world on road driving from a 100 car naturalistic study to explore the predictive power of driver glances and specifically to answer the following question what can be predicted about the state of the driver and the state of the driving environment from a 6 second sequence of macro glances the context based nature of such glances allows for application of supervised learning to the problem of vision based gaze estimation making it robust accurate and reliable in messy real world conditions so it s valuable to ask whether such 
macro glances can be used to infer behavioral environmental and demographic variables we analyze 27 binary classification problems based on these variables the takeaway is that glance can be used as part of a multi sensor real time system to predict radio tuning fatigue state failure to signal talking and several environment variables", + "title_raw": "What Can Be Predicted from Six Seconds of Driver Glances", + "abstract_raw": "We consider a large dataset of real-world, on-road driving from a 100-car naturalistic study to explore the predictive power of driver glances and, specifically, to answer the following question: what can be predicted about the state of the driver and the state of the driving environment from a 6-second sequence of macro-glances? The context-based nature of such glances allows for application of supervised learning to the problem of vision-based gaze estimation, making it robust, accurate, and reliable in messy, real-world conditions. So, it's valuable to ask whether such macro-glances can be used to infer behavioral, environmental, and demographic variables? We analyze 27 binary classification problems based on these variables. 
The takeaway is that glance can be used as part of a multi-sensor real-time system to predict radio-tuning, fatigue state, failure to signal, talking, and several environment variables.", + "link": "https://www.semanticscholar.org/paper/8af646de6b4c1ec70b78685a5a3ed3205b3b6985", + "scraped_abstract": null, + "citation_best": 47 + }, + { + "paper": "2610414453", + "venue": "1163450153", + "year": "2017", + "title": "design and evaluation of a data driven password meter", + "label": [ + "38652104", + "109297577", + "107457646", + "23875713", + "70530487", + "2780440489" + ], + "author": [ + "2077543365", + "2621089372", + "2621370536", + "2151574249", + "2167982539", + "2276557979", + "368046975", + "2623128729", + "2620676984", + "2399278320", + "2676155987", + "2230208907" + ], + "reference": [ + "143386018", + "150647875", + "167157979", + "178850302", + "1463944966", + "1466389411", + "1526940914", + "1534968492", + "1554853440", + "1572182570", + "1931604409", + "1955645522", + "1971881814", + "1980235022", + "2019578814", + "2023306951", + "2025448348", + "2045591401", + "2048584594", + "2048755632", + "2050296478", + "2073342447", + "2089884450", + "2100142573", + "2109744978", + "2110065044", + "2111397260", + "2113266120", + "2121386924", + "2133824719", + "2134080857", + "2146270836", + "2150341374", + "2157151879", + "2163006719", + "2167841977", + "2171920515", + "2185032576", + "2394619600", + "2463456957", + "2488850733", + "2623402483" + ], + "abstract": "despite their ubiquity many password meters provide inaccurate strength estimates furthermore they do not explain to users what is wrong with their password or how to improve it we describe the development and evaluation of a data driven password meter that provides accurate strength measurement and actionable detailed feedback to users this meter combines neural networks and numerous carefully combined heuristics to score passwords and generate data driven text feedback about the user s password we 
describe the meter s iterative development and final design we detail the security and usability impact of the meter s design dimensions examined through a 4 509 participant online study under the more common password composition policy we tested we found that the data driven meter with detailed feedback led users to create more secure and no less memorable passwords than a meter with only a bar as a strength indicator", + "title_raw": "Design and Evaluation of a Data-Driven Password Meter", + "abstract_raw": "Despite their ubiquity, many password meters provide inaccurate strength estimates. Furthermore, they do not explain to users what is wrong with their password or how to improve it. We describe the development and evaluation of a data-driven password meter that provides accurate strength measurement and actionable, detailed feedback to users. This meter combines neural networks and numerous carefully combined heuristics to score passwords and generate data-driven text feedback about the user's password. We describe the meter's iterative development and final design. We detail the security and usability impact of the meter's design dimensions, examined through a 4,509-participant online study. 
Under the more common password-composition policy we tested, we found that the data-driven meter with detailed feedback led users to create more secure, and no less memorable, passwords than a meter with only a bar as a strength indicator.", + "link": "https://www.semanticscholar.org/paper/747b322690c8c3408304bdaf05baca02ec4e141f", + "scraped_abstract": null, + "citation_best": 118 + }, + { + "paper": "2963446712", + "venue": "1158167855", + "year": "2017", + "title": "densely connected convolutional networks", + "label": [ + "50644808", + "167955471", + "154945302", + "40608802", + "2778149865", + "17777890", + "137955351", + "173608175", + "193415008", + "157899210" + ], + "author": [ + "2114281204", + "2566736780", + "731054299", + "2003907699" + ], + "reference": [ + "104184427", + "830076066", + "1026270304", + "1677182931", + "1836465849", + "1903029394", + "1948751323", + "2095705004", + "2097117768", + "2102013737", + "2108598243", + "2109779438", + "2112796928", + "2117539524", + "2147800946", + "2156387975", + "2162741153", + "2163605009", + "2173905264", + "2174554700", + "2183341477", + "2194775991", + "2294059674", + "2302255633", + "2331143823", + "2333796428", + "2335728318", + "2401231614", + "2406474429", + "2408279554", + "2962694510", + "2962820688", + "2963382180", + "2963606038", + "2963911037", + "2964091842", + "2964118293", + "3020975476", + "3118608800" + ], + "abstract": "recent work has shown that convolutional networks can be substantially deeper more accurate and efficient to train if they contain shorter connections between layers close to the input and those close to the output in this paper we embrace this observation and introduce the dense convolutional network densenet which connects each layer to every other layer in a feed forward fashion whereas traditional convolutional networks with l layers have l connections x2014 one between each layer and its subsequent layer x2014 our network has l l 1 2 direct connections for each layer 
the feature maps of all preceding layers are used as inputs and its own feature maps are used as inputs into all subsequent layers densenets have several compelling advantages they alleviate the vanishing gradient problem strengthen feature propagation encourage feature reuse and substantially reduce the number of parameters we evaluate our proposed architecture on four highly competitive object recognition benchmark tasks cifar 10 cifar 100 svhn and imagenet densenets obtain significant improvements over the state of the art on most of them whilst requiring less memory and computation to achieve high performance code and pre trained models are available at https github com liuzhuang13 densenet", + "title_raw": "Densely Connected Convolutional Networks", + "abstract_raw": "Recent work has shown that convolutional networks can be substantially deeper, more accurate, and efficient to train if they contain shorter connections between layers close to the input and those close to the output. In this paper, we embrace this observation and introduce the Dense Convolutional Network (DenseNet), which connects each layer to every other layer in a feed-forward fashion. Whereas traditional convolutional networks with L layers have L connections—one between each layer and its subsequent layer—our network has L(L+1)/2 direct connections. For each layer, the feature-maps of all preceding layers are used as inputs, and its own feature-maps are used as inputs into all subsequent layers. DenseNets have several compelling advantages: they alleviate the vanishing-gradient problem, strengthen feature propagation, encourage feature reuse, and substantially reduce the number of parameters. We evaluate our proposed architecture on four highly competitive object recognition benchmark tasks (CIFAR-10, CIFAR-100, SVHN, and ImageNet). DenseNets obtain significant improvements over the state-of-the-art on most of them, whilst requiring less memory and computation to achieve high performance. 
Code and pre-trained models are available at https://github.com/liuzhuang13/DenseNet.", + "link": "https://www.semanticscholar.org/paper/5694e46284460a648fe29117cbc55f6c9be3fa3c", + "scraped_abstract": null, + "citation_best": 1167 + }, + { + "paper": "2963709863", + "venue": "1158167855", + "year": "2017", + "title": "learning from simulated and unsupervised images through adversarial training", + "label": [ + "176258234", + "50644808", + "154945302", + "178980831", + "8038995", + "200632571", + "52102323", + "67186912" + ], + "author": [ + "2183241032", + "2798413642", + "1967568329", + "669260807", + "2565739635", + "3091056286" + ], + "reference": [ + "1565402342", + "1861492603", + "1922126009", + "1950149599", + "1995694455", + "2001252859", + "2027879843", + "2036196300", + "2073246097", + "2075156252", + "2077532029", + "2087806427", + "2099471712", + "2101032778", + "2108598243", + "2123576187", + "2134557905", + "2152926413", + "2172248380", + "2175711684", + "2194775991", + "2214145768", + "2283234189", + "2298992465", + "2299591120", + "2302840847", + "2307770531", + "2339754110", + "2343052201", + "2397830550", + "2431874326", + "2475287302", + "2519536754", + "2523469089", + "2524365899", + "2949933669", + "2953318193", + "2962690307", + "2962694510", + "2962759496", + "2963194029", + "2963226019", + "2963373786", + "2963422987", + "2963784072", + "2963826402", + "2963826681", + "2964284374" + ], + "abstract": "with recent progress in graphics it has become more tractable to train models on synthetic images potentially avoiding the need for expensive annotations however learning from synthetic images may not achieve the desired performance due to a gap between synthetic and real image distributions to reduce this gap we propose simulated unsupervised s u learning where the task is to learn a model to improve the realism of a simulators output using unlabeled real data while preserving the annotation information from the simulator we develop a method 
for s u learning that uses an adversarial network similar to generative adversarial networks gans but with synthetic images as inputs instead of random vectors we make several key modifications to the standard gan algorithm to preserve annotations avoid artifacts and stabilize training i a self regularization term ii a local adversarial loss and iii updating the discriminator using a history of refined images we show that this enables generation of highly realistic images which we demonstrate both qualitatively and with a user study we quantitatively evaluate the generated images by training models for gaze estimation and hand pose estimation we show a significant improvement over using synthetic images and achieve state of the art results on the mpiigaze dataset without any labeled real data", + "title_raw": "Learning from Simulated and Unsupervised Images through Adversarial Training", + "abstract_raw": "With recent progress in graphics, it has become more tractable to train models on synthetic images, potentially avoiding the need for expensive annotations. However, learning from synthetic images may not achieve the desired performance due to a gap between synthetic and real image distributions. To reduce this gap, we propose Simulated+Unsupervised (S+U) learning, where the task is to learn a model to improve the realism of a simulators output using unlabeled real data, while preserving the annotation information from the simulator. We develop a method for S+U learning that uses an adversarial network similar to Generative Adversarial Networks (GANs), but with synthetic images as inputs instead of random vectors. We make several key modifications to the standard GAN algorithm to preserve annotations, avoid artifacts, and stabilize training: (i) a self-regularization term, (ii) a local adversarial loss, and (iii) updating the discriminator using a history of refined images. 
We show that this enables generation of highly realistic images, which we demonstrate both qualitatively and with a user study. We quantitatively evaluate the generated images by training models for gaze estimation and hand pose estimation. We show a significant improvement over using synthetic images, and achieve state-of-the-art results on the MPIIGaze dataset without any labeled real data.", + "link": "https://www.semanticscholar.org/paper/68cb9fce1e6af2740377494350b650533c9a29e1", + "scraped_abstract": null, + "citation_best": 99 + }, + { + "paper": "2741400146", + "venue": "1199533187", + "year": "2017", + "title": "the power of why and why not enriching scenario exploration with provenance", + "label": [ + "193237570", + "2522767166", + "2778029271", + "124101348", + "75606506", + "145644426", + "34628019", + "136197465" + ], + "author": [ + "2141152896", + "2740331263", + "2105290673", + "2133340784" + ], + "reference": [ + "72463235", + "367605388", + "1461485317", + "1465565820", + "1483744477", + "1509180139", + "1519503479", + "1527892282", + "1538801056", + "1568222232", + "1583654333", + "1586993363", + "1595209293", + "1667858065", + "1976371754", + "1976597924", + "2002530879", + "2005129294", + "2020169768", + "2041807992", + "2049542183", + "2050232116", + "2056872793", + "2061081083", + "2065086898", + "2099204735", + "2110712507", + "2112447468", + "2116411029", + "2118428193", + "2130089820", + "2132055325", + "2157922094", + "2158049821", + "2166007208", + "2168617729", + "2294333398", + "2744460230", + "3106203295", + "3124106557", + "3143219118" + ], + "abstract": "scenario finding tools like the alloy analyzer are widely used in numerous concrete domains like security network analysis uml analysis and so on they can help to verify properties and more generally aid in exploring a system s behavior while scenario finders are valuable for their ability to produce concrete examples individual scenarios only give insight into what is possible 
leaving the user to make their own conclusions about what might be necessary this paper enriches scenario finding by allowing users to ask why and why not questions about the examples they are given we show how to distinguish parts of an example that cannot be consistently removed or changed from those that merely reflect underconstraint in the specification in the former case we show how to determine which elements of the specification and which other components of the example together explain the presence of such facts this paper formalizes the act of computing provenance in scenario finding we present amalgam an extension of the popular alloy scenario finder which implements these foundations and provides interactive exploration of examples we also evaluate amalgam s algorithmics on a variety of both textbook and real world examples", + "title_raw": "The power of \"why\" and \"why not\": enriching scenario exploration with provenance", + "abstract_raw": "Scenario-finding tools like the Alloy Analyzer are widely used in numerous concrete domains like security, network analysis, UML analysis, and so on. They can help to verify properties and, more generally, aid in exploring a system's behavior. While scenario finders are valuable for their ability to produce concrete examples, individual scenarios only give insight into what is possible, leaving the user to make their own conclusions about what might be necessary. This paper enriches scenario finding by allowing users to ask ``why?'' and ``why not?'' questions about the examples they are given. We show how to distinguish parts of an example that cannot be consistently removed (or changed) from those that merely reflect underconstraint in the specification. In the former case we show how to determine which elements of the specification and which other components of the example together explain the presence of such facts. This paper formalizes the act of computing provenance in scenario-finding. 
We present Amalgam, an extension of the popular Alloy scenario-finder, which implements these foundations and provides interactive exploration of examples. We also evaluate Amalgam's algorithmics on a variety of both textbook and real-world examples.", + "link": "https://scholar.google.com/scholar?as_q=&num=10&btnG=Search+Scholar&as_epq=The+Power+of+'Why'+and+'Why+Not':+Enriching+Scenario+Exploration+with+Provenance&as_oq=&as_eq=&as_occt=any&as_sauthors=Nelson", + "scraped_abstract": null, + "citation_best": 33 + }, + { + "paper": "2741643261", + "venue": "1199533187", + "year": "2017", + "title": "understanding misunderstandings in source code", + "label": [ + "167955471", + "133237599", + "2778514511", + "199360897", + "150292731", + "2777904410", + "43126263" + ], + "author": [ + "2740523567", + "2741405782", + "2561906187", + "2740943098", + "2223056129", + "2562453348", + "2006892651" + ], + "reference": [ + "8486145", + "16534291", + "111098712", + "116026253", + "758407518", + "1505648523", + "1567046609", + "1718727263", + "1960605509", + "1964962870", + "1975599245", + "1981108270", + "1994718921", + "2001033929", + "2010837018", + "2014232144", + "2074562477", + "2078235355", + "2084413241", + "2090496119", + "2091990486", + "2113533445", + "2117049795", + "2117065827", + "2151315308", + "2152759860", + "2153887189", + "2170152929", + "2170183092", + "2179427518", + "2255462406" + ], + "abstract": "humans often mistake the meaning of source code and so misjudge a program s true behavior these mistakes can be caused by extremely small isolated patterns in code which can lead to significant runtime errors these patterns are used in large popular software projects and even recommended in style guides to identify code patterns that may confuse programmers we extracted a preliminary set of atoms of confusion from known confusing code we show empirically in an experiment with 73 participants that these code patterns can lead to a significantly increased rate of 
misunderstanding versus equivalent code without the patterns we then go on to take larger confusing programs and measure in an experiment with 43 participants the impact in terms of programmer confusion of removing these confusing patterns all of our instruments analysis code and data are publicly available online for replication experimentation and feedback", + "title_raw": "Understanding misunderstandings in source code", + "abstract_raw": "Humans often mistake the meaning of source code, and so misjudge a program's true behavior. These mistakes can be caused by extremely small, isolated patterns in code, which can lead to significant runtime errors. These patterns are used in large, popular software projects and even recommended in style guides. To identify code patterns that may confuse programmers we extracted a preliminary set of `atoms of confusion' from known confusing code. We show empirically in an experiment with 73 participants that these code patterns can lead to a significantly increased rate of misunderstanding versus equivalent code without the patterns. We then go on to take larger confusing programs and measure (in an experiment with 43 participants) the impact, in terms of programmer confusion, of removing these confusing patterns. 
All of our instruments, analysis code, and data are publicly available online for replication, experimentation, and feedback.", + "link": "https://www.semanticscholar.org/paper/e7231f1b144537a069b570e82c09bcc8e1dbb7f2", + "scraped_abstract": null, + "citation_best": 55 + }, + { + "paper": "2739010619", + "venue": "1199533187", + "year": "2017", + "title": "cooperative kernels gpu multitasking for blocking algorithms", + "label": [ + "154556556", + "113200698", + "169590947", + "106251023", + "205711294", + "173608175", + "11413529", + "50630238", + "3270621" + ], + "author": [ + "2558643503", + "2724715846", + "2157244358" + ], + "reference": [ + "1504291959", + "1968226734", + "1972090741", + "1983572666", + "1997162567", + "2007000019", + "2016706026", + "2021211271", + "2039997255", + "2041470524", + "2097643185", + "2107173440", + "2116721352", + "2117689653", + "2122711334", + "2125551452", + "2132093657", + "2134427337", + "2157103506", + "2299880715", + "2499962999", + "2507021752", + "2523378841", + "2535050116", + "2730205922" + ], + "abstract": "there is growing interest in accelerating irregular data parallel algorithms on gpus these algorithms are typically blocking so they require fair scheduling but gpu programming models e g opencl do not mandate fair scheduling and gpu schedulers are unfair in practice current approaches avoid this issue by exploiting scheduling quirks of today s gpus in a manner that does not allow the gpu to be shared with other workloads such as graphics rendering tasks we propose cooperative kernels an extension to the traditional gpu programming model geared towards writing blocking algorithms workgroups of a cooperative kernel are fairly scheduled and multitasking is supported via a small set of language extensions through which the kernel and scheduler cooperate we describe a prototype implementation of a cooperative kernel framework implemented in opencl 2 0 and evaluate our approach by porting a set of blocking gpu 
applications to cooperative kernels and examining their performance under multitasking our prototype exploits no vendor specific hardware driver or compiler support thus our results provide a lower bound on the efficiency with which cooperative kernels can be implemented in practice", + "title_raw": "Cooperative kernels: GPU multitasking for blocking algorithms", + "abstract_raw": "There is growing interest in accelerating irregular data-parallel algorithms on GPUs. These algorithms are typically blocking, so they require fair scheduling. But GPU programming models (e.g. OpenCL) do not mandate fair scheduling, and GPU schedulers are unfair in practice. Current approaches avoid this issue by exploiting scheduling quirks of today's GPUs in a manner that does not allow the GPU to be shared with other workloads (such as graphics rendering tasks). We propose cooperative kernels, an extension to the traditional GPU programming model geared towards writing blocking algorithms. Workgroups of a cooperative kernel are fairly scheduled, and multitasking is supported via a small set of language extensions through which the kernel and scheduler cooperate. We describe a prototype implementation of a cooperative kernel framework implemented in OpenCL 2.0 and evaluate our approach by porting a set of blocking GPU applications to cooperative kernels and examining their performance under multitasking. 
Our prototype exploits no vendor-specific hardware, driver or compiler support, thus our results provide a lower-bound on the efficiency with which cooperative kernels can be implemented in practice.", + "link": "https://www.semanticscholar.org/paper/50f6d38ca7e1a783d69806d988e755f7755a9056", + "scraped_abstract": null, + "citation_best": 8 + }, + { + "paper": "2730550703", + "venue": "1199533187", + "year": "2017", + "title": "fairness testing testing software for discrimination", + "label": [ + "124101348", + "180152950", + "149091818", + "2777904410", + "55166926" + ], + "author": [ + "1966014744", + "2108853658", + "220118967" + ], + "reference": [ + "123201289", + "234127567", + "333550619", + "1453222892", + "1483671890", + "1548806133", + "1562570268", + "1648303880", + "1816782878", + "1845288303", + "1961345416", + "1965194038", + "1979769549", + "1983306745", + "1983981400", + "1988368118", + "1993760289", + "1994866276", + "1999695874", + "2000809552", + "2003240725", + "2004248182", + "2010648791", + "2026947155", + "2028198237", + "2032536435", + "2041713059", + "2047694629", + "2054309922", + "2064877992", + "2069920427", + "2070544288", + "2082274331", + "2094819373", + "2095932468", + "2096713153", + "2097246321", + "2100775847", + "2100960835", + "2103275664", + "2103352742", + "2107709519", + "2109522846", + "2109998301", + "2110311336", + "2113004249", + "2116666691", + "2118024521", + "2119573243", + "2121650870", + "2123042765", + "2124026371", + "2136363516", + "2138428785", + "2143117649", + "2145212015", + "2149252982", + "2150997454", + "2157928966", + "2162670686", + "2170386256", + "2172260321", + "2185005785", + "2295073825", + "2394811972", + "2396394641", + "2399002042", + "2399352817", + "2400239131", + "2406163591", + "2430439109", + "2471601946", + "2521663729", + "2563486500", + "2590567727", + "2952517774", + "2964116855", + "2997591727" + ], + "abstract": "this paper defines software fairness and discrimination and develops a 
testing based method for measuring if and how much software discriminates focusing on causality in discriminatory behavior evidence of software discrimination has been found in modern software systems that recommend criminal sentences grant access to financial products and determine who is allowed to participate in promotions our approach themis generates efficient test suites to measure discrimination given a schema describing valid system inputs themis generates discrimination tests automatically and does not require an oracle we evaluate themis on 20 software systems 12 of which come from prior work with explicit focus on avoiding discrimination we find that 1 themis is effective at discovering software discrimination 2 state of the art techniques for removing discrimination from algorithms fail in many situations at times discriminating against as much as 98 of an input subdomain 3 themis optimizations are effective at producing efficient test suites for measuring discrimination and 4 themis is more efficient on systems that exhibit more discrimination we thus demonstrate that fairness testing is a critical aspect of the software development cycle in domains with possible discrimination and provide initial tools for measuring software discrimination", + "title_raw": "Fairness testing: testing software for discrimination", + "abstract_raw": "This paper defines software fairness and discrimination and develops a testing-based method for measuring if and how much software discriminates, focusing on causality in discriminatory behavior. Evidence of software discrimination has been found in modern software systems that recommend criminal sentences, grant access to financial products, and determine who is allowed to participate in promotions. Our approach, Themis, generates efficient test suites to measure discrimination. Given a schema describing valid system inputs, Themis generates discrimination tests automatically and does not require an oracle. 
We evaluate Themis on 20 software systems, 12 of which come from prior work with explicit focus on avoiding discrimination. We find that (1) Themis is effective at discovering software discrimination, (2) state-of-the-art techniques for removing discrimination from algorithms fail in many situations, at times discriminating against as much as 98% of an input subdomain, (3) Themis optimizations are effective at producing efficient test suites for measuring discrimination, and (4) Themis is more efficient on systems that exhibit more discrimination. We thus demonstrate that fairness testing is a critical aspect of the software development cycle in domains with possible discrimination and provide initial tools for measuring software discrimination.", + "link": "https://www.semanticscholar.org/paper/6a82cdf44e14474559ed951d601f9bf92a9c3b75", + "scraped_abstract": null, + "citation_best": 343 + }, + { + "paper": "2741221537", + "venue": "1199533187", + "year": "2017", + "title": "discovering relational specifications", + "label": [ + "98183937", + "148230440", + "199360897", + "2776937632", + "116253237" + ], + "author": [ + "2491332468", + "2739663864", + "1272946479" + ], + "reference": [ + "1268528572", + "1499053674", + "1531717019", + "1553352239", + "1595443289", + "1966767099", + "1999138184", + "2107690445", + "2110908283", + "2119467398", + "2119831128", + "2121818394", + "2131954495", + "2134770942", + "2150262694", + "2158391928", + "2165575313", + "2167187192", + "2294152467", + "2295399529", + "2416392025", + "2418260908", + "2493901872", + "3007267899", + "3139990154" + ], + "abstract": "formal specifications of library functions play a critical role in a number of program analysis and development tasks we present bach a technique for discovering likely relational specifications from data describing input output behavior of a set of functions comprising a library or a program relational specifications correlate different executions of different functions 
for instance commutativity transitivity equivalence of two functions etc bach combines novel insights from program synthesis and databases to discover a rich array of specifications we apply bach to learn specifications from data generated for a number of standard libraries our experimental evaluation demonstrates bach s ability to learn useful and deep specifications in a small amount of time", + "title_raw": "Discovering relational specifications", + "abstract_raw": "Formal specifications of library functions play a critical role in a number of program analysis and development tasks. We present Bach, a technique for discovering likely relational specifications from data describing input-output behavior of a set of functions comprising a library or a program. Relational specifications correlate different executions of different functions; for instance, commutativity, transitivity, equivalence of two functions, etc. Bach combines novel insights from program synthesis and databases to discover a rich array of specifications. We apply Bach to learn specifications from data generated for a number of standard libraries. 
Our experimental evaluation demonstrates Bach's ability to learn useful and deep specifications in a small amount of time.", + "link": "https://www.semanticscholar.org/paper/a6910db8b398fb178fbc577114105658efa4bc54", + "scraped_abstract": null, + "citation_best": 12 + }, + { + "paper": "2741422284", + "venue": "1199533187", + "year": "2017", + "title": "automatically diagnosing and repairing error handling bugs in c", + "label": [ + "119857082", + "123614077", + "124101348", + "2909795244", + "91587340" + ], + "author": [ + "2740763830", + "2166284654" + ], + "reference": [ + "1545684573", + "1971772683", + "2009526138", + "2018912833", + "2026675907", + "2027256755", + "2038081023", + "2043811931", + "2048954027", + "2054520963", + "2076719273", + "2101723202", + "2103240721", + "2104220745", + "2129262850", + "2140609933", + "2141670850", + "2145124323", + "2147506993", + "2152565783", + "2155061608", + "2484116259", + "2509822438", + "3004040842", + "3023843306" + ], + "abstract": "correct error handling is essential for building reliable and secure systems unfortunately low level languages like c often do not support any error handling primitives and leave it up to the developers to create their own mechanisms for error propagation and handling however in practice the developers often make mistakes while writing the repetitive and tedious error handling code and inadvertently introduce bugs such error handling bugs often have severe consequences undermining the security and reliability of the affected systems fixing these bugs is also tiring they are repetitive and cumbersome to implement therefore it is crucial to develop tool supports for automatically detecting and fixing error handling bugs to understand the nature of error handling bugs that occur in widely used c programs we conduct a comprehensive study of real world error handling bugs and their fixes leveraging the knowledge we then design implement and evaluate errdoc a tool that not only detects and 
characterizes different types of error handling bugs but also automatically fixes them our evaluation on five open source projects shows that errdoc can detect error handling bugs with 100 to 84 precision and around 95 recall and categorize them with 83 to 96 precision and above 90 recall thus errdoc improves precision up to 5 percentage points and recall up to 44 percentage points w r t the state of the art we also demonstrate that errdoc can fix the bugs with high accuracy", + "title_raw": "Automatically diagnosing and repairing error handling bugs in C", + "abstract_raw": "Correct error handling is essential for building reliable and secure systems. Unfortunately, low-level languages like C often do not support any error handling primitives and leave it up to the developers to create their own mechanisms for error propagation and handling. However, in practice, the developers often make mistakes while writing the repetitive and tedious error handling code and inadvertently introduce bugs. Such error handling bugs often have severe consequences undermining the security and reliability of the affected systems. Fixing these bugs is also tiring-they are repetitive and cumbersome to implement. Therefore, it is crucial to develop tool supports for automatically detecting and fixing error handling bugs. To understand the nature of error handling bugs that occur in widely used C programs, we conduct a comprehensive study of real world error handling bugs and their fixes. Leveraging the knowledge, we then design, implement, and evaluate ErrDoc, a tool that not only detects and characterizes different types of error handling bugs but also automatically fixes them. Our evaluation on five open-source projects shows that ErrDoc can detect error handling bugs with 100% to 84% precision and around 95% recall, and categorize them with 83% to 96% precision and above 90% recall. 
Thus, ErrDoc improves precision up to 5 percentage points, and recall up to 44 percentage points w.r.t. the state-of-the-art. We also demonstrate that ErrDoc can fix the bugs with high accuracy.", + "link": "https://www.semanticscholar.org/paper/cce926d9a3a1b0091459478c12acfde7baf03cd1", + "scraped_abstract": null, + "citation_best": 56 + }, + { + "paper": "2599765304", + "venue": "1158167855", + "year": "2017", + "title": "mask r cnn", + "label": [ + "100850083", + "167955471", + "115961682", + "64729616", + "89600930", + "2776151529", + "31972630", + "147037132", + "2777884278" + ], + "author": [ + "2164292938", + "102740216", + "1944499404", + "2473549963" + ], + "reference": [ + "603908379", + "1861492603", + "1948751323", + "1958328135", + "1991367009", + "2080873731", + "2088049833", + "2147800946", + "2255781698", + "2274287116", + "2407521645", + "2549139847", + "2555182955", + "2564566551", + "2572745118", + "2608858501", + "2734663976", + "2770201307", + "2777795072", + "2949295283", + "2949359214", + "2949533892", + "2949556967", + "2949650786", + "2949883907", + "2950139038", + "2951120635", + "2951548327", + "2951638509", + "2951856387", + "2952632681", + "2953106684", + "2953139137", + "2953236957", + "2953390309", + "2963785012", + "3098722327", + "3106221349" + ], + "abstract": "we present a conceptually simple flexible and general framework for object instance segmentation our approach efficiently detects objects in an image while simultaneously generating a high quality segmentation mask for each instance the method called mask r cnn extends faster r cnn by adding a branch for predicting an object mask in parallel with the existing branch for bounding box recognition mask r cnn is simple to train and adds only a small overhead to faster r cnn running at 5 fps moreover mask r cnn is easy to generalize to other tasks e g allowing us to estimate human poses in the same framework we show top results in all three tracks of the coco suite of challenges 
including instance segmentation bounding box object detection and person keypoint detection without bells and whistles mask r cnn outperforms all existing single model entries on every task including the coco 2016 challenge winners we hope our simple and effective approach will serve as a solid baseline and help ease future research in instance level recognition code has been made available at this https url", + "title_raw": "Mask R-CNN", + "abstract_raw": "We present a conceptually simple, flexible, and general framework for object instance segmentation. Our approach efficiently detects objects in an image while simultaneously generating a high-quality segmentation mask for each instance. The method, called Mask R-CNN, extends Faster R-CNN by adding a branch for predicting an object mask in parallel with the existing branch for bounding box recognition. Mask R-CNN is simple to train and adds only a small overhead to Faster R-CNN, running at 5 fps. Moreover, Mask R-CNN is easy to generalize to other tasks, e.g., allowing us to estimate human poses in the same framework. We show top results in all three tracks of the COCO suite of challenges, including instance segmentation, bounding-box object detection, and person keypoint detection. Without bells and whistles, Mask R-CNN outperforms all existing, single-model entries on every task, including the COCO 2016 challenge winners. We hope our simple and effective approach will serve as a solid baseline and help ease future research in instance-level recognition. 
Code has been made available at: this https URL", + "link": "https://www.semanticscholar.org/paper/ea99a5535388196d0d44be5b4d7dd02029a43bb2", + "scraped_abstract": null, + "citation_best": 1581 + }, + { + "paper": "2597603852", + "venue": "1180662882", + "year": "2017", + "title": "understanding black box predictions via influence functions", + "label": [ + "119857082", + "81363708", + "163175372", + "124101348", + "67226441", + "94966114" + ], + "author": [ + "2619806161", + "2171686691" + ], + "reference": [ + "196761320", + "290150691", + "1977994906", + "1992129502", + "1998613841", + "2006903949", + "2014226385", + "2042088096", + "2051434435", + "2086923543", + "2104094955", + "2112126852", + "2112796928", + "2117539524", + "2130789388", + "2137406659", + "2145147745", + "2155541015", + "2159562728", + "2160536005", + "2167460663", + "2168127628", + "2169393322", + "2183341477", + "2187013920", + "2188217758", + "2243397390", + "2271840356", + "2293844262", + "2346578521", + "2384495648", + "2493343568", + "2510508396", + "2618530766", + "2949506549", + "2951501516", + "2962961534", + "2963207607" + ], + "abstract": "how can we explain the predictions of a black box model in this paper we use influence functions a classic technique from robust statistics to trace a model s prediction through the learning algorithm and back to its training data thereby identifying training points most responsible for a given prediction to scale up influence functions to modern machine learning settings we develop a simple efficient implementation that requires only oracle access to gradients and hessian vector products we show that even on non convex and non differentiable models where the theory breaks down approximations to influence functions can still provide valuable information on linear models and convolutional neural networks we demonstrate that influence functions are useful for multiple purposes understanding model behavior debugging models detecting dataset errors 
and even creating visually indistinguishable training set attacks", + "title_raw": "Understanding black-box predictions via influence functions", + "abstract_raw": "How can we explain the predictions of a black-box model? In this paper, we use influence functions \u2014 a classic technique from robust statistics \u2014 to trace a model's prediction through the learning algorithm and back to its training data, thereby identifying training points most responsible for a given prediction. To scale up influence functions to modern machine learning settings, we develop a simple, efficient implementation that requires only oracle access to gradients and Hessian-vector products. We show that even on non-convex and non-differentiable models where the theory breaks down, approximations to influence functions can still provide valuable information. On linear models and convolutional neural networks, we demonstrate that influence functions are useful for multiple purposes: understanding model behavior, debugging models, detecting dataset errors, and even creating visually-indistinguishable training-set attacks.", + "link": "https://www.semanticscholar.org/paper/08ad8fad21f6ec4cda4d56be1ca5e146b7c913a1", + "scraped_abstract": null, + "citation_best": 1137 + }, + { + "paper": "2620436109", + "venue": "1174403976", + "year": "2017", + "title": "clone refactoring with lambda expressions", + "label": [ + "152752567", + "2776760102", + "548217200", + "11685472", + "199360897", + "42383842", + "2778739878", + "76482347" + ], + "author": [ + "150546358", + "2025712077", + "2618683766" + ], + "reference": [ + "1649645444", + "1969191237", + "1974032838", + "1990090830", + "1998569777", + "1999457095", + "2004771867", + "2013638584", + "2019348938", + "2040867412", + "2048960787", + "2056514427", + "2059901918", + "2066455950", + "2067377566", + "2073398506", + "2111305209", + "2114063681", + "2119887272", + "2124354055", + "2127964873", + "2128782367", + "2131477050", + "2138756793", + 
"2144344516", + "2153887189", + "2157532207", + "2159933174", + "2165739648", + "2214958829", + "2286236884", + "2358692116", + "2476494490" + ], + "abstract": "lambda expressions have been introduced in java 8 to support functional programming and enable behavior parameterization by passing functions as parameters to methods the majority of software clones duplicated code are known to have behavioral differences i e type 2 and type 3 clones however to the best of our knowledge there is no previous work to investigate the utility of lambda expressions for parameterizing such behavioral differences in clones in this paper we propose a technique that examines the applicability of lambda expressions for the refactoring of clones with behavioral differences moreover we empirically investigate the applicability and characteristics of the lambda expressions introduced to refactor a large dataset of clones our findings show that lambda expressions enable the refactoring of a significant portion of clones that could not be refactored by any other means", + "title_raw": "Clone refactoring with lambda expressions", + "abstract_raw": "Lambda expressions have been introduced in Java 8 to support functional programming and enable behavior parameterization by passing functions as parameters to methods. The majority of software clones (duplicated code) are known to have behavioral differences (i.e., Type-2 and Type-3 clones). However, to the best of our knowledge, there is no previous work to investigate the utility of Lambda expressions for parameterizing such behavioral differences in clones. In this paper, we propose a technique that examines the applicability of Lambda expressions for the refactoring of clones with behavioral differences. Moreover, we empirically investigate the applicability and characteristics of the Lambda expressions introduced to refactor a large dataset of clones. 
Our findings show that Lambda expressions enable the refactoring of a significant portion of clones that could not be refactored by any other means.", + "link": "https://www.semanticscholar.org/paper/acb5942773aeae239f12cf268afd61bfb950bc0f", + "scraped_abstract": null, + "citation_best": 54 + }, + { + "paper": "2616911396", + "venue": "1174403976", + "year": "2017", + "title": "decoding the representation of code in the brain an fmri study of code review and expertise", + "label": [ + "167955471", + "150292731", + "57273362", + "204321447", + "195324797" + ], + "author": [ + "2617199231", + "2800415762", + "1977991679" + ], + "reference": [ + "162310774", + "774135413", + "1121492139", + "1510073064", + "1526710119", + "1537910544", + "1549492071", + "1575235053", + "1905898145", + "1967308381", + "1967878152", + "1969900674", + "1971895315", + "1978603066", + "1979342075", + "1979417611", + "1995436232", + "2008626182", + "2010136639", + "2010837018", + "2012686127", + "2014157362", + "2014232144", + "2022244667", + "2025459835", + "2037376210", + "2038546291", + "2039772616", + "2040710054", + "2043045839", + "2043662475", + "2058046532", + "2067436653", + "2071714163", + "2076719273", + "2078393527", + "2080534028", + "2082186327", + "2089708354", + "2091543666", + "2092594036", + "2105357036", + "2116821000", + "2119513713", + "2121333382", + "2125343911", + "2129651491", + "2133584444", + "2134305330", + "2139410856", + "2139906140", + "2141663826", + "2142029338", + "2142403498", + "2150238850", + "2151097560", + "2155367389", + "2159739762", + "2161204110", + "2164006284", + "2167775278", + "2170743336", + "2171883083", + "2174068427", + "2177797324", + "2224858928", + "2240892839", + "2342082360", + "2344367508", + "2404969801", + "2502442987", + "3119651796", + "3123536914", + "3145970008", + "3146720657" + ], + "abstract": "subjective judgments in software engineering tasks are of critical importance but can be difficult to study with conventional means 
medical imaging techniques hold the promise of relating cognition to physical activities and brain structures in a controlled experiment involving 29 participants we examine code comprehension code review and prose review using functional magnetic resonance imaging we find that the neural representations of programming languages vs natural languages are distinct we can classify which task a participant is under taking based solely on brain activity balanced accuracy 79 p", + "title_raw": "Decoding the representation of code in the brain: an fMRI study of code review and expertise", + "abstract_raw": "Subjective judgments in software engineering tasks are of critical importance but can be difficult to study with conventional means. Medical imaging techniques hold the promise of relating cognition to physical activities and brain structures. In a controlled experiment involving 29 participants, we examine code comprehension, code review and prose review using functional magnetic resonance imaging. We find that the neural representations of programming languages vs. natural languages are distinct. 
We can classify which task a participant is under-taking based solely on brain activity (balanced accuracy 79%, p", + "link": "https://www.semanticscholar.org/paper/91d186d0b2276d536c53f2193ab49881e3893594", + "scraped_abstract": null, + "citation_best": 121 + }, + { + "paper": "2617348763", + "venue": "1174403976", + "year": "2017", + "title": "challenges for static analysis of java reflection literature review and empirical study", + "label": [ + "112604564", + "137287247", + "132106392", + "114408938", + "168702491", + "109701466", + "548217200", + "199360897", + "121957198", + "172482141", + "60945770", + "2777904410", + "174954855", + "65682993", + "97686452", + "43126263", + "115903868" + ], + "author": [ + "2122583658", + "2047031519", + "2076191171" + ], + "reference": [ + "136002940", + "148369031", + "165207608", + "198969604", + "206636664", + "781283046", + "951352394", + "1491425252", + "1536098516", + "1588485088", + "1601193885", + "1603777797", + "1777693579", + "1810163311", + "1899538528", + "1959797194", + "1969423224", + "1969808647", + "1975474596", + "1975675278", + "1982773740", + "1985752637", + "1986480799", + "1987035533", + "1987809485", + "1999753800", + "2017025011", + "2043976729", + "2052802194", + "2063989349", + "2070192880", + "2076089459", + "2077337386", + "2080696000", + "2088472812", + "2090188769", + "2090306244", + "2095938258", + "2104789737", + "2111628838", + "2114275288", + "2118328848", + "2121114674", + "2125357166", + "2126667017", + "2128835824", + "2129509704", + "2132450494", + "2133824159", + "2134732158", + "2135389226", + "2137030454", + "2139980638", + "2140021378", + "2142194171", + "2142867733", + "2148397566", + "2152225177", + "2162762034", + "2166091242", + "2167363133", + "2169090130", + "2171362714", + "2241404614", + "2250113561", + "2293414449", + "2294658973", + "2296620627", + "2385667943", + "2470899015", + "2489895286", + "2577540292", + "2588441215", + "2680865343", + "3146696610" + ], + 
"abstract": "the behavior of software that uses the java reflection api is fundamentally hard to predict by analyzing code only recent static analysis approaches can resolve reflection under unsound yet pragmatic assumptions we survey what approaches exist and what their limitations are we then analyze how real world java code uses the reflection api and how many java projects contain code challenging state of the art static analysis using a systematic literature review we collected and categorized all known methods of statically approximating reflective java code next to this we constructed a representative corpus of java systems and collected descriptive statistics of the usage of the reflection api we then applied an analysis on the abstract syntax trees of all source code to count code idioms which go beyond the limitation boundaries of static analysis approaches the resulting data answers the research questions the corpus the tool and the results are openly available we conclude that the need for unsound assumptions to resolve reflection is widely supported in our corpus reflection can not be ignored for 78 of the projects common challenges for analysis tools such as non exceptional exceptions programmatic filtering meta objects semantics of collections and dynamic proxies widely occur in the corpus for java software engineers prioritizing on robustness we list tactics to obtain more easy to analyze reflection code and for static analysis tool builders we provide a list of opportunities to have significant impact on real java code", + "title_raw": "Challenges for static analysis of Java reflection: literature review and empirical study", + "abstract_raw": "The behavior of software that uses the Java Reflection API is fundamentally hard to predict by analyzing code. Only recent static analysis approaches can resolve reflection under unsound yet pragmatic assumptions. We survey what approaches exist and what their limitations are. 
We then analyze how real-world Java code uses the Reflection API, and how many Java projects contain code challenging state-of-the-art static analysis. Using a systematic literature review we collected and categorized all known methods of statically approximating reflective Java code. Next to this we constructed a representative corpus of Java systems and collected descriptive statistics of the usage of the Reflection API. We then applied an analysis on the abstract syntax trees of all source code to count code idioms which go beyond the limitation boundaries of static analysis approaches. The resulting data answers the research questions. The corpus, the tool and the results are openly available. We conclude that the need for unsound assumptions to resolve reflection is widely supported. In our corpus, reflection can not be ignored for 78% of the projects. Common challenges for analysis tools such as non-exceptional exceptions, programmatic filtering meta objects, semantics of collections, and dynamic proxies, widely occur in the corpus. 
For Java software engineers prioritizing on robustness, we list tactics to obtain more easy to analyze reflection code, and for static analysis tool builders we provide a list of opportunities to have significant impact on real Java code.", + "link": "https://www.semanticscholar.org/paper/90f4ee00e7d0d92b3e0b27dbcbda12a33fbd8616", + "scraped_abstract": null, + "citation_best": 101 + }, + { + "paper": "2850240473", + "venue": "1203999783", + "year": "2018", + "title": "accelerating innovation through analogy mining", + "label": [ + "2522767166", + "2777743986", + "81669768", + "2776291640", + "147168706", + "62230096" + ], + "author": [ + "2563249337", + "2716700415", + "2015015692", + "1275334909" + ], + "reference": [ + "34091735", + "1880262756", + "1965734368", + "1994335990", + "1994734668", + "1994925058", + "2003534453", + "2026161499", + "2032539947", + "2084291440", + "2096908543", + "2119907556", + "2122401796", + "2140492359", + "2145454741", + "2145994287", + "2147152072", + "2151401338", + "2151587352", + "2171960331", + "2206252341", + "2250539671", + "2290652328", + "2574098768", + "2950577311", + "3157647014" + ], + "abstract": "the availability of large idea repositories e g patents could significantly accelerate innovation and discovery by providing people inspiration from solutions to analogous problems however finding useful analogies in these large messy real world repositories remains a persistent challenge for both humans and computers previous approaches include costly hand created databases that do not scale or machine learning similarity metrics that struggle to account for structural similarity which is central to analogy in this paper we explore the viability and value of learning simple structural representations our approach combines crowdsourcing and recurrent neural networks to extract purpose and mechanism vector representations from product descriptions we demonstrate that these learned vectors allow us to find analogies with higher 
precision and recall than traditional methods in an ideation experiment analogies retrieved by our models significantly increased people s likelihood of generating creative ideas", + "title_raw": "Accelerating innovation through analogy mining", + "abstract_raw": "The availability of large idea repositories (e.g., patents) could significantly accelerate innovation and discovery by providing people inspiration from solutions to analogous problems. However, finding useful analogies in these large, messy, real-world repositories remains a persistent challenge for both humans and computers. Previous approaches include costly hand-created databases that do not scale, or machine-learning similarity metrics that struggle to account for structural similarity, which is central to analogy. In this paper we explore the viability and value of learning simple structural representations. Our approach combines crowdsourcing and recurrent neural networks to extract purpose and mechanism vector representations from product descriptions. We demonstrate that these learned vectors allow us to find analogies with higher precision and recall than traditional methods. 
In an ideation experiment, analogies retrieved by our models significantly increased people's likelihood of generating creative ideas.", + "link": "https://www.semanticscholar.org/paper/53cdda81aba76cce8cbb11abeb49a9ae6bb9b476", + "scraped_abstract": null, + "citation_best": 13 + }, + { + "paper": "2964223369", + "venue": "1127325140", + "year": "2017", + "title": "safe and nested subgame solving for imperfect information games", + "label": [ + "197362993", + "49585438", + "200632571" + ], + "author": [ + "2131951165", + "2070867630" + ], + "reference": [], + "abstract": "in imperfect information games the optimal strategy in a subgame may depend on the strategy in other unreached subgames thus a subgame cannot be solved in isolation and must instead consider the strategy for the entire game as a whole unlike perfect information games nevertheless it is possible to first approximate a solution for the whole game and then improve it in individual subgames this is referred to as subgame solving we introduce subgame solving techniques that outperform prior methods both in theory and practice we also show how to adapt them and past subgame solving techniques to respond to opponent actions that are outside the original action abstraction this significantly outperforms the prior state of the art approach action translation finally we show that subgame solving can be repeated as the game progresses down the game tree leading to far lower exploitability these techniques were a key component of libratus the first ai to defeat top humans in heads up no limit texas hold em poker", + "title_raw": "Safe and Nested Subgame Solving for Imperfect-Information Games", + "abstract_raw": "In imperfect-information games, the optimal strategy in a subgame may depend on the strategy in other, unreached subgames. Thus a subgame cannot be solved in isolation and must instead consider the strategy for the entire game as a whole, unlike perfect-information games. 
Nevertheless, it is possible to first approximate a solution for the whole game and then improve it in individual subgames. This is referred to as subgame solving. We introduce subgame-solving techniques that outperform prior methods both in theory and practice. We also show how to adapt them, and past subgame-solving techniques, to respond to opponent actions that are outside the original action abstraction; this significantly outperforms the prior state-of-the-art approach, action translation. Finally, we show that subgame solving can be repeated as the game progresses down the game tree, leading to far lower exploitability. These techniques were a key component of Libratus, the first AI to defeat top humans in heads-up no-limit Texas hold'em poker.", + "link": "https://scholar.google.com/scholar?as_q=&num=10&btnG=Search+Scholar&as_epq=Safe+and+Nested+Subgame+Solving+for+Imperfect-Information+Games&as_oq=&as_eq=&as_occt=any&as_sauthors=Brown", + "scraped_abstract": null, + "citation_best": 53 + }, + { + "paper": "2944407464", + "venue": "1127325140", + "year": "2017", + "title": "variance based regularization with convex objectives", + "label": [ + "2781117939", + "107321475", + "8398441" + ], + "author": [ + "2531916331", + "2524462901" + ], + "reference": [], + "abstract": "we develop an approach to risk minimization and stochastic optimization that provides a convex surrogate for variance allowing near optimal and computationally efficient trading between approximation and estimation error our approach builds off of techniques for distributionally robust optimization and owen s empirical likelihood and we provide a number of finite sample and asymptotic results characterizing the theoretical performance of the estimator in particular we show that our procedure comes with certificates of optimality achieving in some scenarios faster rates of convergence than empirical risk minimization by virtue of automatically balancing bias and variance we give corroborating 
empirical evidence showing that in practice the estimator indeed trades between variance and absolute performance on a training sample improving out of sample test performance over standard empirical risk minimization for a number of classification problems", + "title_raw": "Variance-based Regularization with Convex Objectives", + "abstract_raw": "We develop an approach to risk minimization and stochastic optimization that provides a convex surrogate for variance, allowing near-optimal and computationally efficient trading between approximation and estimation error. Our approach builds off of techniques for distributionally robust optimization and Owen's empirical likelihood, and we provide a number of finite-sample and asymptotic results characterizing the theoretical performance of the estimator. In particular, we show that our procedure comes with certificates of optimality, achieving (in some scenarios) faster rates of convergence than empirical risk minimization by virtue of automatically balancing bias and variance. 
We give corroborating empirical evidence showing that in practice, the estimator indeed trades between variance and absolute performance on a training sample, improving out-of-sample (test) performance over standard empirical risk minimization for a number of classification problems.", + "link": "https://scholar.google.com/scholar?as_q=&num=10&btnG=Search+Scholar&as_epq=Variance-based+Regularization+with+Convex+Objectives&as_oq=&as_eq=&as_occt=any&as_sauthors=Namkoong", + "scraped_abstract": null, + "citation_best": 114 + }, + { + "paper": "2605250808", + "venue": "1158363782", + "year": "2017", + "title": "mos a reusable networking stack for flow monitoring middleboxes", + "label": [ + "9395851", + "149635348" + ], + "author": [ + "2031749427", + "2229021199", + "2152288699", + "2138907572", + "2128804186" + ], + "reference": [ + "186989516", + "1424350945", + "1435010830", + "1511186238", + "1516506771", + "1674877186", + "1987575199", + "2010365467", + "2045260235", + "2057439722", + "2066089879", + "2087546474", + "2096915479", + "2099964107", + "2105545278", + "2108325650", + "2110153733", + "2111734949", + "2114412097", + "2119026482", + "2135446949", + "2139325411", + "2151062909", + "2169414316", + "2202294430", + "2303430686", + "2460047656", + "2498885363", + "2504074749", + "2512266566", + "2530734628" + ], + "abstract": "", + "title_raw": "mOS: A Reusable Networking Stack for Flow Monitoring Middleboxes.", + "abstract_raw": "", + "link": "https://www.semanticscholar.org/paper/daa249469011536088d706490d3f9033009764bc", + "scraped_abstract": null, + "citation_best": 45 + }, + { + "paper": "2731280337", + "venue": "1127352206", + "year": "2017", + "title": "low overhead dynamic binary translation on arm", + "label": [ + "100850083", + "202491316", + "168283630", + "137955351", + "26771161", + "173608175", + "2778971978" + ], + "author": [ + "2270196849", + "2254021122", + "2161408997", + "2190413427" + ], + "reference": [ + "35708471", + "1986152061", + 
"1999195534", + "2050227086", + "2072737419", + "2079530268", + "2089131124", + "2113181828", + "2118750966", + "2122963045", + "2127890955", + "2138019494", + "2148865465", + "2148947321", + "2152389483", + "2155943969", + "2156858199", + "2325071780", + "2599805048", + "2992694935" + ], + "abstract": "the armv8 architecture introduced aarch64 a 64 bit execution mode with a new instruction set while retaining binary compatibility with previous versions of the arm architecture through aarch32 a 32 bit execution mode most hardware implementations of armv8 processors support both aarch32 and aarch64 which comes at a cost in hardware complexity we present mambo x64 a dynamic binary translator for linux which executes 32 bit arm binaries using only the aarch64 instruction set we have evaluated the performance of mambo x64 on three existing armv8 processors which support both aarch32 and aarch64 instruction sets the performance was measured by comparing the running time of 32 bit benchmarks running under mambo x64 with the same benchmark running natively on spec cpu2006 we achieve a geometric mean overhead of less than 7 5 on in order cortex a53 processors and a performance improvement of 1 on out of order x gene 1 processors mambo x64 achieves such low overhead by novel optimizations to map aarch32 floating point registers to aarch64 registers dynamically handle overflowing address calculations efficiently generate traces that harness hardware return address prediction and handle operating system signals accurately", + "title_raw": "Low overhead dynamic binary translation on ARM", + "abstract_raw": "The ARMv8 architecture introduced AArch64, a 64-bit execution mode with a new instruction set, while retaining binary compatibility with previous versions of the ARM architecture through AArch32, a 32-bit execution mode. Most hardware implementations of ARMv8 processors support both AArch32 and AArch64, which comes at a cost in hardware complexity. 
We present MAMBO-X64, a dynamic binary translator for Linux which executes 32-bit ARM binaries using only the AArch64 instruction set. We have evaluated the performance of MAMBO-X64 on three existing ARMv8 processors which support both AArch32 and AArch64 instruction sets. The performance was measured by comparing the running time of 32-bit benchmarks running under MAMBO-X64 with the same benchmark running natively. On SPEC CPU2006, we achieve a geometric mean overhead of less than 7.5% on in-order Cortex-A53 processors and a performance improvement of 1% on out-of-order X-Gene 1 processors. MAMBO-X64 achieves such low overhead by novel optimizations to map AArch32 floating-point registers to AArch64 registers dynamically, handle overflowing address calculations efficiently, generate traces that harness hardware return address prediction, and handle operating system signals accurately.", + "link": "https://www.semanticscholar.org/paper/d5aa8f0f8f19791e421cfadd77a5eb298d06aa3a", + "scraped_abstract": null, + "citation_best": 23 + }, + { + "paper": "2626631502", + "venue": "1127352206", + "year": "2017", + "title": "repairing sequential consistency in c c 11", + "label": [ + "12186640", + "55439883", + "37279795", + "82029504", + "2778925811", + "11413529", + "2776436953", + "39920170" + ], + "author": [ + "2250232977", + "138450541", + "2152400748", + "1978008809", + "2283278728" + ], + "reference": [ + "867008410", + "1430872261", + "1525350307", + "1945229733", + "1983206712", + "1988877888", + "2000659103", + "2014708731", + "2039509099", + "2054391605", + "2054739713", + "2109639089", + "2117502039", + "2132117132", + "2138074470", + "2152885346", + "2168397157", + "2280574045", + "2293159724", + "2293319505", + "2401923818", + "2531502343", + "2549657998", + "2550256838", + "2553522901", + "2564852534", + "2738891045" + ], + "abstract": "the c c 11 memory model defines the semantics of concurrent memory accesses in c c and in particular supports racy atomic 
accesses at a range of different consistency levels from very weak consistency relaxed to strong sequential consistency sc unfortunately as we observe in this paper the semantics of sc atomic accesses in c c 11 as well as in all proposed strengthenings of the semantics is flawed in that contrary to previously published results both suggested compilation schemes to the power architecture are unsound we propose a model called rc11 for repaired c11 with a better semantics for sc accesses that restores the soundness of the compilation schemes to power maintains the drf sc guarantee and provides stronger more useful guarantees to sc fences in addition we formally prove for the first time the correctness of the proposed stronger compilation schemes to power that preserve load to store ordering and avoid out of thin air reads", + "title_raw": "Repairing sequential consistency in C/C++11", + "abstract_raw": "The C/C++11 memory model defines the semantics of concurrent memory accesses in C/C++, and in particular supports racy \"atomic\" accesses at a range of different consistency levels, from very weak consistency (\"relaxed\") to strong, sequential consistency (\"SC\"). Unfortunately, as we observe in this paper, the semantics of SC atomic accesses in C/C++11, as well as in all proposed strengthenings of the semantics, is flawed, in that (contrary to previously published results) both suggested compilation schemes to the Power architecture are unsound. We propose a model, called RC11 (for Repaired C11), with a better semantics for SC accesses that restores the soundness of the compilation schemes to Power, maintains the DRF-SC guarantee, and provides stronger, more useful, guarantees to SC fences. 
In addition, we formally prove, for the first time, the correctness of the proposed stronger compilation schemes to Power that preserve load-to-store ordering and avoid \"out-of-thin-air\" reads.", + "link": "https://www.semanticscholar.org/paper/d42aab91d132fc801b4773b09fa3e61fdc12eed3", + "scraped_abstract": null, + "citation_best": 20 + }, + { + "paper": "2562114628", + "venue": "1127352206", + "year": "2017", + "title": "compiling without continuations", + "label": [ + "196713837", + "169590947", + "2780624054", + "199360897" + ], + "author": [ + "2224010041", + "1836388904", + "2200399310", + "2142246817" + ], + "reference": [ + "84930537", + "159715351", + "1580664042", + "1622288619", + "1879332745", + "1980305847", + "1981830935", + "1982205631", + "1995535937", + "1998466493", + "2000212732", + "2002411399", + "2048706733", + "2049803057", + "2073904768", + "2081124914", + "2118165414", + "2125690388", + "2126126443", + "2133081191", + "2143273235", + "2149127686", + "2151716725", + "2159227002", + "2163672025", + "2509085585" + ], + "abstract": "many fields of study in compilers give rise to the concept of a join point a place where different execution paths come together join points are often treated as functions or continuations but we believe it is time to study them in their own right we show that adding join points to a direct style functional intermediate language is a simple but powerful change that allows new optimizations to be performed including a significant improvement to list fusion finally we report on recent work on adding join points to the intermediate language of the glasgow haskell compiler", + "title_raw": "Compiling without continuations", + "abstract_raw": "Many fields of study in compilers give rise to the concept of a join point\u2014a place where different execution paths come together. Join points are often treated as functions or continuations, but we believe it is time to study them in their own right. 
We show that adding join points to a direct-style functional intermediate language is a simple but powerful change that allows new optimizations to be performed, including a significant improvement to list fusion. Finally, we report on recent work on adding join points to the intermediate language of the Glasgow Haskell Compiler.", + "link": "https://www.semanticscholar.org/paper/18344f8daf595f91f18fdd6d2b2dff99423087b6", + "scraped_abstract": null, + "citation_best": 29 + }, + { + "paper": "2625141509", + "venue": "1127352206", + "year": "2017", + "title": "bringing the web up to speed with webassembly", + "label": [ + "195274430", + "196126337", + "35578498", + "521306242", + "138708601", + "544833334", + "79373723", + "182321512", + "136699151", + "118643609", + "154314014", + "97200028", + "59241245", + "136764020", + "130436687", + "61096286" + ], + "author": [ + "2618137499", + "2781141317", + "2082760169", + "2228977024", + "2638922425", + "2717110858", + "2610884195", + "2661803979", + "2625799630" + ], + "reference": [ + "1501401133", + "1557561422", + "1825457006", + "1978680977", + "1980939032", + "1994759706", + "2007809070", + "2010167524", + "2034711041", + "2069107692", + "2095629885", + "2100111786", + "2105842205", + "2106412703", + "2107089133", + "2118229393", + "2140611647", + "2141365240", + "2149603369", + "2153185479", + "2155851497", + "2156487548", + "2159558457", + "2160725972", + "2163331932", + "2171938395", + "2568721247", + "2619945721", + "3000286628", + "3015906956", + "3160056598" + ], + "abstract": "the maturation of the web platform has given rise to sophisticated and demanding web applications such as interactive 3d visualization audio and video software and games with that efficiency and security of code on the web has become more important than ever yet javascript as the only built in language of the web is not well equipped to meet these requirements especially as a compilation target engineers from the four major browser 
vendors have risen to the challenge and collaboratively designed a portable low level bytecode called webassembly it offers compact representation efficient validation and compilation and safe low to no overhead execution rather than committing to a specific programming model webassembly is an abstraction over modern hardware making it language hardware and platform independent with use cases beyond just the web webassembly has been designed with a formal semantics from the start we describe the motivation design and formal semantics of webassembly and provide some preliminary experience with implementations", + "title_raw": "Bringing the web up to speed with WebAssembly", + "abstract_raw": "The maturation of the Web platform has given rise to sophisticated and demanding Web applications such as interactive 3D visualization, audio and video software, and games. With that, efficiency and security of code on the Web has become more important than ever. Yet JavaScript as the only built-in language of the Web is not well-equipped to meet these requirements, especially as a compilation target. Engineers from the four major browser vendors have risen to the challenge and collaboratively designed a portable low-level bytecode called WebAssembly. It offers compact representation, efficient validation and compilation, and safe low to no-overhead execution. Rather than committing to a specific programming model, WebAssembly is an abstraction over modern hardware, making it language-, hardware-, and platform-independent, with use cases beyond just the Web. WebAssembly has been designed with a formal semantics from the start. 
We describe the motivation, design and formal semantics of WebAssembly and provide some preliminary experience with implementations.", + "link": "https://www.semanticscholar.org/paper/f9420023ec1ee6d7d61d8f61f3c7df33b59afe61", + "scraped_abstract": null, + "citation_best": 113 + }, + { + "paper": "3105639314", + "venue": "1184151122", + "year": "2017", + "title": "dichotomies in ontology mediated querying with the guarded fragment", + "label": [ + "166724064", + "153269930", + "2776235265", + "80444323", + "2780527393", + "134026603" + ], + "author": [ + "1369919044", + "2394551344", + "2063282505", + "2076471859" + ], + "reference": [ + "133337520", + "190956113", + "1482134990", + "1483892218", + "1527197079", + "1542417898", + "1551807230", + "1562734116", + "1608479762", + "1754725800", + "1824207140", + "1830094896", + "1862784933", + "1929404437", + "1970956440", + "1971428581", + "1981545838", + "1991981864", + "1993151238", + "2001399171", + "2013409229", + "2013561137", + "2021002502", + "2027524199", + "2028222220", + "2038993620", + "2052936500", + "2057069578", + "2059572645", + "2065555531", + "2097831877", + "2099367286", + "2102729564", + "2104356397", + "2123152121", + "2133328255", + "2135105491", + "2135109168", + "2137862151", + "2150339067", + "2157027735", + "2158009345", + "2164496990", + "2244287293", + "2245690396", + "2309621853", + "2338322935", + "2341631904", + "2579513981", + "2763521382", + "3031684986", + "3100150857", + "3123577814" + ], + "abstract": "we study the complexity of ontology mediated querying when ontologies are formulated in the guarded fragment of first order logic gf our general aim is to classify the data complexity on the level of ontologies where query evaluation w r t an ontology o is considered to be in ptime if all unions of conjunctive queries can be evaluated in ptime w r t o and conp hard if at least one query is conp hard w r t o we identify several large and relevant fragments of gf that enjoy a dichotomy 
between ptime and conp some of them additionally admitting a form of counting in fact almost all ontologies in the bioportal repository fall into these fragments or can easily be rewritten to do so we then establish a variation of ladner s theorem on the existence of np intermediate problems and use this result to show that for other fragments there is provably no such dichotomy again for other fragments such as full gf establishing a dichotomy implies the feder vardi conjecture on the complexity of constraint satisfaction problems we also link these results to datalog rewritability and study the decidability of whether a given ontology enjoys ptime query evaluation presenting both positive and negative results", + "title_raw": "Dichotomies in Ontology-Mediated Querying with the Guarded Fragment", + "abstract_raw": "We study the complexity of ontology-mediated querying when ontologies are formulated in the guarded fragment of first-order logic (GF). Our general aim is to classify the data complexity on the level of ontologies where query evaluation w.r.t. an ontology O is considered to be in PTime if all (unions of conjunctive) queries can be evaluated in PTime w.r.t. O and coNP-hard if at least one query is coNP-hard w.r.t. O. We identify several large and relevant fragments of GF that enjoy a dichotomy between PTime and coNP, some of them additionally admitting a form of counting. In fact, almost all ontologies in the BioPortal repository fall into these fragments or can easily be rewritten to do so. We then establish a variation of Ladner's Theorem on the existence of NP-intermediate problems and use this result to show that for other fragments, there is provably no such dichotomy. Again for other fragments (such as full GF), establishing a dichotomy implies the Feder-Vardi conjecture on the complexity of constraint satisfaction problems. 
We also link these results to Datalog-rewritability and study the decidability of whether a given ontology enjoys PTime query evaluation, presenting both positive and negative results.", + "link": "https://www.semanticscholar.org/paper/5f0003d19e8138389b1459a589278eec56421a0e", + "scraped_abstract": null, + "citation_best": 13 + }, + { + "paper": "2618267215", + "venue": "1163618098", + "year": "2017", + "title": "verified models and reference implementations for the tls 1 3 standard candidate", + "label": [ + "91062100", + "31258907", + "33884865", + "178489894", + "15927051", + "2778000800", + "148176105", + "120314980", + "148730421" + ], + "author": [ + "392089535", + "2311319369", + "2471162127" + ], + "reference": [ + "18814837", + "39167138", + "42122073", + "105372217", + "189766157", + "1439967542", + "1495444061", + "1549228503", + "1563971619", + "1589586740", + "1656028867", + "1662746580", + "1673604584", + "1809974132", + "1939171670", + "1975344666", + "1985453495", + "1987581799", + "1991234099", + "2002789557", + "2016747171", + "2029693536", + "2041428801", + "2042923641", + "2064815039", + "2070775894", + "2082841864", + "2085179296", + "2091877728", + "2092279637", + "2094250919", + "2107506969", + "2109767152", + "2112018008", + "2125591151", + "2132707977", + "2134615993", + "2145835327", + "2151413173", + "2152845220", + "2152933633", + "2153041122", + "2161706819", + "2163005041", + "2164090669", + "2267469130", + "2293388233", + "2296886798", + "2395368405", + "2402906095", + "2462805800", + "2469403219", + "2513374167", + "2515070993", + "2515547981", + "2516734788", + "2535028936", + "2536707834", + "2538826906", + "2545990795", + "2546510801", + "2561521908", + "2652625053", + "2774510177", + "2916293460" + ], + "abstract": "tls 1 3 is the next version of the transport layer security tls protocol its clean slate design is a reaction both to the increasing demand for low latency https connections and to a series of recent high profile 
attacks on tls the hope is that a fresh protocol with modern cryptography will prevent legacy problems the danger is that it will expose new kinds of attacks or reintroduce old flaws that were fixed in previous versions of tls after 18 drafts the protocol is nearing completion and the working group has appealed to researchers to analyze the protocol before publication this paper responds by presenting a comprehensive analysis of the tls 1 3 draft 18 protocol we seek to answer three questions that have not been fully addressed in previous work on tls 1 3 1 does tls 1 3 prevent well known attacks on tls 1 2 such as logjam or the triple handshake even if it is run in parallel with tls 1 2 2 can we mechanically verify the computational security of tls 1 3 under standard strong assumptions on its cryptographic primitives 3 how can we extend the guarantees of the tls 1 3 protocol to the details of its implementations to answer these questions we propose a methodology for developing verified symbolic and computational models of tls 1 3 hand in hand with a high assurance reference implementation of the protocol we present symbolic proverif models for various intermediate versions of tls 1 3 and evaluate them against a rich class of attacks to reconstruct both known and previously unpublished vulnerabilities that influenced the current design of the protocol we present a computational cryptoverif model for tls 1 3 draft 18 and prove its security we present reftls an interoperable implementation of tls 1 0 1 3 and automatically analyze its protocol core by extracting a proverif model from its typed javascript code", + "title_raw": "Verified Models and Reference Implementations for the TLS 1.3 Standard Candidate", + "abstract_raw": "TLS 1.3 is the next version of the Transport Layer Security (TLS) protocol. Its clean-slate design is a reaction both to the increasing demand for low-latency HTTPS connections and to a series of recent high-profile attacks on TLS. 
The hope is that a fresh protocol with modern cryptography will prevent legacy problems, the danger is that it will expose new kinds of attacks, or reintroduce old flaws that were fixed in previous versions of TLS. After 18 drafts, the protocol is nearing completion, and the working group has appealed to researchers to analyze the protocol before publication. This paper responds by presenting a comprehensive analysis of the TLS 1.3 Draft-18 protocol. We seek to answer three questions that have not been fully addressed in previous work on TLS 1.3: (1) Does TLS 1.3 prevent well-known attacks on TLS 1.2, such as Logjam or the Triple Handshake, even if it is run in parallel with TLS 1.2? (2) Can we mechanically verify the computational security of TLS 1.3 under standard (strong) assumptions on its cryptographic primitives? (3) How can we extend the guarantees of the TLS 1.3 protocol to the details of its implementations?To answer these questions, we propose a methodology for developing verified symbolic and computational models of TLS 1.3 hand-in-hand with a high-assurance reference implementation of the protocol. We present symbolic ProVerif models for various intermediate versions of TLS 1.3 and evaluate them against a rich class of attacks to reconstruct both known and previously unpublished vulnerabilities that influenced the current design of the protocol. We present a computational CryptoVerif model for TLS 1.3 Draft-18 and prove its security. 
We present RefTLS, an interoperable implementation of TLS 1.0-1.3 and automatically analyze its protocol core by extracting a ProVerif model from its typed JavaScript code.", + "link": "https://www.semanticscholar.org/paper/338d4815de02be38990db8cff9f96ef8e6959c80", + "scraped_abstract": null, + "citation_best": 168 + }, + { + "paper": "2744387122", + "venue": "1152462849", + "year": "2017", + "title": "re architecting datacenter networks and stacks for low latency and high performance", + "label": [ + "199845137", + "2776767758", + "31258907", + "158379750", + "119700423", + "168834603", + "157764524", + "98980195" + ], + "author": [ + "2281506998", + "223441212", + "1863903329", + "2428210659", + "2576671778", + "2043771256", + "2605512994" + ], + "reference": [ + "14875769", + "1507914199", + "1609755472", + "1668579060", + "1698388015", + "1968331080", + "1978175770", + "1988150362", + "2011730388", + "2023366662", + "2036003010", + "2040882418", + "2101871381", + "2102549685", + "2109195783", + "2117884704", + "2126210439", + "2130531694", + "2132320636", + "2133581580", + "2134889667", + "2142480021", + "2149804187", + "2155395786", + "2157614013", + "2157990152", + "2163404313", + "2163769126", + "2164740236", + "2168595508", + "2281233864", + "2319387379", + "2498764059", + "2530088943", + "2753542457", + "2999349537" + ], + "abstract": "modern datacenter networks provide very high capacity via redundant clos topologies and low switch latency but transport protocols rarely deliver matching performance we present ndp a novel data center transport architecture that achieves near optimal completion times for short transfers and high flow throughput in a wide range of scenarios including incast ndp switch buffers are very shallow and when they fill the switches trim packets to headers and priority forward the headers this gives receivers a full view of instantaneous demand from all senders and is the basis for our novel high performance multipath aware 
transport protocol that can deal gracefully with massive incast events and prioritize traffic from different senders on rtt timescales we implemented ndp in linux hosts with dpdk in a software switch in a netfpga based hardware switch and in p4 we evaluate ndp s performance in our implementations and in large scale simulations simultaneously demonstrating support for very low latency and high throughput", + "title_raw": "Re-architecting datacenter networks and stacks for low latency and high performance", + "abstract_raw": "Modern datacenter networks provide very high capacity via redundant Clos topologies and low switch latency, but transport protocols rarely deliver matching performance. We present NDP, a novel data-center transport architecture that achieves near-optimal completion times for short transfers and high flow throughput in a wide range of scenarios, including incast. NDP switch buffers are very shallow and when they fill the switches trim packets to headers and priority forward the headers. This gives receivers a full view of instantaneous demand from all senders, and is the basis for our novel, high-performance, multipath-aware transport protocol that can deal gracefully with massive incast events and prioritize traffic from different senders on RTT timescales. We implemented NDP in Linux hosts with DPDK, in a software switch, in a NetFPGA-based hardware switch, and in P4. 
We evaluate NDP's performance in our implementations and in large-scale simulations, simultaneously demonstrating support for very low-latency and high throughput.", + "link": "https://www.semanticscholar.org/paper/70c5d22a32ce03885dc9bc81bfa1f0fbc4c9f097", + "scraped_abstract": null, + "citation_best": 316 + }, + { + "paper": "2743093301", + "venue": "1152462849", + "year": "2017", + "title": "language directed hardware design for network performance monitoring", + "label": [ + "113041634", + "111919701", + "22684755", + "14107862", + "9390403", + "203274722", + "159631557" + ], + "author": [ + "2119376345", + "2170843082", + "2528902719", + "2744587247", + "2896169366", + "2309837544", + "2056491872", + "2101044554" + ], + "reference": [ + "84901888", + "1408671314", + "1623461676", + "1858168446", + "1910606302", + "1976821017", + "1984803709", + "1990249073", + "1994327700", + "2053285257", + "2067716156", + "2080234606", + "2099395665", + "2099501333", + "2112320294", + "2113093733", + "2133581580", + "2134519279", + "2136451165", + "2140311411", + "2144261930", + "2146012756", + "2147118406", + "2157990152", + "2168595508", + "2283556750", + "2295608275", + "2303191924", + "2305788608", + "2487095677", + "2487583538", + "2501089562", + "2502136739", + "2547023114", + "2585501424", + "2605280586" + ], + "abstract": "network performance monitoring today is restricted by existing switch support for measurement forcing operators to rely heavily on endpoints with poor visibility into the network core switch vendors have added progressively more monitoring features to switches but the current trajectory of adding specific features is unsustainable given the ever changing demands of network operators instead we ask what switch hardware primitives are required to support an expressive language of network performance questions we believe that the resulting switch hardware design could address a wide variety of current and future performance monitoring needs we present 
a performance query language marple modeled on familiar functional constructs like map filter groupby and zip marple is backed by a new programmable key value store primitive on switch hardware the key value store performs flexible aggregations at line rate e g a moving average of queueing latencies per flow and scales to millions of keys we present a marple compiler that targets a p4 programmable software switch and a simulator for high speed programmable switches marple can express switch queries that could previously run only on end hosts while marple queries only occupy a modest fraction of a switch s hardware resources", + "title_raw": "Language-Directed Hardware Design for Network Performance Monitoring", + "abstract_raw": "Network performance monitoring today is restricted by existing switch support for measurement, forcing operators to rely heavily on endpoints with poor visibility into the network core. Switch vendors have added progressively more monitoring features to switches, but the current trajectory of adding specific features is unsustainable given the ever-changing demands of network operators. Instead, we ask what switch hardware primitives are required to support an expressive language of network performance questions. We believe that the resulting switch hardware design could address a wide variety of current and future performance monitoring needs. We present a performance query language, Marple, modeled on familiar functional constructs like map, filter, groupby, and zip. Marple is backed by a new programmable key-value store primitive on switch hardware. The key-value store performs flexible aggregations at line rate (e.g., a moving average of queueing latencies per flow), and scales to millions of keys. We present a Marple compiler that targets a P4-programmable software switch and a simulator for high-speed programmable switches. 
Marple can express switch queries that could previously run only on end hosts, while Marple queries only occupy a modest fraction of a switch's hardware resources.", + "link": "https://www.semanticscholar.org/paper/6de1045de20e79ae5b40284fad431ed501dd3f68", + "scraped_abstract": null, + "citation_best": 234 + }, + { + "paper": "2742124187", + "venue": "1140684652", + "year": "2017", + "title": "bitfunnel revisiting signatures for search", + "label": [ + "75165309", + "147937185", + "152896618", + "2778773198", + "97854310", + "147224247", + "124101348", + "41547737", + "79974875", + "130590232", + "23123220" + ], + "author": [ + "2741552030", + "2563530614", + "2741585414", + "2742086361", + "2740686605", + "2017883357", + "2166872174" + ], + "reference": [ + "67853643", + "100509257", + "155778206", + "1516903679", + "1791987072", + "1929352279", + "1967029277", + "1968915437", + "1973846693", + "1984614894", + "1997841190", + "2004620064", + "2019406253", + "2059387258", + "2065347041", + "2066636486", + "2080745753", + "2084367148", + "2084965869", + "2089455813", + "2123845384", + "2125203709", + "2131355670", + "2157405322", + "2163652601", + "2263798363", + "2307814545" + ], + "abstract": "since the mid 90s there has been a widely held belief that signature files are inferior to inverted files for text indexing in recent years the bing search engine has developed and deployed an index based on bit sliced signatures this index known as bitfunnel replaced an existing production system based on an inverted index the driving factor behind the shift away from the inverted index was operational cost savings this paper describes algorithmic innovations and changes in the cloud computing landscape that led us to reconsider and eventually field a technology that was once considered unusable the bitfunnel algorithm directly addresses four fundamental limitations in bit sliced block signatures at the same time our mapping of the algorithm onto a cluster offers 
opportunities to avoid other costs associated with signatures we show these innovations yield a significant efficiency gain versus classic bit sliced signatures and then compare bitfunnel with partitioned elias fano indexes mg4j and lucene", + "title_raw": "BitFunnel: Revisiting Signatures for Search", + "abstract_raw": "Since the mid-90s there has been a widely-held belief that signature files are inferior to inverted files for text indexing. In recent years the Bing search engine has developed and deployed an index based on bit-sliced signatures. This index, known as BitFunnel, replaced an existing production system based on an inverted index. The driving factor behind the shift away from the inverted index was operational cost savings. This paper describes algorithmic innovations and changes in the cloud computing landscape that led us to reconsider and eventually field a technology that was once considered unusable. The BitFunnel algorithm directly addresses four fundamental limitations in bit-sliced block signatures. At the same time, our mapping of the algorithm onto a cluster offers opportunities to avoid other costs associated with signatures. 
We show these innovations yield a significant efficiency gain versus classic bit-sliced signatures and then compare BitFunnel with Partitioned Elias-Fano Indexes, MG4J, and Lucene.", + "link": "https://www.semanticscholar.org/paper/2a695a9bec2a0f4e26fab3a631ea4ea51a2aab95", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2576238278", + "venue": "1131589359", + "year": "2017", + "title": "accelerating performance inference over closed systems by asymptotic methods", + "label": [ + "22684755", + "2776214188", + "48442024" + ], + "author": [ + "2075119795" + ], + "reference": [ + "160836666", + "1459540712", + "1524762955", + "1732145562", + "1966517285", + "1968714756", + "1978006658", + "1985031822", + "1990037024", + "2010069926", + "2014057642", + "2021038847", + "2021900251", + "2023980590", + "2041392951", + "2043055825", + "2047851749", + "2048838384", + "2057475359", + "2057738769", + "2058155782", + "2069883870", + "2071465809", + "2075358350", + "2094055697", + "2096645015", + "2104110349", + "2106828713", + "2114795632", + "2115525458", + "2120502489", + "2136897171", + "2153677792", + "2164149587", + "2335709783", + "2342179536", + "2585405503", + "2904250115", + "2953343600", + "3037830703", + "3100509614", + "3104134886" + ], + "abstract": "recent years have seen a rapid growth of interest in exploiting monitoring data collected from enterprise applications for automated management and performance analysis in spite of this trend even simple performance inference problems involving queueing theoretic formulas often incur computational bottlenecks for example upon computing likelihoods in models of batch systems motivated by this issue we revisit the solution of multiclass closed queueing networks which are popular models used to describe batch and distributed applications with parallelism constraints we first prove that the normalizing constant of the equilibrium state probabilities of a closed model can be reformulated exactly as a 
multidimensional integral over the unit simplex this gives as a by product novel explicit expressions for the multiclass normalizing constant we then derive a method based on cubature rules to efficiently evaluate the proposed integral form in small and medium sized models for large models we propose novel asymptotic expansions and monte carlo sampling methods to efficiently and accurately approximate normalizing constants and likelihoods we illustrate the resulting accuracy gains in problems involving optimization based inference", + "title_raw": "Accelerating Performance Inference over Closed Systems by Asymptotic Methods", + "abstract_raw": "Recent years have seen a rapid growth of interest in exploiting monitoring data collected from enterprise applications for automated management and performance analysis. In spite of this trend, even simple performance inference problems involving queueing theoretic formulas often incur computational bottlenecks, for example upon computing likelihoods in models of batch systems. Motivated by this issue, we revisit the solution of multiclass closed queueing networks, which are popular models used to describe batch and distributed applications with parallelism constraints. We first prove that the normalizing constant of the equilibrium state probabilities of a closed model can be reformulated exactly as a multidimensional integral over the unit simplex. This gives as a by-product novel explicit expressions for the multiclass normalizing constant. We then derive a method based on cubature rules to efficiently evaluate the proposed integral form in small and medium-sized models. For large models, we propose novel asymptotic expansions and Monte Carlo sampling methods to efficiently and accurately approximate normalizing constants and likelihoods. 
We illustrate the resulting accuracy gains in problems involving optimization-based inference.", + "link": "https://www.semanticscholar.org/paper/6fba13a176e04e56f12cd3edb97ce2e358708ede", + "scraped_abstract": null, + "citation_best": 5 + }, + { + "paper": "2613088476", + "venue": "1175089206", + "year": "2017", + "title": "parallelizing sequential graph computations", + "label": [ + "34165917", + "45374587", + "48044578", + "195701839", + "173608175" + ], + "author": [ + "2132396280", + "2327968308", + "2134127457", + "2101311407", + "2685063421", + "2796889627", + "2506610500", + "2139075683", + "2511578205" + ], + "reference": [ + "30495595", + "54561612", + "78077100", + "134051198", + "217817341", + "573363114", + "1009523478", + "1181317657", + "1425731158", + "1448681276", + "1501254937", + "1509240356", + "1548164611", + "1603765807", + "1942786204", + "1969970763", + "1971630691", + "1980147176", + "1980907873", + "1991069419", + "1993319924", + "1993505169", + "1994924587", + "2004053910", + "2009702064", + "2016924386", + "2022490362", + "2029706450", + "2035173902", + "2040943118", + "2045271686", + "2049354208", + "2051586153", + "2054141820", + "2062705952", + "2064635301", + "2077436409", + "2084224084", + "2093053744", + "2096544401", + "2097284499", + "2098817244", + "2098903349", + "2100132188", + "2123966888", + "2132737349", + "2132774949", + "2138295324", + "2145067130", + "2147405597", + "2165770179", + "2166326676", + "2166469213", + "2167149929", + "2170616854", + "2171329192", + "2173213060", + "2219156867", + "2259576664", + "2281494333", + "2400702091", + "2574229471", + "2579247884", + "2612026221", + "2613159091", + "2798738742", + "2962740062", + "2963804743" + ], + "abstract": "this paper presents grape a parallel system for graph computations grape differs from prior systems in its ability to parallelize existing sequential graph algorithms as a whole underlying grape are a simple programming model and a principled approach based on 
partial evaluation and incremental computation we show that sequential graph algorithms can be plugged into grape with minor changes and get parallelized as long as the sequential algorithms are correct their grape parallelization guarantees to terminate with correct answers under a monotonic condition moreover we show that algorithms in mapreduce bsp and pram can be optimally simulated on grape in addition to the ease of programming we experimentally verify that grape achieves comparable performance to the state of the art graph systems using real life and synthetic graphs", + "title_raw": "Parallelizing Sequential Graph Computations", + "abstract_raw": "This paper presents GRAPE, a parallel system for graph computations. GRAPE differs from prior systems in its ability to parallelize existing sequential graph algorithms as a whole. Underlying GRAPE are a simple programming model and a principled approach, based on partial evaluation and incremental computation. We show that sequential graph algorithms can be \"plugged into\" GRAPE with minor changes, and get parallelized. As long as the sequential algorithms are correct, their GRAPE parallelization guarantees to terminate with correct answers under a monotonic condition. Moreover, we show that algorithms in MapReduce, BSP and PRAM can be optimally simulated on GRAPE. 
In addition to the ease of programming, we experimentally verify that GRAPE achieves comparable performance to the state-of-the-art graph systems, using real-life and synthetic graphs.", + "link": "https://www.semanticscholar.org/paper/b75eaa6208939f5c5db3a4657e39f63956440f13", + "scraped_abstract": null, + "citation_best": 37 + }, + { + "paper": "3101492167", + "venue": "1171178643", + "year": "2017", + "title": "the efficient server audit problem deduplicated re execution and the web", + "label": [ + "111919701", + "118643609", + "68339613" + ], + "author": [ + "2400754560", + "2757093515", + "2073954023", + "2048602404" + ], + "reference": [ + "4180724", + "66929706", + "70331077", + "216996474", + "1424543055", + "1434079718", + "1483960654", + "1504669610", + "1507039213", + "1557386445", + "1569778844", + "1770279127", + "1852007091", + "1858287302", + "1877496576", + "1915338469", + "1966022902", + "1970808997", + "1972304371", + "1978267236", + "1986463648", + "1991769466", + "2001359464", + "2018746447", + "2023304911", + "2029349492", + "2031348459", + "2035541386", + "2039509099", + "2040851906", + "2043501224", + "2043801088", + "2049982182", + "2053086236", + "2067700169", + "2071520502", + "2077693819", + "2080869721", + "2096787381", + "2097146584", + "2098786798", + "2098843110", + "2100889285", + "2101939036", + "2114488210", + "2114579022", + "2114887958", + "2121022001", + "2123524055", + "2123869284", + "2125591927", + "2128159601", + "2130473288", + "2131431269", + "2131889098", + "2142892618", + "2144621365", + "2150615820", + "2150709728", + "2152812436", + "2154698535", + "2157010176", + "2159915142", + "2167804035", + "2171295941", + "2171337840", + "2171956059", + "2294081347", + "2296013760", + "2299405824", + "2402869180", + "2404353777", + "2407831955", + "2416938811", + "2518442919", + "2575735093", + "2612012086", + "2613376966", + "2676157184", + "2765206040", + "2766710082" + ], + "abstract": "you put a program on a concurrent 
server but you don t trust the server later you get a trace of the actual requests that the server received from its clients and the responses that it delivered you separately get logs from the server these are untrusted how can you use the logs to efficiently verify that the responses were derived from running the program on the requests this is the efficient server audit problem which abstracts real world scenarios including running a web application on an untrusted provider we give a solution based on several new techniques including simultaneous replay and efficient verification of concurrent executions we implement the solution for php web applications for several applications our verifier achieves 5 6 10 9x speedup versus simply re executing with", + "title_raw": "The Efficient Server Audit Problem, Deduplicated Re-execution, and the Web", + "abstract_raw": "You put a program on a concurrent server, but you don't trust the server; later, you get a trace of the actual requests that the server received from its clients and the responses that it delivered. You separately get logs from the server; these are untrusted. How can you use the logs to efficiently verify that the responses were derived from running the program on the requests? This is the Efficient Server Audit Problem, which abstracts real-world scenarios, including running a web application on an untrusted provider. We give a solution based on several new techniques, including simultaneous replay and efficient verification of concurrent executions. We implement the solution for PHP web applications. 
For several applications, our verifier achieves 5.6-10.9x speedup versus simply re-executing, with", + "link": "https://www.semanticscholar.org/paper/8ad98c87bd3f9ef1299a6fbf3e0a58376366f2f1", + "scraped_abstract": null, + "citation_best": 15 + }, + { + "paper": "2616028256", + "venue": "1171178643", + "year": "2017", + "title": "deepxplore automated whitebox testing of deep learning systems", + "label": [ + "541664917", + "119857082", + "153083717", + "55439883", + "162443782", + "2777904410", + "108583219", + "2776973931", + "137836250" + ], + "author": [ + "2615179897", + "2122648457", + "2103556627", + "2154388805" + ], + "reference": [ + "109452506", + "1473189865", + "1488996941", + "1498436455", + "1570448133", + "1665214252", + "1686810756", + "1825675169", + "1883420340", + "1915485278", + "1932198206", + "1976919795", + "1988790447", + "2033368661", + "2038296020", + "2051267297", + "2054426341", + "2081580037", + "2082190528", + "2095705004", + "2096900708", + "2097117768", + "2098456636", + "2101577935", + "2104839588", + "2108598243", + "2110889728", + "2112796928", + "2117539524", + "2122672392", + "2133665775", + "2148603752", + "2151704521", + "2165073069", + "2169393322", + "2180612164", + "2194775991", + "2257979135", + "2269778407", + "2275363859", + "2342045095", + "2395317528", + "2414287720", + "2461943168", + "2512472178", + "2519224033", + "2533393700", + "2533523411", + "2535873859", + "2543296129", + "2565186948", + "2574797807", + "2594877703", + "2606722458", + "2610190180", + "2618530766", + "2672575173", + "2701082322", + "2949117887", + "2950468330", + "2953384591", + "2963207607", + "2963857521", + "2964082701", + "2964153729", + "3102542372", + "3118608800" + ], + "abstract": "deep learning dl systems are increasingly deployed in safety and security critical domains including self driving cars and malware detection where the correctness and predictability of a system s behavior for corner case inputs are of great importance existing 
dl testing depends heavily on manually labeled data and therefore often fails to expose erroneous behaviors for rare inputs we design implement and evaluate deepxplore the first whitebox framework for systematically testing real world dl systems first we introduce neuron coverage for systematically measuring the parts of a dl system exercised by test inputs next we leverage multiple dl systems with similar functionality as cross referencing oracles to avoid manual checking finally we demonstrate how finding inputs for dl systems that both trigger many differential behaviors and achieve high neuron coverage can be represented as a joint optimization problem and solved efficiently using gradient based search techniques deepxplore efficiently finds thousands of incorrect corner case behaviors e g self driving cars crashing into guard rails and malware masquerading as benign software in state of the art dl models with thousands of neurons trained on five popular datasets including imagenet and udacity self driving challenge data for all tested dl models on average deepxplore generated one test input demonstrating incorrect behavior within one second while running only on a commodity laptop we further show that the test inputs generated by deepxplore can also be used to retrain the corresponding dl model to improve the model s accuracy by up to 3", + "title_raw": "DeepXplore: Automated Whitebox Testing of Deep Learning Systems", + "abstract_raw": "Deep learning (DL) systems are increasingly deployed in safety- and security-critical domains including self-driving cars and malware detection, where the correctness and predictability of a system's behavior for corner case inputs are of great importance. Existing DL testing depends heavily on manually labeled data and therefore often fails to expose erroneous behaviors for rare inputs. We design, implement, and evaluate DeepXplore, the first whitebox framework for systematically testing real-world DL systems. 
First, we introduce neuron coverage for systematically measuring the parts of a DL system exercised by test inputs. Next, we leverage multiple DL systems with similar functionality as cross-referencing oracles to avoid manual checking. Finally, we demonstrate how finding inputs for DL systems that both trigger many differential behaviors and achieve high neuron coverage can be represented as a joint optimization problem and solved efficiently using gradient-based search techniques. DeepXplore efficiently finds thousands of incorrect corner case behaviors (e.g., self-driving cars crashing into guard rails and malware masquerading as benign software) in state-of-the-art DL models with thousands of neurons trained on five popular datasets including ImageNet and Udacity self-driving challenge data. For all tested DL models, on average, DeepXplore generated one test input demonstrating incorrect behavior within one second while running only on a commodity laptop. We further show that the test inputs generated by DeepXplore can also be used to retrain the corresponding DL model to improve the model's accuracy by up to 3%.", + "link": "https://www.semanticscholar.org/paper/945e51a317feac511739da4c642e8b8aab7f7905", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2765855860", + "venue": "1166315290", + "year": "2017", + "title": "triggering artwork swaps for live animation", + "label": [ + "502989409", + "136738937", + "2781217928", + "69369342", + "2778438179", + "49774154", + "108265739", + "200632571" + ], + "author": [ + "2765814297", + "2303505900", + "1978546182", + "2136484952" + ], + "reference": [ + "61807067", + "165571522", + "756005332", + "1702419847", + "1943054406", + "1983050461", + "1988670898", + "2005872581", + "2015814664", + "2024585795", + "2029697180", + "2036433269", + "2050188448", + "2052101136", + "2059017188", + "2067534690", + "2069705166", + "2102416463", + "2114779318", + "2116436752", + "2118711147", + "2136920231", + 
"2137057176", + "2141461755", + "2169753586", + "2172868867", + "2214145768", + "2218614209", + "2293053787", + "2402179761", + "2557500739", + "2964304707", + "3004596153", + "3010097777" + ], + "abstract": "live animation of 2d characters is a new form of storytelling that has started to appear on streaming platforms and broadcast tv unlike traditional animation human performers control characters in real time so that they can respond and improvise to live events current live animation systems provide a range of animation controls such as camera input to drive head movements audio for lip sync and keyboard shortcuts to trigger discrete pose changes via artwork swaps however managing all of these controls during a live performance is challenging in this work we present a new interactive system that specifically addresses the problem of triggering artwork swaps in live settings our key contributions are the design of a multi touch triggering interface that overlays visual triggers around a live preview of the character and a predictive triggering model that leverages practice performances to suggest pose transitions during live performances we evaluate our system with quantitative experiments a user study with novice participants and interviews with professional animators", + "title_raw": "Triggering Artwork Swaps for Live Animation", + "abstract_raw": "Live animation of 2D characters is a new form of storytelling that has started to appear on streaming platforms and broadcast TV. Unlike traditional animation, human performers control characters in real time so that they can respond and improvise to live events. Current live animation systems provide a range of animation controls, such as camera input to drive head movements, audio for lip sync, and keyboard shortcuts to trigger discrete pose changes via artwork swaps. However, managing all of these controls during a live performance is challenging. 
In this work, we present a new interactive system that specifically addresses the problem of triggering artwork swaps in live settings. Our key contributions are the design of a multi-touch triggering interface that overlays visual triggers around a live preview of the character, and a predictive triggering model that leverages practice performances to suggest pose transitions during live performances. We evaluate our system with quantitative experiments, a user study with novice participants, and interviews with professional animators.", + "link": "https://www.semanticscholar.org/paper/979db0cc194f70197a4a05e1d6656ff3dccd7741", + "scraped_abstract": null, + "citation_best": 16 + }, + { + "paper": "2766544714", + "venue": "1166315290", + "year": "2017", + "title": "grabity a wearable haptic interface for simulating weight and grasping in virtual reality", + "label": [ + "5643039", + "25344961", + "44154836", + "51970089", + "194969405", + "31972630", + "152086174", + "150594956" + ], + "author": [ + "2579329556", + "2807967620", + "2765514236", + "102229238", + "2069682576" + ], + "reference": [ + "42617256", + "69175850", + "600399566", + "1523996461", + "1562199429", + "1606092312", + "1851459790", + "1968739012", + "1969299770", + "1972258157", + "1990213079", + "2004961613", + "2008169038", + "2029304603", + "2034377412", + "2037506233", + "2037691378", + "2045222441", + "2047060865", + "2092050375", + "2092511662", + "2101553753", + "2116189780", + "2122261804", + "2122613696", + "2136078307", + "2140022787", + "2150606742", + "2152737814", + "2288146251", + "2345854622", + "2345967438", + "2346231794", + "2396363971", + "2397519240", + "2412035345", + "2481130537", + "2536013731", + "2562142647", + "2565842466", + "2581455624", + "2610457270", + "2610929834" + ], + "abstract": "ungrounded haptic devices for virtual reality vr applications lack the ability to convincingly render the sensations of a grasped virtual object s rigidity and weight we present 
grabity a wearable haptic device designed to simulate kinesthetic pad opposition grip forces and weight for grasping virtual objects in vr the device is mounted on the index finger and thumb and enables precision grasps with a wide range of motion a unidirectional brake creates rigid grasping force feedback two voice coil actuators create virtual force tangential to each finger pad through asymmetric skin deformation these forces can be perceived as gravitational and inertial forces of virtual objects the rotational orientation of the voice coil actuators is passively aligned with the real direction of gravity through a revolute joint causing the virtual forces to always point downward this paper evaluates the performance of grabity through two user studies finding promising ability to simulate different levels of weight with convincing object rigidity the first user study shows that grabity can convey various magnitudes of weight and force sensations to users by manipulating the amplitude of the asymmetric vibration the second user study shows that users can differentiate different weights in a virtual environment using grabity", + "title_raw": "Grabity: A Wearable Haptic Interface for Simulating Weight and Grasping in Virtual Reality", + "abstract_raw": "Ungrounded haptic devices for virtual reality (VR) applications lack the ability to convincingly render the sensations of a grasped virtual object's rigidity and weight. We present Grabity, a wearable haptic device designed to simulate kinesthetic pad opposition grip forces and weight for grasping virtual objects in VR. The device is mounted on the index finger and thumb and enables precision grasps with a wide range of motion. A unidirectional brake creates rigid grasping force feedback. Two voice coil actuators create virtual force tangential to each finger pad through asymmetric skin deformation. These forces can be perceived as gravitational and inertial forces of virtual objects. 
The rotational orientation of the voice coil actuators is passively aligned with the real direction of gravity through a revolute joint, causing the virtual forces to always point downward. This paper evaluates the performance of Grabity through two user studies, finding promising ability to simulate different levels of weight with convincing object rigidity. The first user study shows that Grabity can convey various magnitudes of weight and force sensations to users by manipulating the amplitude of the asymmetric vibration. The second user study shows that users can differentiate different weights in a virtual environment using Grabity.", + "link": "https://www.semanticscholar.org/paper/c592a9b2fcd1b45a8694c2b6ecf40ec83844fab7", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "3106143313", + "venue": "1166315290", + "year": "2017", + "title": "aircode unobtrusive physical tags for digital fabrication", + "label": [ + "93518851", + "64729616", + "57273362", + "31972630", + "98045186", + "41608201" + ], + "author": [ + "2941266506", + "2737012866", + "2036349267", + "2111990228" + ], + "reference": [ + "1672766972", + "1968528175", + "1973367739", + "1974329625", + "1983053224", + "1987333622", + "2003526930", + "2014988059", + "2015559606", + "2027064609", + "2027480687", + "2031535007", + "2047947115", + "2075881548", + "2078469680", + "2085261163", + "2088255295", + "2100910924", + "2115529949", + "2120448006", + "2122981824", + "2125186487", + "2128593740", + "2129751850", + "2145599611", + "2154726058", + "2164139712", + "2180120384", + "2247601005", + "2252227770", + "2267998376", + "2345379637", + "2346126702", + "2346915687", + "2405022506", + "2406687334", + "2460832736", + "2533224627", + "2890250492", + "2912460004", + "2942537621" + ], + "abstract": "we present aircode a technique that allows the user to tag physically fabricated objects with given information an aircode tag consists of a group of carefully designed air pockets 
placed beneath the object surface these air pockets are easily produced during the fabrication process of the object without any additional material or postprocessing meanwhile the air pockets affect only the scattering light transport under the surface and thus are hard to notice to our naked eyes but by using a computational imaging method the tags become detectable we present a tool that automates the design of air pockets for the user to encode information aircode system also allows the user to retrieve the information from captured images via a robust decoding algorithm we demonstrate our tagging technique with applications for metadata embedding robotic grasping as well as conveying object affordances", + "title_raw": "AirCode: Unobtrusive Physical Tags for Digital Fabrication", + "abstract_raw": "We present AirCode, a technique that allows the user to tag physically fabricated objects with given information. An AirCode tag consists of a group of carefully designed air pockets placed beneath the object surface. These air pockets are easily produced during the fabrication process of the object, without any additional material or postprocessing. Meanwhile, the air pockets affect only the scattering light transport under the surface, and thus are hard to notice to our naked eyes. But, by using a computational imaging method, the tags become detectable. We present a tool that automates the design of air pockets for the user to encode information. AirCode system also allows the user to retrieve the information from captured images via a robust decoding algorithm. 
We demonstrate our tagging technique with applications for metadata embedding, robotic grasping, as well as conveying object affordances.", + "link": "https://www.semanticscholar.org/paper/73b66c2f247b9fc5ee39091dd9ddf6f6c00b9181", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2583856578", + "venue": "1133523790", + "year": "2017", + "title": "provenance for natural language queries", + "label": [ + "48044578", + "40140605", + "199360897", + "170858558", + "195324797", + "23123220" + ], + "author": [ + "2076066329", + "2563911088", + "2165124594" + ], + "reference": [ + "22495568", + "1508977358", + "1543225912", + "1552694902", + "1561337879", + "1572578702", + "1632114991", + "1962939478", + "1988545508", + "2001108980", + "2001930745", + "2003896225", + "2019379313", + "2024834471", + "2047705935", + "2056102316", + "2058651674", + "2064095902", + "2072269087", + "2096899635", + "2097606805", + "2100531844", + "2101235780", + "2107571548", + "2112525062", + "2113123266", + "2117495487", + "2119325664", + "2121350579", + "2125806118", + "2137653635", + "2150616017", + "2154268919", + "2159727921", + "2165766811", + "2167541073", + "2243052854", + "2250304805", + "2257436084", + "2263923344", + "2269738476", + "2293299776", + "2296769737", + "2426007105", + "2547919816", + "2613196298" + ], + "abstract": "multiple lines of research have developed natural language nl interfaces for formulating database queries we build upon this work but focus on presenting a highly detailed form of the answers in nl the answers that we present are importantly based on the provenance of tuples in the query result detailing not only the results but also their explanations we develop a novel method for transforming provenance information to nl by leveraging the original nl query structure furthermore since provenance information is typically large and complex we present two solutions for its effective presentation as nl text one that is based on provenance 
factorization with novel desiderata relevant to the nl case and one that is based on summarization we have implemented our solution in an end to end system supporting questions answers and provenance all expressed in nl our experiments including a user study indicate the quality of our solution and its scalability", + "title_raw": "Provenance for natural language queries", + "abstract_raw": "Multiple lines of research have developed Natural Language (NL) interfaces for formulating database queries. We build upon this work, but focus on presenting a highly detailed form of the answers in NL. The answers that we present are importantly based on the provenance of tuples in the query result, detailing not only the results but also their explanations. We develop a novel method for transforming provenance information to NL, by leveraging the original NL query structure. Furthermore, since provenance information is typically large and complex, we present two solutions for its effective presentation as NL text: one that is based on provenance factorization, with novel desiderata relevant to the NL case, and one that is based on summarization. We have implemented our solution in an end-to-end system supporting questions, answers and provenance, all expressed in NL. 
Our experiments, including a user study, indicate the quality of our solution and its scalability.", + "link": "https://www.semanticscholar.org/paper/b299daab96549f0c5cbee0cd5a2ea9016691e8f4", + "scraped_abstract": null, + "citation_best": 31 + }, + { + "paper": "2566652907", + "venue": "1184914352", + "year": "2016", + "title": "bidirectional search that is guaranteed to meet in the middle", + "label": [ + "831591", + "46135064", + "11413529" + ], + "author": [ + "2000740313", + "1931601972", + "2129623354", + "302998071" + ], + "reference": [ + "46204108", + "175248309", + "205563547", + "790573473", + "1539048696", + "1597203970", + "1816489643", + "1881819129", + "1965469937", + "1970248641", + "1971267722", + "2023624803", + "2083355299", + "2097038466", + "2106061706", + "2139801213", + "2152475379", + "2162841080", + "2165040934", + "2277099502", + "2548579856", + "2555956881" + ], + "abstract": "we present mm the first bidirectional heuristic search algorithm whose forward and backward searches are guaranteed to meet in the middle i e never expand a node beyond the solution midpoint we also present a novel framework for comparing mm a and brute force search and identify conditions favoring each algorithm finally we present experimental results that support our theoretical analysis", + "title_raw": "Bidirectional search that is guaranteed to meet in the middle", + "abstract_raw": "We present MM, the first bidirectional heuristic search algorithm whose forward and backward searches are guaranteed to \"meet in the middle\", i.e. never expand a node beyond the solution midpoint. We also present a novel framework for comparing MM, A*, and brute-force search, and identify conditions favoring each algorithm. 
Finally, we present experimental results that support our theoretical analysis.", + "link": "https://www.semanticscholar.org/paper/d4c2696a254c88a2382b399d7c8c146f49338bf1", + "scraped_abstract": null, + "citation_best": 46 + }, + { + "paper": "2515120005", + "venue": "1188739475", + "year": "2016", + "title": "finding non arbitrary form meaning systematicity using string metric learning for kernel regression", + "label": [ + "200695384", + "22820288", + "160446489", + "178980831" + ], + "author": [ + "2510617825", + "2172288462", + "2115683131" + ], + "reference": [ + "3951449", + "103787822", + "104216173", + "163862657", + "165330127", + "168564468", + "854567913", + "1495858509", + "1533179050", + "1647671624", + "1971713783", + "1973309569", + "1974312763", + "1976388103", + "2000359198", + "2072026461", + "2093628909", + "2098586765", + "2108962772", + "2110065044", + "2112101928", + "2112348586", + "2115909281", + "2125001590", + "2132089731", + "2141845152", + "2153579005", + "2159306398", + "2185869177", + "2325827597", + "2338722844", + "2407312617", + "2583978552", + "2974832207", + "3154772965" + ], + "abstract": "", + "title_raw": "Finding Non-Arbitrary Form-Meaning Systematicity Using String-Metric Learning for Kernel Regression", + "abstract_raw": "", + "link": "https://www.semanticscholar.org/paper/14a0a960c05ad064f0cd40ff81ee059772aaff8b", + "scraped_abstract": null, + "citation_best": 27 + }, + { + "paper": "2407293509", + "venue": "1163450153", + "year": "2016", + "title": "the effect of visual appearance on the performance of continuous sliders and visual analogue scales", + "label": [ + "160497039", + "44154836", + "31972630" + ], + "author": [ + "2090457899", + "2145554258", + "2115951828", + "1899877228" + ], + "reference": [ + "1507985183", + "1964284800", + "1975030698", + "1975089669", + "1984318610", + "1984769198", + "1986309799", + "1998788003", + "2005534705", + "2014542527", + "2017521531", + "2023727185", + "2049245713", + 
"2051196560", + "2067880930", + "2089722985", + "2099482479", + "2102631143", + "2107153119", + "2114269021", + "2114772172", + "2117470435", + "2125729884", + "2126733201", + "2127875809", + "2128550182", + "2137071805", + "2143583890", + "2151401338", + "2162437324", + "2322584079", + "2966916958", + "2979260061", + "3150545588" + ], + "abstract": "sliders and visual analogue scales vass are input mechanisms which allow users to specify a value within a predefined range at a minimum sliders and vass typically consist of a line with the extreme values labeled additional decorations such as labels and tick marks can be added to give information about the gradations along the scale and allow for more precise and repeatable selections there is a rich history of research about the effect of labelling in discrete scales i e likert scales however the effect of decorations on continuous scales has not been rigorously explored in this paper we perform a 2 000 user 250 000 trial online experiment to study the effects of slider appearance and find that decorations along the slider considerably bias the distribution of responses received using two separate experimental tasks the trade offs between bias accuracy and speed of use are explored and design recommendations for optimal slider implementations are proposed", + "title_raw": "The Effect of Visual Appearance on the Performance of Continuous Sliders and Visual Analogue Scales", + "abstract_raw": "Sliders and Visual Analogue Scales (VASs) are input mechanisms which allow users to specify a value within a predefined range. At a minimum, sliders and VASs typically consist of a line with the extreme values labeled. Additional decorations such as labels and tick marks can be added to give information about the gradations along the scale and allow for more precise and repeatable selections. 
There is a rich history of research about the effect of labelling in discrete scales (i.e., Likert scales), however the effect of decorations on continuous scales has not been rigorously explored. In this paper we perform a 2,000 user, 250,000 trial online experiment to study the effects of slider appearance, and find that decorations along the slider considerably bias the distribution of responses received. Using two separate experimental tasks, the trade-offs between bias, accuracy, and speed-of-use are explored and design recommendations for optimal slider implementations are proposed.", + "link": "https://www.semanticscholar.org/paper/a901f5afe745a4d0c348619d2035b823b7fbd2c3", + "scraped_abstract": null, + "citation_best": 92 + }, + { + "paper": "2395063354", + "venue": "1163450153", + "year": "2016", + "title": "object oriented drawing", + "label": [ + "168173289", + "73752529", + "207347870", + "2780154230", + "108265739", + "2779344036", + "121684516" + ], + "author": [ + "2295141092", + "2132359199", + "2115951828", + "2065696548" + ], + "reference": [ + "98698846", + "1226869406", + "1489098793", + "1928294160", + "1965447681", + "1985598934", + "1987398446", + "1995067288", + "1995597050", + "1997447598", + "2003602347", + "2005639687", + "2044515620", + "2052194215", + "2057243093", + "2067534690", + "2092059964", + "2105802004", + "2107116266", + "2113348448", + "2116691011", + "2120134328", + "2121867457", + "2124696617", + "2124783468", + "2133353349", + "2133929728", + "2140190783", + "2143131345", + "2150187269", + "2150874632", + "2158364723", + "2161304134", + "2162456078", + "2163096731", + "2165336981", + "2247040911", + "2610304341" + ], + "abstract": "we present object oriented drawing which replaces most wimp ui with attribute objects attribute objects embody the attributes of digital content as ui objects that can be manipulated through direct touch gestures in the paper the fundamental ui concepts are presented including attribute objects 
which may be moved cloned linked and freely associated with drawing objects other functionalities such as attribute level blending and undo are also demonstrated we developed a drawing application based on the presented concepts with simultaneous touch and pen input an expert assessment of our application shows that direct physical manipulation of attribute objects enables a user to quickly perform interactions which were previously tedious or even impossible with a coherent and consistent interaction experience throughout the entire interface", + "title_raw": "Object-Oriented Drawing", + "abstract_raw": "We present Object-Oriented Drawing, which replaces most WIMP UI with Attribute Objects. Attribute Objects embody the attributes of digital content as UI objects that can be manipulated through direct touch gestures. In the paper, the fundamental UI concepts are presented, including Attribute Objects, which may be moved, cloned, linked, and freely associated with drawing objects. Other functionalities, such as attribute-level blending and undo, are also demonstrated. We developed a drawing application based on the presented concepts with simultaneous touch and pen input. 
An expert assessment of our application shows that direct physical manipulation of Attribute Objects enables a user to quickly perform interactions which were previously tedious, or even impossible, with a coherent and consistent interaction experience throughout the entire interface.", + "link": "https://www.semanticscholar.org/paper/c9e6032ff75a12db98f360f71395b758cd762e10", + "scraped_abstract": null, + "citation_best": 93 + }, + { + "paper": "2400260714", + "venue": "1163450153", + "year": "2016", + "title": "enabling designers to foresee which colors users cannot see", + "label": [ + "61674017", + "49774154" + ], + "author": [ + "2136495853", + "2093858891", + "2351090313" + ], + "reference": [ + "1543761991", + "1562544913", + "1823329909", + "1964405752", + "1965564791", + "1972703107", + "1975338529", + "1982931319", + "1983749361", + "2000141706", + "2010750459", + "2011673487", + "2014617649", + "2019666797", + "2022289087", + "2052995136", + "2076966920", + "2080220571", + "2080761261", + "2084171232", + "2101793629", + "2102378236", + "2104060123", + "2118389685", + "2120833565", + "2122086994", + "2135708457", + "2136176806", + "2150678794", + "2151071559", + "2159495285", + "2168214746", + "2169732913" + ], + "abstract": "users frequently experience situations in which their ability to differentiate screen colors is affected by a diversity of situations such as when bright sunlight causes glare or when monitors are dimly lit however designers currently have no way of choosing colors that will be differentiable by users of various demographic backgrounds and abilities and in the wide range of situations where their designs may be viewed our goal is to provide designers with insight into the effect of real world situational lighting conditions on people s ability to differentiate colors in applications and imagery we therefore developed an online color differentiation test that includes a survey of situational lighting conditions verified our test in a 
lab study and deployed it in an online environment where we collected data from around 30 000 participants we then created colorcheck an image processing tool that shows designers the proportion of the population they include or exclude by their color choices", + "title_raw": "Enabling Designers to Foresee Which Colors Users Cannot See", + "abstract_raw": "Users frequently experience situations in which their ability to differentiate screen colors is affected by a diversity of situations, such as when bright sunlight causes glare, or when monitors are dimly lit. However, designers currently have no way of choosing colors that will be differentiable by users of various demographic backgrounds and abilities and in the wide range of situations where their designs may be viewed. Our goal is to provide designers with insight into the effect of real-world situational lighting conditions on people's ability to differentiate colors in applications and imagery. We therefore developed an online color differentiation test that includes a survey of situational lighting conditions, verified our test in a lab study, and deployed it in an online environment where we collected data from around 30,000 participants. 
We then created ColorCheck, an image-processing tool that shows designers the proportion of the population they include (or exclude) by their color choices.", + "link": "https://www.semanticscholar.org/paper/1110852f906fe08d42814a051e4e8c15f0b930b5", + "scraped_abstract": null, + "citation_best": 38 + }, + { + "paper": "2347171008", + "venue": "1163450153", + "year": "2016", + "title": "i don t want to wear a screen probing perceptions of and possibilities for dynamic displays on clothing", + "label": [ + "2780522230", + "107457646", + "44154836" + ], + "author": [ + "2229851890", + "2175532007", + "2396401096", + "2563394932", + "1982348546", + "2403763117", + "2403199929", + "2728936", + "2811401379", + "288233316" + ], + "reference": [ + "99014461", + "135070850", + "1605824561", + "1970650168", + "1978281689", + "1980515835", + "1990499091", + "2004531783", + "2011675153", + "2013688694", + "2017395654", + "2022360138", + "2031844121", + "2044950274", + "2050267318", + "2051942724", + "2056809044", + "2060850044", + "2063513208", + "2073688524", + "2073922057", + "2118772539", + "2128156516", + "2134376964", + "2142448246", + "2143066249", + "2148345182", + "2149315699", + "2158519170", + "2161095991", + "2165797176", + "2168423131", + "2228994181", + "2266294403", + "2294102697", + "2405564622" + ], + "abstract": "this paper explores the role dynamic textile displays play in relation to personal style what does it mean to wear computationally responsive clothing and why would one be motivated to do so we developed a novel textile display technology called ebb and created several woven and crochet fabric swatches that explored clothing specific design possibilities we engaged fashion designers and non designers in imagining how ebb would integrate into their design practice or personal style of dressing participants evaluated the appeal and utility of clothing based displays according to a very different set of criteria than traditional screen based 
computational displays specifically the slowness low resolution and volatility of ebb tended to be seen as assets as opposed to technical limitations in the context of personal style additionally participants envisioned various ways that ambiguous ambient and abstract displays of information could prompt new experiences in their everyday lives our paper details the complex relationships between display and personal style and offers a new design metaphor and extension of gaver et al s original descriptions of ambiguity in order to guide the design of clothing based displays for everyday life", + "title_raw": "\"I don't Want to Wear a Screen\": Probing Perceptions of and Possibilities for Dynamic Displays on Clothing", + "abstract_raw": "This paper explores the role dynamic textile displays play in relation to personal style: What does it mean to wear computationally responsive clothing and why would one be motivated to do so? We developed a novel textile display technology, called Ebb, and created several woven and crochet fabric swatches that explored clothing-specific design possibilities. We engaged fashion designers and non-designers in imagining how Ebb would integrate into their design practice or personal style of dressing. Participants evaluated the appeal and utility of clothing-based displays according to a very different set of criteria than traditional screen-based computational displays. Specifically, the slowness, low-resolution, and volatility of Ebb tended to be seen as assets as opposed to technical limitations in the context of personal style. Additionally, participants envisioned various ways that ambiguous, ambient, and abstract displays of information could prompt new experiences in their everyday lives. 
Our paper details the complex relationships between display and personal style and offers a new design metaphor and extension of Gaver et al.'s original descriptions of ambiguity in order to guide the design of clothing-based displays for everyday life.", + "link": "https://scholar.google.com/scholar?as_q=&num=10&btnG=Search+Scholar&as_epq='I+don't+Want+to+Wear+a+Screen':+Probing+Perceptions+of+and+Possibilities+for+Dynamic+Displays+on+Clothing&as_oq=&as_eq=&as_occt=any&as_sauthors=Devendorf", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2346915687", + "venue": "1163450153", + "year": "2016", + "title": "rapid a framework for fabricating low latency interactive objects with rfid tags", + "label": [ + "167955471", + "17777890", + "48103436", + "555944384", + "168834603", + "79403827" + ], + "author": [ + "2065265496", + "2040045115", + "2171298838", + "1987134293", + "2295545582" + ], + "reference": [ + "170770739", + "172137682", + "1963735412", + "1971518276", + "1977011589", + "1981096457", + "1984399688", + "1991691398", + "1994547327", + "2000400208", + "2005437290", + "2014988059", + "2017719916", + "2022895454", + "2030390645", + "2031535007", + "2040440427", + "2060064953", + "2062658884", + "2070924230", + "2103339808", + "2108292920", + "2120093000", + "2122991029", + "2143355978", + "2150170180", + "2151567401" + ], + "abstract": "rfid tags can be used to add inexpensive wireless batteryless sensing to objects however quickly and accurately estimating the state of an rfid tag is difficult in this work we show how to achieve low latency manipulation and movement sensing with off the shelf rfid tags and readers our approach couples a probabilistic filtering layer with a monte carlo sampling based interaction layer preserving uncertainty in tag reads until they can be resolved in the context of interactions this allows designers code to reason about inputs at a high level we demonstrate the effectiveness of our approach with a number 
of interactive objects along with a library of components that can be combined to make new designs", + "title_raw": "RapID: A Framework for Fabricating Low-Latency Interactive Objects with RFID Tags", + "abstract_raw": "RFID tags can be used to add inexpensive, wireless, batteryless sensing to objects. However, quickly and accurately estimating the state of an RFID tag is difficult. In this work, we show how to achieve low-latency manipulation and movement sensing with off-the-shelf RFID tags and readers. Our approach couples a probabilistic filtering layer with a monte-carlo-sampling-based interaction layer, preserving uncertainty in tag reads until they can be resolved in the context of interactions. This allows designers' code to reason about inputs at a high level. We demonstrate the effectiveness of our approach with a number of interactive objects, along with a library of components that can be combined to make new designs.", + "link": "https://www.semanticscholar.org/paper/b8c2bd4df9ff1491c81996f3356655364461e851", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2339558352", + "venue": "1163450153", + "year": "2016", + "title": "haptic wave a cross modal interface for visually impaired audio producers", + "label": [ + "173853756", + "87687168", + "107457646", + "49774154", + "45012715", + "161772715", + "108265739", + "152086174", + "135641252" + ], + "author": [ + "2096407648", + "2558900190" + ], + "reference": [ + "172175130", + "236105861", + "609656345", + "1205803866", + "1492194740", + "1698657956", + "1963942509", + "1979318584", + "1993032559", + "1993454247", + "1993466468", + "2005444397", + "2012571009", + "2013489329", + "2016280197", + "2063277669", + "2070353028", + "2081892178", + "2096507847", + "2110576487", + "2115647291", + "2119307272", + "2131294823", + "2136691781", + "2146684440", + "2146943316", + "2148443935", + "2148730427", + "2150634379", + "2152310241", + "2161507840", + "2172138639", + "2200114769", + 
"2200350770", + "3022307797", + "3152040608" + ], + "abstract": "we present the haptic wave a device that allows cross modal mapping of digital audio to the haptic domain intended for use by audio producers engineers with visual impairments we describe a series of participatory design activities adapted to non sighted users where the act of prototyping facilitates dialog a series of workshops scoping user needs and testing a technology mock up and lo fidelity prototype fed into the design of a final high spec prototype the haptic wave was tested in the laboratory then deployed in real world settings in recording studios and audio production facilities the cross modal mapping is kinesthetic and allows the direct manipulation of sound without the translation of an existing visual interface the research gleans insight into working with users with visual impairments and transforms perspective to think of them as experts in non visual interfaces for all users", + "title_raw": "Haptic Wave: A Cross-Modal Interface for Visually Impaired Audio Producers", + "abstract_raw": "We present the Haptic Wave, a device that allows cross-modal mapping of digital audio to the haptic domain, intended for use by audio producers/engineers with visual impairments. We describe a series of participatory design activities adapted to non-sighted users where the act of prototyping facilitates dialog. A series of workshops scoping user needs, and testing a technology mock up and lo-fidelity prototype fed into the design of a final high-spec prototype. The Haptic Wave was tested in the laboratory, then deployed in real world settings in recording studios and audio production facilities. The cross-modal mapping is kinesthetic and allows the direct manipulation of sound without the translation of an existing visual interface. 
The research gleans insight into working with users with visual impairments, and transforms perspective to think of them as experts in non-visual interfaces for all users.", + "link": "https://www.semanticscholar.org/paper/184f57d55a3316788937cec730ea1aa8c4a9163d", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2395423607", + "venue": "1163450153", + "year": "2016", + "title": "flexcase enhancing mobile interaction with a flexible sensing and display cover", + "label": [ + "115121344", + "2780152918", + "149635348", + "107457646", + "207347870", + "136197465", + "2780220136" + ], + "author": [ + "1793298736", + "2151132821", + "2223409512", + "2293620524", + "1939668391", + "2076713417", + "2948658362", + "2098553916" + ], + "reference": [ + "1506806321", + "1966912972", + "1969319597", + "1971283931", + "1971812842", + "1975813261", + "1985273935", + "1987220739", + "1988982607", + "1999224599", + "1999558161", + "2007964199", + "2010399991", + "2011422734", + "2021291149", + "2034918385", + "2038304148", + "2040187089", + "2040565638", + "2044249114", + "2050427684", + "2070583222", + "2075548592", + "2077436482", + "2086496635", + "2094097063", + "2094697850", + "2100355365", + "2103072279", + "2106941316", + "2109075207", + "2109235804", + "2115899334", + "2116659773", + "2124262094", + "2125872152", + "2128629242", + "2131588614", + "2131693996", + "2133060220", + "2133258886", + "2137664882", + "2139459444", + "2140975872", + "2143478838", + "2144902422", + "2147149886", + "2151217988", + "2155107494", + "2171137825" + ], + "abstract": "flexcase is a novel flip cover for smartphones which brings flexible input and output capabilities to existing mobile phones it combines an e paper display with a pressure and bend sensitive input sensor to augment the capabilities of a phone due to the form factor flexcase can be easily transformed into several different configurations each with different interaction possibilities users can use flexcase 
to perform a variety of touch pressure grip and bend gestures in a natural manner much like interacting with a sheet of paper the secondary e paper display can act as a mechanism for providing user feedback and persisting content from the main display in this paper we explore the rich design space of flexcase and present a number of different interaction techniques beyond we highlight how touch and flex sensing can be combined to support a novel type of gestures which we call grip bend gestures we also describe the underlying technology and gesture sensing algorithms numerous applications apply the interaction techniques in convincing real world examples including enhanced e paper reading and interaction a new copy and paste metaphor high degree of freedom 3d and 2d manipulation and the ability to transfer content and support input between displays in a natural and flexible manner", + "title_raw": "FlexCase: Enhancing Mobile Interaction with a Flexible Sensing and Display Cover", + "abstract_raw": "FlexCase is a novel flip cover for smartphones, which brings flexible input and output capabilities to existing mobile phones. It combines an e-paper display with a pressure- and bend-sensitive input sensor to augment the capabilities of a phone. Due to the form factor, FlexCase can be easily transformed into several different configurations, each with different interaction possibilities. Users can use FlexCase to perform a variety of touch, pressure, grip and bend gestures in a natural manner, much like interacting with a sheet of paper. The secondary e-paper display can act as a mechanism for providing user feedback and persisting content from the main display. In this paper, we explore the rich design space of FlexCase and present a number of different interaction techniques. Beyond, we highlight how touch and flex sensing can be combined to support a novel type of gestures, which we call Grip & Bend gestures. 
We also describe the underlying technology and gesture sensing algorithms. Numerous applications apply the interaction techniques in convincing real-world examples, including enhanced e-paper reading and interaction, a new copy and paste metaphor, high degree of freedom 3D and 2D manipulation, and the ability to transfer content and support input between displays in a natural and flexible manner.", + "link": "https://www.semanticscholar.org/paper/d8091cc14d0aed2d7b269954cb4266ce09a62d45", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2408438722", + "venue": "1163450153", + "year": "2016", + "title": "understanding and mitigating the effects of device and cloud service design decisions on the environmental footprint of digital infrastructure", + "label": [ + "107457646", + "13854087", + "75027835", + "128954960", + "79974875" + ], + "author": [ + "1985163624", + "77910542", + "1273389305" + ], + "reference": [ + "18471624", + "594957933", + "1026639879", + "1486944029", + "1534918481", + "1586469975", + "1661747935", + "1936181447", + "1964323399", + "1974589145", + "1979364125", + "1987826271", + "1997332111", + "2004238918", + "2004883467", + "2004984028", + "2011899126", + "2018817201", + "2019789166", + "2025506858", + "2031430683", + "2044772589", + "2048400963", + "2052089314", + "2054010706", + "2067858273", + "2077281136", + "2087259705", + "2088319187", + "2096885696", + "2102006180", + "2102299168", + "2104113460", + "2105188853", + "2109606100", + "2115232079", + "2117628284", + "2120883901", + "2126520272", + "2129818049", + "2130287612", + "2133443878", + "2134313417", + "2150512876", + "2166716750", + "2168233682", + "2170615731", + "2373991762", + "2762668866" + ], + "abstract": "interactive devices and the services they support are reliant on the cloud and the digital infrastructure supporting it the environmental impacts of this infrastructure are substantial and for particular services the infrastructure can account for up 
to 85 of the total impact in this paper we apply the principles of sustainable interaction design to cloud services use of the digital infrastructure we perform a critical analysis of current design practice with regard to interactive services which we identify as the cornucopian paradigm we show how user centered design principles induce environmental impacts in different ways and combine with technical and business drivers to drive growth of the infrastructure through a reinforcing feedback cycle we then create a design rubric substantially extending that of blevis 6 to cover impacts of the digital infrastructure in doing so we engage in design criticism identifying examples both actual and potential of good and bad practice we then extend this rubric beyond an eco efficiency paradigm to consider deeper and more radical perspectives on sustainability and finish with future directions for exploration", + "title_raw": "Understanding and Mitigating the Effects of Device and Cloud Service Design Decisions on the Environmental Footprint of Digital Infrastructure", + "abstract_raw": "Interactive devices and the services they support are reliant on the cloud and the digital infrastructure supporting it. The environmental impacts of this infrastructure are substantial and for particular services the infrastructure can account for up to 85% of the total impact. In this paper, we apply the principles of Sustainable Interaction Design to cloud services use of the digital infrastructure. We perform a critical analysis of current design practice with regard to interactive services, which we identify as the cornucopian paradigm. We show how user-centered design principles induce environmental impacts in different ways, and combine with technical and business drivers to drive growth of the infrastructure through a reinforcing feedback cycle. We then create a design rubric, substantially extending that of Blevis [6], to cover impacts of the digital infrastructure. 
In doing so, we engage in design criticism, identifying examples (both actual and potential) of good and bad practice. We then extend this rubric beyond an eco-efficiency paradigm to consider deeper and more radical perspectives on sustainability, and finish with future directions for exploration.", + "link": "https://www.semanticscholar.org/paper/bded1890007ac871e470e95135a3bc745e5b19b6", + "scraped_abstract": null, + "citation_best": 101 + }, + { + "paper": "2406854312", + "venue": "1163450153", + "year": "2016", + "title": "enhancing cross device interaction scripting with interactive illustrations", + "label": [ + "107457646", + "36464697", + "61423126", + "2777080924", + "98045186", + "2780220136" + ], + "author": [ + "2586047161", + "2413905935", + "2192055696" + ], + "reference": [ + "1563325992", + "1577422779", + "1967932767", + "1987185763", + "2021878536", + "2049817002", + "2056103304", + "2058649858", + "2062658884", + "2063169555", + "2066114190", + "2071873073", + "2083977441", + "2084726050", + "2094793656", + "2098961002", + "2102072895", + "2102390536", + "2105297890", + "2111642396", + "2114879705", + "2118029778", + "2124332348", + "2134816385", + "2135415614", + "2137033425", + "2147149886", + "2154583606", + "2157532207", + "2160694571", + "2294234478", + "2913256667" + ], + "abstract": "cross device interactions involve input and output on multiple computing devices implementing and reasoning about interactions that cover multiple devices with a diversity of form factors and capabilities can be complex to assist developers in programming cross device interactions we created demoscript a technique that automatically analyzes a cross device interaction program while it is being written demoscript visually illustrates the step by step execution of a selected portion or the entire program with a novel automatically generated cross device storyboard visualization in addition to helping developers understand the behavior of the program demoscript 
also allows developers to revise their program by interactively manipulating the cross device storyboard we evaluated demoscript with 8 professional programmers and found that demoscript significantly improved development efficiency by helping developers interpret and manage cross device interaction it also encourages testing to think through the script in a development process", + "title_raw": "Enhancing Cross-Device Interaction Scripting with Interactive Illustrations", + "abstract_raw": "Cross-device interactions involve input and output on multiple computing devices. Implementing and reasoning about interactions that cover multiple devices with a diversity of form factors and capabilities can be complex. To assist developers in programming cross-device interactions, we created DemoScript, a technique that automatically analyzes a cross-device interaction program while it is being written. DemoScript visually illustrates the step-by-step execution of a selected portion or the entire program with a novel, automatically generated cross-device storyboard visualization. In addition to helping developers understand the behavior of the program, DemoScript also allows developers to revise their program by interactively manipulating the cross-device storyboard. 
We evaluated DemoScript with 8 professional programmers and found that DemoScript significantly improved development efficiency by helping developers interpret and manage cross-device interaction; it also encourages testing to think through the script in a development process.", + "link": "https://www.semanticscholar.org/paper/9266f823aeb982743d20834a5aeebece4d254f9a", + "scraped_abstract": null, + "citation_best": 27 + }, + { + "paper": "2402939184", + "venue": "1163450153", + "year": "2016", + "title": "learn piano with bach an adaptive learning interface that adjusts task difficulty based on brain state", + "label": [ + "28490314", + "2775924081", + "108265739", + "125014702" + ], + "author": [ + "2224641145", + "2404528896", + "2103580512", + "2138454622", + "1907778215", + "2023608286", + "673017202" + ], + "reference": [ + "30153120", + "45100008", + "1499748908", + "1536462692", + "1626018203", + "1634720708", + "1867691547", + "1963910206", + "1967395107", + "1973638771", + "1980877068", + "1988051129", + "1993303421", + "1998977696", + "2001468118", + "2002723428", + "2009989063", + "2013785827", + "2020558214", + "2030517061", + "2035901571", + "2043736971", + "2045724842", + "2050634056", + "2053778972", + "2055556052", + "2058132619", + "2069918525", + "2081341882", + "2081811286", + "2084002479", + "2092909411", + "2096937559", + "2109165772", + "2118389537", + "2118789511", + "2127742548", + "2130736456", + "2132253180", + "2133215332", + "2135917740", + "2137243825", + "2140329470", + "2140802989", + "2141088043", + "2144183573", + "2145114078", + "2154171154", + "2157080893", + "2258241181", + "2260845711", + "2339343773", + "2551367023", + "3037177567", + "3159934092" + ], + "abstract": "we present brain automated chorales bach an adaptive brain computer system that dynamically increases the levels of difficulty in a musical learning task based on pianists cognitive workload measured by functional near infrared spectroscopy as users cognitive 
workload fell below a certain threshold suggesting that they had mastered the material and could handle more cognitive information bach automatically increased the difficulty of the learning task we found that learners played with significantly increased accuracy and speed in the brain based adaptive task compared to our control condition participant feedback indicated that they felt they learned better with bach and they liked the timings of the level changes the underlying premise of bach can be applied to learning situations where a task can be broken down into increasing levels of difficulty", + "title_raw": "Learn Piano with BACh: An Adaptive Learning Interface that Adjusts Task Difficulty Based on Brain State", + "abstract_raw": "We present Brain Automated Chorales (BACh), an adaptive brain-computer system that dynamically increases the levels of difficulty in a musical learning task based on pianists' cognitive workload measured by functional near-infrared spectroscopy. As users' cognitive workload fell below a certain threshold, suggesting that they had mastered the material and could handle more cognitive information, BACh automatically increased the difficulty of the learning task. We found that learners played with significantly increased accuracy and speed in the brain-based adaptive task compared to our control condition. Participant feedback indicated that they felt they learned better with BACh and they liked the timings of the level changes. 
The underlying premise of BACh can be applied to learning situations where a task can be broken down into increasing levels of difficulty.", + "link": "https://www.semanticscholar.org/paper/263f4ce7b6f8ad684ec8f77accedbfb77202f016", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2400074865", + "venue": "1163450153", + "year": "2016", + "title": "smart touch improving touch accuracy for people with motor impairments with template matching", + "label": [ + "158096908", + "31972630" + ], + "author": [ + "2049925020", + "1474575384", + "2107871967", + "318516288" + ], + "reference": [ + "85196187", + "1966228102", + "1974199094", + "1978118359", + "1995969295", + "2001688498", + "2004970449", + "2008213332", + "2010361629", + "2017881547", + "2020402781", + "2030026498", + "2035675819", + "2036530198", + "2040440427", + "2052686609", + "2062657340", + "2072075309", + "2087883994", + "2094616212", + "2096986574", + "2097248932", + "2098924240", + "2108518773", + "2108563939", + "2121044470", + "2122544819", + "2125872152", + "2128629242", + "2130229636", + "2130917910", + "2132527807", + "2133227699", + "2143355978", + "2144872795", + "2149344674", + "2151567401", + "2152414382", + "2158130108", + "2158533558", + "2158892938", + "2168842329", + "2295007882", + "2490179932" + ], + "abstract": "we present two contributions toward improving the accessibility of touch screens for people with motor impairments first we provide an exploration of the touch behaviors of 10 people with motor impairments e g we describe how touching with the back or sides of the hand with multiple fingers or with knuckles creates varied multi point touches second we introduce smart touch a novel template matching technique for touch input that maps any number of arbitrary contact areas to a user s intended x y target location the result is that users with motor impairments can touch however their abilities allow and smart touch will resolve their intended touch point smart 
touch therefore allows users to touch targets in whichever ways are most comfortable and natural for them in an experimental evaluation we found that smart touch predicted x y coordinates of the users intended target locations over three times closer to the intended target than the native land on and lift off techniques reported by the built in touch sensors found in the microsoft pixelsense interactive tabletop this result is an important step toward improving touch accuracy for people with motor impairments and others for whom touch screen operation was previously impossible", + "title_raw": "Smart Touch: Improving Touch Accuracy for People with Motor Impairments with Template Matching", + "abstract_raw": "We present two contributions toward improving the accessibility of touch screens for people with motor impairments. First, we provide an exploration of the touch behaviors of 10 people with motor impairments, e.g., we describe how touching with the back or sides of the hand, with multiple fingers, or with knuckles creates varied multi-point touches. Second, we introduce Smart Touch, a novel template-matching technique for touch input that maps any number of arbitrary contact-areas to a user's intended (x,y) target location. The result is that users with motor impairments can touch however their abilities allow, and Smart Touch will resolve their intended touch point. Smart Touch therefore allows users to touch targets in whichever ways are most comfortable and natural for them. In an experimental evaluation, we found that Smart Touch predicted (x,y) coordinates of the users' intended target locations over three times closer to the intended target than the native Land-on and Lift-off techniques reported by the built-in touch sensors found in the Microsoft PixelSense interactive tabletop. 
This result is an important step toward improving touch accuracy for people with motor impairments and others for whom touch screen operation was previously impossible.", + "link": "https://www.semanticscholar.org/paper/4e36e8637a6e0892eb2d6cda79537a5e0b189968", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2396533648", + "venue": "1163450153", + "year": "2016", + "title": "building a personalized auto calibrating eye tracker from user interactions", + "label": [ + "2779916870", + "56461940", + "121163568", + "31972630" + ], + "author": [ + "2116039277", + "2229358447", + "2098651954", + "2167014661", + "2233290192" + ], + "reference": [ + "1488198768", + "1510835000", + "1519033008", + "1978401434", + "2004128235", + "2026551391", + "2034382626", + "2048498434", + "2052575499", + "2053946567", + "2073171099", + "2087862817", + "2090965804", + "2103150354", + "2105899846", + "2106817091", + "2112547031", + "2112731937", + "2120889650", + "2137697010", + "2139196511", + "2150137742", + "2152826865", + "2157285372", + "2160951686", + "2167020116", + "2483249389", + "2544981553", + "2911964244" + ], + "abstract": "we present pace a personalized automatically calibrating eye tracking system that identifies and collects data unobtrusively from user interaction events on standard computing systems without the need for specialized equipment pace relies on eye facial analysis of webcam data based on a set of robust geometric gaze features and a two layer data validation mechanism to identify good training samples from daily interaction data the design of the system is founded on an in depth investigation of the relationship between gaze patterns and interaction cues and takes into consideration user preferences and habits the result is an adaptive data driven approach that continuously recalibrates adapts and improves with additional use quantitative evaluation on 31 subjects across different interaction behaviors shows that training instances 
identified by the pace data collection have higher gaze point interaction cue consistency than those identified by conventional approaches an in situ study using real life tasks on a diverse set of interactive applications demonstrates that the pace gaze estimation achieves an average error of 2 56o which is comparable to state of the art but without the need for explicit training or calibration this demonstrates the effectiveness of both the gaze estimation method and the corresponding data collection mechanism", + "title_raw": "Building a Personalized, Auto-Calibrating Eye Tracker from User Interactions", + "abstract_raw": "We present PACE, a Personalized, Automatically Calibrating Eye-tracking system that identifies and collects data unobtrusively from user interaction events on standard computing systems without the need for specialized equipment. PACE relies on eye/facial analysis of webcam data based on a set of robust geometric gaze features and a two-layer data validation mechanism to identify good training samples from daily interaction data. The design of the system is founded on an in-depth investigation of the relationship between gaze patterns and interaction cues, and takes into consideration user preferences and habits. The result is an adaptive, data-driven approach that continuously recalibrates, adapts and improves with additional use. Quantitative evaluation on 31 subjects across different interaction behaviors shows that training instances identified by the PACE data collection have higher gaze point-interaction cue consistency than those identified by conventional approaches. An in-situ study using real-life tasks on a diverse set of interactive applications demonstrates that the PACE gaze estimation achieves an average error of 2.56o, which is comparable to state-of-the-art, but without the need for explicit training or calibration. 
This demonstrates the effectiveness of both the gaze estimation method and the corresponding data collection mechanism.", + "link": "https://www.semanticscholar.org/paper/22b6fad8a72b84ea2b58eafeec084c6c0890c89e", + "scraped_abstract": null, + "citation_best": 68 + }, + { + "paper": "2394670422", + "venue": "1163450153", + "year": "2016", + "title": "developing and validating the user burden scale a tool for assessing user burden in computing systems", + "label": [ + "170130773", + "67712803", + "201025465", + "56739046", + "63880386" + ], + "author": [ + "2223487929", + "2397169004", + "2081763418", + "2322105421" + ], + "reference": [ + "93247917", + "111315170", + "114133067", + "167474841", + "1558663422", + "1791587663", + "1941267885", + "1941460181", + "1951008065", + "1969783622", + "1981610984", + "1988410705", + "1994486205", + "1996422336", + "1999880904", + "2011675647", + "2022473915", + "2031398445", + "2037057633", + "2037920420", + "2043093227", + "2046324099", + "2053356430", + "2053722606", + "2064893403", + "2065502371", + "2083021308", + "2088760811", + "2090545926", + "2096878712", + "2103223087", + "2104555525", + "2120457539", + "2124945326", + "2133268605", + "2140899128", + "2141708418", + "2145825627", + "2154962067", + "2157289187", + "2159269282", + "2162752781", + "2163642647", + "2163777572", + "2737676496", + "2905010601" + ], + "abstract": "computing systems that place a high level of burden on their users can have a negative affect on initial adoption retention and overall user experience through an iterative process we have developed a model for user burden that consists of six constructs 1 difficulty of use 2 physical 3 time and social 4 mental and emotional 5 privacy and 6 financial if researchers and practitioners can have an understanding of the overall level of burden systems may be having on the user they can have a better sense of whether and where to target future design efforts that can reduce those burdens to help assist 
with understanding and measuring user burden we have also developed and validated a measure of user burden in computing systems called the user burden scale ubs which is a 20 item scale with 6 individual sub scales representing each construct this paper presents the process we followed to develop and validate this scale for use in evaluating user burden in computing systems results indicate that the user burden scale has good overall inter item reliability convergent validity with similar scales and concurrent validity when compared to systems abandoned vs those still in use", + "title_raw": "Developing and Validating the User Burden Scale: A Tool for Assessing User Burden in Computing Systems", + "abstract_raw": "Computing systems that place a high level of burden on their users can have a negative affect on initial adoption, retention, and overall user experience. Through an iterative process, we have developed a model for user burden that consists of six constructs: 1) difficulty of use, 2) physical, 3) time and social, 4) mental and emotional, 5) privacy, and 6) financial. If researchers and practitioners can have an understanding of the overall level of burden systems may be having on the user, they can have a better sense of whether and where to target future design efforts that can reduce those burdens. To help assist with understanding and measuring user burden, we have also developed and validated a measure of user burden in computing systems called the User Burden Scale (UBS), which is a 20-item scale with 6 individual sub-scales representing each construct. This paper presents the process we followed to develop and validate this scale for use in evaluating user burden in computing systems. Results indicate that the User Burden Scale has good overall inter-item reliability, convergent validity with similar scales, and concurrent validity when compared to systems abandoned vs. 
those still in use.", + "link": "https://www.semanticscholar.org/paper/01df97006cb76cb513933ac5b3307b5afadcf942", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2400341429", + "venue": "1163450153", + "year": "2016", + "title": "foraging among an overabundance of similar variants", + "label": [ + "119857082", + "44154836", + "2780527393" + ], + "author": [ + "2407948307", + "353577947", + "2332534982", + "2150694261", + "2090168456", + "2115609526" + ], + "reference": [ + "49193678", + "1501652040", + "1591087262", + "1748815599", + "1927533402", + "1978714842", + "1982542870", + "1988506401", + "1989206341", + "1992154356", + "2007143462", + "2017986258", + "2021538299", + "2028123195", + "2028770149", + "2034079000", + "2044049174", + "2060606331", + "2061632472", + "2071873073", + "2075779758", + "2076463708", + "2088325320", + "2097653014", + "2112627888", + "2115805759", + "2116109573", + "2116696857", + "2122401044", + "2129874624", + "2135075522", + "2150797121", + "2155880554", + "2160570190", + "2161017889", + "2165152490", + "2167110174", + "2170313687", + "2745750801", + "3139818494" + ], + "abstract": "foraging among too many variants of the same artifact can be problematic when many of these variants are similar this situation which is largely overlooked in the literature is commonplace in several types of creative tasks one of which is exploratory programming in this paper we investigate how novice programmers forage through similar variants based on our results we propose a refinement to information foraging theory ift to include constructs about variation foraging behavior and propose refinements to computational models of ift to better account for foraging among variants", + "title_raw": "Foraging Among an Overabundance of Similar Variants", + "abstract_raw": "Foraging among too many variants of the same artifact can be problematic when many of these variants are similar. 
This situation, which is largely overlooked in the literature, is commonplace in several types of creative tasks, one of which is exploratory programming. In this paper, we investigate how novice programmers forage through similar variants. Based on our results, we propose a refinement to Information Foraging Theory (IFT) to include constructs about variation foraging behavior, and propose refinements to computational models of IFT to better account for foraging among variants.", + "link": "https://www.semanticscholar.org/paper/27fdb872558ea94409418ef1831e1bde92d3ab4a", + "scraped_abstract": null, + "citation_best": 43 + }, + { + "paper": "2396662988", + "venue": "1163450153", + "year": "2016", + "title": "finding email in a multi account multi device world", + "label": [ + "112488592", + "108827166", + "116259339", + "136764020", + "185902631" + ], + "author": [ + "1999854510", + "1985629368", + "2072421081", + "2170295230" + ], + "reference": [ + "378983897", + "806860066", + "1552242696", + "1973267558", + "1976394828", + "1979290264", + "1987520741", + "1992198112", + "2004184082", + "2004430826", + "2010925829", + "2026669942", + "2034476817", + "2043304263", + "2044315562", + "2044675247", + "2048598612", + "2057825876", + "2059403886", + "2061985103", + "2065132166", + "2083568981", + "2086176948", + "2089417143", + "2097127516", + "2112175905", + "2115590521", + "2123497136", + "2129390500", + "2130626845", + "2131744118", + "2132471143", + "2137891816", + "2159709475", + "2165149861", + "2165990684" + ], + "abstract": "email is far from dead in fact the volume of messages exchanged daily the number of accounts per user and the number of devices on which email is accessed have been constantly growing most previous studies on email have focused on management and retrieval behaviour within a single account and on a single device in this paper we examine how people find email in today s ecosystem through an in depth qualitative diary study with 16 participants 
we found that personal and work accounts are managed differently resulting in diverse retrieval strategies while work accounts are more structured and thus email is retrieved through folders personal accounts have fewer folders and users rely primarily on the built in search option moreover retrieval occurs primarily on laptops and pcs compared to smartphones we explore the reasons and uncover barriers and workarounds related to managing multiple accounts and devices finally we consider new design possibilities for email clients to better support how email is used today", + "title_raw": "Finding Email in a Multi-Account, Multi-Device World", + "abstract_raw": "Email is far from dead; in fact the volume of messages exchanged daily, the number of accounts per user, and the number of devices on which email is accessed have been constantly growing. Most previous studies on email have focused on management and retrieval behaviour within a single account and on a single device. In this paper, we examine how people find email in today's ecosystem through an in-depth qualitative diary study with 16 participants. We found that personal and work accounts are managed differently, resulting in diverse retrieval strategies: while work accounts are more structured and thus email is retrieved through folders, personal accounts have fewer folders and users rely primarily on the built-in search option. Moreover, retrieval occurs primarily on laptops and PCs compared to smartphones. We explore the reasons, and uncover barriers and workarounds related to managing multiple accounts and devices. 
Finally, we consider new design possibilities for email clients to better support how email is used today.", + "link": "https://www.semanticscholar.org/paper/a72e95252f4ebeea6a59d9c5a7d45e071bc0a6ef", + "scraped_abstract": null, + "citation_best": 32 + }, + { + "paper": "3103319922", + "venue": "1163450153", + "year": "2016", + "title": "empath understanding topic signals in large scale text", + "label": [ + "108583219", + "126349790", + "74216064", + "71472368", + "518677369", + "204321447", + "41608201", + "123406163" + ], + "author": [ + "2020052036", + "3186604085", + "1974803209" + ], + "reference": [ + "2402700", + "38739846", + "299205376", + "354847797", + "1505742155", + "1814626764", + "1968380849", + "2006044072", + "2007644286", + "2008803468", + "2016089260", + "2040467972", + "2055699460", + "2064230935", + "2065647385", + "2081580037", + "2082291422", + "2099813784", + "2110151287", + "2121277371", + "2125943921", + "2125990861", + "2140910804", + "2141599568", + "2142189376", + "2143976867", + "2151543699", + "2153579005", + "2155328222", + "2158794898", + "2161283199", + "2166706824", + "2167277498", + "2199803028", + "2220198137", + "2251939518", + "2263837627", + "2274505579", + "2407706885", + "2470543263", + "2949965121", + "2963790016", + "3099938878", + "3146306708" + ], + "abstract": "human language is colored by a broad range of topics but existing text analysis tools only focus on a small number of them we present empath a tool that can generate and validate new lexical categories on demand from a small set of seed terms like bleed and punch to generate the category violence empath draws connotations between words and phrases by deep learning a neural embedding across more than 1 8 billion words of modern fiction given a small set of seed words that characterize a category empath uses its neural embedding to discover new related terms then validates the category with a crowd powered filter empath also analyzes text across 200 built in pre 
validated categories we have generated from common topics in our web dataset like neglect government and social media we show that empath s data driven human validated categories are highly correlated r 0 906 with similar categories in liwc", + "title_raw": "Empath: Understanding Topic Signals in Large-Scale Text", + "abstract_raw": "Human language is colored by a broad range of topics, but existing text analysis tools only focus on a small number of them. We present Empath, a tool that can generate and validate new lexical categories on demand from a small set of seed terms (like \"bleed\" and \"punch\" to generate the category violence). Empath draws connotations between words and phrases by deep learning a neural embedding across more than 1.8 billion words of modern fiction. Given a small set of seed words that characterize a category, Empath uses its neural embedding to discover new related terms, then validates the category with a crowd-powered filter. Empath also analyzes text across 200 built-in, pre-validated categories we have generated from common topics in our web dataset, like neglect, government, and social media. 
We show that Empath's data-driven, human validated categories are highly correlated (r=0.906) with similar categories in LIWC.", + "link": "https://www.semanticscholar.org/paper/f43fa6f5a00246958e19d3ab8fa9cd5dbb511e33", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2398012670", + "venue": "1163450153", + "year": "2016", + "title": "lock n lol group based limiting assistance app to mitigate smartphone distractions in group activities", + "label": [ + "12096594", + "38652104", + "108827166" + ], + "author": [ + "2116711599", + "2223237421", + "1932678941", + "2170653590" + ], + "reference": [ + "1642711339", + "1892502720", + "1968146252", + "1974962426", + "1983393083", + "1987123461", + "1995994322", + "2007385141", + "2011279345", + "2011930619", + "2014383590", + "2016393937", + "2017829072", + "2020911435", + "2022164703", + "2022971806", + "2025182676", + "2025896128", + "2028632271", + "2033910215", + "2036625316", + "2046760696", + "2075752188", + "2078226637", + "2084863892", + "2085921614", + "2095146280", + "2095414363", + "2095535364", + "2096158028", + "2104673922", + "2108951007", + "2112205248", + "2113537851", + "2114826356", + "2115373648", + "2118609397", + "2124555616", + "2130418016", + "2131800446", + "2134595450", + "2134699024", + "2139398774", + "2142994359", + "2144533308", + "2163493157", + "2169792460", + "2170100231", + "2794275709", + "2796901959", + "2911407679", + "3015634246", + "3146466690" + ], + "abstract": "prior studies have addressed many negative aspects of mobile distractions in group activities in this paper we present lock n lol this is an application designed to help users focus on their group activities by allowing group members to limit their smartphone usage together in particular it provides synchronous social awareness of each other s limiting behavior this synchronous social awareness can arouse feelings of connectedness among group members and can mitigate social vulnerability due to 
smartphone distraction e g social exclusion that often results in poor social experiences after following an iterative prototyping process we conducted a large scale user study n 976 via real field deployment the study results revealed how the participants used lock n lol in their diverse contexts and how lock n lol helped them to mitigate smartphone distractions", + "title_raw": "Lock n' LoL: Group-based Limiting Assistance App to Mitigate Smartphone Distractions in Group Activities", + "abstract_raw": "Prior studies have addressed many negative aspects of mobile distractions in group activities. In this paper, we present Lock n' LoL. This is an application designed to help users focus on their group activities by allowing group members to limit their smartphone usage together. In particular, it provides synchronous social awareness of each other's limiting behavior. This synchronous social awareness can arouse feelings of connectedness among group members and can mitigate social vulnerability due to smartphone distraction (e.g., social exclusion) that often results in poor social experiences. After following an iterative prototyping process, we conducted a large-scale user study (n = 976) via real field deployment. 
The study results revealed how the participants used Lock n' LoL in their diverse contexts and how Lock n' LoL helped them to mitigate smartphone distractions.", + "link": "https://www.semanticscholar.org/paper/4200587a846a2b63387f1d017ce2734234d5c93f", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2194775991", + "venue": "1158167855", + "year": "2016", + "title": "deep residual learning for image recognition", + "label": [ + "150899416", + "155512373", + "119857082", + "81363708", + "50644808", + "178980831", + "2778149865", + "190502265", + "59404180", + "2776151529", + "31972630", + "108583219", + "3117203110", + "169903167", + "188441871" + ], + "author": [ + "2164292938", + "2499063207", + "2119543935", + "2200192130" + ], + "reference": [ + "194249466", + "1530872699", + "1533861849", + "1536680647", + "1554663460", + "1665214252", + "1677182931", + "1799366690", + "1811843574", + "1836465849", + "1861492603", + "1903029394", + "1904365287", + "1932624639", + "1932847118", + "1976921161", + "1984309565", + "1997542937", + "2022532533", + "2031489346", + "2064675550", + "2066941820", + "2097117768", + "2102605133", + "2107878631", + "2117539524", + "2117812871", + "2124509324", + "2139398462", + "2147238549", + "2147800946", + "2152424459", + "2155893237", + "2159979951", + "2161388792", + "2163605009", + "2206858481", + "2328425223", + "2613718673", + "2914484425", + "2950621961", + "2952009708", + "2962835968", + "2963504252", + "2963542991", + "2964103341", + "2964118293", + "3037950864", + "3038058348", + "3098722327", + "3118608800" + ], + "abstract": "deeper neural networks are more difficult to train we present a residual learning framework to ease the training of networks that are substantially deeper than those used previously we explicitly reformulate the layers as learning residual functions with reference to the layer inputs instead of learning unreferenced functions we provide comprehensive empirical evidence showing that 
these residual networks are easier to optimize and can gain accuracy from considerably increased depth on the imagenet dataset we evaluate residual nets with a depth of up to 152 layers 8 deeper than vgg nets 40 but still having lower complexity an ensemble of these residual nets achieves 3 57 error on the imagenet test set this result won the 1st place on the ilsvrc 2015 classification task we also present analysis on cifar 10 with 100 and 1000 layers the depth of representations is of central importance for many visual recognition tasks solely due to our extremely deep representations we obtain a 28 relative improvement on the coco object detection dataset deep residual nets are foundations of our submissions to ilsvrc coco 2015 competitions1 where we also won the 1st places on the tasks of imagenet detection imagenet localization coco detection and coco segmentation", + "title_raw": "Deep Residual Learning for Image Recognition", + "abstract_raw": "Deeper neural networks are more difficult to train. We present a residual learning framework to ease the training of networks that are substantially deeper than those used previously. We explicitly reformulate the layers as learning residual functions with reference to the layer inputs, instead of learning unreferenced functions. We provide comprehensive empirical evidence showing that these residual networks are easier to optimize, and can gain accuracy from considerably increased depth. On the ImageNet dataset we evaluate residual nets with a depth of up to 152 layers\u20148\u00d7 deeper than VGG nets [40] but still having lower complexity. An ensemble of these residual nets achieves 3.57% error on the ImageNet test set. This result won the 1st place on the ILSVRC 2015 classification task. We also present analysis on CIFAR-10 with 100 and 1000 layers. The depth of representations is of central importance for many visual recognition tasks. 
Solely due to our extremely deep representations, we obtain a 28% relative improvement on the COCO object detection dataset. Deep residual nets are foundations of our submissions to ILSVRC & COCO 2015 competitions1, where we also won the 1st places on the tasks of ImageNet detection, ImageNet localization, COCO detection, and COCO segmentation.", + "link": "https://www.semanticscholar.org/paper/29c808b346526fbb6027e67942b62a40a549f019", + "scraped_abstract": null, + "citation_best": 4350 + }, + { + "paper": "2548627465", + "venue": "1199533187", + "year": "2016", + "title": "api code recommendation using statistical learning from fine grained changes", + "label": [ + "167955471", + "56666940", + "23123220" + ], + "author": [ + "3188952176", + "2335241786", + "40961488", + "2099719009", + "2554459309", + "2551611317", + "2151252387", + "602257659" + ], + "reference": [ + "22384488", + "1512847993", + "1551431154", + "1574901103", + "1585991568", + "1860267373", + "1964968887", + "1970607969", + "1977971855", + "1980450075", + "1988346254", + "1994573369", + "1995935241", + "1996903695", + "1998265754", + "2008107570", + "2008512073", + "2010608861", + "2013731130", + "2018389835", + "2032558547", + "2036673071", + "2048668881", + "2062973095", + "2065489029", + "2083878868", + "2090111344", + "2102439588", + "2102546154", + "2106763308", + "2107277166", + "2114315419", + "2115130131", + "2117642462", + "2119414210", + "2123044578", + "2130344546", + "2134092629", + "2134829794", + "2140609933", + "2142403498", + "2143861926", + "2148190602", + "2153943889", + "2154021641", + "2165747537", + "2166597811", + "2166732561", + "2167630669", + "2169952536" + ], + "abstract": "learning and remembering how to use apis is difficult while code completion tools can recommend api methods browsing a long list of api method names and their documentation is tedious moreover users can easily be overwhelmed with too much information we present a novel api recommendation approach 
that taps into the predictive power of repetitive code changes to provide relevant api recommendations for developers our approach and tool apirec is based on statistical learning from fine grained code changes and from the context in which those changes were made our empirical evaluation shows that apirec correctly recommends an api call in the first position 59 of the time and it recommends the correct api call in the top five positions 77 of the time this is a significant improvement over the state of the art approaches by 30 160 for top 1 accuracy and 10 30 for top 5 accuracy respectively our result shows that apirec performs well even with a one time minimal training dataset of 50 publicly available projects", + "title_raw": "API code recommendation using statistical learning from fine-grained changes", + "abstract_raw": "Learning and remembering how to use APIs is difficult. While code-completion tools can recommend API methods, browsing a long list of API method names and their documentation is tedious. Moreover, users can easily be overwhelmed with too much information. We present a novel API recommendation approach that taps into the predictive power of repetitive code changes to provide relevant API recommendations for developers. Our approach and tool, APIREC, is based on statistical learning from fine-grained code changes and from the context in which those changes were made. Our empirical evaluation shows that APIREC correctly recommends an API call in the first position 59% of the time, and it recommends the correct API call in the top five positions 77% of the time. This is a significant improvement over the state-of-the-art approaches by 30-160% for top-1 accuracy, and 10-30% for top-5 accuracy, respectively. 
Our result shows that APIREC performs well even with a one-time, minimal training dataset of 50 publicly available projects.", + "link": "https://www.semanticscholar.org/paper/2f40f934842d64911ccbd5edcf6e20ec8c670261", + "scraped_abstract": null, + "citation_best": 156 + }, + { + "paper": "2547900581", + "venue": "1199533187", + "year": "2016", + "title": "detecting sensitive data disclosure via bi directional text correlation analysis", + "label": [ + "557433098", + "124101348", + "2775852101", + "95922358" + ], + "author": [ + "2346927377", + "2157206324", + "2124937003" + ], + "reference": [ + "79696261", + "81879861", + "124941384", + "1437814062", + "1438616768", + "1555215938", + "1593739205", + "1899538528", + "1963971515", + "1988036170", + "2017025011", + "2018956134", + "2019798206", + "2032724464", + "2062864008", + "2078197322", + "2094716892", + "2124279581", + "2148397566", + "2152417697", + "2152874840", + "2166743230", + "2170196926", + "2385474486", + "2949270723" + ], + "abstract": "traditional sensitive data disclosure analysis faces two challenges to identify sensitive data that is not generated by specific api calls and to report the potential disclosures when the disclosed data is recognized as sensitive only after the sink operations we address these issues by developing bidtext a novel static technique to detect sensitive data disclosures bidtext formulates the problem as a type system in which variables are typed with the text labels that they encounter e g during key value pair operations the type system features a novel bi directional propagation technique that propagates the variable label sets through forward and backward data flow a data disclosure is reported if a parameter at a sink point is typed with a sensitive text label bidtext is evaluated on 10 000 android apps it reports 4 406 apps that have sensitive data disclosures with 4 263 apps having log based disclosures and 1 688 having disclosures due to other sinks such as http 
requests existing techniques can only report 64 0 of what bidtext reports and manual inspection shows that the false positive rate for bidtext is 10", + "title_raw": "Detecting sensitive data disclosure via bi-directional text correlation analysis", + "abstract_raw": "Traditional sensitive data disclosure analysis faces two challenges: to identify sensitive data that is not generated by specific API calls, and to report the potential disclosures when the disclosed data is recognized as sensitive only after the sink operations. We address these issues by developing BidText, a novel static technique to detect sensitive data disclosures. BidText formulates the problem as a type system, in which variables are typed with the text labels that they encounter (e.g., during key-value pair operations). The type system features a novel bi-directional propagation technique that propagates the variable label sets through forward and backward data-flow. A data disclosure is reported if a parameter at a sink point is typed with a sensitive text label. BidText is evaluated on 10,000 Android apps. It reports 4,406 apps that have sensitive data disclosures, with 4,263 apps having log based disclosures and 1,688 having disclosures due to other sinks such as HTTP requests. Existing techniques can only report 64.0% of what BidText reports. 
And manual inspection shows that the false positive rate for BidText is 10%.", + "link": "https://www.semanticscholar.org/paper/64297dbe7df15356b1e5c0b143ecb741c4ed0ad2", + "scraped_abstract": null, + "citation_best": 24 + }, + { + "paper": "2546918856", + "venue": "1199533187", + "year": "2016", + "title": "proteus computing disjunctive loop summary via path dependency analysis", + "label": [ + "117670421", + "181236821", + "140461892", + "82653869", + "134718785", + "134224410", + "11413529", + "11799548" + ], + "author": [ + "2119556171", + "2158602532", + "2201441985", + "1949214292", + "2590431212" + ], + "reference": [ + "95161029", + "145069693", + "175421034", + "618835350", + "1140939754", + "1492171827", + "1525169782", + "1535183500", + "1568984608", + "1573838559", + "1579437898", + "1710734607", + "1788659055", + "1839583963", + "1848981483", + "1889756448", + "1925669739", + "1978277032", + "2021917262", + "2027422213", + "2031373197", + "2032153666", + "2043100293", + "2044870852", + "2060015309", + "2088181608", + "2095810701", + "2098698656", + "2104810659", + "2109863363", + "2113791307", + "2120162396", + "2122917953", + "2128467905", + "2129538349", + "2129766426", + "2132661148", + "2134875273", + "2140678594", + "2147088720", + "2147627044", + "2153185479", + "2159756689", + "2161246680", + "2165015106", + "2280703106", + "2498141432", + "2911436048", + "2963707299", + "3139826851", + "3158747708" + ], + "abstract": "loops are challenging structures for program analysis especially when loops contain multiple paths with complex interleaving executions among these paths in this paper we first propose a classification of multi path loops to understand the complexity of the loop execution which is based on the variable updates on the loop conditions and the execution order of the loop paths secondly we propose a loop analysis framework named proteus which takes a loop program and a set of variables of interest as inputs and summarizes path 
sensitive loop effects on the variables the key contribution is to use a path dependency automaton pda to capture the execution dependency between the paths a dfs based algorithm is proposed to traverse the pda to summarize the effect for all feasible executions in the loop the experimental results show that proteus is effective in three applications proteus can 1 compute a more precise bound than the existing loop bound analysis techniques 2 significantly outperform state of the art tools for loop verification and 3 generate test cases for deep loops within one second while klee and pex either need much more time or fail", + "title_raw": "Proteus: computing disjunctive loop summary via path dependency analysis", + "abstract_raw": "Loops are challenging structures for program analysis, especially when loops contain multiple paths with complex interleaving executions among these paths. In this paper, we first propose a classification of multi-path loops to understand the complexity of the loop execution, which is based on the variable updates on the loop conditions and the execution order of the loop paths. Secondly, we propose a loop analysis framework, named Proteus, which takes a loop program and a set of variables of interest as inputs and summarizes path-sensitive loop effects on the variables. The key contribution is to use a path dependency automaton (PDA) to capture the execution dependency between the paths. A DFS-based algorithm is proposed to traverse the PDA to summarize the effect for all feasible executions in the loop. 
The experimental results show that Proteus is effective in three applications: Proteus can 1) compute a more precise bound than the existing loop bound analysis techniques; 2) significantly outperform state-of-the-art tools for loop verification; and 3) generate test cases for deep loops within one second, while KLEE and Pex either need much more time or fail.", + "link": "https://www.semanticscholar.org/paper/66129da29ceb5ca70cd4ada510467b8e92c09b24", + "scraped_abstract": null, + "citation_best": 50 + }, + { + "paper": "2476494490", + "venue": "1199533187", + "year": "2016", + "title": "why we refactor confessions of github contributors", + "label": [ + "152752567", + "160713754", + "548217200", + "133237599", + "2778143727", + "202105479", + "115903868" + ], + "author": [ + "2561750680", + "150546358", + "2099436054" + ], + "reference": [ + "36290627", + "154166809", + "191168329", + "1493688518", + "1539495021", + "1964731418", + "1969191237", + "1969939902", + "1976019354", + "1988814756", + "1998265754", + "2007504957", + "2013619645", + "2021680759", + "2029348203", + "2040867412", + "2041703907", + "2042204882", + "2046276611", + "2062973095", + "2066455950", + "2073398506", + "2087305857", + "2089910927", + "2097227214", + "2099194364", + "2101720091", + "2111051240", + "2112533109", + "2113157806", + "2127833339", + "2145700761", + "2149672479", + "2150230300", + "2153111806", + "2153887189", + "2159157551", + "2159725273", + "2162424272", + "2166993820", + "2171383742" + ], + "abstract": "refactoring is a widespread practice that helps developers to improve the maintainability and readability of their code however there is a limited number of studies empirically investigating the actual motivations behind specific refactoring operations applied by developers to fill this gap we monitored java projects hosted on github to detect recently applied refactorings and asked the developers to explain the reasons behind their decision to refactor the code by 
applying thematic analysis on the collected responses we compiled a catalogue of 44 distinct motivations for 12 well known refactoring types we found that refactoring activity is mainly driven by changes in the requirements and much less by code smells extract method is the most versatile refactoring operation serving 11 different purposes finally we found evidence that the ide used by the developers affects the adoption of automated refactoring tools", + "title_raw": "Why we refactor? confessions of GitHub contributors", + "abstract_raw": "Refactoring is a widespread practice that helps developers to improve the maintainability and readability of their code. However, there is a limited number of studies empirically investigating the actual motivations behind specific refactoring operations applied by developers. To fill this gap, we monitored Java projects hosted on GitHub to detect recently applied refactorings, and asked the developers to explain the reasons behind their decision to refactor the code. By applying thematic analysis on the collected responses, we compiled a catalogue of 44 distinct motivations for 12 well-known refactoring types. We found that refactoring activity is mainly driven by changes in the requirements and much less by code smells. Extract Method is the most versatile refactoring operation serving 11 different purposes. 
Finally, we found evidence that the IDE used by the developers affects the adoption of automated refactoring tools.", + "link": "https://www.semanticscholar.org/paper/b85b942784e0a6c9d84553a1ee8b7ea090aaee84", + "scraped_abstract": null, + "citation_best": 255 + }, + { + "paper": "2173564293", + "venue": "1180662882", + "year": "2016", + "title": "dueling network architectures for deep reinforcement learning", + "label": [ + "119857082", + "154945302", + "48103436", + "97541855", + "16963264", + "193415008" + ], + "author": [ + "2153747942", + "55380488", + "2227815835", + "2778963749", + "2107049243", + "2112824674" + ], + "reference": [ + "779494576", + "834081922", + "1515851193", + "1595483645", + "1599347336", + "1658008008", + "1947291763", + "2010315761", + "2100752967", + "2108563286", + "2124215603", + "2145339207", + "2150468603", + "2151210636", + "2155007355", + "2155027007", + "2155968351", + "2169393322", + "2257979135", + "2919115771", + "2962847657", + "2963477884", + "2964036520" + ], + "abstract": "in recent years there have been many successes of using deep representations in reinforcement learning still many of these applications use conventional architectures such as convolutional networks lstms or auto encoders in this paper we present a new neural network architecture for model free reinforcement learning our dueling network represents two separate estimators one for the state value function and one for the state dependent action advantage function the main benefit of this factoring is to generalize learning across actions without imposing any change to the underlying reinforcement learning algorithm our results show that this architecture leads to better policy evaluation in the presence of many similar valued actions moreover the dueling architecture enables our rl agent to outperform the state of the art on the atari 2600 domain", + "title_raw": "Dueling network architectures for deep reinforcement learning", + "abstract_raw": "In recent 
years there have been many successes of using deep representations in reinforcement learning. Still, many of these applications use conventional architectures, such as convolutional networks, LSTMs, or auto-encoders. In this paper, we present a new neural network architecture for model-free reinforcement learning. Our dueling network represents two separate estimators: one for the state value function and one for the state-dependent action advantage function. The main benefit of this factoring is to generalize learning across actions without imposing any change to the underlying reinforcement learning algorithm. Our results show that this architecture leads to better policy evaluation in the presence of many similar-valued actions. Moreover, the dueling architecture enables our RL agent to outperform the state-of-the-art on the Atari 2600 domain.", + "link": "https://www.semanticscholar.org/paper/4c05d7caa357148f0bbd61720bdd35f0bc05eb81", + "scraped_abstract": null, + "citation_best": 1512 + }, + { + "paper": "2267126114", + "venue": "1180662882", + "year": "2016", + "title": "pixel recurrent neural networks", + "label": [ + "115961682", + "50644808", + "160633673", + "8038995", + "177264268", + "147168706", + "31972630", + "178980831" + ], + "author": [ + "2295551607", + "223268448", + "1111049960" + ], + "reference": [ + "115742922", + "189596042", + "196214544", + "855255571", + "1583912456", + "1753482797", + "1771459135", + "1810943226", + "1866230956", + "1906598733", + "1959608418", + "2064675550", + "2083380015", + "2096192494", + "2097039814", + "2097268041", + "2112796928", + "2117539524", + "2135181320", + "2148464528", + "2157002241", + "2167348665", + "2170111110", + "2170942820", + "2194775991", + "2952366348", + "2962736171", + "2962741254", + "2962897886", + "2963857374", + "2964084166", + "3118608800" + ], + "abstract": "modeling the distribution of natural images is a landmark problem in unsupervised learning this task requires an image model that 
is at once expressive tractable and scalable we present a deep neural network that sequentially predicts the pixels in an image along the two spatial dimensions our method models the discrete probability of the raw pixel values and encodes the complete set of dependencies in the image architectural novelties include fast two dimensional recurrent layers and an effective use of residual connections in deep recurrent networks we achieve log likelihood scores on natural images that are considerably better than the previous state of the art our main results also provide benchmarks on the diverse imagenet dataset samples generated from the model appear crisp varied and globally coherent", + "title_raw": "Pixel recurrent neural networks", + "abstract_raw": "Modeling the distribution of natural images is a landmark problem in unsupervised learning. This task requires an image model that is at once expressive, tractable and scalable. We present a deep neural network that sequentially predicts the pixels in an image along the two spatial dimensions. Our method models the discrete probability of the raw pixel values and encodes the complete set of dependencies in the image. Architectural novelties include fast two-dimensional recurrent layers and an effective use of residual connections in deep recurrent networks. We achieve log-likelihood scores on natural images that are considerably better than the previous state of the art. Our main results also provide benchmarks on the diverse ImageNet dataset. 
Samples generated from the model appear crisp, varied and globally coherent.", + "link": "https://www.semanticscholar.org/paper/41f1d50c85d3180476c4c7b3eea121278b0d8474", + "scraped_abstract": null, + "citation_best": 531 + }, + { + "paper": "2739934489", + "venue": "1203999783", + "year": "2017", + "title": "ensuring rapid mixing and low bias for asynchronous gibbs sampling", + "label": [ + "119857082", + "111350023", + "98763669", + "68339613", + "158424031", + "151319957" + ], + "author": [ + "2096532069", + "1999838320", + "2156135343" + ], + "reference": [], + "abstract": "", + "title_raw": "Ensuring Rapid Mixing and Low Bias for Asynchronous Gibbs Sampling.", + "abstract_raw": "", + "link": "https://www.semanticscholar.org/paper/5acd38bf0d012d569099eee391358ced154fe144", + "scraped_abstract": null, + "citation_best": 23 + }, + { + "paper": "1927112189", + "venue": "1174403976", + "year": "2016", + "title": "efficient large scale trace checking using mapreduce", + "label": [ + "89497691", + "124246873", + "184337299", + "25016198", + "11413529", + "80444323" + ], + "author": [ + "2027133714", + "1947303083", + "2233149120", + "2168304044", + "2133526809" + ], + "reference": [ + "66604613", + "131766706", + "432113453", + "763797034", + "1470776573", + "1518884487", + "1528040784", + "1896455861", + "1981208316", + "1989051781", + "2026629052", + "2031188261", + "2047923159", + "2062663486", + "2101508170", + "2131975293", + "2154811097", + "2160767434", + "2161166453", + "2173213060", + "2189285614", + "2189465200", + "2207666193", + "3099832466" + ], + "abstract": "the problem of checking a logged event trace against a temporal logic specification arises in many practical cases unfortunately known algorithms for an expressive logic like mtl metric temporal logic do not scale with respect to two crucial dimensions the length of the trace and the size of the time interval of the formula to be checked the former issue can be addressed by distributed and parallel 
trace checking algorithms that can take advantage of modern cloud computing and programming frameworks like mapreduce still the latter issue remains open with current state of the art approaches in this paper we address this memory scalability issue by proposing a new semantics for mtl called lazy semantics this semantics can evaluate temporal formulae and boolean combinations of temporal only formulae at any arbitrary time instant we prove that lazy semantics is more expressive than point based semantics and that it can be used as a basis for a correct parametric decomposition of any mtl formula into an equivalent one with smaller bounded time intervals we use lazy semantics to extend our previous distributed trace checking algorithm for mtl the evaluation shows that the proposed algorithm can check formulae with large intervals on large traces in a memory efficient way", + "title_raw": "Efficient large-scale trace checking using mapreduce", + "abstract_raw": "The problem of checking a logged event trace against a temporal logic specification arises in many practical cases. Unfortunately, known algorithms for an expressive logic like MTL (Metric Temporal Logic) do not scale with respect to two crucial dimensions: the length of the trace and the size of the time interval of the formula to be checked. The former issue can be addressed by distributed and parallel trace checking algorithms that can take advantage of modern cloud computing and programming frameworks like MapReduce. Still, the latter issue remains open with current state-of-the-art approaches. In this paper we address this memory scalability issue by proposing a new semantics for MTL, called lazy semantics. This semantics can evaluate temporal formulae and boolean combinations of temporal-only formulae at any arbitrary time instant. 
We prove that lazy semantics is more expressive than point-based semantics and that it can be used as a basis for a correct parametric decomposition of any MTL formula into an equivalent one with smaller, bounded time intervals. We use lazy semantics to extend our previous distributed trace checking algorithm for MTL. The evaluation shows that the proposed algorithm can check formulae with large intervals, on large traces, in a memory-efficient way.", + "link": "https://www.semanticscholar.org/paper/a93120f6b99990264f2bedb06d9e46280decdcb0", + "scraped_abstract": null, + "citation_best": 17 + }, + { + "paper": "2344496621", + "venue": "1174403976", + "year": "2016", + "title": "energy profiles of java collections classes", + "label": [ + "138958017", + "132106392", + "206526619", + "77088390", + "548217200", + "199360897", + "172482141", + "2779818221", + "162319229", + "168702491", + "15524039" + ], + "author": [ + "2134551143", + "2106132760", + "2311882549", + "2222148249", + "2115051412", + "2137779291" + ], + "reference": [ + "314977878", + "1424148118", + "1668788687", + "1853946175", + "1859105143", + "1966533006", + "1973077330", + "1996162360", + "2004413237", + "2015941169", + "2017684569", + "2025892295", + "2051731121", + "2064060458", + "2067617772", + "2068857448", + "2071398118", + "2090890295", + "2093354591", + "2097204446", + "2098067051", + "2102727118", + "2103273159", + "2110454200", + "2111454275", + "2112062276", + "2112918230", + "2115148068", + "2122097005", + "2126399635", + "2126841099", + "2135275954", + "2143968178", + "2144293278", + "2250533127", + "2291311334", + "2317860535", + "2401668016", + "2403967771", + "2554125638", + "2615865818", + "3121435431" + ], + "abstract": "we created detailed profiles of the energy consumed by common operations done on java list map and set abstractions the results show that the alternative data types for these abstractions differ significantly in terms of energy consumption depending on the 
operations for example an arraylist consumes less energy than a linkedlist if items are inserted at the middle or at the end but consumes more energy than a linkedlist if items are inserted at the start of the list to explain the results we explored the memory usage and the bytecode executed during an operation expensive computation tasks in the analyzed bytecode traces appeared to have an energy impact but memory usage did not contribute we evaluated our profiles by using them to selectively replace collections types used in six applications and libraries we found that choosing the wrong collections type as indicated by our profiles can cost even 300 more energy than the most efficient choice our work shows that the usage context of a data structure and our measured energy profiles can be used to decide between alternative collections implementations", + "title_raw": "Energy profiles of Java collections classes", + "abstract_raw": "We created detailed profiles of the energy consumed by common operations done on Java List, Map, and Set abstractions. The results show that the alternative data types for these abstractions differ significantly in terms of energy consumption depending on the operations. For example, an ArrayList consumes less energy than a LinkedList if items are inserted at the middle or at the end, but consumes more energy than a LinkedList if items are inserted at the start of the list. To explain the results, we explored the memory usage and the bytecode executed during an operation. Expensive computation tasks in the analyzed bytecode traces appeared to have an energy impact, but memory usage did not contribute. We evaluated our profiles by using them to selectively replace Collections types used in six applications and libraries. We found that choosing the wrong Collections type, as indicated by our profiles, can cost even 300% more energy than the most efficient choice. 
Our work shows that the usage context of a data structure and our measured energy profiles can be used to decide between alternative Collections implementations.", + "link": "https://www.semanticscholar.org/paper/4d125e263c46f397a17758ec3cae63b81615889a", + "scraped_abstract": null, + "citation_best": 116 + }, + { + "paper": "2393286498", + "venue": "1174403976", + "year": "2016", + "title": "feedback directed instrumentation for deployed javascript applications", + "label": [ + "1009929", + "100850083", + "149635348", + "544833334", + "55707083", + "168065819", + "118530786", + "183469790", + "160191386", + "2777904410", + "79403827" + ], + "author": [ + "2569616956", + "3169559280", + "1977523911", + "2157836386", + "2189404259" + ], + "reference": [ + "1514468887", + "1527815941", + "1984248430", + "1984846967", + "1987647365", + "1996296256", + "2033890725", + "2036639103", + "2051761662", + "2056749200", + "2062154010", + "2093755302", + "2098806455", + "2101819268", + "2105691657", + "2105714739", + "2110066339", + "2113404202", + "2129663982", + "2139627310", + "2142194171", + "2144621365", + "2155479707", + "2160510992", + "2162376048", + "2164649341", + "2165811181", + "2193288339", + "2396919876", + "2965069586" + ], + "abstract": "many bugs in javascript applications manifest themselves as objects that have incorrect property values when a failure occurs for this type of error stack traces and log files are often insufficient for diagnosing problems in such cases it is helpful for developers to know the control flow path from the creation of an object to a crashing statement such crash paths are useful for understanding where the object originated and whether any properties of the object were corrupted since its creation we present a feedback directed instrumentation technique for computing crash paths that allows the instrumentation overhead to be distributed over a crowd of users and to reduce it for users who do not encounter the crash we implemented 
our technique in a tool crowdie and evaluated it on 10 real world issues for which error messages and stack traces are insufficient to isolate the problem our results show that feedback directed instrumentation requires 5 to 25 of the program to be instrumented that the same crash must be observed 3 to 10 times to discover the crash path and that feedback directed instrumentation typically slows down execution by a factor 2x 9x compared to 8x 90x for an approach where applications are fully instrumented", + "title_raw": "Feedback-directed instrumentation for deployed JavaScript applications", + "abstract_raw": "Many bugs in JavaScript applications manifest themselves as objects that have incorrect property values when a failure occurs. For this type of error, stack traces and log files are often insufficient for diagnosing problems. In such cases, it is helpful for developers to know the control flow path from the creation of an object to a crashing statement. Such crash paths are useful for understanding where the object originated and whether any properties of the object were corrupted since its creation. We present a feedback-directed instrumentation technique for computing crash paths that allows the instrumentation overhead to be distributed over a crowd of users and to reduce it for users who do not encounter the crash. We implemented our technique in a tool, Crowdie, and evaluated it on 10 real-world issues for which error messages and stack traces are insufficient to isolate the problem. 
Our results show that feedback-directed instrumentation requires 5% to 25% of the program to be instrumented, that the same crash must be observed 3 to 10 times to discover the crash path, and that feedback-directed instrumentation typically slows down execution by a factor 2x-9x compared to 8x-90x for an approach where applications are fully instrumented.", + "link": "https://www.semanticscholar.org/paper/9084b4a680207eed17376904b654cde503c97fbb", + "scraped_abstract": null, + "citation_best": 18 + }, + { + "paper": "2387719207", + "venue": "1174403976", + "year": "2016", + "title": "from word embeddings to document similarities for improved information retrieval in software engineering", + "label": [ + "1009929", + "124246873", + "44291984", + "2776689786", + "33857546", + "2777904410", + "204321447", + "115903868", + "195324797", + "23123220" + ], + "author": [ + "2300949861", + "2251775646", + "2482852693", + "573732962", + "2598525617" + ], + "reference": [ + "658020064", + "1532325895", + "1566018662", + "1588986231", + "1614298861", + "1928731475", + "1977971855", + "1983578042", + "1985947101", + "1986541713", + "2000518393", + "2004247102", + "2014022233", + "2017593508", + "2018663431", + "2026003524", + "2035720976", + "2040043446", + "2047221353", + "2049238280", + "2050396504", + "2050712820", + "2058230372", + "2063384185", + "2086511124", + "2089759055", + "2100739938", + "2105400090", + "2110220994", + "2115130131", + "2116737258", + "2117130368", + "2122060876", + "2125031621", + "2125980283", + "2131744502", + "2132339004", + "2139543149", + "2141599568", + "2141987989", + "2153579005", + "2158899491", + "2161847731", + "2251803266", + "2882319491", + "2997617958", + "2998704965" + ], + "abstract": "the application of information retrieval techniques to search tasks in software engineering is made difficult by the lexical gap between search queries usually expressed in natural language e g english and retrieved documents usually expressed in code 
e g programming languages this is often the case in bug and feature location community question answering or more generally the communication between technical personnel and non technical stake holders in a software project in this paper we propose bridging the lexical gap by projecting natural language statements and code snippets as meaning vectors in a shared representation space in the proposed architecture word embeddings are first trained on api documents tutorials and reference documents and then aggregated in order to estimate semantic similarities between documents empirical evaluations show that the learned vector space embeddings lead to improvements in a previously explored bug localization task and a newly defined task of linking api documents to computer programming questions", + "title_raw": "From word embeddings to document similarities for improved information retrieval in software engineering", + "abstract_raw": "The application of information retrieval techniques to search tasks in software engineering is made difficult by the lexical gap between search queries, usually expressed in natural language (e.g. English), and retrieved documents, usually expressed in code (e.g. programming languages). This is often the case in bug and feature location, community question answering, or more generally the communication between technical personnel and non-technical stake holders in a software project. In this paper, we propose bridging the lexical gap by projecting natural language statements and code snippets as meaning vectors in a shared representation space. In the proposed architecture, word embeddings are first trained on API documents, tutorials, and reference documents, and then aggregated in order to estimate semantic similarities between documents. 
Empirical evaluations show that the learned vector space embeddings lead to improvements in a previously explored bug localization task and a newly defined task of linking API documents to computer programming questions.", + "link": "https://www.semanticscholar.org/paper/3041a9265afb2ebdb4915aa9572668bb7f32b0ef", + "scraped_abstract": null, + "citation_best": 286 + }, + { + "paper": "2294434616", + "venue": "1174403976", + "year": "2016", + "title": "guiding dynamic symbolic execution toward unverified program executions", + "label": [ + "137287247", + "2779639559", + "199360897", + "150292731", + "152124472" + ], + "author": [ + "2054326781", + "2285904575", + "56272107" + ], + "reference": [ + "74242707", + "127773936", + "138282132", + "157156687", + "190884830", + "196033600", + "1167406420", + "1497028280", + "1497571013", + "1590503034", + "1657354101", + "1970005004", + "1979693894", + "1993836075", + "1998838326", + "2009489720", + "2024442685", + "2043100293", + "2045109907", + "2054602389", + "2066210260", + "2096449544", + "2101512909", + "2110311336", + "2110908283", + "2117058582", + "2130427425", + "2133612077", + "2134875273", + "2135584970", + "2140021378", + "2140341752", + "2146641295", + "2151959678", + "2158395308", + "2159899121", + "2162284942", + "2164419371", + "2169309022", + "2171469152", + "2172260321", + "2247219265", + "2335911175", + "2472960084" + ], + "abstract": "most techniques to detect program errors such as testing code reviews and static program analysis do not fully verify all possible executions of a program they leave executions unverified when they do not check certain properties fail to verify properties or check properties under certain unsound assumptions such as the absence of arithmetic overflow in this paper we present a technique to complement partial verification results by automatic test case generation in contrast to existing work our technique supports the common case that the verification results are based on 
unsound assumptions we annotate programs to reflect which executions have been verified and under which assumptions these annotations are then used to guide dynamic symbolic execution toward unverified program executions our main technical contribution is a code instrumentation that causes dynamic symbolic execution to abort tests that lead to verified executions to prune parts of the search space and to prioritize tests that cover more properties that are not fully verified we have implemented our technique for the net static analyzer clousot and the dynamic symbolic execution tool pex it produces smaller test suites by up to 19 2 covers more unverified executions by up to 7 1 and reduces testing time by up to 52 4 compared to combining clousot and pex without our technique", + "title_raw": "Guiding dynamic symbolic execution toward unverified program executions", + "abstract_raw": "Most techniques to detect program errors, such as testing, code reviews, and static program analysis, do not fully verify all possible executions of a program. They leave executions unverified when they do not check certain properties, fail to verify properties, or check properties under certain unsound assumptions such as the absence of arithmetic overflow. In this paper, we present a technique to complement partial verification results by automatic test case generation. In contrast to existing work, our technique supports the common case that the verification results are based on unsound assumptions. We annotate programs to reflect which executions have been verified, and under which assumptions. These annotations are then used to guide dynamic symbolic execution toward unverified program executions. Our main technical contribution is a code instrumentation that causes dynamic symbolic execution to abort tests that lead to verified executions, to prune parts of the search space, and to prioritize tests that cover more properties that are not fully verified. 
We have implemented our technique for the .NET static analyzer Clousot and the dynamic symbolic execution tool Pex. It produces smaller test suites (by up to 19.2%), covers more unverified executions (by up to 7.1%), and reduces testing time (by up to 52.4%) compared to combining Clousot and Pex without our technique.", + "link": "https://www.semanticscholar.org/paper/42c2b452fa1a25324ff4ef67d16234a27cc364aa", + "scraped_abstract": null, + "citation_best": 85 + }, + { + "paper": "2367183013", + "venue": "1174403976", + "year": "2016", + "title": "on the techniques we create the tools we build and their misalignments a study of klee", + "label": [ + "1009929", + "70440993", + "2777904410", + "2522767166" + ], + "author": [ + "2283819910", + "262280937", + "2156583880" + ], + "reference": [ + "51612467", + "116894366", + "165093678", + "241804591", + "296687966", + "581768825", + "1031460189", + "1151913765", + "1436965661", + "1494749758", + "1518705996", + "1522692903", + "1548377710", + "1561959476", + "1634837753", + "1710734607", + "1744676300", + "1934855418", + "1971137495", + "1975799355", + "1978727723", + "1982206931", + "1990880047", + "1997854308", + "2002758617", + "2005140737", + "2008985174", + "2009526138", + "2009956228", + "2013382827", + "2017932409", + "2032593675", + "2036804147", + "2042033151", + "2042410326", + "2043512166", + "2051256740", + "2060573639", + "2062200967", + "2062271242", + "2062392785", + "2068973905", + "2071589768", + "2076960126", + "2087115492", + "2092388562", + "2109493568", + "2110908283", + "2111768655", + "2112448454", + "2114780348", + "2118283821", + "2119031905", + "2130514924", + "2130729525", + "2140566320", + "2146141344", + "2151532300", + "2155216527", + "2162360270", + "2163450822", + "2163592454", + "2164649341", + "2170949692", + "2199071416", + "2292217452", + "2331384579", + "3017160350", + "3160978791" + ], + "abstract": "our community constantly pushes the state of the art by introducing new techniques 
these techniques often build on top of and are compared against existing systems that realize previously published techniques the underlying assumption is that existing systems correctly represent the techniques they implement this paper examines that assumption through a study of klee a popular and well cited tool in our community we briefly describe six improvements we made to klee none of which can be considered new techniques that provide order of magnitude performance gains given these improvements we then investigate how the results and conclusions of a sample of papers that cite klee are affected our findings indicate that the strong emphasis on introducing new techniques may lead to wasted effort missed opportunities for progress an accretion of artifact complexity and questionable research conclusions in our study 27 of the papers that depend on klee can be questioned we conclude by revisiting initiatives that may help to realign the incentives to better support the foundations on which we build", + "title_raw": "On the techniques we create, the tools we build, and their misalignments: a study of KLEE", + "abstract_raw": "Our community constantly pushes the state-of-the-art by introducing \"new\" techniques. These techniques often build on top of, and are compared against, existing systems that realize previously published techniques. The underlying assumption is that existing systems correctly represent the techniques they implement. This paper examines that assumption through a study of KLEE, a popular and well-cited tool in our community. We briefly describe six improvements we made to KLEE, none of which can be considered \"new\" techniques, that provide order-of-magnitude performance gains. Given these improvements, we then investigate how the results and conclusions of a sample of papers that cite KLEE are affected. 
Our findings indicate that the strong emphasis on introducing \"new\" techniques may lead to wasted effort, missed opportunities for progress, an accretion of artifact complexity, and questionable research conclusions (in our study, 27% of the papers that depend on KLEE can be questioned). We conclude by revisiting initiatives that may help to realign the incentives to better support the foundations on which we build.", + "link": "https://www.semanticscholar.org/paper/0c3f6413b362992b5e7b6d20a6bd057ad18cf92f", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2381207137", + "venue": "1174403976", + "year": "2016", + "title": "termination checking for llvm peephole optimizations", + "label": [ + "1009929", + "55439883", + "168065819", + "169590947", + "184337299", + "181197062", + "199360897", + "173608175", + "2780154230", + "4970464" + ], + "author": [ + "2224928429", + "1774359112" + ], + "reference": [ + "45951652", + "91193428", + "364774736", + "1480909796", + "1493611000", + "1543527641", + "1576549127", + "1966021031", + "1977279860", + "1989813138", + "1996245589", + "2003164052", + "2023035194", + "2040856861", + "2060015309", + "2070291735", + "2081917916", + "2088056808", + "2097284057", + "2098293014", + "2098456636", + "2114012357", + "2121757274", + "2123413748", + "2124909257", + "2125346056", + "2130655713", + "2134087751", + "2136379287", + "2144540543", + "2160145830", + "2164264520", + "2167029843", + "2169336925", + "2170737051", + "2294628582", + "3080753854", + "3151686641" + ], + "abstract": "mainstream compilers contain a large number of peephole optimizations which perform algebraic simplification of the input program with local rewriting of the code these optimizations are a persistent source of bugs our recent research on alive a domain specific language for expressing peephole optimizations in llvm addresses a part of the problem by automatically verifying the correctness of these optimizations and generating c code 
for use with llvm this paper identifies a class of non termination bugs that arise when a suite of peephole optimizations is executed until a fixed point an optimization can undo the effect of another optimization in the suite which results in non terminating compilation this paper 1 proposes a methodology to detect non termination bugs with a suite of peephole optimizations 2 identifies the necessary condition to ensure termination while composing peephole optimizations and 3 provides debugging support by generating concrete input programs that cause non terminating compilation we have discovered 184 optimization sequences involving 38 optimizations that cause non terminating compilation in llvm with alive generated c code", + "title_raw": "Termination-checking for LLVM peephole optimizations", + "abstract_raw": "Mainstream compilers contain a large number of peephole optimizations, which perform algebraic simplification of the input program with local rewriting of the code. These optimizations are a persistent source of bugs. Our recent research on Alive, a domain-specific language for expressing peephole optimizations in LLVM, addresses a part of the problem by automatically verifying the correctness of these optimizations and generating C++ code for use with LLVM. This paper identifies a class of non-termination bugs that arise when a suite of peephole optimizations is executed until a fixed point. An optimization can undo the effect of another optimization in the suite, which results in non-terminating compilation. This paper (1) proposes a methodology to detect non-termination bugs with a suite of peephole optimizations, (2) identifies the necessary condition to ensure termination while composing peephole optimizations, and (3) provides debugging support by generating concrete input programs that cause non-terminating compilation. 
We have discovered 184 optimization sequences, involving 38 optimizations, that cause non-terminating compilation in LLVM with Alive-generated C++ code.", + "link": "https://www.semanticscholar.org/paper/b46d11568ee12f4ba5aaaaf923b73dfc0f2d9593", + "scraped_abstract": null, + "citation_best": 11 + }, + { + "paper": "2374182234", + "venue": "1174403976", + "year": "2016", + "title": "vdtest an automated framework to support testing for virtual devices", + "label": [ + "25344961", + "149635348", + "90307666", + "168065819", + "182122060", + "2777904410", + "2780237174", + "207953454", + "97686452", + "55166926" + ], + "author": [ + "2115723175", + "2161987024", + "2151210804" + ], + "reference": [ + "109452506", + "1206239176", + "1499241274", + "1522250664", + "1985018157", + "2059865504", + "2060361314", + "2061290744", + "2075699551", + "2080865708", + "2092483417", + "2098202130", + "2105183815", + "2119068918", + "2122796178", + "2128204165", + "2136933131", + "2137530017", + "2146924294", + "2147657366", + "2148308674", + "2151704521", + "2154569078", + "2156480873", + "2162023704", + "2169294765" + ], + "abstract": "the use of virtual devices in place of physical hardware is increasing in activities such as design testing and debugging yet virtual devices are simply software applications and like all software they are prone to faults a full system simulator fss is a class of virtual machine that includes a large set of virtual devices enough to run the full target software stack defects in an fss virtual device may have cascading effects as the incorrect behavior can be propagated forward to many different platforms as well as to guest programs in this work we present vdtest a novel framework for testing virtual devices within an fss vdtest begins by generating a test specification obtained through static analysis it then employs a two phase testing approach to test virtual components both individually and in combination it leverages a differential oracle 
strategy taking advantage of the existence of a physical or golden device to eliminate the need for manually generating test oracles in an empirical study using both open source and commercial fsss we found 64 faults 83 more than random testing", + "title_raw": "VDTest: an automated framework to support testing for virtual devices", + "abstract_raw": "The use of virtual devices in place of physical hardware is increasing in activities such as design, testing and debugging. Yet virtual devices are simply software applications, and like all software they are prone to faults. A full system simulator (FSS), is a class of virtual machine that includes a large set of virtual devices -- enough to run the full target software stack. Defects in an FSS virtual device may have cascading effects as the incorrect behavior can be propagated forward to many different platforms as well as to guest programs. In this work we present VDTest, a novel framework for testing virtual devices within an FSS. VDTest begins by generating a test specification obtained through static analysis. It then employs a two-phase testing approach to test virtual components both individually and in combination. It leverages a differential oracle strategy, taking advantage of the existence of a physical or golden device to eliminate the need for manually generating test oracles. 
In an empirical study using both open source and commercial FSSs, we found 64 faults, 83% more than random testing.", + "link": "https://www.semanticscholar.org/paper/f452576de6eb14b3f0a294438a073f67957a6e31", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2144160189", + "venue": "1174403976", + "year": "2016", + "title": "work practices and challenges in pull based development the contributor s perspective", + "label": [ + "529173508", + "2777904410", + "56739046" + ], + "author": [ + "1999536173", + "2291851674", + "2123850919", + "2155442793" + ], + "reference": [ + "119969938", + "159465820", + "1510316655", + "1547253197", + "1592081868", + "1595108726", + "1606787793", + "1607675442", + "1714723801", + "1964000287", + "1964986774", + "1972386298", + "1975782063", + "1984080818", + "1987459880", + "1988320117", + "1989862714", + "1995969252", + "2002449827", + "2007210734", + "2011350340", + "2018871545", + "2024132711", + "2024860915", + "2032843080", + "2040915209", + "2055703005", + "2055837583", + "2065367771", + "2068418679", + "2090094826", + "2091990486", + "2097227214", + "2104544295", + "2107294940", + "2107564263", + "2110229593", + "2110499955", + "2112750810", + "2120244029", + "2120972908", + "2120983913", + "2124100711", + "2125854594", + "2126231393", + "2133561941", + "2136421959", + "2139092060", + "2139885493", + "2140952846", + "2142440840", + "2145890718", + "2147018965", + "2148554434", + "2148854374", + "2155684781", + "2163313306", + "2163592454", + "2168248828", + "2175631294", + "2185531197", + "2283503290", + "2294130646", + "3123336377", + "3124560715", + "3137784269" + ], + "abstract": "in the pull based development model the integrator has the crucial role of managing and integrating contributions this work focuses on the role of the integrator and investigates working habits and challenges alike we set up an exploratory qualitative study involving a large scale survey of 749 integrators to which we add 
quantitative data from the integrator s project our results provide insights into the factors they consider in their decision making process to accept or reject a contribution our key findings are that integrators struggle to maintain the quality of their projects and have difficulties with prioritizing contributions that are to be merged our insights have implications for practitioners who wish to use or improve their pull based development process as well as for researchers striving to understand the theoretical implications of the pull based model in software development", + "title_raw": "Work practices and challenges in pull-based development: the contributor's perspective", + "abstract_raw": "In the pull-based development model, the integrator has the crucial role of managing and integrating contributions. This work focuses on the role of the integrator and investigates working habits and challenges alike. We set up an exploratory qualitative study involving a large-scale survey of 749 integrators, to which we add quantitative data from the integrator's project. Our results provide insights into the factors they consider in their decision making process to accept or reject a contribution. Our key findings are that integrators struggle to maintain the quality of their projects and have difficulties with prioritizing contributions that are to be merged. 
Our insights have implications for practitioners who wish to use or improve their pull-based development process, as well as for researchers striving to understand the theoretical implications of the pull-based model in software development.", + "link": "https://www.semanticscholar.org/paper/7fdacde2b12d6cb122d620d1f6b266e22a6a72a0", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2348679751", + "venue": "1130985203", + "year": "2016", + "title": "fraudar bounding graph fraud in the face of camouflage", + "label": [ + "38652104", + "2776196576", + "136134403", + "63584917" + ], + "author": [ + "1755863881", + "2158947101", + "3205362904", + "2397954333", + "2226806500", + "2198983026" + ], + "reference": [ + "81017276", + "189202421", + "1535144194", + "1560607100", + "1585799956", + "1587819022", + "1736726159", + "1845137714", + "1933130724", + "2005556331", + "2011863672", + "2019207508", + "2047756776", + "2061873838", + "2101196063", + "2101447063", + "2101890615", + "2112429379", + "2124120603", + "2133591726", + "2134695286", + "2138621811", + "2142517301", + "2148123869", + "2150941206", + "2158600392", + "2168508162", + "2248736178", + "2266714125", + "2282288858", + "2949957935" + ], + "abstract": "given a bipartite graph of users and the products that they review or followers and followees how can we detect fake reviews or follows existing fraud detection methods spectral etc try to identify dense subgraphs of nodes that are sparsely connected to the remaining graph fraudsters can evade these methods using camouflage by adding reviews or follows with honest targets so that they look normal even worse some fraudsters use hijacked accounts from honest users and then the camouflage is indeed organic our focus is to spot fraudsters in the presence of camouflage or hijacked accounts we propose fraudar an algorithm that a is camouflage resistant b provides upper bounds on the effectiveness of fraudsters and c is effective in real world 
data experimental results under various attacks show that fraudar outperforms the top competitor in accuracy of detecting both camouflaged and non camouflaged fraud additionally in real world experiments with a twitter follower followee graph of 1 47 billion edges fraudar successfully detected a subgraph of more than 4000 detected accounts of which a majority had tweets showing that they used follower buying services", + "title_raw": "FRAUDAR: Bounding Graph Fraud in the Face of Camouflage", + "abstract_raw": "Given a bipartite graph of users and the products that they review, or followers and followees, how can we detect fake reviews or follows? Existing fraud detection methods (spectral, etc.) try to identify dense subgraphs of nodes that are sparsely connected to the remaining graph. Fraudsters can evade these methods using camouflage, by adding reviews or follows with honest targets so that they look \"normal\". Even worse, some fraudsters use hijacked accounts from honest users, and then the camouflage is indeed organic. Our focus is to spot fraudsters in the presence of camouflage or hijacked accounts. We propose FRAUDAR, an algorithm that (a) is camouflage-resistant, (b) provides upper bounds on the effectiveness of fraudsters, and (c) is effective in real-world data. Experimental results under various attacks show that FRAUDAR outperforms the top competitor in accuracy of detecting both camouflaged and non-camouflaged fraud. 
Additionally, in real-world experiments with a Twitter follower-followee graph of 1.47 billion edges, FRAUDAR successfully detected a subgraph of more than 4000 detected accounts, of which a majority had tweets showing that they used follower-buying services.", + "link": "https://www.semanticscholar.org/paper/2852982175beeb92e14127277cb158c4cb31f5c5", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2964077562", + "venue": "1127325140", + "year": "2016", + "title": "value iteration networks", + "label": [ + "155032097", + "81363708", + "154945302", + "50644808", + "97541855", + "200632571" + ], + "author": [ + "1908865110", + "2600679102", + "2514302950", + "2140030024", + "243981275" + ], + "reference": [ + "1176919", + "603908379", + "834081922", + "1514535095", + "1515851193", + "1606347560", + "1663973292", + "1771410628", + "1777239053", + "1815076433", + "1850531616", + "1903029394", + "1975463331", + "1983364832", + "1999874108", + "2002428251", + "2013614847", + "2021128738", + "2022508996", + "2072128103", + "2084920657", + "2098774185", + "2101926813", + "2104171826", + "2105960367", + "2112796928", + "2116442740", + "2117675763", + "2121103318", + "2123491406", + "2135639338", + "2136064009", + "2140135625", + "2141125852", + "2145339207", + "2151210636", + "2155007355", + "2158782408", + "2164424353", + "2167590754", + "2169498096", + "2262178610", + "2296673577", + "2341171179", + "2441569124", + "2618530766", + "2962957031", + "2963773578", + "2963825768" + ], + "abstract": "we introduce the value iteration network vin a fully differentiable neural network with a planning module embedded within vins can learn to plan and are suitable for predicting outcomes that involve planning based reasoning such as policies for reinforcement learning key to our approach is a novel differentiable approximation of the value iteration algorithm which can be represented as a convolutional neural network and trained end to end using standard 
backpropagation we evaluate vin based policies on discrete and continuous path planning domains and on a natural language based search task we show that by learning an explicit planning computation vin policies generalize better to new unseen domains", + "title_raw": "Value iteration networks", + "abstract_raw": "We introduce the value iteration network (VIN): a fully differentiable neural network with a 'planning module' embedded within. VINs can learn to plan, and are suitable for predicting outcomes that involve planning-based reasoning, such as policies for reinforcement learning. Key to our approach is a novel differentiable approximation of the value-iteration algorithm, which can be represented as a convolutional neural network, and trained end-to-end using standard backpropagation. We evaluate VIN based policies on discrete and continuous path-planning domains, and on a natural-language based search task. We show that by learning an explicit planning computation, VIN policies generalize better to new, unseen domains.", + "link": "https://scholar.google.com/scholar?as_q=&num=10&btnG=Search+Scholar&as_epq=Value+Iteration+Networks&as_oq=&as_eq=&as_occt=any&as_sauthors=Tamar", + "scraped_abstract": null, + "citation_best": 241 + }, + { + "paper": "2412123704", + "venue": "2534597628", + "year": "2016", + "title": "passive wi fi bringing low power to wi fi transmissions", + "label": [ + "546215728", + "101765175", + "24590314", + "2780652538", + "73431340", + "9390403", + "42935608", + "527821871", + "38601921" + ], + "author": [ + "2141890943", + "669839582", + "2039996270", + "2154841660" + ], + "reference": [], + "abstract": "", + "title_raw": "Passive Wi-Fi: Bringing Low Power to Wi-Fi Transmissions", + "abstract_raw": "", + "link": "https://www.semanticscholar.org/paper/f3879fa2fbcbe1090108fcef3ec030d0ba8bd6c1", + "scraped_abstract": null, + "citation_best": 256 + }, + { + "paper": "2575735093", + "venue": "1185109434", + "year": "2016", + "title": "ryoan a 
distributed sandbox for untrusted computation on secret data", + "label": [ + "38652104", + "45374587", + "9417928", + "2780734892", + "2777904410", + "203005215" + ], + "author": [ + "2164054805", + "2602328539", + "2577716877", + "2154122045", + "2307943801" + ], + "reference": [ + "2363194", + "13103650", + "65595058", + "104209573", + "1434079718", + "1494049356", + "1495495588", + "1499241274", + "1560204281", + "1584476834", + "1664413462", + "1710734607", + "1852007091", + "1895952394", + "1992291252", + "1999602293", + "2014235365", + "2024438729", + "2031738616", + "2037322308", + "2062340141", + "2062805799", + "2074701325", + "2083228150", + "2086234010", + "2099137371", + "2099382052", + "2106573617", + "2112735498", + "2116807588", + "2117550587", + "2133448626", + "2136296832", + "2138517425", + "2147378257", + "2147758029", + "2149603369", + "2150615820", + "2150620897", + "2150709728", + "2151071112", + "2159915142", + "2162283517", + "2167804035", + "2171690178", + "2176235539", + "2266218113", + "2315828679", + "2316567509", + "2401617229", + "2460441129", + "2463516579" + ], + "abstract": "users of modern data processing services such as tax preparation or genomic screening are forced to trust them with data that the users wish to keep secret ryoan protects secret data while it is processed by services that the data owner does not trust accomplishing this goal in a distributed setting is difficult because the user has no control over the service providers or the computational platform confining code to prevent it from leaking secrets is notoriously difficult but ryoan benefits from new hardware and a request oriented data model ryoan provides a distributed sandbox leveraging hardware enclaves e g intel s software guard extensions sgx 15 to protect sandbox instances from potentially malicious computing platforms the protected sandbox instances confine untrusted data processing modules to prevent leakage of the user s input data ryoan is designed 
for a request oriented data model where confined modules only process input once and do not persist state about the input we present the design and prototype implementation of ryoan and evaluate it on a series of challenging problems including email filtering heath analysis image processing and machine translation", + "title_raw": "Ryoan: a distributed sandbox for untrusted computation on secret data", + "abstract_raw": "Users of modern data-processing services such as tax preparation or genomic screening are forced to trust them with data that the users wish to keep secret. Ryoan protects secret data while it is processed by services that the data owner does not trust. Accomplishing this goal in a distributed setting is difficult because the user has no control over the service providers or the computational platform. Confining code to prevent it from leaking secrets is notoriously difficult, but Ryoan benefits from new hardware and a request-oriented data model.\r\n\r\nRyoan provides a distributed sandbox, leveraging hardware enclaves (e.g., Intel's software guard extensions (SGX) [15]) to protect sandbox instances from potentially malicious computing platforms. The protected sandbox instances confine untrusted data-processing modules to prevent leakage of the user's input data. Ryoan is designed for a request-oriented data model, where confined modules only process input once and do not persist state about the input. 
We present the design and prototype implementation of Ryoan and evaluate it on a series of challenging problems including email filtering, heath analysis, image processing and machine translation.", + "link": "https://www.semanticscholar.org/paper/f70052c91dc7c75a215d4dbc8c473db84414ced0", + "scraped_abstract": null, + "citation_best": 166 + }, + { + "paper": "2522470548", + "venue": "1185109434", + "year": "2016", + "title": "early detection of configuration errors to reduce failure damage", + "label": [ + "63540848", + "114466953", + "55439883", + "32582076", + "149091818", + "43126263", + "79403827" + ], + "author": [ + "2121702024", + "2149033451", + "2646704398", + "2155186186", + "2111557135", + "2465456621", + "2094263872" + ], + "reference": [ + "127874871", + "134722953", + "338991206", + "1495241705", + "1527417319", + "1573585357", + "1607902447", + "1652441716", + "1779647772", + "1924374923", + "1969643550", + "1975413145", + "1978364288", + "1984775394", + "1994411654", + "2027701108", + "2029039689", + "2030696252", + "2040849296", + "2060204338", + "2076793324", + "2098972600", + "2102436656", + "2120376200", + "2128208466", + "2130136915", + "2133441575", + "2138509363", + "2151502039", + "2261611353", + "2489690523", + "2518961441" + ], + "abstract": "early detection is the key to minimizing failure damage induced by configuration errors especially those errors in configurations that control failure handling and fault tolerance since such configurations are not needed for initialization many systems do not check their settings early e g at startup time consequently the errors become latent until their manifestations cause severe damage such as breaking the failure handling such latent errors are likely to escape from sysadmins observation and testing and be deployed to production at scale our study shows that many of today s mature widely used software systems are subject to latent configuration errors referred to as lc errors in their critically 
important configurations those related to the system s reliability availability and serviceability one root cause is that many 14 0 93 2 of these configurations do not have any special code for checking the correctness of their settings at the system s initialization time to help software systems detect lc errors early we present a tool named pcheck that analyzes the source code and automatically generates configuration checking code called checkers the checkers emulate the late execution that uses configuration values and detect lc errors if the error manifestations are captured during the emulated execution our results show that pcheck can help systems detect 75 of real world lc errors at the initialization phase including 37 new lc errors that have not been exposed before compared with existing detection tools it can detect 31 more lc errors", + "title_raw": "Early detection of configuration errors to reduce failure damage", + "abstract_raw": "Early detection is the key to minimizing failure damage induced by configuration errors, especially those errors in configurations that control failure handling and fault tolerance. Since such configurations are not needed for initialization, many systems do not check their settings early (e.g., at startup time). Consequently, the errors become latent until their manifestations cause severe damage, such as breaking the failure handling. Such latent errors are likely to escape from sysadmins' observation and testing, and be deployed to production at scale.\r\n\r\nOur study shows that many of today's mature, widely-used software systems are subject to latent configuration errors (referred to as LC errors) in their critically important configurations--those related to the system's reliability, availability, and serviceability. 
One root cause is that many (14.0%-93.2%) of these configurations do not have any special code for checking the correctness of their settings at the system's initialization time.\r\n\r\nTo help software systems detect LC errors early, we present a tool named PCHECK that analyzes the source code and automatically generates configuration checking code (called checkers). The checkers emulate the late execution that uses configuration values, and detect LC errors if the error manifestations are captured during the emulated execution. Our results show that PCHECK can help systems detect 75+% of real-world LC errors at the initialization phase, including 37 new LC errors that have not been exposed before. Compared with existing detection tools, it can detect 31% more LC errors.", + "link": "https://www.semanticscholar.org/paper/b2abc9a3f374ab7191e321025106b2b24775b785", + "scraped_abstract": null, + "citation_best": 1 + }, + { + "paper": "2576393274", + "venue": "1185109434", + "year": "2016", + "title": "push button verification of file systems via crash refinement", + "label": [ + "95637964", + "111919701", + "168065819", + "199360897", + "82820731", + "193769178", + "2225880", + "2780940931", + "166807848", + "166690567" + ], + "author": [ + "2275599615", + "2026265091", + "2344136120", + "2303948204" + ], + "reference": [ + "1412006679", + "1423003888", + "1475719308", + "1710734607", + "1964802076", + "1995626000", + "2020395879", + "2025018396", + "2057156093", + "2073742357", + "2081917916", + "2096915479", + "2101512909", + "2104136059", + "2104266030", + "2104670257", + "2104954161", + "2106192381", + "2108183412", + "2111262474", + "2115235165", + "2115600841", + "2117289367", + "2126823808", + "2127321265", + "2137628566", + "2138555106", + "2138634686", + "2154161369", + "2155061608", + "2171352284", + "2291146106", + "2332661323", + "2340812064", + "2408151954", + "2465660011", + "2911450990" + ], + "abstract": "the file system is an essential operating 
system component for persisting data on storage devices writing bug free file systems is non trivial as they must correctly implement and maintain complex on disk data structures even in the presence of system crashes and reorderings of disk operations this paper presents yggdrasil a toolkit for writing file systems with push button verification yggdrasil requires no manual annotations or proofs about the implementation code and it produces a counterexample if there is a bug yggdrasil achieves this automation through a novel definition of file system correctness called crash refinement which requires the set of possible disk states produced by an implementation including states produced by crashes to be a subset of those allowed by the specification crash refinement is amenable to fully automated satisfiability modulo theories smt reasoning and enables developers to implement file systems in a modular way for verification with yggdrasil we have implemented and verified the yxv6 journaling file system the ycp file copy utility and the ylog persistent log our experience shows that the ease of proof and counterexample based debugging support make yggdrasil practical for building reliable storage applications", + "title_raw": "Push-button verification of file systems via crash refinement", + "abstract_raw": "The file system is an essential operating system component for persisting data on storage devices. Writing bug-free file systems is non-trivial, as they must correctly implement and maintain complex on-disk data structures even in the presence of system crashes and reorderings of disk operations.\r\n\r\nThis paper presents Yggdrasil, a toolkit for writing file systems with push-button verification: Yggdrasil requires no manual annotations or proofs about the implementation code, and it produces a counterexample if there is a bug. 
Yggdrasil achieves this automation through a novel definition of file system correctness called crash refinement, which requires the set of possible disk states produced by an implementation (including states produced by crashes) to be a subset of those allowed by the specification. Crash refinement is amenable to fully automated satisfiability modulo theories (SMT) reasoning, and enables developers to implement file systems in a modular way for verification.\r\n\r\nWith Yggdrasil, we have implemented and verified the Yxv6 journaling file system, the Ycp file copy utility, and the Ylog persistent log. Our experience shows that the ease of proof and counterexample-based debugging support make Yggdrasil practical for building reliable storage applications.", + "link": "https://www.semanticscholar.org/paper/c67aae21549ee703cd07d541500198081ef8cc83", + "scraped_abstract": null, + "citation_best": 53 + }, + { + "paper": "2414762192", + "venue": "1127352206", + "year": "2016", + "title": "into the depths of c elaborating the de facto standards", + "label": [ + "160145156", + "12186640", + "184337299", + "193702766", + "199360897", + "55166926", + "13339274" + ], + "author": [ + "264870927", + "2329415059", + "2637899266", + "2413668506", + "1227104805", + "2101946122", + "2083541332" + ], + "reference": [ + "11554108", + "163539143", + "201976661", + "563467911", + "1484997341", + "1526037585", + "1818561783", + "1825457006", + "1904738922", + "1969064066", + "1978364288", + "1998263113", + "2003556922", + "2005574148", + "2008626182", + "2033320580", + "2047650147", + "2064390891", + "2098456636", + "2112472122", + "2122868537", + "2126421682", + "2130970533", + "2138074470", + "2140611647", + "2141080699", + "2141365240", + "2152885346", + "2154942048", + "2155851497", + "2156858199", + "2162604396", + "2167029843", + "2167148498", + "2171482413", + "2186809249", + "2213418347", + "2278024782", + "2499491173", + "2533070090", + "2611598995" + ], + "abstract": "c 
remains central to our computing infrastructure it is notionally defined by iso standards but in reality the properties of c assumed by systems code and those implemented by compilers have diverged both from the iso standards and from each other and none of these are clearly understood we make two contributions to help improve this error prone situation first we describe an in depth analysis of the design space for the semantics of pointers and memory in c as it is used in practice we articulate many specific questions build a suite of semantic test cases gather experimental data from multiple implementations and survey what c experts believe about the de facto standards we identify questions where there is a consensus either following iso or differing and where there are conflicts we apply all this to an experimental c implemented above capability hardware second we describe a formal model cerberus for large parts of c cerberus is parameterised on its memory model it is linkable either with a candidate de facto memory object model under construction or with an operational c11 concurrency model it is defined by elaboration to a much simpler core language for accessibility and it is executable as a test oracle on small examples this should provide a solid basis for discussion of what mainstream c is now what programmers and analysis tools can assume and what compilers aim to implement ultimately we hope it will be a step towards clear consistent and accepted semantics for the various use cases of c", + "title_raw": "Into the depths of C: elaborating the de facto standards", + "abstract_raw": "C remains central to our computing infrastructure. It is notionally defined by ISO standards, but in reality the properties of C assumed by systems code and those implemented by compilers have diverged, both from the ISO standards and from each other, and none of these are clearly understood. We make two contributions to help improve this error-prone situation. 
First, we describe an in-depth analysis of the design space for the semantics of pointers and memory in C as it is used in practice. We articulate many specific questions, build a suite of semantic test cases, gather experimental data from multiple implementations, and survey what C experts believe about the de facto standards. We identify questions where there is a consensus (either following ISO or differing) and where there are conflicts. We apply all this to an experimental C implemented above capability hardware. Second, we describe a formal model, Cerberus, for large parts of C. Cerberus is parameterised on its memory model; it is linkable either with a candidate de facto memory object model, under construction, or with an operational C11 concurrency model; it is defined by elaboration to a much simpler Core language for accessibility, and it is executable as a test oracle on small examples. This should provide a solid basis for discussion of what mainstream C is now: what programmers and analysis tools can assume and what compilers aim to implement. 
Ultimately we hope it will be a step towards clear, consistent, and accepted semantics for the various use-cases of C.", + "link": "https://www.semanticscholar.org/paper/5ddc6a439cdc9b4eaebdad8c20976f1f0be4523f", + "scraped_abstract": null, + "citation_best": 14 + }, + { + "paper": "2413028252", + "venue": "1127352206", + "year": "2016", + "title": "transactional data structure libraries", + "label": [ + "167149655", + "77088390", + "48044578", + "134277064", + "177264268", + "193702766", + "203222032", + "127722929", + "160403385", + "188045909", + "162319229" + ], + "author": [ + "2240160473", + "2605387252", + "696126840" + ], + "reference": [ + "1157540918", + "1492674724", + "1495128589", + "1517389573", + "1533487732", + "1542975293", + "1584458283", + "1847396493", + "1935933758", + "1966132277", + "1970621822", + "1983572666", + "1991199257", + "1999622046", + "2045506295", + "2063050381", + "2068617930", + "2095825766", + "2097279136", + "2097717378", + "2099661831", + "2101939036", + "2105055683", + "2106703803", + "2113751407", + "2119251905", + "2128585185", + "2128870271", + "2129458440", + "2129555383", + "2138567239", + "2140908587", + "2149298504", + "2151467437", + "2156717922", + "2158838886", + "2163654949", + "2169611673", + "2172192453", + "2187800381", + "2266681092", + "2341778529", + "3083604022" + ], + "abstract": "we introduce transactions into libraries of concurrent data structures such transactions can be used to ensure atomicity of sequences of data structure operations by focusing on transactional access to a well defined set of data structure operations we strike a balance between the ease of programming of transactions and the efficiency of custom tailored data structures we exemplify this concept by designing and implementing a library supporting transactions on any number of maps sets implemented as skiplists and queues our library offers efficient and scalable transactions which are an order of magnitude faster than state of the 
art transactional memory toolkits moreover our approach treats stand alone data structure operations like put and enqueue as first class citizens and allows them to execute with virtually no overhead at the speed of the original data structure library", + "title_raw": "Transactional data structure libraries", + "abstract_raw": "We introduce transactions into libraries of concurrent data structures; such transactions can be used to ensure atomicity of sequences of data structure operations. By focusing on transactional access to a well-defined set of data structure operations, we strike a balance between the ease-of-programming of transactions and the efficiency of custom-tailored data structures. We exemplify this concept by designing and implementing a library supporting transactions on any number of maps, sets (implemented as skiplists), and queues. Our library offers efficient and scalable transactions, which are an order of magnitude faster than state-of-the-art transactional memory toolkits. 
Moreover, our approach treats stand-alone data structure operations (like put and enqueue) as first class citizens, and allows them to execute with virtually no overhead, at the speed of the original data structure library.", + "link": "https://www.semanticscholar.org/paper/202d6e1bc26a9ec2fe69c60ccc9b898c22425ed4", + "scraped_abstract": null, + "citation_best": 7 + }, + { + "paper": "2386500332", + "venue": "1127352206", + "year": "2016", + "title": "types from data making structured data first class citizens in f", + "label": [ + "2780416260", + "77088390", + "2779849256", + "9476365", + "47487241", + "8797682", + "2776214188", + "39920170", + "23123220" + ], + "author": [ + "282965468", + "2186578543", + "2136386745" + ], + "reference": [ + "70194684", + "197735651", + "1546727036", + "1829244603", + "1966814918", + "1980939032", + "1982280055", + "1990751139", + "2009775190", + "2041630899", + "2047497632", + "2051001073", + "2088675571", + "2096314727", + "2132843347", + "2136327713", + "2151232706", + "2153374737", + "2154294048", + "2168986107", + "2186298404", + "2394914267", + "2617318415", + "3087747708", + "3137596000", + "3163302786" + ], + "abstract": "most modern applications interact with external services and access data in structured formats such as xml json and csv static type systems do not understand such formats often making data access more cumbersome should we give up and leave the messy world of external data to dynamic typing and runtime checks of course not we present f data a library that integrates external structured data into f as most real world data does not come with an explicit schema we develop a shape inference algorithm that infers a shape from representative sample documents we then integrate the inferred shape into the f type system using type providers we formalize the process and prove a relative type soundness theorem our library significantly reduces the amount of data access code and it provides additional safety 
guarantees when contrasted with the widely used weakly typed techniques", + "title_raw": "Types from data: making structured data first-class citizens in F#", + "abstract_raw": "Most modern applications interact with external services and access data in structured formats such as XML, JSON and CSV. Static type systems do not understand such formats, often making data access more cumbersome. Should we give up and leave the messy world of external data to dynamic typing and runtime checks? Of course, not! We present F# Data, a library that integrates external structured data into F#. As most real-world data does not come with an explicit schema, we develop a shape inference algorithm that infers a shape from representative sample documents. We then integrate the inferred shape into the F# type system using type providers. We formalize the process and prove a relative type soundness theorem. Our library significantly reduces the amount of data access code and it provides additional safety guarantees when contrasted with the widely used weakly typed techniques.", + "link": "https://www.semanticscholar.org/paper/a9cea64ed69ae54c050dd1b42c84f213d1107816", + "scraped_abstract": null, + "citation_best": 4 + }, + { + "paper": "2408975894", + "venue": "1127352206", + "year": "2016", + "title": "assessing the limits of program specific garbage collection performance", + "label": [ + "105122174", + "134757568", + "97541855", + "68387754" + ], + "author": [ + "2146340799", + "2155105488", + "1559204946", + "2656542640" + ], + "reference": [ + "2000746586", + "2053986424", + "2076337359", + "2094678784", + "2098432798", + "2119567691", + "2123918639", + "2127527141", + "2130005627", + "2145825613", + "2155027007", + "2159928435", + "2165497190" + ], + "abstract": "we consider the ultimate limits of program specific garbage collector performance for real programs we first characterize the gc schedule optimization problem using markov decision processes mdps based on this 
characterization we develop a method of determining for a given program run and heap size an optimal schedule of collections for a non generational collector we further explore the limits of performance of a generational collector where it is not feasible to search the space of schedules to prove optimality still we show significant improvements with least squares policy iteration a reinforcement learning technique for solving mdps we demonstrate that there is considerable promise to reduce garbage collection costs by developing program specific collection policies", + "title_raw": "Assessing the limits of program-specific garbage collection performance", + "abstract_raw": "We consider the ultimate limits of program-specific garbage collector performance for real programs. We first characterize the GC schedule optimization problem using Markov Decision Processes (MDPs). Based on this characterization, we develop a method of determining, for a given program run and heap size, an optimal schedule of collections for a non-generational collector. We further explore the limits of performance of a generational collector, where it is not feasible to search the space of schedules to prove optimality. Still, we show significant improvements with Least Squares Policy Iteration, a reinforcement learning technique for solving MDPs. 
We demonstrate that there is considerable promise to reduce garbage collection costs by developing program-specific collection policies.", + "link": "https://www.semanticscholar.org/paper/4af11afcd5f448e667d705b08f65e4968f497d14", + "scraped_abstract": null, + "citation_best": 7 + }, + { + "paper": "2963560792", + "venue": "1184151122", + "year": "2016", + "title": "faq questions asked frequently", + "label": [ + "155846161", + "181321632", + "148764684", + "169272836", + "2778692605", + "80444323", + "76482347" + ], + "author": [ + "2106395173", + "2100461916", + "2147385210" + ], + "reference": [ + "52840101", + "56447744", + "99828349", + "1480550036", + "1496450597", + "1549008998", + "1573978656", + "1601049810", + "1752044203", + "1759086394", + "1810918211", + "1968991580", + "1978282744", + "1982333717", + "1983022219", + "1998342956", + "1999038767", + "2008865455", + "2015362435", + "2026129786", + "2028659807", + "2029634712", + "2032444001", + "2046167302", + "2047650874", + "2050100848", + "2055633823", + "2057361103", + "2061171222", + "2064076717", + "2066776015", + "2067706177", + "2077814962", + "2100444372", + "2120349401", + "2126653228", + "2134483537", + "2143075689", + "2143238590", + "2146492010", + "2156094048", + "2161956202", + "2166552916", + "2174332830", + "2232813226", + "2287549241", + "2295268496", + "2401576518", + "2624220145", + "2628596753", + "2752885492", + "2790840297", + "2950294966", + "3145128584" + ], + "abstract": "we define and study the functional aggregate query faq problem which encompasses many frequently asked questions in constraint satisfaction databases matrix operations probabilistic graphical models and logic this is our main conceptual contribution we then present a simple algorithm called insideout to solve this general problem insideout is a variation of the traditional dynamic programming approach for constraint programming based on variable elimination our variation adds a couple of simple twists to basic 
variable elimination in order to deal with the generality of faq to take full advantage of grohe and marx s fractional edge cover framework and of the analysis of recent worst case optimal relational join algorithms as is the case with constraint programming and graphical model inference to make insideout run efficiently we need to solve an optimization problem to compute an appropriate variable ordering the main technical contribution of this work is a precise characterization of when a variable ordering is semantically equivalent to the variable ordering given by the input faq expression then we design an approximation algorithm to find an equivalent variable ordering that has the best fractional faq width our results imply a host of known and a few new results in graphical model inference matrix operations relational joins and logic we also briefly explain how recent algorithms on beyond worst case analysis for joins and those for solving sat and sat can be viewed as variable elimination to solve faq over compactly represented input functions", + "title_raw": "FAQ: Questions Asked Frequently", + "abstract_raw": "We define and study the Functional Aggregate Query (FAQ) problem, which encompasses many frequently asked questions in constraint satisfaction, databases, matrix operations, probabilistic graphical models and logic. This is our main conceptual contribution. We then present a simple algorithm called \"InsideOut\" to solve this general problem. InsideOut is a variation of the traditional dynamic programming approach for constraint programming based on variable elimination. Our variation adds a couple of simple twists to basic variable elimination in order to deal with the generality of FAQ, to take full advantage of Grohe and Marx's fractional edge cover framework, and of the analysis of recent worst-case optimal relational join algorithms. 
As is the case with constraint programming and graphical model inference, to make InsideOut run efficiently we need to solve an optimization problem to compute an appropriate variable ordering. The main technical contribution of this work is a precise characterization of when a variable ordering is `semantically equivalent' to the variable ordering given by the input FAQ expression. Then, we design an approximation algorithm to find an equivalent variable ordering that has the best `fractional FAQ-width'. Our results imply a host of known and a few new results in graphical model inference, matrix operations, relational joins, and logic. We also briefly explain how recent algorithms on beyond worst-case analysis for joins and those for solving SAT and #SAT can be viewed as variable elimination to solve FAQ over compactly represented input functions.", + "link": "https://www.semanticscholar.org/paper/1135b3bf28238efb284255924199523b46f93d14", + "scraped_abstract": null, + "citation_best": 13 + }, + { + "paper": "2511192821", + "venue": "1163618098", + "year": "2016", + "title": "a2 analog malicious hardware", + "label": [ + "174333608", + "74524168", + "38652104", + "3981223", + "149635348", + "9390403" + ], + "author": [ + "2256744454", + "2116311070", + "2305520967", + "2279048297", + "2199982448" + ], + "reference": [ + "76123274", + "1492970950", + "1500140210", + "1541663547", + "1686420892", + "1952517978", + "1989761413", + "1990097965", + "1992964615", + "1995558750", + "2009009482", + "2010699766", + "2014828568", + "2026891388", + "2036620903", + "2065181350", + "2092545762", + "2093405763", + "2100866260", + "2104677471", + "2115394282", + "2122509673", + "2142135738", + "2145937629", + "2150928734", + "2153055408", + "2154978532", + "2160597734", + "2172075479" + ], + "abstract": "while the move to smaller transistors has been a boon for performance it has dramatically increased the cost to fabricate chips using those smaller transistors this forces the 
vast majority of chip design companies to trust a third party often overseas to fabricate their design to guard against shipping chips with errors intentional or otherwise chip design companies rely on post fabrication testing unfortunately this type of testing leaves the door open to malicious modifications since attackers can craft attack triggers requiring a sequence of unlikely events which will never be encountered by even the most diligent tester in this paper we show how a fabrication time attacker can leverage analog circuits to create a hardware attack that is small i e requires as little as one gate and stealthy i e requires an unlikely trigger sequence before effecting a chip s functionality in the open spaces of an already placed and routed design we construct a circuit that uses capacitors to siphon charge from nearby wires as they transition between digital values when the capacitors fully charge they deploy an attack that forces a victim flip flop to a desired value we weaponize this attack into a remotely controllable privilege escalation by attaching the capacitor to a wire controllable and by selecting a victim flip flop that holds the privilege bit for our processor we implement this attack in an or1200 processor and fabricate a chip experimental results show that our attacks work show that our attacks elude activation by a diverse set of benchmarks and suggest that our attacks evade known defenses", + "title_raw": "A2: Analog Malicious Hardware", + "abstract_raw": "While the move to smaller transistors has been a boon for performance it has dramatically increased the cost to fabricate chips using those smaller transistors. This forces the vast majority of chip design companies to trust a third party -- often overseas -- to fabricate their design. To guard against shipping chips with errors (intentional or otherwise) chip design companies rely on post-fabrication testing. 
Unfortunately, this type of testing leaves the door open to malicious modifications since attackers can craft attack triggers requiring a sequence of unlikely events, which will never be encountered by even the most diligent tester. In this paper, we show how a fabrication-time attacker can leverage analog circuits to create a hardware attack that is small (i.e., requires as little as one gate) and stealthy (i.e., requires an unlikely trigger sequence before effecting a chip's functionality). In the open spaces of an already placed and routed design, we construct a circuit that uses capacitors to siphon charge from nearby wires as they transition between digital values. When the capacitors fully charge, they deploy an attack that forces a victim flip-flop to a desired value. We weaponize this attack into a remotely-controllable privilege escalation by attaching the capacitor to a wire controllable and by selecting a victim flip-flop that holds the privilege bit for our processor. We implement this attack in an OR1200 processor and fabricate a chip. 
Experimental results show that our attacks work, show that our attacks elude activation by a diverse set of benchmarks, and suggest that our attacks evade known defenses.", + "link": "https://www.semanticscholar.org/paper/fa74bacdfb77496fa6d597a58e83b80a4e4e83db", + "scraped_abstract": null, + "citation_best": 223 + }, + { + "paper": "2488197787", + "venue": "1152462849", + "year": "2016", + "title": "don t mind the gap bridging network wide objectives and device level configurations", + "label": [ + "63540848", + "31258907", + "135257023", + "2776689786", + "169590947", + "120314980" + ], + "author": [ + "2912563580", + "2119684617", + "294596595", + "2016432970", + "2161850330" + ], + "reference": [ + "1249296113", + "1582996281", + "1780466844", + "1965343327", + "1967656199", + "2021234005", + "2026392294", + "2064595734", + "2074616737", + "2082135413", + "2111379929", + "2112867573", + "2118632874", + "2120900812", + "2124430419", + "2126969025", + "2130210899", + "2130531694", + "2133496820", + "2161151447", + "2239064595", + "2298376471", + "2303191924", + "2477023729", + "2485543941", + "2536639940", + "3088063260", + "3160434771" + ], + "abstract": "we develop propane a language and compiler to help network operators with a challenging error prone task bridging the gap between network wide routing objectives and low level configurations of devices that run complex distributed protocols the language allows operators to specify their objectives naturally using high level constraints on both the shape and relative preference of traffic paths the compiler automatically translates these specifications to router level bgp configurations using an effective intermediate representation that compactly encodes the flow of routing information along policy compliant paths it guarantees that the compiled configurations correctly implement the specified policy under all possible combinations of failures we show that propane can effectively express the policies of 
datacenter and backbone networks of a large cloud provider and despite its strong guarantees our compiler scales to networks with hundreds or thousands of routers", + "title_raw": "Don't Mind the Gap: Bridging Network-wide Objectives and Device-level Configurations", + "abstract_raw": "We develop Propane, a language and compiler to help network operators with a challenging, error-prone task\u2014bridging the gap between network-wide routing objectives and low-level configurations of devices that run complex, distributed protocols. The language allows operators to specify their objectives naturally, using high-level constraints on both the shape and relative preference of traffic paths. The compiler automatically translates these specifications to router-level BGP configurations, using an effective intermediate representation that compactly encodes the flow of routing information along policy-compliant paths. It guarantees that the compiled configurations correctly implement the specified policy under all possible combinations of failures. 
We show that Propane can effectively express the policies of datacenter and backbone networks of a large cloud provider; and despite its strong guarantees, our compiler scales to networks with hundreds or thousands of routers.", + "link": "https://www.semanticscholar.org/paper/17059e939aa051d7db57f4af959b2af21fa3dd18", + "scraped_abstract": null, + "citation_best": 6 + }, + { + "paper": "2488265751", + "venue": "1152462849", + "year": "2016", + "title": "eliminating channel feedback in next generation cellular networks", + "label": [ + "92545706", + "138660444", + "19247436", + "148063708", + "31258907", + "207987634", + "555944384", + "156996364", + "153646914", + "91330434", + "40409654", + "2778116611", + "54197355" + ], + "author": [ + "227480808", + "2238571927", + "56628844", + "194212017" + ], + "reference": [ + "1510911730", + "1581235876", + "1585879837", + "1708343048", + "1804051827", + "1874568214", + "1915423751", + "1973609509", + "1983301216", + "2013503669", + "2037608252", + "2044079777", + "2048481826", + "2054318779", + "2061914652", + "2076457767", + "2076761096", + "2077077908", + "2085468476", + "2094124335", + "2094993324", + "2098133520", + "2100548565", + "2104622386", + "2109212118", + "2110404830", + "2112323603", + "2115810313", + "2117909688", + "2119136610", + "2120549790", + "2124067137", + "2137628444", + "2139308412", + "2141682101", + "2144916689", + "2146102702", + "2147906061", + "2162636480", + "2165492624", + "2168821867", + "2169474053", + "2506478151", + "2568331865", + "2963887063", + "3136666733", + "3162456902" + ], + "abstract": "this paper focuses on a simple yet fundamental question can a node infer the wireless channels on one frequency band by observing the channels on a different frequency band this question arises in cellular networks where the uplink and the downlink operate on different frequencies addressing this question is critical for the deployment of key 5g solutions such as massive mimo multi user mimo and 
distributed mimo which require channel state information we introduce r2 f2 a system that enables lte base stations to infer the downlink channels to a client by observing the uplink channels from that client by doing so r2 f2 extends the concept of reciprocity to lte cellular networks where downlink and uplink transmissions occur on different frequency bands it also removes a major hurdle for the deployment of 5g mimo solutions we have implemented r2 f2 in software radios and integrated it within the lte ofdm physical layer our results show that the channels computed by r2 f2 deliver accurate mimo beamforming to within 0 7 db of beamforming gains with ground truth channels while eliminating channel feedback overhead", + "title_raw": "Eliminating Channel Feedback in Next-Generation Cellular Networks", + "abstract_raw": "This paper focuses on a simple, yet fundamental question: ``Can a node infer the wireless channels on one frequency band by observing the channels on a different frequency band?'' This question arises in cellular networks, where the uplink and the downlink operate on different frequencies. Addressing this question is critical for the deployment of key 5G solutions such as massive MIMO, multi-user MIMO, and distributed MIMO, which require channel state information. We introduce R2-F2, a system that enables LTE base stations to infer the downlink channels to a client by observing the uplink channels from that client. By doing so, R2-F2 extends the concept of reciprocity to LTE cellular networks, where downlink and uplink transmissions occur on different frequency bands. It also removes a major hurdle for the deployment of 5G MIMO solutions. We have implemented R2-F2 in software radios and integrated it within the LTE OFDM physical layer. 
Our results show that the channels computed by R2-F2 deliver accurate MIMO beamforming (to within 0.7~dB of beamforming gains with ground truth channels) while eliminating channel feedback overhead.", + "link": "https://www.semanticscholar.org/paper/670925c875ef3f5eb56f42bb85e18394ac0fe49b", + "scraped_abstract": null, + "citation_best": 134 + }, + { + "paper": "2495264776", + "venue": "1152462849", + "year": "2016", + "title": "inter technology backscatter towards internet connectivity for implanted devices", + "label": [ + "546215728", + "149635348", + "555944384", + "9390403", + "21822782", + "108265739", + "110875604", + "83849155" + ], + "author": [ + "2398591363", + "669839582", + "2141890943", + "2039996270", + "2154841660" + ], + "reference": [ + "1009466626", + "1550777847", + "1967356008", + "1967754176", + "1971775068", + "1975671167", + "1994803221", + "2010683549", + "2026350097", + "2029714639", + "2034654141", + "2058871194", + "2087248844", + "2091703650", + "2093223573", + "2099461393", + "2108412306", + "2109520672", + "2111360050", + "2114004602", + "2126353950", + "2127919512", + "2134011805", + "2135825876", + "2138874069", + "2145692097", + "2146976683", + "2165406850", + "2169929745", + "2196145477", + "2300484078", + "2481477255", + "2513523907", + "2612977707" + ], + "abstract": "we introduce inter technology backscatter a novel approach that transforms wireless transmissions from one technology to another on the air specifically we show for the first time that bluetooth transmissions can be used to create wi fi and zigbee compatible signals using backscatter communication since bluetooth wi fi and zigbee radios are widely available this approach enables a backscatter design that works using only commodity devices we build prototype backscatter hardware using an fpga and experiment with various wi fi bluetooth and zigbee devices our experiments show we can create 2 11 mbps wi fi standards compliant signals by backscattering bluetooth 
transmissions to show the generality of our approach we also demonstrate generation of standards complaint zigbee signals by backscattering bluetooth transmissions finally we build proof of concepts for previously infeasible applications including the first contact lens form factor antenna prototype and an implantable neural recording interface that communicate directly with commodity devices such as smartphones and watches thus enabling the vision of internet connected implanted devices", + "title_raw": "Inter-Technology Backscatter: Towards Internet Connectivity for Implanted Devices", + "abstract_raw": "We introduce inter-technology backscatter, a novel approach that transforms wireless transmissions from one technology to another, on the air. Specifically, we show for the first time that Bluetooth transmissions can be used to create Wi-Fi and ZigBee-compatible signals using backscatter communication. Since Bluetooth, Wi-Fi and ZigBee radios are widely available, this approach enables a backscatter design that works using only commodity devices. We build prototype backscatter hardware using an FPGA and experiment with various Wi-Fi, Bluetooth and ZigBee devices. Our experiments show we can create 2--11~Mbps Wi-Fi standards-compliant signals by backscattering Bluetooth transmissions. To show the generality of our approach, we also demonstrate generation of standards-complaint ZigBee signals by backscattering Bluetooth transmissions. 
Finally, we build proof-of-concepts for previously infeasible applications including the first contact lens form-factor antenna prototype and an implantable neural recording interface that communicate directly with commodity devices such as smartphones and watches, thus enabling the vision of Internet connected implanted devices.", + "link": "https://www.semanticscholar.org/paper/620841951ad6ff5392ab69378b667e35bf443898", + "scraped_abstract": null, + "citation_best": 23 + }, + { + "paper": "2340380384", + "venue": "1140684652", + "year": "2016", + "title": "understanding information need an fmri study", + "label": [ + "44291984", + "78999398", + "23123220" + ], + "author": [ + "92283206", + "318333653", + "306880229" + ], + "reference": [ + "1791808111", + "1940352218", + "1964146567", + "1969340322", + "1973697738", + "1974816463", + "1982451429", + "1985367358", + "1986178665", + "1991530889", + "1995242587", + "2000432130", + "2002128263", + "2007350606", + "2008883775", + "2018097218", + "2043088477", + "2045251193", + "2054339120", + "2072995952", + "2084814177", + "2099585577", + "2099703387", + "2100189108", + "2110065044", + "2113788004", + "2119689606", + "2121033679", + "2124932826", + "2125868297", + "2135133561", + "2136022845", + "2139005824", + "2143445498", + "2144208493", + "2144351977", + "2152314154", + "2153505326", + "2156797816", + "2162010696", + "2162476696", + "2164734124", + "2319223103", + "3013880958", + "3124955848" + ], + "abstract": "the raison d etre of ir is to satisfy human information need but do we really understand information need despite advances in the past few decades in both the ir and relevant scientific communities this question is largely unanswered we do not really understand how an information need emerges and how it is physically manifested information need refers to a complex concept at the very initial state of the phenomenon i e at a visceral level even the searcher may not be aware of its existence this renders 
the measuring of this concept using traditional behaviour studies nearly impossible in this paper we investigate the connection between an information need and brain activity using functional magnetic resonance imaging fmri we measured the brain activity of twenty four participants while they performed a question answering q a task where the questions were carefully selected and developed from trec 8 and trec 2001 q a track the results of this experiment revealed a distributed network of brain regions commonly associated with activities related to information need and retrieval and differing brain activity in processing scenarios when participants knew the answer to a given question and when they did not and needed to search we believe our study and conclusions constitute an important step in unravelling the nature of information need and therefore better satisfying it", + "title_raw": "Understanding Information Need: An fMRI Study", + "abstract_raw": "The raison d'etre of IR is to satisfy human information need. But, do we really understand information need? Despite advances in the past few decades in both the IR and relevant scientific communities, this question is largely unanswered. We do not really understand how an information need emerges and how it is physically manifested. Information need refers to a complex concept: at the very initial state of the phenomenon (i.e. at a visceral level), even the searcher may not be aware of its existence. This renders the measuring of this concept (using traditional behaviour studies) nearly impossible. In this paper, we investigate the connection between an information need and brain activity. Using functional Magnetic Resonance Imaging (fMRI), we measured the brain activity of twenty four participants while they performed a Question Answering (Q/A) Task, where the questions were carefully selected and developed from TREC-8 and TREC 2001 Q/A Track. 
The results of this experiment revealed a distributed network of brain regions commonly associated with activities related to information need and retrieval and differing brain activity in processing scenarios when participants knew the answer to a given question and when they did not and needed to search. We believe our study and conclusions constitute an important step in unravelling the nature of information need and therefore better satisfying it.", + "link": "https://www.semanticscholar.org/paper/1919e7c0f4c1b5252219f49f3c263db58768b1b2", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2421547754", + "venue": "1175089206", + "year": "2016", + "title": "wander join online aggregation via random walks", + "label": [ + "82687282", + "196713837", + "203570394", + "2778692605", + "188805328", + "80444323", + "120314980", + "24028149" + ], + "author": [ + "2472728339", + "2629713714", + "2115752352", + "2485659166" + ], + "reference": [ + "1532546444", + "1656389077", + "1964857063", + "1968032985", + "1968829657", + "1972223681", + "1993482412", + "2001947543", + "2002791618", + "2017992824", + "2018030107", + "2020147322", + "2027685188", + "2038412523", + "2043097023", + "2051641461", + "2063546264", + "2064777887", + "2066179253", + "2066293100", + "2067348620", + "2068634907", + "2071989194", + "2073479529", + "2086128349", + "2103212156", + "2110363867", + "2117098610", + "2118229812", + "2119072946", + "2132808937", + "2138529836", + "2139783012", + "2145385038", + "2151181047", + "2153231238", + "2153914251", + "2155595639", + "2158194525", + "2163864652", + "2164507334", + "2165990006", + "2167811976", + "2168385561", + "2170651405", + "2243803726", + "2293308125", + "2295428206", + "2296677182", + "2799002609" + ], + "abstract": "joins are expensive and online aggregation over joins was proposed to mitigate the cost which offers users a nice and flexible tradeoff between query efficiency and accuracy in a continuous online fashion 
however the state of the art approach in both internal and external memory is based on ripple join which is still very expensive and even needs unrealistic assumptions e g tuples in a table are stored in random order this paper proposes a new approach the wander join algorithm to the online aggregation problem by performing random walks over the underlying join graph we also design an optimizer that chooses the optimal plan for conducting the random walks without having to collect any statistics a priori compared with ripple join wander join is particularly efficient for equality joins involving multiple tables but also supports joins selection predicates and group by clauses can be handled as well extensive experiments using the tpc h benchmark have demonstrated the superior performance of wander join over ripple join in particular we have integrated and tested wander join in the latest version of postgresql demonstrating its practicality in a full fledged database system", + "title_raw": "Wander Join: Online Aggregation via Random Walks", + "abstract_raw": "Joins are expensive, and online aggregation over joins was proposed to mitigate the cost, which offers users a nice and flexible tradeoff between query efficiency and accuracy in a continuous, online fashion. However, the state-of-the-art approach, in both internal and external memory, is based on ripple join, which is still very expensive and even needs unrealistic assumptions (e.g., tuples in a table are stored in random order). This paper proposes a new approach, the wander join algorithm, to the online aggregation problem by performing random walks over the underlying join graph. We also design an optimizer that chooses the optimal plan for conducting the random walks without having to collect any statistics a priori. Compared with ripple join, wander join is particularly efficient for equality joins involving multiple tables, but also supports \u03b8-joins. 
Selection predicates and group-by clauses can be handled as well. Extensive experiments using the TPC-H benchmark have demonstrated the superior performance of wander join over ripple join. In particular, we have integrated and tested wander join in the latest version of PostgreSQL, demonstrating its practicality in a full-fledged database system.", + "link": "https://www.semanticscholar.org/paper/e794e6e9881c673d9dd63d823e10118b209cae62", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2963024133", + "venue": "1190910084", + "year": "2016", + "title": "reed muller codes achieve capacity on erasure channels", + "label": [ + "167955471", + "2778790127", + "206468330", + "169021753", + "149862233", + "42276685" + ], + "author": [ + "1913508077", + "2134670196", + "2177118573", + "2094884912", + "2037711403", + "2081756610" + ], + "reference": [ + "568673721", + "1507990476", + "1512363553", + "1529556207", + "1535038576", + "1536930200", + "1559343931", + "1559492868", + "1563159456", + "1578703824", + "1764851859", + "1968525006", + "1970056983", + "1971933274", + "1978526953", + "1986310050", + "1995875735", + "2001766583", + "2003895037", + "2004229705", + "2005236590", + "2025948462", + "2030633623", + "2037339745", + "2038242066", + "2042993834", + "2063966194", + "2063994293", + "2070702809", + "2071763320", + "2073236804", + "2080479164", + "2088719955", + "2094776848", + "2094878497", + "2099296548", + "2100856114", + "2102336264", + "2103098233", + "2103524940", + "2112446888", + "2113237846", + "2114468914", + "2115104320", + "2120399748", + "2121417875", + "2121498709", + "2121606987", + "2123119950", + "2135393523", + "2135764410", + "2143698439", + "2146712141", + "2150116694", + "2150498905", + "2152182443", + "2156849519", + "2159059820", + "2163934701", + "2164719123", + "2172679141", + "2179644698", + "2266370294", + "2589095445", + "2591190829", + "2611805533", + "2963572639", + "2987657883", + "3104291019", + "3122159180" + ], 
+ "abstract": "we introduce a new approach to proving that a sequence of deterministic linear codes achieves capacity on an erasure channel under maximum a posteriori decoding rather than relying on the precise structure of the codes our method exploits code symmetry in particular the technique applies to any sequence of linear codes where the block lengths are strictly increasing the code rates converge and the permutation group of each code is doubly transitive in a nutshell we show that symmetry alone implies near optimal performance an important consequence of this result is that a sequence of reed muller codes with increasing block length and converging rate achieves capacity this possibility has been suggested previously in the literature but it has only been proven for cases where the limiting code rate is 0 or 1 moreover these results extend naturally to affine invariant codes and thus to all extended primitive narrow sense bch codes this is used to resolve in the affirmative the existence question for capacity achieving sequences of binary cyclic codes the primary tools used in the proofs are the sharp threshold property for symmetric monotone boolean functions and the area theorem for extrinsic information transfer exit functions", + "title_raw": "Reed-Muller codes achieve capacity on erasure channels", + "abstract_raw": "We introduce a new approach to proving that a sequence of deterministic linear codes achieves capacity on an erasure channel under maximum a posteriori decoding. Rather than relying on the precise structure of the codes, our method exploits code symmetry. In particular, the technique applies to any sequence of linear codes where the block lengths are strictly increasing, the code rates converge, and the permutation group of each code is doubly transitive. In a nutshell, we show that symmetry alone implies near-optimal performance. 
An important consequence of this result is that a sequence of Reed-Muller codes with increasing block length and converging rate achieves capacity. This possibility has been suggested previously in the literature, but it has only been proven for cases where the limiting code rate is 0 or 1. Moreover, these results extend naturally to affine-invariant codes and, thus, to all extended primitive narrow-sense BCH codes. This is used to resolve, in the affirmative, the existence question for capacity-achieving sequences of binary cyclic codes. The primary tools used in the proofs are the sharp threshold property for symmetric monotone boolean functions and the area theorem for extrinsic information transfer (EXIT) functions.", + "link": "https://www.semanticscholar.org/paper/9185f931d72c5424335c8371ff7427c5254691e2", + "scraped_abstract": null, + "citation_best": 44 + }, + { + "paper": "2537074996", + "venue": "1166315290", + "year": "2016", + "title": "rovables miniature on body robots as mobile wearables", + "label": [ + "149635348", + "79061980", + "555944384", + "101468663", + "9390403", + "173018170", + "150594956", + "90509273", + "118505674" + ], + "author": [ + "2001274798", + "2291398295", + "2579329556", + "2358320642", + "3080419039", + "295984705", + "2042299866", + "2069682576" + ], + "reference": [ + "1572206011", + "1965849932", + "1999224599", + "2020169247", + "2024669213", + "2030257660", + "2039592020", + "2053378304", + "2065740160", + "2092050375", + "2098055994", + "2113331157", + "2115466366", + "2116604485", + "2125095347", + "2126978975", + "2128026023", + "2131740967", + "2142943459", + "2150549828", + "2152444335", + "2159456336", + "2163444123", + "2180414786", + "2405564622" + ], + "abstract": "we introduce rovables a miniature robot that can move freely on unmodified clothing the robots are held in place by magnetic wheels and can climb vertically the robots are untethered and have an onboard battery microcontroller and wireless 
communications they also contain a low power localization system that uses wheel encoders and imu allowing rovables to perform limited autonomous navigation on the body in the technical evaluations we found that rovables can operate continuously for 45 minutes and can carry up to 1 5n we propose an interaction space for mobile on body devices spanning sensing actuation and interfaces and develop application scenarios in that space our applications include on body sensing modular displays tactile feedback and interactive clothing and jewelry", + "title_raw": "Rovables: Miniature On-Body Robots as Mobile Wearables", + "abstract_raw": "We introduce Rovables, a miniature robot that can move freely on unmodified clothing. The robots are held in place by magnetic wheels, and can climb vertically. The robots are untethered and have an onboard battery, microcontroller, and wireless communications. They also contain a low-power localization system that uses wheel encoders and IMU, allowing Rovables to perform limited autonomous navigation on the body. In the technical evaluations, we found that Rovables can operate continuously for 45 minutes and can carry up to 1.5N. We propose an interaction space for mobile on-body devices spanning sensing, actuation, and interfaces, and develop application scenarios in that space. 
Our applications include on-body sensing, modular displays, tactile feedback and interactive clothing and jewelry.", + "link": "https://www.semanticscholar.org/paper/0a6c3429e85588a3e65d5876f0066c7d4069bb44", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2533619018", + "venue": "1166315290", + "year": "2016", + "title": "zooids building blocks for swarm user interfaces", + "label": [ + "118524514", + "163847158", + "107457646", + "2776865275", + "177264268", + "89505385", + "181335050", + "76518257", + "90509273", + "193581530" + ], + "author": [ + "2734788194", + "2229397824", + "2533497114", + "2780724759", + "89785269", + "2069682576" + ], + "reference": [ + "59431316", + "1481103847", + "1517064069", + "1542229793", + "1874982062", + "1967033510", + "1969045498", + "1972218541", + "1975142584", + "1981927657", + "1982636603", + "1986840445", + "1990259408", + "1994547327", + "1997488333", + "1998818664", + "2002910582", + "2004962076", + "2005188499", + "2010353889", + "2012299141", + "2014137027", + "2019375712", + "2026877030", + "2028822613", + "2031215791", + "2032933675", + "2043579144", + "2048125445", + "2057516087", + "2060804491", + "2063561142", + "2079218888", + "2082727067", + "2084069552", + "2085517578", + "2096625858", + "2097639646", + "2102848110", + "2104836835", + "2110378855", + "2111429380", + "2113331157", + "2114825426", + "2116311127", + "2117682058", + "2118041291", + "2119995469", + "2122581059", + "2122928453", + "2125215841", + "2127177812", + "2129763427", + "2130117351", + "2130615539", + "2132089528", + "2134908182", + "2138988785", + "2142304282", + "2143131345", + "2146604360", + "2147846431", + "2150549828", + "2152397416", + "2158804691", + "2161404123", + "2163893120", + "2168442304", + "2169855920", + "2222512263", + "2245553218", + "2259076611", + "2261511230", + "2294959121", + "2339406872", + "2342091124", + "2406660271", + "2544041682", + "2778205983", + "3142658448" + ], + "abstract": "this paper 
introduces swarm user interfaces a new class of human computer interfaces comprised of many autonomous robots that handle both display and interaction we describe the design of zooids an open source open hardware platform for developing tabletop swarm interfaces the platform consists of a collection of custom designed wheeled micro robots each 2 6 cm in diameter a radio base station a high speed dlp structured light projector for optical tracking and a software framework for application development and control we illustrate the potential of tabletop swarm user interfaces through a set of application scenarios developed with zooids and discuss general design considerations unique to swarm user interfaces", + "title_raw": "Zooids: Building Blocks for Swarm User Interfaces", + "abstract_raw": "This paper introduces swarm user interfaces, a new class of human-computer interfaces comprised of many autonomous robots that handle both display and interaction. We describe the design of Zooids, an open-source open-hardware platform for developing tabletop swarm interfaces. The platform consists of a collection of custom-designed wheeled micro robots each 2.6 cm in diameter, a radio base-station, a high-speed DLP structured light projector for optical tracking, and a software framework for application development and control. 
We illustrate the potential of tabletop swarm user interfaces through a set of application scenarios developed with Zooids, and discuss general design considerations unique to swarm user interfaces.", + "link": "https://www.semanticscholar.org/paper/a68fc8871593e3fc9b0e5d754f533ba8b89c8c83", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2539017927", + "venue": "1166315290", + "year": "2016", + "title": "procover sensory augmentation of prosthetic limbs using smart textile covers", + "label": [ + "107457646", + "44154836", + "152086174", + "150594956" + ], + "author": [ + "2343259542", + "2223409512", + "1238674649", + "2394996897", + "1793298736", + "2282489031", + "2534472325", + "102229238", + "2948658362" + ], + "reference": [ + "87503355", + "1588530204", + "1782270568", + "1916529046", + "1967273673", + "1999585120", + "2010431579", + "2012050798", + "2041266393", + "2054854590", + "2061453480", + "2073060332", + "2081175116", + "2097780143", + "2103115035", + "2116396066", + "2134500330", + "2157094999", + "2163444123", + "2165394321", + "2212410734", + "2296426322", + "2345907455", + "2401668149", + "3098485917" + ], + "abstract": "today s commercially available prosthetic limbs lack tactile sensation and feedback recent research in this domain focuses on sensor technologies designed to be directly embedded into future prostheses we present a novel concept and prototype of a prosthetic sensing wearable that offers a non invasive self applicable and customizable approach for the sensory augmentation of present day and future low to mid range priced lower limb prosthetics from consultation with eight lower limb amputees we investigated the design space for prosthetic sensing wearables and developed novel interaction methods for dynamic user driven creation and mapping of sensing regions on the foot to wearable haptic feedback actuators based on a pilot study with amputees we assessed the utility of our design in scenarios brought up by 
the amputees and we summarize our findings to establish future directions for research into using smart textiles for the sensory enhancement of prosthetic limbs", + "title_raw": "proCover: Sensory Augmentation of Prosthetic Limbs Using Smart Textile Covers", + "abstract_raw": "Today's commercially available prosthetic limbs lack tactile sensation and feedback. Recent research in this domain focuses on sensor technologies designed to be directly embedded into future prostheses. We present a novel concept and prototype of a prosthetic-sensing wearable that offers a non-invasive, self-applicable and customizable approach for the sensory augmentation of present-day and future low to mid-range priced lower-limb prosthetics. From consultation with eight lower-limb amputees, we investigated the design space for prosthetic sensing wearables and developed novel interaction methods for dynamic, user-driven creation and mapping of sensing regions on the foot to wearable haptic feedback actuators. Based on a pilot-study with amputees, we assessed the utility of our design in scenarios brought up by the amputees and we summarize our findings to establish future directions for research into using smart textiles for the sensory enhancement of prosthetic limbs.", + "link": "https://www.semanticscholar.org/paper/5fe10b54c27ce4610ede88522d7bcdd109db474b", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2538172027", + "venue": "1166315290", + "year": "2016", + "title": "viband high fidelity bio acoustic sensing using commodity smartwatch accelerometers", + "label": [ + "29794715", + "95020103", + "89805583", + "207347870", + "2776151529", + "89505385", + "31972630", + "64876066", + "150594956" + ], + "author": [ + "768992449", + "2147333048", + "2123491528" + ], + "reference": [ + "78111734", + "166064277", + "179321503", + "612599047", + "1524135185", + "1918749672", + "1975634689", + "1977509831", + "1980592899", + "1983898909", + "1986707552", + "1989702938", 
+ "2008834805", + "2022376209", + "2027225339", + "2029948294", + "2037464338", + "2039857090", + "2040569955", + "2042249835", + "2048769187", + "2056744577", + "2058189943", + "2070099144", + "2070924230", + "2084243203", + "2088182082", + "2099800354", + "2102413118", + "2115466366", + "2118271370", + "2122991029", + "2123068464", + "2123218386", + "2124386111", + "2128554449", + "2129114382", + "2130851966", + "2141964067", + "2144299314", + "2148575324", + "2152528000", + "2159999338", + "2163097095", + "2166712377", + "2167040800", + "2169709590", + "2194381629", + "2209204668", + "2333662034", + "2338964903", + "2402069821", + "2543775126" + ], + "abstract": "smartwatches and wearables are unique in that they reside on the body presenting great potential for always available input and interaction their position on the wrist makes them ideal for capturing bio acoustic signals we developed a custom smartwatch kernel that boosts the sampling rate of a smartwatch s existing accelerometer to 4 khz using this new source of high fidelity data we uncovered a wide range of applications for example we can use bio acoustic data to classify hand gestures such as flicks claps scratches and taps which combine with on device motion tracking to create a wide range of expressive input modalities bio acoustic sensing can also detect the vibrations of grasped mechanical or motor powered objects enabling passive object recognition that can augment everyday experiences with context aware functionality finally we can generate structured vibrations using a transducer and show that data can be transmitted through the human body overall our contributions unlock user interface techniques that previously relied on special purpose and or cumbersome instrumentation making such interactions considerably more feasible for inclusion in future consumer devices", + "title_raw": "ViBand: High-Fidelity Bio-Acoustic Sensing Using Commodity Smartwatch Accelerometers", + "abstract_raw": 
"Smartwatches and wearables are unique in that they reside on the body, presenting great potential for always-available input and interaction. Their position on the wrist makes them ideal for capturing bio-acoustic signals. We developed a custom smartwatch kernel that boosts the sampling rate of a smartwatch's existing accelerometer to 4 kHz. Using this new source of high-fidelity data, we uncovered a wide range of applications. For example, we can use bio-acoustic data to classify hand gestures such as flicks, claps, scratches, and taps, which combine with on-device motion tracking to create a wide range of expressive input modalities. Bio-acoustic sensing can also detect the vibrations of grasped mechanical or motor-powered objects, enabling passive object recognition that can augment everyday experiences with context-aware functionality. Finally, we can generate structured vibrations using a transducer, and show that data can be transmitted through the human body. Overall, our contributions unlock user interface techniques that previously relied on special-purpose and/or cumbersome instrumentation, making such interactions considerably more feasible for inclusion in future consumer devices.", + "link": "https://www.semanticscholar.org/paper/1a892b8f0abf8f1ea559cc94470cf9d0ccc07526", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2535724050", + "venue": "1133523790", + "year": "2016", + "title": "compressed linear algebra for large scale machine learning", + "label": [ + "119857082", + "94835093", + "162478608", + "2777644182", + "78548338", + "11413529", + "81081738" + ], + "author": [ + "2164540290", + "2942221819", + "2118460571", + "1494782392", + "1980638931" + ], + "reference": [ + "31634058", + "85452980", + "181246746", + "230214904", + "1501548698", + "1736023725", + "1967601791", + "1968105141", + "1969891822", + "1972783048", + "1974671283", + "1977142133", + "1978024959", + "1979567624", + "1980307891", + "1988311079", + 
"1990832096", + "1992363839", + "1993704253", + "1993819379", + "2003421875", + "2013373704", + "2018745370", + "2032775418", + "2035415465", + "2037726512", + "2043862089", + "2047061289", + "2047244756", + "2054046497", + "2055935910", + "2058991275", + "2064366207", + "2079885973", + "2082092506", + "2086529872", + "2089908605", + "2096461567", + "2096496252", + "2099102906", + "2100132188", + "2102458936", + "2105848461", + "2108392934", + "2117756735", + "2121810937", + "2123686039", + "2127090196", + "2128853364", + "2131975293", + "2135003477", + "2140453381", + "2146635036", + "2151131744", + "2153884194", + "2154590891", + "2163336450", + "2167911783", + "2168540086", + "2170990775", + "2173213060", + "2216541755", + "2247380138", + "2271840356", + "2284514301", + "2292590056", + "2336944019", + "2374597300", + "2376027876", + "2394571627", + "2399991609", + "2439390339", + "2525739395", + "2533239314", + "2547190417", + "2548695101", + "2563724055", + "2574839832", + "2585098096", + "2591620544", + "2591908351", + "2614179415", + "2764754194", + "2919115771", + "3029645440", + "3138367763" + ], + "abstract": "large scale machine learning ml algorithms are often iterative using repeated read only data access and i o bound matrix vector multiplications to converge to an optimal model it is crucial for performance to fit the data into single node or distributed main memory general purpose heavy and lightweight compression techniques struggle to achieve both good compression ratios and fast decompression speed to enable block wise uncompressed operations hence we initiate work on compressed linear algebra cla in which lightweight database compression techniques are applied to matrices and then linear algebra operations such as matrix vector multiplication are executed directly on the compressed representations we contribute effective column compression schemes cache conscious operations and an efficient sampling based compression algorithm our experiments 
show that cla achieves in memory operations performance close to the uncompressed case and good compression ratios that allow us to fit larger datasets into available memory we thereby obtain significant end to end performance improvements up to 26x or reduced memory requirements", + "title_raw": "Compressed linear algebra for large-scale machine learning", + "abstract_raw": "Large-scale machine learning (ML) algorithms are often iterative, using repeated read-only data access and I/O-bound matrix-vector multiplications to converge to an optimal model. It is crucial for performance to fit the data into single-node or distributed main memory. General-purpose, heavy- and lightweight compression techniques struggle to achieve both good compression ratios and fast decompression speed to enable block-wise uncompressed operations. Hence, we initiate work on compressed linear algebra (CLA), in which lightweight database compression techniques are applied to matrices and then linear algebra operations such as matrix-vector multiplication are executed directly on the compressed representations. We contribute effective column compression schemes, cache-conscious operations, and an efficient sampling-based compression algorithm. Our experiments show that CLA achieves in-memory operations performance close to the uncompressed case and good compression ratios that allow us to fit larger datasets into available memory. 
We thereby obtain significant end-to-end performance improvements up to 26x or reduced memory requirements.", + "link": "https://www.semanticscholar.org/paper/6df79ed3d9c65e7370f001e01d7c68ac9a7440e9", + "scraped_abstract": null, + "citation_best": 75 + }, + { + "paper": "2262364653", + "venue": "1135342153", + "year": "2016", + "title": "social networks under stress", + "label": [ + "2776604539", + "192126672", + "13540734", + "68416499", + "62886766", + "86256295", + "136764020" + ], + "author": [ + "2133929192", + "2048884539", + "2261367123" + ], + "reference": [ + "120264976", + "286295083", + "379337736", + "574862180", + "934261879", + "1533368239", + "1603920809", + "1875112053", + "1968380849", + "1970655941", + "1999410909", + "2008878870", + "2016669059", + "2020358772", + "2021695593", + "2028914474", + "2049406449", + "2049607688", + "2055235810", + "2067868909", + "2085491458", + "2087962989", + "2100261414", + "2106533374", + "2108614537", + "2108855171", + "2109469951", + "2117028698", + "2119353660", + "2121761994", + "2125349172", + "2128300405", + "2131773668", + "2134953154", + "2136151826", + "2146259587", + "2147113977", + "2147824439", + "2147952642", + "2148606196", + "2153204928", + "2156647912", + "2159397589", + "2161010501", + "2166330262", + "2232943036", + "2332125581", + "2735645964", + "2752099845", + "2963785568", + "3121584743", + "3122714990", + "3122944446", + "3124595197", + "3150197019" + ], + "abstract": "social network research has begun to take advantage of fine grained communications regarding coordination decision making and knowledge sharing these studies however have not generally analyzed how external events are associated with a social network s structure and communicative properties here we study how external events are associated with a network s change in structure and communications analyzing a complete dataset of millions of instant messages among the decision makers in a large hedge fund and their network of 
outside contacts we investigate the link between price shocks network structure and change in the affect and cognition of decision makers embedded in the network when price shocks occur the communication network tends not to display structural changes associated with adaptiveness rather the network turtles up it displays a propensity for higher clustering strong tie inter action and an intensification of insider vs outsider communication further we find changes in network structure pre dict shifts in cognitive and affective processes execution of new transactions and local optimality of transactions better than prices revealing the important predictive relationship between network structure and collective behavior within a social network", + "title_raw": "Social Networks Under Stress", + "abstract_raw": "Social network research has begun to take advantage of fine-grained communications regarding coordination, decision-making, and knowledge sharing. These studies, however, have not generally analyzed how external events are associated with a social network's structure and communicative properties. Here, we study how external events are associated with a network's change in structure and communications. Analyzing a complete dataset of millions of instant messages among the decision-makers in a large hedge fund and their network of outside contacts, we investigate the link between price shocks, network structure, and change in the affect and cognition of decision-makers embedded in the network. When price shocks occur the communication network tends not to display structural changes associated with adaptiveness. Rather, the network 'turtles up'. It displays a propensity for higher clustering, strong tie inter- action, and an intensification of insider vs. outsider communication. 
Further, we find changes in network structure pre- dict shifts in cognitive and affective processes, execution of new transactions, and local optimality of transactions better than prices, revealing the important predictive relationship between network structure and collective behavior within a social network.", + "link": "https://www.semanticscholar.org/paper/a1b015cca11fc48641562efeae7780cc919ade84", + "scraped_abstract": null, + "citation_best": 6 + }, + { + "paper": "2211594368", + "venue": "1184914352", + "year": "2015", + "title": "from non negative to general operator cost partitioning", + "label": [ + "76449508", + "173801870", + "127705205" + ], + "author": [ + "1916685292", + "451163712", + "2141104126", + "2075806465" + ], + "reference": [ + "53694208", + "132584314", + "1534467528", + "1554095770", + "1760222968", + "1968808691", + "1969483458", + "2045526507", + "2081726708", + "2131910799", + "2136791927", + "2161414194", + "2198245851", + "2205495840", + "2288384992", + "2405407348" + ], + "abstract": "operator cost partitioning is a well known technique to make admissible heuristics additive by distributing the operator costs among individual heuristics planning tasks are usually defined with non negative operator costs and therefore it appears natural to demand the same for the distributed costs we argue that this requirement is not necessary and demonstrate the benefit of using general cost partitioning we show that lp heuristics for operator counting constraints are cost partitioned heuristics and that the state equation heuristic computes a cost partitioning over atomic projections we also introduce a new family of potential heuristics and show their relationship to general cost partitioning", + "title_raw": "From non-negative to general operator cost partitioning", + "abstract_raw": "Operator cost partitioning is a well-known technique to make admissible heuristics additive by distributing the operator costs among individual heuristics. 
Planning tasks are usually defined with non-negative operator costs and therefore it appears natural to demand the same for the distributed costs. We argue that this requirement is not necessary and demonstrate the benefit of using general cost partitioning. We show that LP heuristics for operator-counting constraints are cost-partitioned heuristics and that the state equation heuristic computes a cost partitioning over atomic projections. We also introduce a new family of potential heuristics and show their relationship to general cost partitioning.", + "link": "https://www.semanticscholar.org/paper/6f45883551ecb1cb33cb30cf1f67fc3d28f4a59c", + "scraped_abstract": null, + "citation_best": 77 + }, + { + "paper": "1974329625", + "venue": "1163450153", + "year": "2015", + "title": "affordance allowing objects to communicate dynamic use", + "label": [ + "104114177", + "64729616", + "31972630", + "194995250" + ], + "author": [ + "2128317731", + "2749704239", + "2009751849" + ], + "reference": [ + "36471650", + "1511843655", + "1528027857", + "1559983024", + "1933657216", + "1970701335", + "2005633553", + "2016027835", + "2023269348", + "2056084814", + "2063594338", + "2099800354", + "2104385324", + "2119995469", + "2122317094", + "2122613696", + "2128868982", + "2129763427", + "2131662572", + "2143131345", + "2155459695", + "2156626285", + "2161751289", + "2165133011", + "2171987976", + "2613049552" + ], + "abstract": "we propose extending the affordance of objects by allowing them to communicate dynamic use such as 1 motion e g spray can shakes when touched 2 multi step processes e g spray can sprays only after shaking and 3 behaviors that change over time e g empty spray can does not allow spraying anymore rather than enhancing objects directly however we implement this concept by enhancing the user we call this affordance by stimulating the user s arms using electrical muscle stimulation our prototype allows objects not only to make the user actuate them but also 
perform required movements while merely approaching the object such as not to touch objects that do not want to be touched in our user study affordance helped participants to successfully operate devices of poor natural affordance such as a multi functional slicer tool or a magnetic nail sweeper and to stay away from cups filled with hot liquids", + "title_raw": "Affordance++: Allowing Objects to Communicate Dynamic Use", + "abstract_raw": "We propose extending the affordance of objects by allowing them to communicate dynamic use, such as (1) motion (e.g., spray can shakes when touched), (2) multi-step processes (e.g., spray can sprays only after shaking), and (3) behaviors that change over time (e.g., empty spray can does not allow spraying anymore). Rather than enhancing objects directly, however, we implement this concept by enhancing the user. We call this affordance++. By stimulating the user's arms using electrical muscle stimulation, our prototype allows objects not only to make the user actuate them, but also perform required movements while merely approaching the object, such as not to touch objects that do not \"want\" to be touched. 
In our user study, affordance++ helped participants to successfully operate devices of poor natural affordance, such as a multi-functional slicer tool or a magnetic nail sweeper, and to stay away from cups filled with hot liquids.", + "link": "https://www.semanticscholar.org/paper/eaef8333b197738ce67a52f4b4af4bf951728df4", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2077940530", + "venue": "1163450153", + "year": "2015", + "title": "velocitap investigating fast mobile text entry using sentence based decoding of touchscreen keyboard input", + "label": [ + "28490314", + "40969351", + "2777530160", + "136738937", + "9390403", + "57273362", + "200632571", + "2778539339" + ], + "author": [ + "182405630", + "2228304778", + "2222212730", + "2228131807", + "2091367221" + ], + "reference": [ + "1550321890", + "1590330054", + "1992326382", + "1999806644", + "2012241589", + "2045522158", + "2052570369", + "2054689358", + "2058049219", + "2058503756", + "2084413096", + "2104328027", + "2112261049", + "2113893685", + "2117278770", + "2118711147", + "2124810338", + "2124818189", + "2128808215", + "2134549953", + "2148723390", + "2151905266", + "2165645720" + ], + "abstract": "we present velocitap a state of the art touchscreen keyboard decoder that supports a sentence based text entry approach velocitap enables users to seamlessly choose from three word delimiter actions pushing a space key swiping to the right or simply omitting the space key and letting the decoder infer spaces automatically we demonstrate that velocitap has a significantly lower error rate than google s keyboard while retaining the same entry rate we show that intermediate visual feedback does not significantly affect entry or error rates and we find that using the space key results in the most accurate results we also demonstrate that enabling flexible word delimiter options does not incur an error rate penalty finally we investigate how small we can make the keyboard when using 
velocitap we show that novice users can reach a mean entry rate of 41 wpm on a 40 mm wide smartwatch sized keyboard at a 3 character error rate", + "title_raw": "VelociTap: Investigating Fast Mobile Text Entry using Sentence-Based Decoding of Touchscreen Keyboard Input", + "abstract_raw": "We present VelociTap: a state-of-the-art touchscreen keyboard decoder that supports a sentence-based text entry approach. VelociTap enables users to seamlessly choose from three word-delimiter actions: pushing a space key, swiping to the right, or simply omitting the space key and letting the decoder infer spaces automatically. We demonstrate that VelociTap has a significantly lower error rate than Google's keyboard while retaining the same entry rate. We show that intermediate visual feedback does not significantly affect entry or error rates and we find that using the space key results in the most accurate results. We also demonstrate that enabling flexible word-delimiter options does not incur an error rate penalty. Finally, we investigate how small we can make the keyboard when using VelociTap. 
We show that novice users can reach a mean entry rate of 41 wpm on a 40 mm wide smartwatch-sized keyboard at a 3% character error rate.", + "link": "https://www.semanticscholar.org/paper/5d18dd15f7ff960f8e6b5fc5635c802696928544", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2171022339", + "venue": "1163450153", + "year": "2015", + "title": "what makes interruptions disruptive a process model account of the effects of the problem state bottleneck on task interruption and resumption", + "label": [ + "44154836", + "2780513914", + "41661131", + "98045186" + ], + "author": [ + "2284871913", + "1988470573", + "2142564275" + ], + "reference": [ + "24931679", + "1524319988", + "1928882148", + "1966518476", + "1971660807", + "1972046139", + "1979053643", + "1980358571", + "1982888377", + "1984507476", + "1997717211", + "2016155538", + "2016725357", + "2022164703", + "2023695064", + "2026916986", + "2036625316", + "2046149799", + "2048020986", + "2054671775", + "2055104609", + "2058695773", + "2079184413", + "2086426908", + "2089615165", + "2094296032", + "2096158028", + "2096173577", + "2101337095", + "2107920470", + "2109529678", + "2111730760", + "2120076005", + "2129415794", + "2134595450", + "2135054307", + "2139186353", + "2148958608", + "2150412828", + "2152574803", + "2167723047", + "2172151257", + "2237830298", + "2323537224", + "2496754967", + "3149827206" + ], + "abstract": "in this paper we present a computational cognitive model of task interruption and resumption focusing on the effects of the problem state bottleneck previous studies have shown that the disruptiveness of interruptions is for an important part determined by three factors interruption duration interrupting task complexity and moment of interruption however an integrated theory of these effects is still missing based on previous research into multitasking we propose a first step towards such a theory in the form of a process model that attributes these effects to problem 
state requirements of both the interrupted and the interrupting task subsequently we tested two predictions of this model in two experiments the experiments confirmed that problem state requirements are an important predictor for the disruptiveness of interruptions this suggests that interfaces should be designed to a interrupt users at low problem state moments and b maintain the problem state for the user when interrupted", + "title_raw": "What Makes Interruptions Disruptive?: A Process-Model Account of the Effects of the Problem State Bottleneck on Task Interruption and Resumption", + "abstract_raw": "In this paper we present a computational cognitive model of task interruption and resumption, focusing on the effects of the problem state bottleneck. Previous studies have shown that the disruptiveness of interruptions is for an important part determined by three factors: interruption duration, interrupting-task complexity, and moment of interruption. However, an integrated theory of these effects is still missing. Based on previous research into multitasking, we propose a first step towards such a theory in the form of a process model that attributes these effects to problem state requirements of both the interrupted and the interrupting task. Subsequently, we tested two predictions of this model in two experiments. The experiments confirmed that problem state requirements are an important predictor for the disruptiveness of interruptions. 
This suggests that interfaces should be designed to a) interrupt users at low-problem state moments and b) maintain the problem state for the user when interrupted.", + "link": "https://www.semanticscholar.org/paper/19b28ad84b72266365b13e773ffcf519ce200643", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "1993143504", + "venue": "1163450153", + "year": "2015", + "title": "lightweight relief shearing for enhanced terrain perception on interactive maps", + "label": [ + "12713177", + "31972630" + ], + "author": [ + "2085234058", + "1990812620", + "1672749942", + "89785269" + ], + "reference": [ + "146082745", + "207103598", + "1529077413", + "1543448657", + "1561256858", + "1975374508", + "1984318610", + "1986123980", + "1992177162", + "2005065380", + "2007144424", + "2008130329", + "2010507078", + "2031755971", + "2062711920", + "2074306633", + "2079416727", + "2084486945", + "2090386708", + "2094318678", + "2111750370", + "2121940100", + "2127875809", + "2133498913", + "2136022943", + "2160089049" + ], + "abstract": "we explore interactive relief shearing a set of non intrusive direct manipulation interactions that expose depth and shape information in terrain maps using ephemeral animations reading and interpreting topography and relief on terrain maps is an important aspect of map use but extracting depth information from 2d maps is notoriously difficult modern mapping software attempts to alleviate this limitation by presenting digital terrain using 3d views however 3d views introduce occlusion complicate distance estimations and typically require more complex interactions in contrast our approach reveals depth information via shearing animations on 2d maps and can be paired with existing interactions such as pan and zoom we examine explicit integrated and hybrid interactions for triggering relief shearing and present a version that uses device tilt to control depth effects our evaluation shows that these interactive techniques improve depth 
perception when compared to standard 2d and perspective views", + "title_raw": "Lightweight Relief Shearing for Enhanced Terrain Perception on Interactive Maps", + "abstract_raw": "We explore interactive relief shearing, a set of non-intrusive, direct manipulation interactions that expose depth and shape information in terrain maps using ephemeral animations. Reading and interpreting topography and relief on terrain maps is an important aspect of map use, but extracting depth information from 2D maps is notoriously difficult. Modern mapping software attempts to alleviate this limitation by presenting digital terrain using 3D views. However, 3D views introduce occlusion, complicate distance estimations, and typically require more complex interactions. In contrast, our approach reveals depth information via shearing animations on 2D maps, and can be paired with existing interactions such as pan and zoom. We examine explicit, integrated, and hybrid interactions for triggering relief shearing and present a version that uses device tilt to control depth effects. 
Our evaluation shows that these interactive techniques improve depth perception when compared to standard 2D and perspective views.", + "link": "https://www.semanticscholar.org/paper/ea163f96c14fcccd6d87da765efa26eea08c0c65", + "scraped_abstract": null, + "citation_best": 40 + }, + { + "paper": "2071865879", + "venue": "1163450153", + "year": "2015", + "title": "patina engraver visualizing activity logs as patina in fashionable trackers", + "label": [ + "107457646", + "150594956", + "57501372" + ], + "author": [ + "2225154729", + "2226550430", + "2130378173" + ], + "reference": [ + "1572670004", + "1757644187", + "1972594921", + "1990843018", + "1991376523", + "1991857593", + "2005219594", + "2027847765", + "2029337986", + "2055135850", + "2058448884", + "2068408009", + "2078032740", + "2091177731", + "2129343065", + "2142094977", + "2143239689", + "2148763290", + "2151137010", + "2151586234", + "2168423131", + "2255370415", + "2477352585", + "3094202663" + ], + "abstract": "despite technological improvements in commercial activity trackers little attention has been given to their emotional social or fashion related qualities such as their visual aesthetics and their relationship to self expression and social connection as an alternative integrated approach incorporating hci fashion and product design our project made use of the characteristics of patina to improve activity trackers as fashionable wearables we developed the patina engraving system which engraves patina like patterns on an activity tracker according to a user s activity logs using a piercing technique the patina of activity logs has been made abstract visually rich gradually emerging and historically accumulated during the field trial we found that the patina motivated the participants to increase exercises for engraving aesthetic patinas a tracker with patina triggered spontaneous social interactions in face to face situations the participants also cherished the trackers that held their own history 
based on the field trial we discuss design implications for utilizing patina in designing future fashionable technologies", + "title_raw": "Patina Engraver: Visualizing Activity Logs as Patina in Fashionable Trackers", + "abstract_raw": "Despite technological improvements in commercial activity trackers, little attention has been given to their emotional, social, or fashion-related qualities, such as their visual aesthetics and their relationship to self-expression and social connection. As an alternative integrated approach incorporating HCI, fashion, and product design, our project made use of the characteristics of patina to improve activity trackers as fashionable wearables. We developed the Patina Engraving System, which engraves patina-like patterns on an activity tracker according to a user's activity logs. Using a piercing technique, the patina of activity logs has been made abstract, visually rich, gradually emerging, and historically accumulated. During the field trial, we found that the patina motivated the participants to increase exercises for engraving aesthetic patinas. A tracker with patina triggered spontaneous social interactions in face-to-face situations. The participants also cherished the trackers that held their own history. 
Based on the field trial, we discuss design implications for utilizing patina in designing future fashionable technologies.", + "link": "https://www.semanticscholar.org/paper/d34a06dabccdcdc8211dcc988c70a27b4bce46d5", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2148163144", + "venue": "1163450153", + "year": "2015", + "title": "baselase an interactive focus context laser floor", + "label": [ + "188255373", + "40140605", + "31972630" + ], + "author": [ + "2908656269", + "2222241732", + "2131810727" + ], + "reference": [ + "150895655", + "1542657334", + "1559983024", + "1586963532", + "1601898415", + "1844042977", + "1991625730", + "2002910582", + "2009104953", + "2013541125", + "2014073269", + "2020405099", + "2026506423", + "2033175536", + "2033288247", + "2039925519", + "2050552147", + "2082930097", + "2093639577", + "2103793365", + "2105408445", + "2106223989", + "2122714335", + "2139120520", + "2148819007", + "2150874632", + "2164689677", + "2166713718" + ], + "abstract": "we present baselase an interactive laser projected focus context floor display in order to provide a transportable system that works in areas where there are no ceilings we provide an integrated unit 1 3m height that stands on the floor one unsolved challenge for laser projectors is to cover large projection areas while providing high resolution at the same time our focus context laser projector solves this problem baselase can cover a large context area in low resolution while providing three movable high resolution focus spots we provide a convex mirror design that enables the laser to reach a large area 75m2 with low resolution while decreasing the beam divergence compared to spherical or parabolic mirrors this hyperboloidal mirror shape approximately equalizes the point size on the floor independent from the projected location we propose to add a number of planar mirrors on pan tilt units to create dynamic zones of high resolution that can adjust to the user 
behavior we provide example applications for baselase and report on user experience in preliminary trials", + "title_raw": "BaseLase: An Interactive Focus+Context Laser Floor", + "abstract_raw": "We present BaseLase, an interactive laser projected focus + context floor display. In order to provide a transportable system that works in areas where there are no ceilings, we provide an integrated unit (1.3m height) that stands on the floor. One unsolved challenge for laser projectors is to cover large projection areas while providing high resolution at the same time. Our focus + context laser projector solves this problem. BaseLase can cover a large context area in low resolution, while providing three movable high-resolution focus spots. We provide a convex mirror design that enables the laser to reach a large area (75m2) with low resolution while decreasing the beam divergence compared to spherical or parabolic mirrors. This hyperboloidal mirror shape approximately equalizes the point size on the floor independent from the projected location. We propose to add a number of planar mirrors on pan-tilt units to create dynamic zones of high resolution that can adjust to the user behavior. 
We provide example applications for BaseLase and report on user experience in preliminary trials.", + "link": "https://www.semanticscholar.org/paper/fea938d28c56a26ec7c3cb84e266e63b4613df1f", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2163444123", + "venue": "1163450153", + "year": "2015", + "title": "iskin flexible stretchable and visually customizable on body touch sensors for mobile computing", + "label": [ + "6899612", + "54290928", + "144543869", + "41022531", + "9390403", + "150594956" + ], + "author": [ + "2187599565", + "2104817029", + "2223317084", + "1968824034", + "730153875", + "2098177638" + ], + "reference": [ + "1887892688", + "1968058875", + "1969573829", + "1977452208", + "1980587781", + "1980884944", + "1989492259", + "1998246765", + "1999585120", + "2001742158", + "2012722774", + "2012865772", + "2017832059", + "2026636460", + "2028781180", + "2029948294", + "2040565638", + "2066577444", + "2074008326", + "2085011130", + "2085869266", + "2091790974", + "2093292021", + "2119593195", + "2123139789", + "2125551333", + "2129186559", + "2131740967", + "2133656144", + "2135840203", + "2138340184", + "2142943459", + "2144575742", + "2145080451", + "2154104679", + "2158417770", + "2161141776", + "2169709590", + "2170982357", + "2295797196", + "2407399349", + "2907558673" + ], + "abstract": "we propose iskin a novel class of skin worn sensors for touch input on the body iskin is a very thin sensor overlay made of biocompatible materials and is flexible and stretchable it can be produced in different shapes and sizes to suit various locations of the body such as the finger forearm or ear integrating capacitive and resistive touch sensing the sensor is capable of detecting touch input with two levels of pressure even when stretched by 30 or when bent with a radius of 0 5cm furthermore iskin supports single or multiple touch areas of custom shape and arrangement as well as more complex widgets such as sliders and click wheels 
recognizing the social importance of skin we show visual design patterns to customize functional touch sensors and allow for a visually aesthetic appearance taken together these contributions enable new types of on body devices this includes finger worn devices extensions to conventional wearable devices and touch input stickers all fostering direct quick and discreet input for mobile computing", + "title_raw": "iSkin: Flexible, Stretchable and Visually Customizable On-Body Touch Sensors for Mobile Computing", + "abstract_raw": "We propose iSkin, a novel class of skin-worn sensors for touch input on the body. iSkin is a very thin sensor overlay, made of biocompatible materials, and is flexible and stretchable. It can be produced in different shapes and sizes to suit various locations of the body such as the finger, forearm, or ear. Integrating capacitive and resistive touch sensing, the sensor is capable of detecting touch input with two levels of pressure, even when stretched by 30% or when bent with a radius of 0.5cm. Furthermore, iSkin supports single or multiple touch areas of custom shape and arrangement, as well as more complex widgets, such as sliders and click wheels. Recognizing the social importance of skin, we show visual design patterns to customize functional touch sensors and allow for a visually aesthetic appearance. Taken together, these contributions enable new types of on-body devices. 
This includes finger-worn devices, extensions to conventional wearable devices, and touch input stickers, all fostering direct, quick, and discreet input for mobile computing.", + "link": "https://www.semanticscholar.org/paper/c7e234c6aa04c6a4b60c07b3a93fb9e73e26e9e3", + "scraped_abstract": null, + "citation_best": 9 + }, + { + "paper": "2193303691", + "venue": "1164321581", + "year": "2015", + "title": "acoustruments passive acoustically driven interactive controls for handheld devices", + "label": [ + "22414024", + "149635348", + "107457646", + "186967261", + "144430266" + ], + "author": [ + "768992449", + "2195513861", + "2138501373", + "2171298838", + "2123491528" + ], + "reference": [ + "2031535007" + ], + "abstract": "smartphones and handheld devices are increasingly being used in interactive applications beyond their conventional touchscreens for example tangibles allow users to interact with mobile devices using physical objects both on screen and around the device similarly there is a growing class of auxiliary devices that require a smartphone to be docked transforming an otherwise simple object into something with rich interactivity however these auxiliary devices still require numerous components including mechanical mechanisms pcbs and sometimes batteries this increases manufacturing costs and reduces physical robustness", + "title_raw": "Acoustruments: passive, acoustically-driven, interactive controls for handheld devices", + "abstract_raw": "Smartphones and handheld devices are increasingly being used in interactive applications beyond their conventional touchscreens. For example, tangibles allow users to interact with mobile devices using physical objects both on screen and around the device. Similarly, there is a growing class of auxiliary devices that require a smartphone to be docked, transforming an otherwise simple object into something with rich interactivity. 
However, these auxiliary devices still require numerous components, including mechanical mechanisms, PCBs, and sometimes batteries. This increases manufacturing costs, and reduces physical robustness.", + "link": "https://www.semanticscholar.org/paper/24978da5fe544ff167980fffafa73539fd6a2cf3", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2071620178", + "venue": "1163450153", + "year": "2015", + "title": "sharing is caring assistive technology designs on thingiverse", + "label": [ + "107457646", + "49774154" + ], + "author": [ + "2222174057", + "2651507879", + "2799646997", + "2889882858", + "2232008332", + "2155780930", + "2107871967" + ], + "reference": [ + "1965153126", + "1968657424", + "1969023376", + "1999733149", + "2010684062", + "2024281943", + "2062781973", + "2081175116", + "2100910924", + "2107898387", + "2125898659", + "2136944876", + "2140232903", + "2409739857", + "2410410597", + "2895813329" + ], + "abstract": "an increasing number of online communities support the open source sharing of designs that can be built using rapid prototyping to construct physical objects in this paper we examine the designs and motivations for assistive technology found on thingiverse com the largest of these communities at the time of this writing we present results from a survey of all assistive technology that has been posted to thingiverse since 2008 and a questionnaire distributed to the designers exploring their relationship with assistive technology and the motivation for creating these designs the majority of these designs are intended to be manufactured on a 3d printer and include assistive devices and modifications for individuals with disabilities older adults and medication management many of these designs are created by the end users themselves or on behalf of friends and loved ones these designers frequently have no formal training or expertise in the creation of assistive technology this paper discusses trends within this community 
as well as future opportunities and challenges", + "title_raw": "Sharing is Caring: Assistive Technology Designs on Thingiverse", + "abstract_raw": "An increasing number of online communities support the open-source sharing of designs that can be built using rapid prototyping to construct physical objects. In this paper, we examine the designs and motivations for assistive technology found on Thingiverse.com, the largest of these communities at the time of this writing. We present results from a survey of all assistive technology that has been posted to Thingiverse since 2008 and a questionnaire distributed to the designers exploring their relationship with assistive technology and the motivation for creating these designs. The majority of these designs are intended to be manufactured on a 3D printer and include assistive devices and modifications for individuals with disabilities, older adults, and medication management. Many of these designs are created by the end-users themselves or on behalf of friends and loved ones. These designers frequently have no formal training or expertise in the creation of assistive technology. 
This paper discusses trends within this community as well as future opportunities and challenges.", + "link": "https://www.semanticscholar.org/paper/a3b2c700da67bccf500a7e8678c52ae1e4c453ac", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "1964639420", + "venue": "1163450153", + "year": "2015", + "title": "colourid improving colour identification for people with impaired colour vision", + "label": [ + "107457646", + "78646695", + "31972630" + ], + "author": [ + "2093858891", + "2222335579", + "2232041143", + "2149083239", + "2154677005" + ], + "reference": [ + "579369954", + "1495887101", + "1503839106", + "1562544913", + "1823329909", + "1970859825", + "1974689608", + "1983126287", + "1984394909", + "1999592181", + "2051826135", + "2057850588", + "2059234575", + "2064388896", + "2078522365", + "2080220571", + "2080761261", + "2106615295", + "2120833565", + "2129785995", + "2135708457", + "2137167595", + "2157289187", + "2159175405", + "2159495285", + "2167666412", + "2296178917" + ], + "abstract": "being able to identify colours is a fundamental human activity colour identification helps us work get dressed prepare food and keep safe but for the 5 of the world with impaired colour vision icv colour identification is often a challenge resulting in frustration and confusion with sometimes dangerous consequences colour namer tools have been proposed as a solution however these are often slow to use and imprecise to address these shortcomings we developed three new colour identification techniques colournames colourmeters colourpopper using a new colour name dictionary based on the largest colour naming experiment to date we compared our techniques to colour namers using participants with icv in desktop and mobile conditions and found that colournames and colourpopper resulted in 99 colour identification accuracy 10 higher than the colour namer colourmeters and colourpopper were three times faster and colourpopper had lower perceived effort and 
was ranked significantly higher with the benefits provided by our new colour identification techniques people with icv are one step closer to seeing the world like everyone else", + "title_raw": "ColourID: Improving Colour Identification for People with Impaired Colour Vision", + "abstract_raw": "Being able to identify colours is a fundamental human activity; colour identification helps us work, get dressed, prepare food, and keep safe. But for the 5% of the world with impaired colour vision (ICV), colour identification is often a challenge, resulting in frustration and confusion with sometimes dangerous consequences. Colour namer tools have been proposed as a solution, however these are often slow to use and imprecise. To address these shortcomings, we developed three new colour identification techniques (ColourNames, ColourMeters, ColourPopper) using a new colour name dictionary based on the largest colour naming experiment to date. We compared our techniques to colour namers using participants with ICV in desktop and mobile conditions, and found that ColourNames and ColourPopper resulted in ~99% colour identification accuracy (10% higher than the colour namer), ColourMeters and ColourPopper were three times faster, and ColourPopper had lower perceived effort and was ranked significantly higher. 
With the benefits provided by our new colour identification techniques, people with ICV are one step closer to seeing the world like everyone else.", + "link": "https://www.semanticscholar.org/paper/e7d9bc92f3338c388ecd259d2829d17e209b8fed", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "1938204631", + "venue": "1158167855", + "year": "2015", + "title": "dynamicfusion reconstruction and tracking of non rigid scenes in real time", + "label": [ + "124774092", + "194145944", + "31972630", + "121684516", + "41904074", + "126042441" + ], + "author": [ + "2161656134", + "2231782831", + "2134777055" + ], + "reference": [ + "1585781633", + "1596964824", + "1716229439", + "1944319588", + "1971968197", + "1977039804", + "1983516323", + "1985290145", + "1987648924", + "1990947293", + "1993095151", + "2001358217", + "2009422376", + "2012048984", + "2016663152", + "2026179794", + "2028653923", + "2038891881", + "2041855012", + "2042843908", + "2047947369", + "2050256614", + "2054381235", + "2055174129", + "2058328050", + "2065270678", + "2071906076", + "2075402943", + "2081927584", + "2098466221", + "2099940712", + "2100642335", + "2105465549", + "2115298906", + "2122025115", + "2156094778", + "2295692686", + "2337757305" + ], + "abstract": "we present the first dense slam system capable of reconstructing non rigidly deforming scenes in real time by fusing together rgbd scans captured from commodity sensors our dynamicfusion approach reconstructs scene geometry whilst simultaneously estimating a dense volumetric 6d motion field that warps the estimated geometry into a live frame like kinectfusion our system produces increasingly denoised detailed and complete reconstructions as more measurements are fused and displays the updated model in real time because we do not require a template or other prior scene model the approach is applicable to a wide range of moving objects and scenes", + "title_raw": "DynamicFusion: Reconstruction and tracking of non-rigid 
scenes in real-time", + "abstract_raw": "We present the first dense SLAM system capable of reconstructing non-rigidly deforming scenes in real-time, by fusing together RGBD scans captured from commodity sensors. Our DynamicFusion approach reconstructs scene geometry whilst simultaneously estimating a dense volumetric 6D motion field that warps the estimated geometry into a live frame. Like KinectFusion, our system produces increasingly denoised, detailed, and complete reconstructions as more measurements are fused, and displays the updated model in real time. Because we do not require a template or other prior scene model, the approach is applicable to a wide range of moving objects and scenes.", + "link": "https://www.semanticscholar.org/paper/e37f2fb8d6e675abbcda3bd09d586b9aaec26486", + "scraped_abstract": null, + "citation_best": 927 + }, + { + "paper": "2136601052", + "venue": "1199533187", + "year": "2015", + "title": "multise multi path symbolic execution using value summaries", + "label": [ + "544833334", + "2779639559", + "35421722", + "11219265", + "80444323" + ], + "author": [ + "2157836386", + "281330718", + "2299365732", + "2113143839" + ], + "reference": [ + "52244296", + "121457469", + "148396834", + "157156687", + "196342399", + "208073541", + "1497028280", + "1548806133", + "1550549614", + "1586497944", + "1710734607", + "1777881387", + "1966982815", + "1970005004", + "1975191777", + "1979693894", + "1987647365", + "1991546210", + "1992012690", + "1992105245", + "2009489720", + "2012997183", + "2026496705", + "2036208810", + "2041176136", + "2044213223", + "2049707975", + "2050853996", + "2053154567", + "2057156093", + "2061686717", + "2073264387", + "2077850509", + "2080267935", + "2080573945", + "2094568767", + "2095450067", + "2096449544", + "2099941001", + "2101512909", + "2101811836", + "2106545804", + "2110311336", + "2115950902", + "2119251836", + "2133612077", + "2134875273", + "2136880809", + "2137351629", + "2138369269", + "2150990339", + 
"2151655370", + "2156045808", + "2163499368", + "2164151624", + "2168625647", + "2171469152" + ], + "abstract": "dynamic symbolic execution dse has been proposed to effectively generate test inputs for real world programs unfortunately dse techniques do not scale well for large realistic programs because often the number of feasible execution paths of a program increases exponentially with the increase in the length of an execution path in this paper we propose multise a new technique for merging states incrementally during symbolic execution without using auxiliary variables the key idea of multise is based on an alternative representation of the state where we map each variable including the program counter to a set of guarded symbolic expressions called a value summary multise has several advantages over conventional dse and conventional state merging techniques value summaries enable sharing of symbolic expressions and path constraints along multiple paths and thus avoid redundant execution multise does not introduce auxiliary symbolic variables which enables it to 1 make progress even when merging values not supported by the constraint solver 2 avoid expensive constraint solver calls when resolving function calls and jumps and 3 carry out most operations concretely moreover multise updates value summaries incrementally at every assignment instruction which makes it unnecessary to identify the join points and to keep track of variables to merge at join points we have implemented multise for javascript programs in a publicly available open source tool our evaluation of multise on several programs shows that 1 value summaries are an eective technique to take advantage of the sharing of value along multiple execution path that 2 multise can run significantly faster than traditional dynamic symbolic execution and 3 multise saves a substantial number of state merges compared to conventional state merging techniques", + "title_raw": "MultiSE: multi-path symbolic 
execution using value summaries", + "abstract_raw": "Dynamic symbolic execution (DSE) has been proposed to effectively generate test inputs for real-world programs. Unfortunately, DSE techniques do not scale well for large realistic programs, because often the number of feasible execution paths of a program increases exponentially with the increase in the length of an execution path. In this paper, we propose MultiSE, a new technique for merging states incrementally during symbolic execution, without using auxiliary variables. The key idea of MultiSE is based on an alternative representation of the state, where we map each variable, including the program counter, to a set of guarded symbolic expressions called a value summary. MultiSE has several advantages over conventional DSE and conventional state merging techniques: value summaries enable sharing of symbolic expressions and path constraints along multiple paths and thus avoid redundant execution. MultiSE does not introduce auxiliary symbolic variables, which enables it to 1) make progress even when merging values not supported by the constraint solver, 2) avoid expensive constraint solver calls when resolving function calls and jumps, and 3) carry out most operations concretely. Moreover, MultiSE updates value summaries incrementally at every assignment instruction, which makes it unnecessary to identify the join points and to keep track of variables to merge at join points. We have implemented MultiSE for JavaScript programs in a publicly available open-source tool. 
Our evaluation of MultiSE on several programs shows that 1) value summaries are an eective technique to take advantage of the sharing of value along multiple execution path, that 2) MultiSE can run significantly faster than traditional dynamic symbolic execution and, 3) MultiSE saves a substantial number of state merges compared to conventional state-merging techniques.", + "link": "https://www.semanticscholar.org/paper/8a0eb38db21aab815888ca73963a5bca18c4323d", + "scraped_abstract": null, + "citation_best": 101 + }, + { + "paper": "2015187825", + "venue": "1199533187", + "year": "2015", + "title": "measure it manage it ignore it software practitioners and technical debt", + "label": [ + "159198006", + "2777904410", + "56739046", + "117447612" + ], + "author": [ + "2099123081", + "2254729253", + "2303626447", + "693687178", + "303809925" + ], + "reference": [ + "1565746575", + "1592081868", + "1658908529", + "1965658570", + "1985236007", + "1986086971", + "2004833623", + "2008626182", + "2009256433", + "2016191054", + "2028836139", + "2031271477", + "2040739191", + "2057244851", + "2070437629", + "2084429785", + "2084563412", + "2092298056", + "2096095490", + "2104577574", + "2108025693", + "2119673341", + "2144160189", + "2145933668", + "2147863284", + "2149893959", + "2166993820", + "2212732160", + "2216905672", + "2526133847", + "2800559265" + ], + "abstract": "the technical debt metaphor is widely used to encapsulate numerous software quality problems the metaphor is attractive to practitioners as it communicates to both technical and nontechnical audiences that if quality problems are not addressed things may get worse however it is unclear whether there are practices that move this metaphor beyond a mere communication mechanism existing studies of technical debt have largely focused on code metrics and small surveys of developers in this paper we report on our survey of 1 831 participants primarily software engineers and architects working in long lived 
software intensive projects from three large organizations and follow up interviews of seven software engineers we analyzed our data using both nonparametric statistics and qualitative text analysis we found that architectural decisions are the most important source of technical debt furthermore while respondents believe the metaphor is itself important for communication existing tools are not currently helpful in managing the details we use our results to motivate a technical debt timeline to focus management and tooling approaches", + "title_raw": "Measure it? Manage it? Ignore it? software practitioners and technical debt", + "abstract_raw": "The technical debt metaphor is widely used to encapsulate numerous software quality problems. The metaphor is attractive to practitioners as it communicates to both technical and nontechnical audiences that if quality problems are not addressed, things may get worse. However, it is unclear whether there are practices that move this metaphor beyond a mere communication mechanism. Existing studies of technical debt have largely focused on code metrics and small surveys of developers. In this paper, we report on our survey of 1,831 participants, primarily software engineers and architects working in long-lived, software-intensive projects from three large organizations, and follow-up interviews of seven software engineers. We analyzed our data using both nonparametric statistics and qualitative text analysis. We found that architectural decisions are the most important source of technical debt. Furthermore, while respondents believe the metaphor is itself important for communication, existing tools are not currently helpful in managing the details. 
We use our results to motivate a technical debt timeline to focus management and tooling approaches.", + "link": "https://www.semanticscholar.org/paper/3e1f89ca4d729b2fcaa0a437e874136477579aca", + "scraped_abstract": null, + "citation_best": 244 + }, + { + "paper": "2079877139", + "venue": "1199533187", + "year": "2015", + "title": "a user guided approach to program analysis", + "label": [ + "98183937", + "548217200", + "199360897", + "124101348", + "2778770139", + "39920170" + ], + "author": [ + "2045343935", + "2616946442", + "2049248193", + "2005763208" + ], + "reference": [ + "47392883", + "1505465226", + "1538211826", + "1557543533", + "1671718115", + "1907086563", + "1912598576", + "1968027261", + "1969599528", + "1971859150", + "1997945384", + "2050680750", + "2075005465", + "2102178883", + "2106108278", + "2111949697", + "2113709047", + "2119648923", + "2132733485", + "2132800423", + "2133497528", + "2133662847", + "2133824159", + "2134401695", + "2134429122", + "2135023759", + "2135209143", + "2136503680", + "2144196848", + "2151562310", + "2158600037", + "2158765820", + "2162544703", + "2163267264", + "2163521620", + "2166091242", + "2515929223", + "2592431019" + ], + "abstract": "program analysis tools often produce undesirable output due to various approximations we present an approach and a system eugene that allows user feedback to guide such approximations towards producing the desired output we formulate the problem of user guided program analysis in terms of solving a combination of hard rules and soft rules hard rules capture soundness while soft rules capture degrees of approximations and preferences of users our technique solves the rules using an off the shelf solver in a manner that is sound satisfies all hard rules optimal maximally satisfies soft rules and scales to real world analyses and programs we evaluate eugene on two different analyses with labeled output on a suite of seven java programs of size 131 198 kloc we also report upon a 
user study involving nine users who employ eugene to guide an information flow analysis on three java micro benchmarks in our experiments eugene significantly reduces misclassified reports upon providing limited amounts of feedback", + "title_raw": "A user-guided approach to program analysis", + "abstract_raw": "Program analysis tools often produce undesirable output due to various approximations. We present an approach and a system EUGENE that allows user feedback to guide such approximations towards producing the desired output. We formulate the problem of user-guided program analysis in terms of solving a combination of hard rules and soft rules: hard rules capture soundness while soft rules capture degrees of approximations and preferences of users. Our technique solves the rules using an off-the-shelf solver in a manner that is sound (satisfies all hard rules), optimal (maximally satisfies soft rules), and scales to real-world analyses and programs. We evaluate EUGENE on two different analyses with labeled output on a suite of seven Java programs of size 131\u2013198 KLOC. We also report upon a user study involving nine users who employ EUGENE to guide an information-flow analysis on three Java micro-benchmarks. 
In our experiments, EUGENE significantly reduces misclassified reports upon providing limited amounts of feedback.", + "link": "https://www.semanticscholar.org/paper/bd06a0d166957b169be53434f1192cc985a587cc", + "scraped_abstract": null, + "citation_best": 68 + }, + { + "paper": "2027701108", + "venue": "1199533187", + "year": "2015", + "title": "users beware preference inconsistencies ahead", + "label": [ + "36871734", + "167955471", + "43126263", + "199360897", + "149091818", + "89505385", + "2777904410", + "202105479", + "80444323", + "97686452" + ], + "author": [ + "2648207154", + "2151210804", + "1989715328" + ], + "reference": [ + "134722953", + "1495241705", + "1573585357", + "1662961914", + "1986531046", + "1994411654", + "2003775793", + "2004248182", + "2029039689", + "2030696252", + "2040849296", + "2059731886", + "2073869982", + "2102436656", + "2104959783", + "2107574238", + "2114525558", + "2117288652", + "2124682367", + "2126104567", + "2128208466", + "2147786885", + "2151502039", + "2152874840", + "2157771728", + "2169622310" + ], + "abstract": "the structure of preferences for modern highly configurable software systems has become extremely complex usually consisting of multiple layers of access that go from the user interface down to the lowest levels of the source code this complexity can lead to inconsistencies between layers especially during software evolution for example there may be preferences that users can change through the gui but that have no effect on the actual behavior of the system because the related source code is not present or has been removed going from one version to the next these inconsistencies may result in unexpected program behaviors which range in severity from mild annoyances to more critical security or performance problems to address this problem we present scic software configuration inconsistency checker a static analysis technique that can automatically detect these kinds of inconsistencies unlike other 
configuration analysis tools scic can handle software that 1 is written in multiple programming languages and 2 has a complex preference structure in an empirical evaluation that we performed on 10 years worth of versions of both the widely used mozilla core and firefox scic was able to find 40 real inconsistencies some determined as severe whose lifetime spanned multiple versions and whose detection required the analysis of code written in multiple languages", + "title_raw": "Users beware: preference inconsistencies ahead", + "abstract_raw": "The structure of preferences for modern highly-configurable software systems has become extremely complex, usually consisting of multiple layers of access that go from the user interface down to the lowest levels of the source code. This complexity can lead to inconsistencies between layers, especially during software evolution. For example, there may be preferences that users can change through the GUI, but that have no effect on the actual behavior of the system because the related source code is not present or has been removed going from one version to the next. These inconsistencies may result in unexpected program behaviors, which range in severity from mild annoyances to more critical security or performance problems. To address this problem, we present SCIC (Software Configuration Inconsistency Checker), a static analysis technique that can automatically detect these kinds of inconsistencies. Unlike other configuration analysis tools, SCIC can handle software that (1) is written in multiple programming languages and (2) has a complex preference structure. 
In an empirical evaluation that we performed on 10 years worth of versions of both the widely used Mozilla Core and Firefox, SCIC was able to find 40 real inconsistencies (some determined as severe), whose lifetime spanned multiple versions, and whose detection required the analysis of code written in multiple languages.", + "link": "https://www.semanticscholar.org/paper/3046c4bde694f175b0ac248256aa9cb539a8efb4", + "scraped_abstract": null, + "citation_best": 31 + }, + { + "paper": "2220384803", + "venue": "1164975091", + "year": "2015", + "title": "deep neural decision forests", + "label": [ + "119857082", + "56289965", + "154945302", + "84525736", + "59404180", + "164752517", + "190502265", + "97385483", + "108583219", + "169258074" + ], + "author": [ + "1994913826", + "2185955379", + "102275141", + "102181720" + ], + "reference": [ + "80760317", + "137456267", + "330298975", + "986585644", + "1480376833", + "1515620500", + "1579279110", + "1588663000", + "1677182931", + "1905882502", + "1913356549", + "1937766607", + "2021404082", + "2025653905", + "2036196300", + "2059424674", + "2081136789", + "2083842231", + "2095705004", + "2100659887", + "2104266970", + "2106004777", + "2107285841", + "2112796928", + "2117496083", + "2117539524", + "2120240539", + "2123199310", + "2128302979", + "2142859438", + "2143908786", + "2148029428", + "2148596671", + "2155893237", + "2504108613", + "2618530766", + "2911964244", + "2914484425", + "2950179405", + "2953066166", + "2963911037" + ], + "abstract": "we present deep neural decision forests a novel approach that unifies classification trees with the representation learning functionality known from deep convolutional networks by training them in an end to end manner to combine these two worlds we introduce a stochastic and differentiable decision tree model which steers the representation learning usually conducted in the initial layers of a deep convolutional network our model differs from conventional deep networks because 
a decision forest provides the final predictions and it differs from conventional decision forests since we propose a principled joint and global optimization of split and leaf node parameters we show experimental results on benchmark machine learning datasets like mnist and imagenet and find on par or superior results when compared to state of the art deep models most remarkably we obtain top5 errors of only 7 84 6 38 on imagenet validation data when integrating our forests in a single crop single seven model googlenet architecture respectively thus even without any form of training data set augmentation we are improving on the 6 67 error obtained by the best googlenet architecture 7 models 144 crops", + "title_raw": "Deep Neural Decision Forests", + "abstract_raw": "We present Deep Neural Decision Forests - a novel approach that unifies classification trees with the representation learning functionality known from deep convolutional networks, by training them in an end-to-end manner. To combine these two worlds, we introduce a stochastic and differentiable decision tree model, which steers the representation learning usually conducted in the initial layers of a (deep) convolutional network. Our model differs from conventional deep networks because a decision forest provides the final predictions and it differs from conventional decision forests since we propose a principled, joint and global optimization of split and leaf node parameters. We show experimental results on benchmark machine learning datasets like MNIST and ImageNet and find on-par or superior results when compared to state-of-the-art deep models. Most remarkably, we obtain Top5-Errors of only 7.84%/6.38% on ImageNet validation data when integrating our forests in a single-crop, single/seven model GoogLeNet architecture, respectively. 
Thus, even without any form of training data set augmentation we are improving on the 6.67% error obtained by the best GoogLeNet architecture (7 models, 144 crops).", + "link": "https://www.semanticscholar.org/paper/544998db166c047c70a61c5a5c54d10c5879ecf1", + "scraped_abstract": null, + "citation_best": 461 + }, + { + "paper": "1876956220", + "venue": "1180662882", + "year": "2015", + "title": "optimal and adaptive algorithms for online boosting", + "label": [ + "119857082", + "196921405", + "2777723229", + "11413529", + "46686674" + ], + "author": [ + "317105621", + "2152055551", + "2461368180" + ], + "reference": [ + "1536299660", + "1548189207", + "1570060426", + "1570963478", + "1647591404", + "1790582767", + "1807914171", + "1926592634", + "1988790447", + "2009772318", + "2070534370", + "2077723394", + "2084310470", + "2093825590", + "2125573488", + "2128644080", + "2139055576", + "2146159678", + "2148825261", + "2149079208", + "2154436219", + "2170865122", + "2400267228", + "2911549670" + ], + "abstract": "we study online boosting the task of converting any weak online learner into a strong online learner based on a novel and natural definition of weak online learnability we develop two online boosting algorithms the first algorithm is an online version of boost by majority by proving a matching lower bound we show that this algorithm is essentially optimal in terms of the number of weak learners and the sample complexity needed to achieve a specified accuracy the second algorithm is adaptive and parameter free albeit not optimal", + "title_raw": "Optimal and Adaptive Algorithms for Online Boosting", + "abstract_raw": "We study online boosting, the task of converting any weak online learner into a strong online learner. Based on a novel and natural definition of weak online learnability, we develop two online boosting algorithms. The first algorithm is an online version of boost-by-majority. 
By proving a matching lower bound, we show that this algorithm is essentially optimal in terms of the number of weak learners and the sample complexity needed to achieve a specified accuracy. The second algorithm is adaptive and parameter-free, albeit not optimal.", + "link": "https://www.semanticscholar.org/paper/6eb5c71aeaf2588a965ad48b48ae827988c4a061", + "scraped_abstract": null, + "citation_best": 25 + }, + { + "paper": "1826818071", + "venue": "1180662882", + "year": "2015", + "title": "a nearly linear time framework for graph structured sparsity", + "label": [ + "311688", + "11413529" + ], + "author": [ + "2101484218", + "22135093", + "2134624344" + ], + "reference": [ + "1497745584", + "1546924356", + "1608367986", + "1837471008", + "1970554427", + "1983481354", + "1987371344", + "2001139415", + "2001681959", + "2043905543", + "2044762091", + "2049502219", + "2077074857", + "2082176459", + "2093545205", + "2120383799", + "2125680629", + "2126976013", + "2127723919", + "2129131372", + "2130357047", + "2138019504", + "2138265962", + "2145096794", + "2145504209", + "2151825876", + "2170844819", + "2208167373", + "2244252827", + "2281354239", + "2289917018", + "2401610261", + "2406740263", + "2595699107", + "2964150680", + "3149604617" + ], + "abstract": "we introduce a framework for sparsity structures defined via graphs our approach is flexible and generalizes several previously studied sparsity models moreover we provide efficient projection algorithms for our sparsity model that run in nearly linear time in the context of sparse recovery we show that our framework achieves an information theoretically optimal sample complexity for a wide range of parameters we complement our theoretical analysis with experiments demonstrating that our algorithms also improve on prior work in practice", + "title_raw": "A Nearly-Linear Time Framework for Graph-Structured Sparsity", + "abstract_raw": "We introduce a framework for sparsity structures defined via graphs. 
Our approach is flexible and generalizes several previously studied sparsity models. Moreover, we provide efficient projection algorithms for our sparsity model that run in nearly-linear time. In the context of sparse recovery, we show that our framework achieves an information-theoretically optimal sample complexity for a wide range of parameters. We complement our theoretical analysis with experiments demonstrating that our algorithms also improve on prior work in practice.", + "link": "https://www.semanticscholar.org/paper/e74a1e87a0cbac606a81e9b5f8695d828c07f966", + "scraped_abstract": null, + "citation_best": 3 + }, + { + "paper": "2092388562", + "venue": "1174403976", + "year": "2015", + "title": "views on internal and external validity in empirical software engineering", + "label": [ + "46110900", + "74579156", + "14896104", + "87546605", + "56739046" + ], + "author": [ + "2145688174", + "1619324679", + "2170711779" + ], + "reference": [ + "1494946506", + "1511267087", + "1518586892", + "1730782591", + "1746353556", + "1783768447", + "1876404477", + "1964386935", + "1983830972", + "1992602600", + "2002763011", + "2015232149", + "2046557280", + "2046895806", + "2084429940", + "2100044779", + "2101582137", + "2106956101", + "2108072832", + "2109076419", + "2111199933", + "2112351052", + "2112433871", + "2113294746", + "2113533445", + "2114269784", + "2117254507", + "2125759561", + "2145906963", + "2153523756", + "2161222712", + "2165135275", + "2167926541", + "2169090130", + "2172287543", + "2496434514", + "3150176278" + ], + "abstract": "empirical methods have grown common in software engineering but there is no consensus on how to apply them properly is practical relevance key do internally valid studies have any value should we replicate more to address the tradeoff between internal and external validity we asked the community how empirical research should take place in software engineering with a focus on the tradeoff between internal and external validity 
and replication complemented with a literature review about the status of empirical research in software engineering we found that the opinions differ considerably and that there is no consensus in the community when to focus on internal or external validity and how to conduct and review replications", + "title_raw": "Views on internal and external validity in empirical software engineering", + "abstract_raw": "Empirical methods have grown common in software engineering, but there is no consensus on how to apply them properly. Is practical relevance key? Do internally valid studies have any value? Should we replicate more to address the tradeoff between internal and external validity? We asked the community how empirical research should take place in software engineering, with a focus on the tradeoff between internal and external validity and replication, complemented with a literature review about the status of empirical research in software engineering. We found that the opinions differ considerably, and that there is no consensus in the community when to focus on internal or external validity and how to conduct and review replications.", + "link": "https://www.semanticscholar.org/paper/2f9a1286e7af4ab7706ad8cfcc8c8742a1964939", + "scraped_abstract": null, + "citation_best": 125 + }, + { + "paper": "2139553864", + "venue": "1174403976", + "year": "2015", + "title": "alloy a general purpose higher order relational constraint solver", + "label": [ + "193237570", + "164155591", + "169896238", + "187455244", + "173608175", + "162319229", + "2778770139", + "80444323" + ], + "author": [ + "1810387679", + "2016638031", + "2123331094", + "2617652890" + ], + "reference": [ + "72463235", + "99012786", + "954629016", + "1480909796", + "1519503479", + "1526922602", + "1536511644", + "1543449523", + "1552077729", + "1557470769", + "1561078809", + "1595209293", + "1705806753", + "1895387792", + "1923535011", + "1972999341", + "1988537734", + "2012312630", + "2040768172", + 
"2049542183", + "2064070192", + "2085025498", + "2094878426", + "2110988764", + "2115261880", + "2128953477", + "2130427425", + "2131146089", + "2134734244", + "2136663634", + "2146105230", + "2146356111", + "2149236697", + "2154061425", + "2155780123", + "2156865091", + "2160985005", + "2161408250", + "2161516603", + "2163671349", + "2164548535", + "2165247635", + "2165413178", + "2168617729", + "2183062003", + "2243433439", + "2295923951", + "2363172845", + "2397041770", + "2399923814", + "2468404022", + "2504100651", + "3143219376" + ], + "abstract": "the last decade has seen a dramatic growth in the use of constraint solvers as a computational mechanism not only for analysis of software but also at runtime solvers are available for a variety of logics but are generally restricted to first order formulas some tasks however most notably those involving synthesis are inherently higher order these are typically handled by embedding a first order solver such as a sat or smt solver in a domain specific algorithm using strategies similar to those used in such algorithms we show how to extend a first order solver in this case kodkod a model finder for relational logic used as the engine of the alloy analyzer so that it can handle quantifications over higher order structures the resulting solver is sufficiently general that it can be applied to a range of problems it is higher order so that it can be applied directly without embedding in another algorithm and it performs well enough to be competitive with specialized tools just as the identification of first order solvers as reusable backends advanced the performance of specialized tools and simplified their architecture factoring out higher order solvers may bring similar benefits to a new class of tools", + "title_raw": "Alloy*: a general-purpose higher-order relational constraint solver", + "abstract_raw": "The last decade has seen a dramatic growth in the use of constraint solvers as a computational mechanism, not 
only for analysis of software, but also at runtime. Solvers are available for a variety of logics but are generally restricted to first-order formulas. Some tasks, however, most notably those involving synthesis, are inherently higher order; these are typically handled by embedding a first-order solver (such as a SAT or SMT solver) in a domain-specific algorithm. Using strategies similar to those used in such algorithms, we show how to extend a first-order solver (in this case Kodkod, a model finder for relational logic used as the engine of the Alloy Analyzer) so that it can handle quantifications over higher-order structures. The resulting solver is sufficiently general that it can be applied to a range of problems; it is higher order, so that it can be applied directly, without embedding in another algorithm; and it performs well enough to be competitive with specialized tools. Just as the identification of first-order solvers as reusable backends advanced the performance of specialized tools and simplified their architecture, factoring out higher-order solvers may bring similar benefits to a new class of tools.", + "link": "https://www.semanticscholar.org/paper/a2ed2bfa9b0529dcdab06b5a86b42429155d88d1", + "scraped_abstract": null, + "citation_best": 38 + }, + { + "paper": "1608999459", + "venue": "1158363782", + "year": "2015", + "title": "the design and implementation of open vswitch", + "label": [ + "112904061", + "149635348", + "111919701", + "105339364", + "2777904410" + ], + "author": [ + "2122690630", + "2126736731", + "2305287388", + "2128927004", + "2115010416", + "2431373274", + "2162934431", + "2477382996", + "2498310692", + "2282506560", + "1981163864", + "2153903143" + ], + "reference": [ + "79029001", + "83339351", + "158224344", + "1435010830", + "1882012874", + "1989728020", + "1992811742", + "2022106793", + "2063773589", + "2071552523", + "2077688755", + "2099498471", + "2105545278", + "2115278163", + "2120416631", + "2120900812", + 
"2123845384", + "2131074176", + "2137838113", + "2141062513", + "2143377393", + "2146657832", + "2147802358", + "2150942100", + "2159669151", + "2164905748", + "2168626603", + "2173847632", + "2243203026", + "2288643294", + "2395731689", + "2400395024" + ], + "abstract": "we describe the design and implementation of open vswitch a multi layer open source virtual switch for all major hypervisor platforms open vswitch was designed de novo for networking in virtual environments resulting in major design departures from traditional software switching architectures we detail the advanced flow classification and caching techniques that open vswitch uses to optimize its operations and conserve hypervisor resources we evaluate open vswitch performance drawing from our deployment experiences over the past seven years of using and improving open vswitch", + "title_raw": "The design and implementation of open vSwitch", + "abstract_raw": "We describe the design and implementation of Open vSwitch, a multi-layer, open source virtual switch for all major hypervisor platforms. Open vSwitch was designed de novo for networking in virtual environments, resulting in major design departures from traditional software switching architectures. We detail the advanced flow classification and caching techniques that Open vSwitch uses to optimize its operations and conserve hypervisor resources. 
We evaluate Open vSwitch performance, drawing from our deployment experiences over the past seven years of using and improving Open vSwitch.", + "link": "https://www.semanticscholar.org/paper/00cf4b13a1bd202ccebe1e7bd0587f11e98ec3d6", + "scraped_abstract": null, + "citation_best": 661 + }, + { + "paper": "1699100222", + "venue": "1158363782", + "year": "2015", + "title": "queues don t matter when you can jump them", + "label": [ + "31258907", + "158379750", + "22684755", + "115051666", + "68178114", + "201100257", + "160403385", + "98980195", + "79403827" + ], + "author": [ + "2008694580", + "2113508224", + "1242599949", + "2101946122", + "2576671778", + "2140572436", + "1970640687" + ], + "reference": [ + "14875769", + "1503891749", + "1583175181", + "1785664926", + "1820403722", + "1977867261", + "1982063824", + "2003597767", + "2013073142", + "2013409485", + "2082171780", + "2084226860", + "2096915479", + "2101871381", + "2102549685", + "2103110737", + "2109195783", + "2111714633", + "2112486185", + "2117884704", + "2123138012", + "2146434221", + "2148147078", + "2149804187", + "2154042331", + "2157614013", + "2163404313", + "2164740236", + "2168595508", + "2176566884", + "2186482227", + "2984174199", + "2997709458" + ], + "abstract": "qjump is a simple and immediately deployable approach to controlling network interference in datacenter networks network interference occurs when congestion from throughput intensive applications causes queueing that delays traffic from latency sensitive applications to mitigate network interference qjump applies internet qos inspired techniques to datacenter applications each application is assigned to a latency sensitivity level or class packets from higher levels are rate limited in the end host but once allowed into the network can jump the queue over packets from lower levels in settings with known node counts and link speeds qjump can support service levels ranging from strictly bounded latency but with low rate through to 
line rate throughput but with high latency variance we have implemented qjump as a linux traffic control module we show that qjump achieves bounded latency and reduces in network interference by up to 300 outperforming ethernet flow control 802 3x ecn wred and dctcp we also show that qjump improves average flow completion times performing close to or better than dctcp and pfabric", + "title_raw": "Queues don't matter when you can JUMP them!", + "abstract_raw": "QJUMP is a simple and immediately deployable approach to controlling network interference in datacenter networks. Network interference occurs when congestion from throughput-intensive applications causes queueing that delays traffic from latency-sensitive applications. To mitigate network interference, QJUMP applies Internet QoS-inspired techniques to datacenter applications. Each application is assigned to a latency sensitivity level (or class). Packets from higher levels are rate-limited in the end host, but once allowed into the network can \"jump-the-queue\" over packets from lower levels. In settings with known node counts and link speeds, QJUMP can support service levels ranging from strictly bounded latency (but with low rate) through to line-rate throughput (but with high latency variance).\r\n\r\nWe have implemented QJUMP as a Linux Traffic Control module. We show that QJUMP achieves bounded latency and reduces in-network interference by up to 300\u00d7, outperforming Ethernet Flow Control (802.3x), ECN (WRED) and DCTCP. 
We also show that QJUMP improves average flow completion times, performing close to or better than DCTCP and pFabric.", + "link": "https://www.semanticscholar.org/paper/8f4ca751f5ec46ec81dff2997247b8c5d1fb5053", + "scraped_abstract": null, + "citation_best": 144 + }, + { + "paper": "1642392512", + "venue": "1158363782", + "year": "2015", + "title": "designing distributed systems using approximate synchrony in data center networks", + "label": [ + "153740404", + "120314980", + "31258907", + "32295351", + "55368355", + "49265948", + "98980195", + "32833848" + ], + "author": [ + "1513655371", + "2105191977", + "2062601765", + "2159186294", + "2088689873" + ], + "reference": [ + "100679602", + "192446467", + "1500258551", + "1543942291", + "1549820118", + "1565495482", + "1801241948", + "1831997210", + "1850597240", + "1893504272", + "1973501242", + "1992479210", + "2000832815", + "2003214215", + "2013409485", + "2035362408", + "2067740651", + "2075854425", + "2077240273", + "2101939036", + "2106670435", + "2109213558", + "2121178808", + "2123016589", + "2125936380", + "2126087831", + "2126969025", + "2127872526", + "2130531694", + "2131929623", + "2139359217", + "2147118406", + "2152465173", + "2157614013", + "2168595508", + "2182688186", + "2293633413", + "3100217861", + "3138135046" + ], + "abstract": "distributed systems are traditionally designed independently from the underlying network making worst case assumptions e g complete asynchrony about its behavior however many of today s distributed applications are deployed in data centers where the network is more reliable predictable and extensible in these environments it is possible to co design distributed systems with their network layer and doing so can offer substantial benefits this paper explores network level mechanisms for providing mostly ordered multicast mom a best effort ordering property for concurrent multicast operations using this primitive we design speculative paxos a state machine replication 
protocol that relies on the network to order requests in the normal case this approach leads to substantial performance benefits under realistic data center conditions speculative paxos can provide 40 lower latency and 2 6 higher throughput than the standard paxos protocol it offers lower latency than a latency optimized protocol fast paxos with the same throughput as a throughput optimized protocol batching", + "title_raw": "Designing distributed systems using approximate synchrony in data center networks", + "abstract_raw": "Distributed systems are traditionally designed independently from the underlying network, making worst-case assumptions (e.g., complete asynchrony) about its behavior. However, many of today's distributed applications are deployed in data centers, where the network is more reliable, predictable, and extensible. In these environments, it is possible to co-design distributed systems with their network layer, and doing so can offer substantial benefits.\r\n\r\nThis paper explores network-level mechanisms for providing Mostly-Ordered Multicast (MOM): a best-effort ordering property for concurrent multicast operations. Using this primitive, we design Speculative Paxos, a state machine replication protocol that relies on the network to order requests in the normal case. This approach leads to substantial performance benefits: under realistic data center conditions, Speculative Paxos can provide 40% lower latency and 2.6\u00d7 higher throughput than the standard Paxos protocol. 
It offers lower latency than a latency-optimized protocol (Fast Paxos) with the same throughput as a throughput-optimized protocol (batching).", + "link": "https://www.semanticscholar.org/paper/5e707621ae530a372e82c9439ab6495d84fb934e", + "scraped_abstract": null, + "citation_best": 103 + }, + { + "paper": "2061091230", + "venue": "1127352206", + "year": "2015", + "title": "automatically improving accuracy for floating point expressions", + "label": [ + "84211073", + "100850083", + "61005703", + "11413529" + ], + "author": [ + "838603720", + "2224281134", + "2224265930", + "187474749" + ], + "reference": [ + "2567833", + "103214193", + "1528042415", + "1966151649", + "1969213662", + "1986969990", + "1999470254", + "2020804487", + "2037350163", + "2038705805", + "2058821752", + "2076064095", + "2079428063", + "2081368694", + "2103953062", + "2109955300", + "2118594068", + "2121344286", + "2121764011", + "2122738744", + "2130084210", + "2130175237", + "2131670453", + "2140716004", + "2144319269", + "2147177731", + "2157054705", + "2169004268", + "2295915207", + "2340006107", + "2780368759", + "2979730858" + ], + "abstract": "scientific and engineering applications depend on floating point arithmetic to approximate real arithmetic this approximation introduces rounding error which can accumulate to produce unacceptable results while the numerical methods literature provides techniques to mitigate rounding error applying these techniques requires manually rearranging expressions and understanding the finer details of floating point arithmetic we introduce herbie a tool which automatically discovers the rewrites experts perform to improve accuracy herbie s heuristic search estimates and localizes rounding error using sampled points rather than static error analysis applies a database of rules to generate improvements takes series expansions and combines improvements for different input regions we evaluated herbie on examples from a classic numerical methods textbook and 
found that herbie was able to improve accuracy on each example some by up to 60 bits while imposing a median performance overhead of 40 colleagues in machine learning have used herbie to significantly improve the results of a clustering algorithm and a mathematical library has accepted two patches generated using herbie", + "title_raw": "Automatically improving accuracy for floating point expressions", + "abstract_raw": "Scientific and engineering applications depend on floating point arithmetic to approximate real arithmetic. This approximation introduces rounding error, which can accumulate to produce unacceptable results. While the numerical methods literature provides techniques to mitigate rounding error, applying these techniques requires manually rearranging expressions and understanding the finer details of floating point arithmetic. We introduce Herbie, a tool which automatically discovers the rewrites experts perform to improve accuracy. Herbie's heuristic search estimates and localizes rounding error using sampled points (rather than static error analysis), applies a database of rules to generate improvements, takes series expansions, and combines improvements for different input regions. We evaluated Herbie on examples from a classic numerical methods textbook, and found that Herbie was able to improve accuracy on each example, some by up to 60 bits, while imposing a median performance overhead of 40%. 
Colleagues in machine learning have used Herbie to significantly improve the results of a clustering algorithm, and a mathematical library has accepted two patches generated using Herbie.", + "link": "https://www.semanticscholar.org/paper/307fb6c6cfc456ab3e510c08fde39e6e3574fe5d", + "scraped_abstract": null, + "citation_best": 66 + }, + { + "paper": "2037037228", + "venue": "1127352206", + "year": "2015", + "title": "diagnosing type errors with class", + "label": [ + "70203142", + "198370458", + "75039014", + "169590947", + "199360897", + "136134403", + "46743427", + "33288326", + "2776214188", + "80444323", + "2780624054" + ], + "author": [ + "2099242823", + "2141746463", + "77990519", + "2185166580" + ], + "reference": [ + "184339100", + "1506661840", + "1568497055", + "2011725693", + "2011824721", + "2022443117", + "2029079408", + "2040152339", + "2045041259", + "2045313089", + "2056310485", + "2064982280", + "2068336423", + "2068457689", + "2069858276", + "2096973632", + "2098374496", + "2131916293", + "2134093956", + "2135565307", + "2147609113", + "2157859774", + "2166414999", + "2168280960", + "2170257749", + "2187961666", + "2293332143", + "2406569985", + "2484245539" + ], + "abstract": "type inference engines often give terrible error messages and the more sophisticated the type system the worse the problem we show that even with the highly expressive type system implemented by the glasgow haskell compiler ghc including type classes gadts and type families it is possible to identify the most likely source of the type error rather than the first source that the inference engine trips over to determine which are the likely error sources we apply a simple bayesian model to a graph representation of the typing constraints the satisfiability or unsatisfiability of paths within the graph provides evidence for or against possible explanations while we build on prior work on error diagnosis for simpler type systems inference in the richer type system of haskell 
requires extending the graph with new nodes the augmentation of the graph creates challenges both for bayesian reasoning and for ensuring termination using a large corpus of haskell programs we show that this error localization technique is practical and significantly improves accuracy over the state of the art", + "title_raw": "Diagnosing type errors with class", + "abstract_raw": "Type inference engines often give terrible error messages, and the more sophisticated the type system the worse the problem. We show that even with the highly expressive type system implemented by the Glasgow Haskell Compiler (GHC)--including type classes, GADTs, and type families--it is possible to identify the most likely source of the type error, rather than the first source that the inference engine trips over. To determine which are the likely error sources, we apply a simple Bayesian model to a graph representation of the typing constraints; the satisfiability or unsatisfiability of paths within the graph provides evidence for or against possible explanations. While we build on prior work on error diagnosis for simpler type systems, inference in the richer type system of Haskell requires extending the graph with new nodes. The augmentation of the graph creates challenges both for Bayesian reasoning and for ensuring termination. 
Using a large corpus of Haskell programs, we show that this error localization technique is practical and significantly improves accuracy over the state of the art.", + "link": "https://www.semanticscholar.org/paper/48b0c261dfcdc6dc1bee6958271a92aeca09d94a", + "scraped_abstract": null, + "citation_best": 30 + }, + { + "paper": "2003164052", + "venue": "1127352206", + "year": "2015", + "title": "provably correct peephole optimizations with alive", + "label": [ + "167955471", + "169590947", + "154690210", + "184337299", + "75606506", + "199360897", + "4970464", + "64156549", + "80444323" + ], + "author": [ + "2096241075", + "2224928429", + "1774359112", + "1993899960" + ], + "reference": [ + "109452506", + "146262440", + "364774736", + "1480909796", + "1570783318", + "1587844310", + "1806382567", + "1977279860", + "1982205631", + "1996892501", + "2000659103", + "2023035194", + "2040856861", + "2045025591", + "2076667206", + "2081917916", + "2085050643", + "2088056808", + "2098456636", + "2101635160", + "2108174561", + "2114012357", + "2117065635", + "2119664062", + "2126823808", + "2127919458", + "2128433129", + "2134087751", + "2144540543", + "2160145830", + "2160947267", + "2164264520", + "2167029843", + "2169336925", + "2169391873", + "2170737051", + "2294628582", + "2911450990" + ], + "abstract": "compilers should not miscompile our work addresses problems in developing peephole optimizations that perform local rewriting to improve the efficiency of llvm code these optimizations are individually difficult to get right particularly in the presence of undefined behavior taken together they represent a persistent source of bugs this paper presents alive a domain specific language for writing optimizations and for automatically either proving them correct or else generating counterexamples furthermore alive can be automatically translated into c code that is suitable for inclusion in an llvm optimization pass alive is based on an attempt to balance usability and 
formal methods for example it captures but largely hides the detailed semantics of three different kinds of undefined behavior in llvm we have translated more than 300 llvm optimizations into alive and in the process found that eight of them were wrong", + "title_raw": "Provably correct peephole optimizations with alive", + "abstract_raw": "Compilers should not miscompile. Our work addresses problems in developing peephole optimizations that perform local rewriting to improve the efficiency of LLVM code. These optimizations are individually difficult to get right, particularly in the presence of undefined behavior; taken together they represent a persistent source of bugs. This paper presents Alive, a domain-specific language for writing optimizations and for automatically either proving them correct or else generating counterexamples. Furthermore, Alive can be automatically translated into C++ code that is suitable for inclusion in an LLVM optimization pass. Alive is based on an attempt to balance usability and formal methods; for example, it captures---but largely hides---the detailed semantics of three different kinds of undefined behavior in LLVM. 
We have translated more than 300 LLVM optimizations into Alive and, in the process, found that eight of them were wrong.", + "link": "https://www.semanticscholar.org/paper/37791336941a0d954e4a98c96b1a66ca7be43eb2", + "scraped_abstract": null, + "citation_best": 116 + }, + { + "paper": "2070021346", + "venue": "1184151122", + "year": "2015", + "title": "parallel correctness and transferability for conjunctive queries", + "label": [ + "55439883", + "70061542", + "65647387", + "50820777", + "93996380", + "80444323" + ], + "author": [ + "181190241", + "2229907684", + "297960948", + "1870307126", + "7276906" + ], + "reference": [ + "1979514837", + "1986870156", + "2006141176", + "2008199814", + "2058651804", + "2065251436", + "2071588600", + "2073279331", + "2075620950", + "2077542434", + "2137373960", + "2139072600", + "2163087774" + ], + "abstract": "a dominant cost for query evaluation in modern massively distributed systems is the number of communication rounds for this reason there is a growing interest in single round multiway join algorithms where data is first reshuffled over many servers and then evaluated in a parallel but communication free way the reshuffling itself is specified as a distribution policy we introduce a correctness condition called parallel correctness for the evaluation of queries w r t a distribution policy we study the complexity of parallel correctness for conjunctive queries as well as transferability of parallel correctness between queries we also investigate the complexity of transferability for certain families of distribution policies including for instance the hypercube distribution", + "title_raw": "Parallel-Correctness and Transferability for Conjunctive Queries", + "abstract_raw": "A dominant cost for query evaluation in modern massively distributed systems is the number of communication rounds. 
For this reason, there is a growing interest in single-round multiway join algorithms where data is first reshuffled over many servers and then evaluated in a parallel but communication-free way. The reshuffling itself is specified as a distribution policy. We introduce a correctness condition, called parallel-correctness, for the evaluation of queries w.r.t. a distribution policy. We study the complexity of parallel-correctness for conjunctive queries as well as transferability of parallel-correctness between queries. We also investigate the complexity of transferability for certain families of distribution policies, including, for instance, the Hypercube distribution.", + "link": "https://www.semanticscholar.org/paper/e6030a163a373fac160f60c32ceccb3bc7663493", + "scraped_abstract": null, + "citation_best": 10 + }, + { + "paper": "1495444061", + "venue": "1163618098", + "year": "2015", + "title": "a messy state of the union taming the composite state machines of tls", + "label": [ + "1009929", + "33884865", + "38652104", + "167822520", + "109154458", + "2776717989", + "99674996", + "196491621", + "15927051", + "171289174", + "148176105" + ], + "author": [ + "1044068175", + "392089535", + "27630912", + "1819774294", + "1520953178", + "1805102069", + "1820728581", + "2343742392" + ], + "reference": [ + "34367126", + "39167138", + "75729132", + "165943180", + "189766157", + "1567409052", + "1595861018", + "1673604584", + "1733713784", + "1809974132", + "1971772683", + "1975344666", + "2058906062", + "2064815039", + "2067756717", + "2070775894", + "2094250919", + "2107506969", + "2112018008", + "2114398364", + "2153041122", + "2159840470", + "2163005041", + "2164428090", + "2165175152", + "2166093784", + "2167104015", + "2170630946", + "2266218113", + "2275269859", + "2296886798", + "2397911851", + "2915352631", + "2916447643", + "2918128759", + "2952321600", + "3194229933" + ], + "abstract": "implementations of the transport layer security tls protocol must handle a 
variety of protocol versions and extensions authentication modes and key exchange methods confusingly each combination may prescribe a different message sequence between the client and the server we address the problem of designing a robust composite state machine that correctly multiplexes between these different protocol modes we systematically test popular open source tls implementations for state machine bugs and discover several critical security vulnerabilities that have lain hidden in these libraries for years and have now finally been patched due to our disclosures several of these vulnerabilities including the recently publicized freak flaw enable a network attacker to break into tls connections between authenticated clients and servers we argue that state machine bugs stem from incorrect compositions of individually correct state machines we present the first verified implementation of a composite tls state machine in c that can be embedded into openssl and accounts for all its supported cipher suites our attacks expose the need for the formal verification of core components in cryptographic protocol libraries our implementation demonstrates that such mechanized proofs are within reach even for mainstream tls implementations", + "title_raw": "A Messy State of the Union: Taming the Composite State Machines of TLS", + "abstract_raw": "Implementations of the Transport Layer Security (TLS) protocol must handle a variety of protocol versions and extensions, authentication modes, and key exchange methods. Confusingly, each combination may prescribe a different message sequence between the client and the server. We address the problem of designing a robust composite state machine that correctly multiplexes between these different protocol modes. 
We systematically test popular open-source TLS implementations for state machine bugs and discover several critical security vulnerabilities that have lain hidden in these libraries for years, and have now finally been patched due to our disclosures. Several of these vulnerabilities, including the recently publicized FREAK flaw, enable a network attacker to break into TLS connections between authenticated clients and servers. We argue that state machine bugs stem from incorrect compositions of individually correct state machines. We present the first verified implementation of a composite TLS state machine in C that can be embedded into OpenSSL and accounts for all its supported cipher suites. Our attacks expose the need for the formal verification of core components in cryptographic protocol libraries, our implementation demonstrates that such mechanized proofs are within reach, even for mainstream TLS implementations.", + "link": "https://www.semanticscholar.org/paper/f247fc03f645428049f138dcf76d897767dbad7c", + "scraped_abstract": null, + "citation_best": 208 + }, + { + "paper": "1536141561", + "venue": "1163618098", + "year": "2015", + "title": "riposte an anonymous messaging system handling millions of users", + "label": [ + "38652104", + "99221444", + "38822068", + "178489894", + "93996380", + "108827166", + "110875604" + ], + "author": [ + "253729515", + "201828038", + "292459508" + ], + "reference": [ + "40134741", + "111294696", + "124804086", + "139740867", + "171567834", + "644599125", + "1486928190", + "1501894674", + "1504707014", + "1522388518", + "1532961226", + "1538706266", + "1545174125", + "1549820118", + "1564775601", + "1583834646", + "1589176872", + "1590015132", + "1600530176", + "1608539542", + "1655958391", + "1763079358", + "1801339841", + "1834982738", + "1885156594", + "1952958290", + "1970606468", + "1975016298", + "1978884755", + "1979215153", + "1995885288", + "1996006421", + "2003736153", + "2011112377", + "2011441851", + 
"2015880590", + "2027471022", + "2032821770", + "2052267638", + "2052518690", + "2057100569", + "2065265824", + "2073086835", + "2073346043", + "2087811006", + "2092422002", + "2093507182", + "2099858845", + "2101770573", + "2102632861", + "2103299932", + "2103597742", + "2103647628", + "2105037262", + "2105994318", + "2127295197", + "2134698082", + "2136631923", + "2145801920", + "2146112227", + "2150248082", + "2154059840", + "2156410527", + "2156580773", + "2163674653", + "2200869402", + "2247375362", + "2253199766", + "2280235587", + "2460119894", + "2505706763", + "2521972914", + "2611529857", + "3141585064" + ], + "abstract": "this paper presents riposte a new system for anonymous broadcast messaging riposte is the first such system to our knowledge that simultaneously protects against traffic analysis attacks prevents anonymous denial of service by malicious clients and scales to million user anonymity sets to achieve these properties riposte makes novel use of techniques used in systems for private information retrieval and secure multi party computation for latency tolerant workloads with many more readers than writers e g twitter wikileaks we demonstrate that a three server riposte cluster can build an anonymity set of 2 895 216 users in 32 hours", + "title_raw": "Riposte: An Anonymous Messaging System Handling Millions of Users", + "abstract_raw": "This paper presents Riposte, a new system for anonymous broadcast messaging. Riposte is the first such system, to our knowledge, that simultaneously protects against traffic-analysis attacks, prevents anonymous denial-of-service by malicious clients, and scales to million-user anonymity sets. To achieve these properties, Riposte makes novel use of techniques used in systems for private information retrieval and secure multi-party computation. For latency-tolerant workloads with many more readers than writers (e.g. 
Twitter, Wikileaks), we demonstrate that a three-server Riposte cluster can build an anonymity set of 2,895,216 users in 32 hours.", + "link": "https://www.semanticscholar.org/paper/02dad9c51e3a2e2117ffc41d624de4a090271d1f", + "scraped_abstract": null, + "citation_best": 117 + }, + { + "paper": "2094739364", + "venue": "1152462849", + "year": "2015", + "title": "central control over distributed routing", + "label": [ + "184896649", + "29436982", + "204739117", + "177818476", + "104954878", + "157626507", + "94600068", + "115443555", + "196423136", + "9659607", + "174809319", + "31258907", + "87044965", + "139330139", + "204948658", + "70522964", + "71923881", + "189884158", + "120314980", + "89305328" + ], + "author": [ + "137890367", + "320427046", + "20158177", + "1998722161" + ], + "reference": [ + "49496018", + "85890223", + "1485595694", + "1636629214", + "1764864911", + "1903971244", + "2009038171", + "2021234005", + "2026177951", + "2061372501", + "2095234341", + "2096655151", + "2102090846", + "2105785546", + "2109840532", + "2112867573", + "2126822952", + "2136930733", + "2137826183", + "2147118406", + "2147802358", + "2156654756", + "2163015897", + "2165030634", + "2177058407", + "2248064281", + "2275551475", + "2286743186", + "2474591072", + "2513856683", + "2514718641", + "2798915702", + "3159998597", + "3161272550" + ], + "abstract": "centralizing routing decisions offers tremendous flexibility but sacrifices the robustness of distributed protocols in this paper we present fibbing an architecture that achieves both flexibility and robustness through central control over distributed routing fibbing introduces fake nodes and links into an underlying link state routing protocol so that routers compute their own forwarding tables based on the augmented topology fibbing is expressive and readily supports flexible load balancing traffic engineering and backup routes based on high level forwarding requirements the fibbing controller computes a compact 
augmented topology and injects the fake components through standard routing protocol messages fibbing works with any unmodified routers speaking ospf our experiments also show that it can scale to large networks with many forwarding requirements introduces minimal overhead and quickly reacts to network and controller failures", + "title_raw": "Central Control Over Distributed Routing", + "abstract_raw": "Centralizing routing decisions offers tremendous flexibility, but sacrifices the robustness of distributed protocols. In this paper, we present Fibbing, an architecture that achieves both flexibility and robustness through central control over distributed routing. Fibbing introduces fake nodes and links into an underlying link-state routing protocol, so that routers compute their own forwarding tables based on the augmented topology. Fibbing is expressive, and readily supports flexible load balancing, traffic engineering, and backup routes. Based on high-level forwarding requirements, the Fibbing controller computes a compact augmented topology and injects the fake components through standard routing-protocol messages. Fibbing works with any unmodified routers speaking OSPF. 
Our experiments also show that it can scale to large networks with many forwarding requirements, introduces minimal overhead, and quickly reacts to network and controller failures.", + "link": "https://www.semanticscholar.org/paper/a297ca79f8a1eac72282b0296d269b168ee3296d", + "scraped_abstract": null, + "citation_best": 139 + }, + { + "paper": "2070299948", + "venue": "1140684652", + "year": "2015", + "title": "quickscorer a fast algorithm to rank documents with additive ensembles of regression trees", + "label": [ + "119857082", + "189430467", + "101056560", + "50341643", + "100853971", + "124101348", + "140745168", + "86037889", + "197927960" + ], + "author": [ + "1989507918", + "689896360", + "2109622093", + "1650486011", + "1923078747", + "2277909408" + ], + "reference": [ + "136905915", + "1678356000", + "1821491182", + "1973435495", + "1976864843", + "2000431947", + "2069870183", + "2075501215", + "2094145178", + "2108278040", + "2115584760", + "2134195052", + "2155482025", + "2162059449", + "2162741763", + "2167865917", + "2187805056", + "2949654875", + "3097096317" + ], + "abstract": "learning to rank models based on additive ensembles of regression trees have proven to be very effective for ranking query results returned by web search engines a scenario where quality and efficiency requirements are very demanding unfortunately the computational cost of these ranking models is high thus several works already proposed solutions aiming at improving the efficiency of the scoring process by dealing with features and peculiarities of modern cpus and memory hierarchies in this paper we present quickscorer a new algorithm that adopts a novel bitvector representation of the tree based ranking model and performs an interleaved traversal of the ensemble by means of simple logical bitwise operations the performance of the proposed algorithm are unprecedented due to its cache aware approach both in terms of data layout and access patterns and to a control flow that 
entails very low branch mis prediction rates the experiments on real learning to rank datasets show that quickscorer is able to achieve speedups over the best state of the art baseline ranging from 2x to 6 5x", + "title_raw": "QuickScorer: A Fast Algorithm to Rank Documents with Additive Ensembles of Regression Trees", + "abstract_raw": "Learning-to-Rank models based on additive ensembles of regression trees have proven to be very effective for ranking query results returned by Web search engines, a scenario where quality and efficiency requirements are very demanding. Unfortunately, the computational cost of these ranking models is high. Thus, several works already proposed solutions aiming at improving the efficiency of the scoring process by dealing with features and peculiarities of modern CPUs and memory hierarchies. In this paper, we present QuickScorer, a new algorithm that adopts a novel bitvector representation of the tree-based ranking model, and performs an interleaved traversal of the ensemble by means of simple logical bitwise operations. The performance of the proposed algorithm are unprecedented, due to its cache-aware approach, both in terms of data layout and access patterns, and to a control flow that entails very low branch mis-prediction rates. 
The experiments on real Learning-to-Rank datasets show that QuickScorer is able to achieve speedups over the best state-of-the-art baseline ranging from 2x to 6.5x.", + "link": "https://www.semanticscholar.org/paper/2bb585c4b9d89b095e9938f7d1d3286e4ac2076f", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2009122315", + "venue": "1131589359", + "year": "2015", + "title": "spy vs spy rumor source obfuscation", + "label": [ + "41065033", + "40305131", + "38652104", + "518677369", + "108827166", + "178005623" + ], + "author": [ + "2004410564", + "18301080", + "2166897319", + "2147669519" + ], + "reference": [ + "1580004440", + "1655958391", + "1834982738", + "1893161742", + "1952958290", + "2011014734", + "2023223364", + "2057199594", + "2084531279", + "2087811006", + "2093029384", + "2095006675", + "2101877806", + "2103299932", + "2111772797", + "2112515575", + "2127644875", + "2130466555", + "2136347453", + "2150105124", + "2153204928", + "2161850537", + "2163674653", + "2165949377", + "2174507869", + "2311773231", + "2555171551", + "2953217652", + "2963385671", + "3016516185" + ], + "abstract": "anonymous messaging platforms such as secret yik yak and whisper have emerged as important social media for sharing one s thoughts without the fear of being judged by friends family or the public further such anonymous platforms are crucial in nations with authoritarian governments the right to free expression and sometimes the personal safety of the author of the message depend on anonymity whether for fear of judgment or personal endangerment it is crucial to keep anonymous the identity of the user who initially posted a sensitive message in this paper we consider an adversary who observes a snapshot of the spread of a message at a certain time recent advances in rumor source detection shows that the existing messaging protocols are vulnerable against such an adversary we introduce a novel messaging protocol which we call adaptive diffusion and show 
that it spreads the messages fast and achieves a perfect obfuscation of the source when the underlying contact network is an infinite regular tree all users with the message are nearly equally likely to have been the origin of the message experiments on a sampled facebook network show that it effectively hides the location of the source even when the graph is finite irregular and has cycles", + "title_raw": "Spy vs. Spy: Rumor Source Obfuscation", + "abstract_raw": "Anonymous messaging platforms, such as Secret, Yik Yak and Whisper, have emerged as important social media for sharing one's thoughts without the fear of being judged by friends, family, or the public. Further, such anonymous platforms are crucial in nations with authoritarian governments; the right to free expression and sometimes the personal safety of the author of the message depend on anonymity. Whether for fear of judgment or personal endangerment, it is crucial to keep anonymous the identity of the user who initially posted a sensitive message. In this paper, we consider an adversary who observes a snapshot of the spread of a message at a certain time. Recent advances in rumor source detection shows that the existing messaging protocols are vulnerable against such an adversary. We introduce a novel messaging protocol, which we call adaptive diffusion, and show that it spreads the messages fast and achieves a perfect obfuscation of the source when the underlying contact network is an infinite regular tree: all users with the message are nearly equally likely to have been the origin of the message. 
Experiments on a sampled Facebook network show that it effectively hides the location of the source even when the graph is finite, irregular and has cycles.", + "link": "https://www.semanticscholar.org/paper/62debb00681ae52e86cbebc065d893a6c0fa0ec1", + "scraped_abstract": null, + "citation_best": 1 + }, + { + "paper": "2092799168", + "venue": "1175089206", + "year": "2015", + "title": "dbscan revisited mis claim un fixability and approximation", + "label": [ + "311688", + "73555534", + "46576248", + "80444323", + "146076780", + "191928576", + "111030470" + ], + "author": [ + "2628047163", + "2131082813" + ], + "reference": [ + "8734727", + "108314478", + "154269568", + "1566114229", + "1673310716", + "1938740620", + "1967822648", + "2026297770", + "2027387410", + "2037877878", + "2041179002", + "2060564327", + "2067877017", + "2088698696", + "2093241092", + "2108335620", + "2118268275", + "2123747287", + "2123904884", + "2129976136", + "2140190241", + "2145758431", + "2152601912", + "2160642098", + "2165169065", + "2167800198", + "2189503625", + "2220451813", + "2262723863", + "2489377986", + "3120740533" + ], + "abstract": "dbscan is a popular method for clustering multi dimensional objects just as notable as the method s vast success is the research community s quest for its efficient computation the original kdd 96 paper claimed an algorithm with o n log n running time where n is the number of objects unfortunately this is a mis claim and that algorithm actually requires o n2 time there has been a fix in 2d space where a genuine o n log n time algorithm has been found looking for a fix for dimensionality d 3 is currently an important open problem in this paper we prove that for d 3 the dbscan problem requires n4 3 time to solve unless very significant breakthroughs ones widely believed to be impossible could be made in theoretical computer science this i explains why the community s search for fixing the aforementioned mis claim has been futile for d 3 and ii 
indicates sadly that all dbscan algorithms must be intolerably slow even on moderately large n in practice surprisingly we show that the running time can be dramatically brought down to o n in expectation regardless of the dimensionality d as soon as slight inaccuracy in the clustering results is permitted we formalize our findings into the new notion of approximate dbscan which we believe should replace dbscan on big data due to the latter s computational intractability", + "title_raw": "DBSCAN Revisited: Mis-Claim, Un-Fixability, and Approximation", + "abstract_raw": "DBSCAN is a popular method for clustering multi-dimensional objects. Just as notable as the method's vast success is the research community's quest for its efficient computation. The original KDD'96 paper claimed an algorithm with O(n log n) running time, where n is the number of objects. Unfortunately, this is a mis-claim; and that algorithm actually requires O(n2) time. There has been a fix in 2D space, where a genuine O(n log n)-time algorithm has been found. Looking for a fix for dimensionality d \u2265 3 is currently an important open problem. In this paper, we prove that for d \u2265 3, the DBSCAN problem requires \u03a9(n4/3) time to solve, unless very significant breakthroughs---ones widely believed to be impossible---could be made in theoretical computer science. This (i) explains why the community's search for fixing the aforementioned mis-claim has been futile for d \u2265 3, and (ii) indicates (sadly) that all DBSCAN algorithms must be intolerably slow even on moderately large n in practice. Surprisingly, we show that the running time can be dramatically brought down to O(n) in expectation regardless of the dimensionality d, as soon as slight inaccuracy in the clustering results is permitted. 
We formalize our findings into the new notion of \u03c1-approximate DBSCAN, which we believe should replace DBSCAN on big data due to the latter's computational intractability.", + "link": "https://www.semanticscholar.org/paper/5869b3d5607bff1a079aa24c8a241d656fe683b7", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2049314312", + "venue": "1171178643", + "year": "2015", + "title": "pivot tracing dynamic causal monitoring for distributed systems", + "label": [ + "147494362", + "1009929", + "100850083", + "49585438", + "138673069", + "14107862", + "72589913", + "120314980" + ], + "author": [ + "2152065620", + "2223686518", + "2665458278" + ], + "reference": [ + "46959185", + "148956775", + "149441384", + "191500313", + "1243432849", + "1415938757", + "1513765469", + "1524673069", + "1527417319", + "1537198022", + "1563576199", + "1581005283", + "1610570299", + "1742682033", + "1856904932", + "1901726304", + "1981420413", + "1985229168", + "1997369208", + "1997787524", + "2008665665", + "2027380800", + "2029039689", + "2029414465", + "2031802828", + "2037004315", + "2039157918", + "2040156347", + "2041987828", + "2044530246", + "2049298676", + "2054834791", + "2063566496", + "2076793324", + "2085392455", + "2088675571", + "2103201239", + "2105947650", + "2109178099", + "2109820865", + "2110144520", + "2117020308", + "2119738171", + "2126529005", + "2127249107", + "2128825142", + "2130843735", + "2143522309", + "2148544545", + "2154983209", + "2155072926", + "2156997370", + "2171867449", + "2172191070", + "2173213060", + "2213870015", + "2225346295", + "2314785489", + "2753710282", + "3006026125", + "3137220996" + ], + "abstract": "monitoring and troubleshooting distributed systems is notoriously difficult potential problems are complex varied and unpredictable the monitoring and diagnosis tools commonly used today logs counters and metrics have two important limitations what gets recorded is defined a priori and the information is recorded in a 
component or machine centric way making it extremely hard to correlate events that cross these boundaries this paper presents pivot tracing a monitoring framework for distributed systems that addresses both limitations by combining dynamic instrumentation with a novel relational operator the happened before join pivot tracing gives users at runtime the ability to define arbitrary metrics at one point of the system while being able to select filter and group by events meaningful at other parts of the system even when crossing component or machine boundaries we have implemented a prototype of pivot tracing for java based systems and evaluate it on a heterogeneous hadoop cluster comprising hdfs hbase mapreduce and yarn we show that pivot tracing can effectively identify a diverse range of root causes such as software bugs misconfiguration and limping hardware we show that pivot tracing is dynamic extensible and enables cross tier analysis between inter operating applications with low execution overhead", + "title_raw": "Pivot tracing: dynamic causal monitoring for distributed systems", + "abstract_raw": "Monitoring and troubleshooting distributed systems is notoriously difficult; potential problems are complex, varied, and unpredictable. The monitoring and diagnosis tools commonly used today -- logs, counters, and metrics -- have two important limitations: what gets recorded is defined a priori, and the information is recorded in a component- or machine-centric way, making it extremely hard to correlate events that cross these boundaries. This paper presents Pivot Tracing, a monitoring framework for distributed systems that addresses both limitations by combining dynamic instrumentation with a novel relational operator: the happened-before join. 
Pivot Tracing gives users, at runtime, the ability to define arbitrary metrics at one point of the system, while being able to select, filter, and group by events meaningful at other parts of the system, even when crossing component or machine boundaries. We have implemented a prototype of Pivot Tracing for Java-based systems and evaluate it on a heterogeneous Hadoop cluster comprising HDFS, HBase, MapReduce, and YARN. We show that Pivot Tracing can effectively identify a diverse range of root causes such as software bugs, misconfiguration, and limping hardware. We show that Pivot Tracing is dynamic, extensible, and enables cross-tier analysis between inter-operating applications, with low execution overhead.", + "link": "https://www.semanticscholar.org/paper/c2c9b0bd2c49d3ec59e76ae6b054447a72033c5b", + "scraped_abstract": null, + "citation_best": 18 + }, + { + "paper": "2411173678", + "venue": "2534597628", + "year": "2016", + "title": "using crash hoare logic for certifying the fscq file system", + "label": [ + "104949639", + "55439883", + "2777683733", + "48103436", + "199360897", + "2780940931" + ], + "author": [ + "2117466421", + "2311703592", + "2225573685", + "46689790", + "3206713635", + "1150986126" + ], + "reference": [], + "abstract": "", + "title_raw": "Using Crash Hoare Logic for Certifying the {FSCQ} File System", + "abstract_raw": "", + "link": "https://www.semanticscholar.org/paper/bade6470375a7126d62b744214ff305d3c905ec1", + "scraped_abstract": null, + "citation_best": 182 + }, + { + "paper": "2412033124", + "venue": "2534597628", + "year": "2016", + "title": "coz finding code that counts with causal profiling", + "label": [ + "113775141", + "187191949", + "2777904410", + "199519371" + ], + "author": [ + "2056298812", + "2144133257" + ], + "reference": [], + "abstract": "", + "title_raw": "COZ: Finding Code that Counts with Causal Profiling", + "abstract_raw": "", + "link": 
"https://www.semanticscholar.org/paper/49e840c4f0b7f133a8dfa8ff04a248bc39173416", + "scraped_abstract": null, + "citation_best": 3 + }, + { + "paper": "2197150605", + "venue": "1166315290", + "year": "2015", + "title": "foldio digital fabrication of interactive and shape changing objects with foldable printed electronics", + "label": [ + "149635348", + "25435620", + "9390403", + "89505385" + ], + "author": [ + "2003629216", + "2708621365", + "2114902421", + "2098177638" + ], + "reference": [ + "1517527132", + "1572844260", + "1984399688", + "1984631936", + "1985153681", + "2001327325", + "2010399991", + "2020169247", + "2040565638", + "2048912286", + "2054044780", + "2057073649", + "2060064953", + "2068086746", + "2083363561", + "2099908542", + "2102712675", + "2103072279", + "2103339808", + "2123139789", + "2126086600", + "2126616676", + "2127694416", + "2128850857", + "2131063824", + "2133498025", + "2143131345", + "2144575742", + "2147326417", + "2148982166", + "2150549828", + "2150695584", + "2153580689", + "2168018860" + ], + "abstract": "foldios are foldable interactive objects with embedded input sensing and output capabilities foldios combine the advantages of folding for thin lightweight and shape changing objects with the strengths of thin film printed electronics for embedded sensing and output to enable designers and end users to create highly custom interactive foldable objects we contribute a new design and fabrication approach it makes it possible to design the foldable object in a standard 3d environment and to easily add interactive high level controls eliminating the need to manually design a fold pattern and low level circuits for printed electronics second we contribute a set of printable user interface controls for touch input and display output on folded objects moreover we contribute controls for sensing and actuation of shape changeable objects we demonstrate the versatility of the approach with a variety of interactive objects that have 
been fabricated with this framework", + "title_raw": "Foldio: Digital Fabrication of Interactive and Shape-Changing Objects With Foldable Printed Electronics", + "abstract_raw": "Foldios are foldable interactive objects with embedded input sensing and output capabilities. Foldios combine the advantages of folding for thin, lightweight and shape-changing objects with the strengths of thin-film printed electronics for embedded sensing and output. To enable designers and end-users to create highly custom interactive foldable objects, we contribute a new design and fabrication approach. It makes it possible to design the foldable object in a standard 3D environment and to easily add interactive high-level controls, eliminating the need to manually design a fold pattern and low-level circuits for printed electronics. Second, we contribute a set of printable user interface controls for touch input and display output on folded objects. Moreover, we contribute controls for sensing and actuation of shape-changeable objects. 
We demonstrate the versatility of the approach with a variety of interactive objects that have been fabricated with this framework.", + "link": "https://www.semanticscholar.org/paper/b1bc7498d3a287c863b864b811b47e28d1420b12", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2176017127", + "venue": "1166315290", + "year": "2015", + "title": "orbits gaze interaction for smart watches using smooth pursuit eye movements", + "label": [ + "29794715", + "2779916870", + "97970142", + "56461940", + "62402345", + "31972630", + "153050134", + "150594956", + "10324989" + ], + "author": [ + "2130505379", + "2131772637", + "2139380868", + "2011140363" + ], + "reference": [ + "1590806791", + "1666243891", + "1850899538", + "1968080309", + "1976177666", + "1979038477", + "1983239853", + "1986291329", + "1990266873", + "1993982229", + "1995055145", + "2001456765", + "2004403975", + "2022716189", + "2031781307", + "2038358290", + "2077819131", + "2087872446", + "2094670800", + "2099788879", + "2109094823", + "2110501627", + "2137664882", + "2141993427", + "2146567024", + "2147149886", + "2158707444", + "2160155731", + "2160951686", + "2170237747", + "2170982357", + "2344381053", + "2352625460", + "2461734069", + "3006135479" + ], + "abstract": "we introduce orbits a novel gaze interaction technique that enables hands free input on smart watches the technique relies on moving controls to leverage the smooth pursuit movements of the eyes and detect whether and at which control the user is looking at in orbits controls include targets that move in a circular trajectory in the face of the watch and can be selected by following the desired one for a small amount of time we conducted two user studies to assess the technique s recognition and robustness which demonstrated how orbits is robust against false positives triggered by natural eye movements and how it presents a hands free high accuracy way of interacting with smart watches using off the shelf devices finally 
we developed three example interfaces built with orbits a music player a notifications face plate and a missed call menu despite relying on moving controls very unusual in current hci interfaces these were generally well received by participants in a third and final study", + "title_raw": "Orbits: Gaze Interaction for Smart Watches using Smooth Pursuit Eye Movements", + "abstract_raw": "We introduce Orbits, a novel gaze interaction technique that enables hands-free input on smart watches. The technique relies on moving controls to leverage the smooth pursuit movements of the eyes and detect whether and at which control the user is looking at. In Orbits, controls include targets that move in a circular trajectory in the face of the watch, and can be selected by following the desired one for a small amount of time. We conducted two user studies to assess the technique's recognition and robustness, which demonstrated how Orbits is robust against false positives triggered by natural eye movements and how it presents a hands-free, high accuracy way of interacting with smart watches using off-the-shelf devices. Finally, we developed three example interfaces built with Orbits: a music player, a notifications face plate and a missed call menu. 
Despite relying on moving controls -- very unusual in current HCI interfaces -- these were generally well received by participants in a third and final study.", + "link": "https://www.semanticscholar.org/paper/bb88c51fcc352f0cc1489d96800f43c574406cb0", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2271345687", + "venue": "1166315290", + "year": "2015", + "title": "webstrates shareable dynamic media", + "label": [ + "21959979", + "107457646", + "2776063143", + "2777904410", + "136764020" + ], + "author": [ + "263677593", + "2036409498", + "2108974072", + "1925449899", + "2674390412" + ], + "reference": [ + "66130970", + "1496065127", + "1516319412", + "1560867643", + "1770006921", + "1909822857", + "1970022914", + "1988816896", + "1989814541", + "1999730921", + "2012262424", + "2014191808", + "2024565395", + "2034285448", + "2034967522", + "2037559787", + "2049817002", + "2051172185", + "2064613661", + "2083298807", + "2091777849", + "2105465615", + "2115920120", + "2128026023", + "2130200371", + "2130500927", + "2133353349", + "2134816385", + "2139936993", + "2145220267", + "2158539604", + "2166901142", + "2584045800" + ], + "abstract": "we revisit alan kay s early vision of dynamic media that blurs the distinction between documents and applications we introduce shareable dynamic media that are malleable by users who may appropriate them in idiosyncratic ways shareable among users who collaborate on multiple aspects of the media and distributable across diverse devices and platforms we present webstrates an environment for exploring shareable dynamic media webstrates augment web technology with real time sharing they turn web pages into substrates i e software entities that act as applications or documents depending upon use we illustrate webstrates with two implemented case studies users collaboratively author an article with functionally and visually different editors that they can personalize and extend at run time and they orchestrate 
its presentation and audience participation with multiple devices we demonstrate the simplicity and generative power of webstrates with three additional prototypes and evaluate it from a systems perspective", + "title_raw": "Webstrates : Shareable Dynamic Media", + "abstract_raw": "We revisit Alan Kay's early vision of dynamic media that blurs the distinction between documents and applications. We introduce shareable dynamic media that are malleable by users, who may appropriate them in idiosyncratic ways; shareable among users, who collaborate on multiple aspects of the media; and distributable across diverse devices and platforms. We present Webstrates, an environment for exploring shareable dynamic media. Webstrates augment web technology with real-time sharing. They turn web pages into substrates, i.e. software entities that act as applications or documents depending upon use. We illustrate Webstrates with two implemented case studies: users collaboratively author an article with functionally and visually different editors that they can personalize and extend at run-time; and they orchestrate its presentation and audience participation with multiple devices. 
We demonstrate the simplicity and generative power of Webstrates with three additional prototypes and evaluate it from a systems perspective.", + "link": "https://www.semanticscholar.org/paper/18feb72ae815f9127498a7a5b2f46d5b7fe7f45d", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2269738476", + "venue": "1133523790", + "year": "2014", + "title": "constructing an interactive natural language interface for relational databases", + "label": [ + "24394798", + "174252522", + "96956885", + "2777530160", + "5655090", + "192939062", + "192028432", + "23123220", + "199360897", + "194222762", + "195324797", + "510870499", + "67463725" + ], + "author": [ + "2711965729", + "360112113" + ], + "reference": [ + "1508977358", + "1970709149", + "1995320938", + "1996791708", + "2032299694", + "2065259291", + "2077556543", + "2079372196", + "2082867342", + "2085828533", + "2095026299", + "2097184821", + "2098388305", + "2100531844", + "2121350579", + "2136480620", + "2145618437", + "2154268919", + "2157749290", + "2165382777", + "2169624745", + "2950584515" + ], + "abstract": "natural language has been the holy grail of query interface designers but has generally been considered too hard to work with except in limited specific circumstances in this paper we describe the architecture of an interactive natural language query interface for relational databases through a carefully limited interaction with the user we are able to correctly interpret complex natural language queries in a generic manner across a range of domains by these means a logically complex english language sentence is correctly translated into a sql query which may include aggregation nesting and various types of joins among other things and can be evaluated against an rdbms we have constructed a system nalir natural language interface for relational databases embodying these ideas our experimental assessment through user studies demonstrates that nalir is good enough to be usable in practice 
even naive users are able to specify quite complex ad hoc queries", + "title_raw": "Constructing an interactive natural language interface for relational databases", + "abstract_raw": "Natural language has been the holy grail of query interface designers, but has generally been considered too hard to work with, except in limited specific circumstances. In this paper, we describe the architecture of an interactive natural language query interface for relational databases. Through a carefully limited interaction with the user, we are able to correctly interpret complex natural language queries, in a generic manner across a range of domains. By these means, a logically complex English language sentence is correctly translated into a SQL query, which may include aggregation, nesting, and various types of joins, among other things, and can be evaluated against an RDBMS. We have constructed a system, NaLIR (Natural Language Interface for Relational databases), embodying these ideas. Our experimental assessment, through user studies, demonstrates that NaLIR is good enough to be usable in practice: even naive users are able to specify quite complex ad-hoc queries.", + "link": "https://www.semanticscholar.org/paper/9d8c5acea36981714278ca7e7d51a5ddd5ab8f39", + "scraped_abstract": null, + "citation_best": 404 + }, + { + "paper": "3125781181", + "venue": "1135342153", + "year": "2015", + "title": "hyptrails a bayesian approach for comparing hypotheses about human trails on the web", + "label": [ + "153083717", + "142291917", + "160234255", + "107673813", + "101112237", + "98763669", + "136764020", + "177769412", + "61096286" + ], + "author": [ + "2167599249", + "315966907", + "20543882", + "142799918" + ], + "reference": [ + "65714572", + "68858914", + "125517943", + "1500415735", + "1532325895", + "1548778571", + "1560285440", + "1649293523", + "1664061557", + "1966027336", + "1968407925", + "1975914563", + "1978394996", + "1981664382", + "1982896842", + "2008620264", + 
"2015720094", + "2023815876", + "2026644166", + "2027255954", + "2035503723", + "2041836310", + "2053171205", + "2060112995", + "2062685946", + "2065863071", + "2066636486", + "2075000550", + "2079025608", + "2080100102", + "2080450835", + "2088658556", + "2089192108", + "2089199911", + "2093124207", + "2108301111", + "2109913881", + "2118048521", + "2129531883", + "2129874624", + "2133156844", + "2137502531", + "2155135604", + "2167000629", + "2169240294", + "2231488453", + "2475924711" + ], + "abstract": "when users interact with the web today they leave sequential digital trails on a massive scale examples of such human trails include web navigation sequences of online restaurant reviews or online music play lists understanding the factors that drive the production of these trails can be useful for e g improving underlying network structures predicting user clicks or enhancing recommendations in this work we present a general approach called hyptrails for comparing a set of hypotheses about human trails on the web where hypotheses represent beliefs about transitions between states our approach utilizes markov chain models with bayesian inference the main idea is to incorporate hypotheses as informative dirichlet priors and to leverage the sensitivity of bayes factors on the prior for comparing hypotheses with each other for eliciting dirichlet priors from hypotheses we present an adaption of the so called trial roulette method we demonstrate the general mechanics and applicability of hyptrails by performing experiments with i synthetic trails for which we control the mechanisms that have produced them and ii empirical trails stemming from different domains including website navigation business reviews and online music played our work expands the repertoire of methods available for studying human trails on the web", + "title_raw": "HypTrails: A Bayesian Approach for Comparing Hypotheses About Human Trails on the Web", + "abstract_raw": "When users interact with 
the Web today, they leave sequential digital trails on a massive scale. Examples of such human trails include Web navigation, sequences of online restaurant reviews, or online music play lists. Understanding the factors that drive the production of these trails can be useful for e.g., improving underlying network structures, predicting user clicks or enhancing recommendations. In this work, we present a general approach called HypTrails for comparing a set of hypotheses about human trails on the Web, where hypotheses represent beliefs about transitions between states. Our approach utilizes Markov chain models with Bayesian inference. The main idea is to incorporate hypotheses as informative Dirichlet priors and to leverage the sensitivity of Bayes factors on the prior for comparing hypotheses with each other. For eliciting Dirichlet priors from hypotheses, we present an adaption of the so-called (trial) roulette method. We demonstrate the general mechanics and applicability of HypTrails by performing experiments with (i) synthetic trails for which we control the mechanisms that have produced them and (ii) empirical trails stemming from different domains including website navigation, business reviews and online music played. 
Our work expands the repertoire of methods available for studying human trails on the Web.", + "link": "https://www.semanticscholar.org/paper/d8ecc474db9b87941ab05fe1df468d91d86340e5", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "151167705", + "venue": "1184914352", + "year": "2014", + "title": "recovering from selection bias in causal and statistical inference", + "label": [ + "124101348", + "81917197" + ], + "author": [ + "237352020", + "2283469157", + "2248165042" + ], + "reference": [ + "1799752", + "67356013", + "134542921", + "167016754", + "1489999654", + "1511986666", + "1524326598", + "1554983662", + "1592011820", + "1853837125", + "1915430721", + "1975780287", + "1982730819", + "2014289389", + "2032536435", + "2034080748", + "2044758663", + "2049910836", + "2059089962", + "2076082444", + "2083710273", + "2134652049", + "2139122730", + "2143891888", + "2159080219", + "2160276026", + "2162651021", + "2397969444", + "2488917374", + "2541803860", + "3121478035", + "3124238886", + "3125872434", + "3133236490" + ], + "abstract": "selection bias is caused by preferential exclusion of units from the samples and represents a major obstacle to valid causal and statistical inferences it cannot be removed by randomized experiments and can rarely be detected in either experimental or observational studies in this paper we provide complete graphical and algorithmic conditions for recovering conditional probabilities from selection biased data we also provide graphical conditions for recoverability when unbiased data is available over a subset of the variables finally we provide a graphical condition that generalizes the backdoor criterion and serves to recover causal effects when the data is collected under preferential selection", + "title_raw": "Recovering from selection bias in causal and statistical inference", + "abstract_raw": "Selection bias is caused by preferential exclusion of units from the samples and represents a major obstacle to 
valid causal and statistical inferences; it cannot be removed by randomized experiments and can rarely be detected in either experimental or observational studies. In this paper, we provide complete graphical and algorithmic conditions for recovering conditional probabilities from selection biased data. We also provide graphical conditions for recoverability when unbiased data is available over a subset of the variables. Finally, we provide a graphical condition that generalizes the backdoor criterion and serves to recover causal effects when the data is collected under preferential selection.", + "link": "https://www.semanticscholar.org/paper/91dfdd05a99256c2293b952c3b008326b015d4eb", + "scraped_abstract": null, + "citation_best": 85 + }, + { + "paper": "2251682575", + "venue": "1188739475", + "year": "2014", + "title": "fast and robust neural network joint models for statistical machine translation", + "label": [ + "119857082", + "50644808", + "154945302", + "175202392", + "126155421", + "203005215" + ], + "author": [ + "3037503319", + "2309234230", + "2250754834", + "2251180718", + "2105897744", + "2003402953" + ], + "reference": [ + "125693536", + "179875071", + "932413789", + "1528441900", + "1753482797", + "1815076433", + "1880262756", + "1909398668", + "1934041838", + "1996903695", + "2013540053", + "2060127787", + "2083545877", + "2100183594", + "2116042738", + "2118090838", + "2132339004", + "2140343992", + "2141599568", + "2144879357", + "2156985047", + "2250489405", + "2250732891", + "2251098065", + "2251222643", + "2251246760", + "2252177599", + "2437005631", + "2914484425" + ], + "abstract": "recent work has shown success in using neural network language models nnlms as features in mt systems here we present a novel formulation for a neural network joint model nnjm which augments the nnlm with a source context window our model is purely lexicalized and can be integrated into any mt decoder we also present several variations of the nnjm which provide 
significant additive improvements", + "title_raw": "Fast and Robust Neural Network Joint Models for Statistical Machine Translation", + "abstract_raw": "Recent work has shown success in using neural network language models (NNLMs) as features in MT systems. Here, we present a novel formulation for a neural network joint model (NNJM), which augments the NNLM with a source context window. Our model is purely lexicalized and can be integrated into any MT decoder. We also present several variations of the NNJM which provide significant additive improvements.", + "link": "https://www.semanticscholar.org/paper/0894b06cff1cd0903574acaa7fcf071b144ae775", + "scraped_abstract": null, + "citation_best": 496 + }, + { + "paper": "2137864660", + "venue": "1163450153", + "year": "2014", + "title": "consumed endurance a metric to quantify arm fatigue of mid air interactions", + "label": [ + "154586513", + "44154836" + ], + "author": [ + "223563474", + "2600026359", + "2225637801", + "1569396467" + ], + "reference": [ + "1484168899", + "1725507402", + "1964743069", + "1966313362", + "1969521013", + "1983506867", + "1996593833", + "2005118853", + "2013199111", + "2013640876", + "2014362088", + "2019817434", + "2033544493", + "2043980699", + "2046344695", + "2048435250", + "2051978237", + "2057475882", + "2058740652", + "2075612272", + "2083545438", + "2099287431", + "2113243630", + "2117420274", + "2123484586", + "2128213565", + "2129186559", + "2151021074", + "2157289187", + "2626609912" + ], + "abstract": "mid air interactions are prone to fatigue and lead to a feeling of heaviness in the upper limbs a condition casually termed as the gorilla arm effect designers have often associated limitations of their mid air interactions with arm fatigue but do not possess a quantitative method to assess and therefore mitigate it in this paper we propose a novel metric consumed endurance ce derived from the biomechanical structure of the upper arm and aimed at characterizing the gorilla arm 
effect we present a method to capture ce in a non intrusive manner using an off the shelf camera based skeleton tracking system and demonstrate that ce correlates strongly with the borg cr10 scale of perceived exertion we show how designers can use ce as a complementary metric for evaluating existing and designing novel mid air interactions including tasks with repetitive input such as mid air text entry finally we propose a series of guidelines for the design of fatigue efficient mid air interfaces", + "title_raw": "Consumed endurance: a metric to quantify arm fatigue of mid-air interactions", + "abstract_raw": "Mid-air interactions are prone to fatigue and lead to a feeling of heaviness in the upper limbs, a condition casually termed as the gorilla-arm effect. Designers have often associated limitations of their mid-air interactions with arm fatigue, but do not possess a quantitative method to assess and therefore mitigate it. In this paper we propose a novel metric, Consumed Endurance (CE), derived from the biomechanical structure of the upper arm and aimed at characterizing the gorilla-arm effect. We present a method to capture CE in a non-intrusive manner using an off-the-shelf camera-based skeleton tracking system, and demonstrate that CE correlates strongly with the Borg CR10 scale of perceived exertion. We show how designers can use CE as a complementary metric for evaluating existing and designing novel mid-air interactions, including tasks with repetitive input such as mid-air text-entry. 
Finally, we propose a series of guidelines for the design of fatigue-efficient mid-air interfaces.", + "link": "https://www.semanticscholar.org/paper/416cebc00d038e5e11457c9d71e35025cd9a4829", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2147149886", + "venue": "1163450153", + "year": "2014", + "title": "duet exploring joint interactions on a smart phone and a smart watch", + "label": [ + "104114177", + "115121344", + "29794715", + "126155421", + "49774154", + "207347870" + ], + "author": [ + "2108826813", + "2115951828", + "2065696548", + "1899877228" + ], + "reference": [ + "606654937", + "1226869406", + "1510817841", + "1984575025", + "1992415267", + "1993982229", + "2001013292", + "2004179962", + "2019478086", + "2023331673", + "2026067799", + "2059403886", + "2060069377", + "2068389981", + "2072758279", + "2078155644", + "2096988419", + "2099800354", + "2101760432", + "2102413118", + "2104268896", + "2107188506", + "2108518773", + "2108715885", + "2120063997", + "2122331864", + "2125872152", + "2129629834", + "2131588614", + "2133258886", + "2135165686", + "2137664882", + "2137727001", + "2139505180", + "2146995712", + "2158272724", + "2160170050", + "2163833610", + "2169732913", + "2304268851", + "2307120217", + "2322160056", + "2466831675" + ], + "abstract": "the emergence of smart devices e g smart watches and smart eyewear is redefining mobile interaction from the solo performance of a smart phone to a symphony of multiple devices in this paper we present duet an interactive system that explores a design space of interactions between a smart phone and a smart watch based on the devices spatial configurations duet coordinates their motion and touch input and extends their visual and tactile output to one another this transforms the watch into an active element that enhances a wide range of phone based interactive tasks and enables a new class of multi device gestures and sensing techniques a technical evaluation shows the accuracy 
of these gestures and sensing techniques and a subjective study on duet provides insights observations and guidance for future work", + "title_raw": "Duet: exploring joint interactions on a smart phone and a smart watch", + "abstract_raw": "The emergence of smart devices (e.g., smart watches and smart eyewear) is redefining mobile interaction from the solo performance of a smart phone, to a symphony of multiple devices. In this paper, we present Duet -- an interactive system that explores a design space of interactions between a smart phone and a smart watch. Based on the devices' spatial configurations, Duet coordinates their motion and touch input, and extends their visual and tactile output to one another. This transforms the watch into an active element that enhances a wide range of phone-based interactive tasks, and enables a new class of multi-device gestures and sensing techniques. A technical evaluation shows the accuracy of these gestures and sensing techniques, and a subjective study on Duet provides insights, observations, and guidance for future work.", + "link": "https://www.semanticscholar.org/paper/9992b47939c87de3073cb21cf88e6021da9b4706", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2081933842", + "venue": "1163450153", + "year": "2014", + "title": "type hover swipe in 96 bytes a motion sensing mechanical keyboard", + "label": [ + "104114177", + "159437735", + "207347870", + "121449826", + "31972630", + "2779623668" + ], + "author": [ + "2244944254", + "2206727272", + "2075214526", + "2098553916", + "2065024650" + ], + "reference": [ + "20898209", + "137456267", + "1500711968", + "1973259708", + "1973394302", + "1980720228", + "2012241589", + "2032366757", + "2036471813", + "2048472057", + "2070885641", + "2076305990", + "2079750751", + "2091491784", + "2096003374", + "2100661450", + "2103853000", + "2105004615", + "2105594594", + "2109075207", + "2110964220", + "2111424004", + "2114663654", + "2120701504", + "2125337786", 
+ "2130851628", + "2134418753", + "2139459444", + "2140280838", + "2141317558", + "2143272542", + "2145607950", + "2165715280", + "2168570452", + "2170513162", + "2172156083", + "2911964244" + ], + "abstract": "we present a new type of augmented mechanical keyboard capable of sensing rich and expressive motion gestures performed both on and directly above the device our hardware comprises of low resolution matrix of infrared ir proximity sensors interspersed between the keys of a regular mechanical keyboard this results in coarse but high frame rate motion data we extend a machine learning algorithm traditionally used for static classification only to robustly support dynamic temporal gestures we propose the use of motion signatures a technique that utilizes pairs of motion history images and a random forest based classifier to robustly recognize a large set of motion gestures on and directly above the keyboard our technique achieves a mean per frame classification accuracy of 75 6 in leave one subject out and 89 9 in half test half training cross validation we detail our hardware and gesture recognition algorithm provide performance and accuracy numbers and demonstrate a large set of gestures designed to be performed with our device we conclude with qualitative feedback from users discussion of limitations and areas for future work", + "title_raw": "Type-hover-swipe in 96 bytes: a motion sensing mechanical keyboard", + "abstract_raw": "We present a new type of augmented mechanical keyboard, capable of sensing rich and expressive motion gestures performed both on and directly above the device. Our hardware comprises of low-resolution matrix of infrared (IR) proximity sensors interspersed between the keys of a regular mechanical keyboard. This results in coarse but high frame-rate motion data. We extend a machine learning algorithm, traditionally used for static classification only, to robustly support dynamic, temporal gestures. 
We propose the use of motion signatures a technique that utilizes pairs of motion history images and a random forest based classifier to robustly recognize a large set of motion gestures on and directly above the keyboard. Our technique achieves a mean per-frame classification accuracy of 75.6% in leave-one-subject-out and 89.9% in half-test/half-training cross-validation. We detail our hardware and gesture recognition algorithm, provide performance and accuracy numbers, and demonstrate a large set of gestures designed to be performed with our device. We conclude with qualitative feedback from users, discussion of limitations and areas for future work.", + "link": "https://www.semanticscholar.org/paper/da71286a0986a8a1864876667811dd92845282f0", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2005996634", + "venue": "1163450153", + "year": "2014", + "title": "effects of display size and navigation type on a classification task", + "label": [ + "170003942", + "56288433", + "107457646", + "36464697", + "138268822" + ], + "author": [ + "2134296105", + "166732564", + "2674390412", + "343525182", + "1925449899" + ], + "reference": [ + "156307932", + "1523378720", + "1552213365", + "1972089560", + "1973460642", + "1973787390", + "1984249915", + "1987063546", + "1995923922", + "2013060577", + "2019137317", + "2022815033", + "2024851719", + "2043935056", + "2066595234", + "2075809334", + "2088916941", + "2101353123", + "2108376501", + "2111981024", + "2118280170", + "2118580158", + "2127406376", + "2146269805", + "2153104460", + "2159262688" + ], + "abstract": "the advent of ultra high resolution wall size displays and their use for complex tasks require a more systematic analysis and deeper understanding of their advantages and drawbacks compared with desktop monitors while previous work has mostly addressed search visualization and sense making tasks we have designed an abstract classification task that involves explicit data manipulation based on 
our observations of real uses of a wall display this task represents a large category of applications we report on a controlled experiment that uses this task to compare physical navigation in front of a wall size display with virtual navigation using pan and zoom on the desktop our main finding is a robust interaction effect between display type and task difficulty while the desktop can be faster than the wall for simple tasks the wall gains a sizable advantage as the task becomes more difficult a follow up study shows that other desktop techniques overview detail lens do not perform better than pan and zoom and are therefore slower than the wall for difficult tasks", + "title_raw": "Effects of display size and navigation type on a classification task", + "abstract_raw": "The advent of ultra-high resolution wall-size displays and their use for complex tasks require a more systematic analysis and deeper understanding of their advantages and drawbacks compared with desktop monitors. While previous work has mostly addressed search, visualization and sense-making tasks, we have designed an abstract classification task that involves explicit data manipulation. Based on our observations of real uses of a wall display, this task represents a large category of applications. We report on a controlled experiment that uses this task to compare physical navigation in front of a wall-size display with virtual navigation using pan-and-zoom on the desktop. Our main finding is a robust interaction effect between display type and task difficulty: while the desktop can be faster than the wall for simple tasks, the wall gains a sizable advantage as the task becomes more difficult. 
A follow-up study shows that other desktop techniques (overview+detail, lens) do not perform better than pan-and-zoom and are therefore slower than the wall for difficult tasks.", + "link": "https://www.semanticscholar.org/paper/7453be46f7026ebe581e8e1500c79c49a66d4447", + "scraped_abstract": null, + "citation_best": 73 + }, + { + "paper": "2094690640", + "venue": "1163450153", + "year": "2014", + "title": "mixfab a mixed reality environment for personal fabrication", + "label": [ + "35173682", + "107457646", + "49774154", + "205711294", + "207347870", + "153715457", + "206776904", + "2777897806" + ], + "author": [ + "2039602682", + "2122980442", + "2151132821", + "2165003359", + "2011140363" + ], + "reference": [ + "1981934656", + "1981963587", + "1992854310", + "2012212916", + "2040842605", + "2051196564", + "2057913402", + "2058352122", + "2073953353", + "2091592567", + "2096493020", + "2099800354", + "2102547268", + "2105021915", + "2109618101", + "2113696470", + "2116820576", + "2118816563", + "2126698653", + "2136217726", + "2142310655", + "2148877839", + "2161304134", + "2167679500", + "2171505762", + "2235981404", + "2246690291" + ], + "abstract": "personal fabrication machines such as 3d printers and laser cutters are becoming increasingly ubiquitous however designing objects for fabrication still requires 3d modeling skills thereby rendering such technologies inaccessible to a wide user group in this paper we introduce mixfab a mixed reality environment for personal fabrication that lowers the barrier for users to engage in personal fabrication users design objects in an immersive augmented reality environment interact with virtual objects in a direct gestural manner and can introduce existing physical objects effortlessly into their designs we describe the design and implementation of mixfab a user defined gesture study that informed this design show artifacts designed with the system and describe a user study evaluating the system s prototype", + 
"title_raw": "MixFab: a mixed-reality environment for personal fabrication", + "abstract_raw": "Personal fabrication machines, such as 3D printers and laser cutters, are becoming increasingly ubiquitous. However, designing objects for fabrication still requires 3D modeling skills, thereby rendering such technologies inaccessible to a wide user-group. In this paper, we introduce MixFab, a mixed-reality environment for personal fabrication that lowers the barrier for users to engage in personal fabrication. Users design objects in an immersive augmented reality environment, interact with virtual objects in a direct gestural manner and can introduce existing physical objects effortlessly into their designs. We describe the design and implementation of MixFab, a user-defined gesture study that informed this design, show artifacts designed with the system and describe a user study evaluating the system's prototype.", + "link": "https://www.semanticscholar.org/paper/609a34a6e2cd9dfe8c1ef727a36b5bf27d4f2cbd", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2047781873", + "venue": "1163450153", + "year": "2014", + "title": "real time feedback for improving medication taking", + "label": [ + "107457646", + "44154836", + "40140605" + ], + "author": [ + "2141119226", + "2111246749" + ], + "reference": [ + "1587563450", + "1594567624", + "1777790150", + "1991625730", + "2001617346", + "2003561474", + "2014272633", + "2021472060", + "2021633655", + "2033296332", + "2041988633", + "2046093156", + "2046626275", + "2059454145", + "2062121040", + "2103789057", + "2120909070", + "2130162735", + "2131671009", + "2134049139", + "2137248440", + "2146882979", + "2147700876", + "2152582606", + "2154973070", + "2169514162", + "2593633484", + "2952953987" + ], + "abstract": "medication taking is a self regulatory process that requires individuals to self monitor their medication taking behaviors but this can be difficult because medication taking is such a mundane 
unremarkable behavior ubiquitous sensing systems have the potential to sense everyday behaviors and provide the objective feedback necessary for self regulation of medication taking we describe an unobtrusive sensing system consisting of a sensor augmented pillbox and an ambient display that provides near real time visual feedback about how well medications are being taken in contrast to other systems that focus on reminding before medication taking our approach uses feedback after medication taking to allow the individual to develop their own routines through self regulation we evaluated this system in the homes of older adults in a 10 month deployment feedback helped improve the consistency of medication taking behaviors as well as increased ratings of self efficacy however the improved performance did not persist after the feedback display was removed because individuals had integrated the feedback display into their routines to support their self awareness identify mistakes guide the timing of medication taking and provide a sense of security that they are taking their medications well finally we reflect on design considerations for feedback systems to support the process of self regulation of everyday behaviors", + "title_raw": "Real-time feedback for improving medication taking", + "abstract_raw": "Medication taking is a self-regulatory process that requires individuals to self-monitor their medication taking behaviors, but this can be difficult because medication taking is such a mundane, unremarkable behavior. Ubiquitous sensing systems have the potential to sense everyday behaviors and provide the objective feedback necessary for self-regulation of medication taking. We describe an unobtrusive sensing system consisting of a sensor-augmented pillbox and an ambient display that provides near real-time visual feedback about how well medications are being taken. 
In contrast to other systems that focus on reminding before medication taking, our approach uses feedback after medication taking to allow the individual to develop their own routines through self-regulation. We evaluated this system in the homes of older adults in a 10-month deployment. Feedback helped improve the consistency of medication-taking behaviors as well as increased ratings of self-efficacy. However, the improved performance did not persist after the feedback display was removed, because individuals had integrated the feedback display into their routines to support their self-awareness, identify mistakes, guide the timing of medication taking, and provide a sense of security that they are taking their medications well. Finally, we reflect on design considerations for feedback systems to support the process of self-regulation of everyday behaviors.", + "link": "https://www.semanticscholar.org/paper/e6d995d73c3ba97f40a840364976690d73fc1fbe", + "scraped_abstract": null, + "citation_best": 67 + }, + { + "paper": "2162409443", + "venue": "1163450153", + "year": "2014", + "title": "structured labeling for facilitating concept evolution in machine learning", + "label": [ + "119857082", + "21959979" + ], + "author": [ + "2007574723", + "2073051687", + "2106722673", + "2113528437", + "2121586545" + ], + "reference": [ + "27674064", + "29050389", + "39957844", + "79383088", + "83593200", + "139044672", + "204776570", + "216460935", + "301677808", + "303127967", + "592829405", + "937107142", + "945147173", + "948638995", + "950482009", + "952369992", + "978875813", + "983732852", + "993291060", + "1001401933", + "1007770941", + "1011278105", + "1021027294", + "1022742273", + "1026806235", + "1036507814", + "1036995229", + "1483330504", + "1493108469", + "1501005121", + "1502571714", + "1507264013", + "1509772896", + "1510824068", + "1517680578", + "1519820146", + "1520209285", + "1523794535", + "1527593353", + "1530755521", + "1534753945", + "1541368278", + 
"1546429744", + "1549909567", + "1551045851", + "1554829397", + "1556861584", + "1565745825", + "1567630777", + "1570169799", + "1576106211", + "1576667197", + "1577085794", + "1582081223", + "1582403853", + "1591119786", + "1593271214", + "1598194964", + "1604542852", + "1608224733", + "1655591981", + "1666942233", + "1736726159", + "1748180122", + "1760018428", + "1766442844", + "1812525299", + "1812816435", + "1827380967", + "1837064647", + "1844936689", + "1862248568", + "1867849544", + "1873332500", + "1878479415", + "1881168306", + "1885089433", + "1903215186", + "1922685798", + "1926997511", + "1928047241", + "1937097906", + "1943883132", + "1947064154", + "1952374089", + "1955441067", + "1960014134", + "1963397501", + "1970736040", + "2002277714", + "2022775778", + "2048987619", + "2049050102", + "2062142366", + "2069324208", + "2081798681", + "2112726802", + "2117664769", + "2124191450", + "2125352857", + "2125943921", + "2129407262", + "2135880665", + "2137799220", + "2140853685", + "2142813453", + "2152311362", + "2154887314", + "2157018954", + "2157504474", + "2159048649", + "2161937612", + "2167708792", + "2180443860", + "2200801832", + "2203634713", + "2203853577", + "2206023774", + "2206651108", + "2220857793", + "2235685865", + "2249399938", + "2258436308", + "2264719273", + "2267992487", + "2268347781", + "2268410595", + "2274531542", + "2277881186", + "2279813823", + "2286544501", + "2294975956", + "2298202653", + "2300607195", + "2300755140", + "2306745500", + "2308569538", + "2309146138", + "2396707212", + "2398872579", + "2400128484", + "2401671375", + "2405804680", + "2413433895", + "2414522420", + "2533923853", + "2966207845", + "3141116257", + "3142582179", + "3202461034" + ], + "abstract": "labeling data is a seemingly simple task required for training many machine learning systems but is actually fraught with problems this paper introduces the notion of concept evolution the changing nature of a person s underlying concept the abstract 
notion of the target class a person is labeling for e g spam email travel related web pages which can result in inconsistent labels and thus be detrimental to machine learning we introduce two structured labeling solutions a novel technique we propose for helping people define and refine their concept in a consistent manner as they label through a series of five experiments including a controlled lab study we illustrate the impact and dynamics of concept evolution in practice and show that structured labeling helps people label more consistently in the presence of concept evolution than traditional labeling", + "title_raw": "Structured labeling for facilitating concept evolution in machine learning", + "abstract_raw": "Labeling data is a seemingly simple task required for training many machine learning systems, but is actually fraught with problems. This paper introduces the notion of concept evolution, the changing nature of a person's underlying concept (the abstract notion of the target class a person is labeling for, e.g., spam email, travel related web pages) which can result in inconsistent labels and thus be detrimental to machine learning. We introduce two structured labeling solutions, a novel technique we propose for helping people define and refine their concept in a consistent manner as they label. 
Through a series of five experiments, including a controlled lab study, we illustrate the impact and dynamics of concept evolution in practice and show that structured labeling helps people label more consistently in the presence of concept evolution than traditional labeling.", + "link": "https://www.semanticscholar.org/paper/c8ffe876bfbe85231b236a9af35ad7a2d2ccfcef", + "scraped_abstract": null, + "citation_best": 99 + }, + { + "paper": "2117160827", + "venue": "1163450153", + "year": "2014", + "title": "towards accurate and practical predictive models of active vision based visual search", + "label": [ + "119857082", + "196074963", + "154945302", + "158495155", + "193611912", + "2776459999", + "108265739", + "153050134", + "160086991" + ], + "author": [ + "528685754", + "227084865" + ], + "reference": [ + "118500641", + "295659407", + "633460564", + "645399019", + "1519461085", + "1545251799", + "1600895443", + "1963693586", + "1964720454", + "1991691398", + "2021703305", + "2021898394", + "2026558133", + "2035080477", + "2046433956", + "2047923402", + "2063567871", + "2069571208", + "2075458572", + "2080231239", + "2085781384", + "2093255195", + "2110014716", + "2111348442", + "2130025274", + "2139973114", + "2140071154", + "2159443917", + "2164279716", + "2169813489", + "2506147923", + "2620134654", + "2912296668", + "2914331897", + "2985154343" + ], + "abstract": "being able to predict the performance of interface designs using models of human cognition and performance is a long standing goal of hci research this paper presents recent advances in cognitive modeling which permit increasingly realistic and accurate predictions for visual human computer interaction tasks such as icon search by incorporating an active vision approach which emphasizes eye movements to visual features based on the availability of features in relationship to the point of gaze a high fidelity model of a classic visual search task demonstrates the value of incorporating visual acuity 
functions into models of visual performance the features captured by the high fidelity model are then used to formulate a model simple enough for practical use which is then implemented in an easy to use glean modeling tool easy to use predictive models for complex visual search are thus feasible and should be further developed", + "title_raw": "Towards accurate and practical predictive models of active-vision-based visual search", + "abstract_raw": "Being able to predict the performance of interface designs using models of human cognition and performance is a long-standing goal of HCI research. This paper presents recent advances in cognitive modeling which permit increasingly realistic and accurate predictions for visual human-computer interaction tasks such as icon search by incorporating an \"active vision\" approach which emphasizes eye movements to visual features based on the availability of features in relationship to the point of gaze. A high fidelity model of a classic visual search task demonstrates the value of incorporating visual acuity functions into models of visual performance. The features captured by the high-fidelity model are then used to formulate a model simple enough for practical use, which is then implemented in an easy-to-use GLEAN modeling tool. 
Easy-to-use predictive models for complex visual search are thus feasible and should be further developed.", + "link": "https://www.semanticscholar.org/paper/134b2bc694ef207b5421bdf21fc6993f184a352e", + "scraped_abstract": null, + "citation_best": 31 + }, + { + "paper": "2051295909", + "venue": "1163450153", + "year": "2014", + "title": "retrodepth 3d silhouette sensing for high precision input on and above physical surfaces", + "label": [ + "124504099", + "58103923", + "164086593", + "64729616", + "159437735", + "43521106", + "202474056", + "31972630", + "121684516", + "77660652" + ], + "author": [ + "2151132821", + "2098553916", + "2127648044", + "2244942548", + "2206727272", + "2156938214", + "2046931170", + "2310154819", + "1966862064", + "2144745162", + "3181417196", + "2293620524", + "2236735721" + ], + "reference": [ + "137024741", + "1260036880", + "1500711968", + "1964057156", + "1968211101", + "1979846720", + "1984341229", + "2003941864", + "2008150314", + "2009678897", + "2011217110", + "2034664912", + "2036471813", + "2038710267", + "2044109592", + "2056554851", + "2081556881", + "2100491474", + "2101577246", + "2104623069", + "2104974755", + "2105013814", + "2109618101", + "2110964220", + "2112976440", + "2113696470", + "2115030409", + "2117466280", + "2118041291", + "2125220008", + "2132669551", + "2134418753", + "2142482579", + "2148819007", + "2148877839", + "2159839630", + "2167023109", + "2172156083", + "2172160185", + "2397908591", + "2911964244" + ], + "abstract": "we present retrodepth a new vision based system for accurately sensing the 3d silhouettes of hands styluses and other objects as they interact on and above physical surfaces our setup is simple cheap and easily reproducible comprising of two infrared cameras diffuse infrared leds and any off the shelf retro reflective material the retro reflector aids image segmentation creating a strong contrast between the surface and any object in proximity a new highly efficient stereo matching 
algorithm precisely estimates the 3d contours of interacting objects and the retro reflective surfaces a novel pipeline enables 3d finger hand and object tracking as well as gesture recognition purely using these 3d contours we demonstrate high precision sensing allowing robust disambiguation between a finger or stylus touching pressing or interacting above the surface this allows many interactive scenarios that seamlessly mix together freehand 3d interactions with touch pressure and stylus input as shown these rich modalities of input are enabled on and above any retro reflective surface including custom physical widgets fabricated by users we compare our system with kinect and leap motion and conclude with limitations and future work", + "title_raw": "RetroDepth: 3D silhouette sensing for high-precision input on and above physical surfaces", + "abstract_raw": "We present RetroDepth, a new vision-based system for accurately sensing the 3D silhouettes of hands, styluses, and other objects, as they interact on and above physical surfaces. Our setup is simple, cheap, and easily reproducible, comprising of two infrared cameras, diffuse infrared LEDs, and any off-the-shelf retro-reflective material. The retro-reflector aids image segmentation, creating a strong contrast between the surface and any object in proximity. A new highly efficient stereo matching algorithm precisely estimates the 3D contours of interacting objects and the retro-reflective surfaces. A novel pipeline enables 3D finger, hand and object tracking, as well as gesture recognition, purely using these 3D contours. We demonstrate high-precision sensing, allowing robust disambiguation between a finger or stylus touching, pressing or interacting above the surface. This allows many interactive scenarios that seamlessly mix together freehand 3D interactions with touch, pressure and stylus input. 
As shown, these rich modalities of input are enabled on and above any retro-reflective surface, including custom \"physical widgets\" fabricated by users. We compare our system with Kinect and Leap Motion, and conclude with limitations and future work.", + "link": "https://www.semanticscholar.org/paper/d331c4a0307b83efdf597eb4c74512f3ab4fd243", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2124910144", + "venue": "1163450153", + "year": "2014", + "title": "understanding multitasking through parallelized strategy exploration and individualized cognitive modeling", + "label": [ + "83283714", + "107457646", + "98397804", + "79403827" + ], + "author": [ + "2147081021", + "227084865" + ], + "reference": [ + "182486277", + "1887131224", + "1964154547", + "1985101153", + "1991691398", + "1994788840", + "2021898394", + "2026558133", + "2043143027", + "2053216388", + "2064919653", + "2099794055", + "2106332947", + "2108148331", + "2121012460", + "2127852655" + ], + "abstract": "human multitasking often involves complex task interactions and subtle tradeoffs which might be best understood through detailed computational cognitive modeling yet traditional cognitive modeling approaches may not explore a sufficient range of task strategies to reveal the true complexity of multitasking behavior this study proposes a systematic approach for exploring a large number of strategies using a computer cluster based parallelized modeling system the paper demonstrates the efficacy of the approach for investigating and revealing the effects of different microstrategies on human performance both within and across individuals for a time pressured multimodal dual task the modeling results suggest that multitasking performance is not simply a matter of interleaving cognitive and sensorimotor processing but is instead heavily influenced by the selection of subtask microstrategies", + "title_raw": "Understanding multitasking through parallelized strategy exploration and 
individualized cognitive modeling", + "abstract_raw": "Human multitasking often involves complex task interactions and subtle tradeoffs which might be best understood through detailed computational cognitive modeling, yet traditional cognitive modeling approaches may not explore a sufficient range of task strategies to reveal the true complexity of multitasking behavior. This study proposes a systematic approach for exploring a large number of strategies using a computer-cluster-based parallelized modeling system. The paper demonstrates the efficacy of the approach for investigating and revealing the effects of different microstrategies on human performance, both within and across individuals, for a time-pressured multimodal dual task. The modeling results suggest that multitasking performance is not simply a matter of interleaving cognitive and sensorimotor processing but is instead heavily influenced by the selection of subtask microstrategies.", + "link": "https://scholar.google.com/scholar?as_q=&num=10&btnG=Search+Scholar&as_epq=Understanding+Multitasking+Through+Parallelized+Strategy+Exploration+and+Individualized+Cognitive+Modeling&as_oq=&as_eq=&as_occt=any&as_sauthors=Zhang", + "scraped_abstract": null, + "citation_best": 19 + }, + { + "paper": "2106612861", + "venue": "1163450153", + "year": "2014", + "title": "making sustainability sustainable challenges in the design of eco interaction technologies", + "label": [ + "2775924081", + "44154836", + "200632571", + "507571656" + ], + "author": [ + "2231698199", + "2193817519", + "2068668851" + ], + "reference": [ + "1784994949", + "1966392344", + "1980272186", + "1998276047", + "2005673708", + "2020488522", + "2022799832", + "2032053597", + "2059216172", + "2063594338", + "2096667387", + "2108297840", + "2126215470", + "2127523455", + "2133270667", + "2152710862", + "2166591893", + "2168569455", + "2169508077", + "2321058054" + ], + "abstract": "the smart home is here one area where smart home devices promise 
to deliver great benefits is in the control of home heating ventilation and cooling hvac systems in this paper we seek to inform the design of future heating and cooling systems by investigating users experiences with the nest learning thermostat a commercially available smart home device we conducted a qualitative study where we compared people s interactions with conventional thermostats with interactions with the nest a key finding was that the nest impacted users pattern of hvac control but only for a while and caused new problems in unrealized energy savings in leveraging these findings we create a set of design implications for eco interaction the design of features and human system interactions with the goal of saving energy", + "title_raw": "Making sustainability sustainable: challenges in the design of eco-interaction technologies", + "abstract_raw": "The smart home is here. One area where smart home devices promise to deliver great benefits is in the control of home heating, ventilation, and cooling (HVAC) systems. In this paper, we seek to inform the design of future heating and cooling systems by investigating users' experiences with the Nest Learning Thermostat, a commercially available smart home device. We conducted a qualitative study where we compared people's interactions with conventional thermostats with interactions with the Nest. A key finding was that the Nest impacted users' pattern of HVAC control, but only for a while, and caused new problems in unrealized energy savings. 
In leveraging these findings, we create a set of design implications for Eco-Interaction, the design of features and human-system interactions with the goal of saving energy.", + "link": "https://www.semanticscholar.org/paper/7ce8c6343d2d4a9a2c6e2cda1d09a702aa1d0c00", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2102147011", + "venue": "1199533187", + "year": "2014", + "title": "selection and presentation practices for code example summarization", + "label": [ + "170858558", + "2776063143", + "2777904410", + "136764020", + "43126263", + "23123220" + ], + "author": [ + "2033865055", + "2136878537" + ], + "reference": [ + "90447038", + "140624466", + "1645937837", + "1972420078", + "1974311367", + "1986541713", + "1991613282", + "2004089651", + "2010887646", + "2022090474", + "2046253855", + "2048207804", + "2051204868", + "2066972299", + "2081749632", + "2082160726", + "2090111344", + "2090299557", + "2099332975", + "2101390659", + "2102065370", + "2104577574", + "2113610359", + "2114315419", + "2122156963", + "2124658502", + "2127190390", + "2133333349", + "2134829794", + "2137591918", + "2139005824", + "2140183398", + "2142403498", + "2153150125", + "2153887189", + "2161847731", + "2612304202", + "3148527572", + "3151369355" + ], + "abstract": "code examples are an important source for answering questions about software libraries and applications many usage contexts for code examples require them to be distilled to their essence e g when serving as cues to longer documents or for reminding developers of a previously known idiom we conducted a study to discover how code can be summarized and why as part of the study we collected 156 pairs of code examples and their summaries from 16 participants along with over 26 hours of think aloud verbalizations detailing the decisions of the participants during their summarization activities based on a qualitative analysis of this data we elicited a list of practices followed by the participants to 
summarize code examples and propose empirically supported hypotheses justifying the use of specific practices one main finding was that none of the participants exclusively extracted code verbatim for the summaries motivating abstractive summarization the results provide a grounded basis for the development of code example summarization and presentation technology", + "title_raw": "Selection and presentation practices for code example summarization", + "abstract_raw": "Code examples are an important source for answering questions about software libraries and applications. Many usage contexts for code examples require them to be distilled to their essence: e.g., when serving as cues to longer documents, or for reminding developers of a previously known idiom. We conducted a study to discover how code can be summarized and why. As part of the study, we collected 156 pairs of code examples and their summaries from 16 participants, along with over 26 hours of think-aloud verbalizations detailing the decisions of the participants during their summarization activities. Based on a qualitative analysis of this data we elicited a list of practices followed by the participants to summarize code examples and propose empirically-supported hypotheses justifying the use of specific practices. One main finding was that none of the participants exclusively extracted code verbatim for the summaries, motivating abstractive summarization. 
The results provide a grounded basis for the development of code example summarization and presentation technology.", + "link": "https://www.semanticscholar.org/paper/84083ccc9e2a8f6f7341c9166f80c8e1b3e25c74", + "scraped_abstract": null, + "citation_best": 34 + }, + { + "paper": "2140609933", + "venue": "1199533187", + "year": "2014", + "title": "learning natural coding conventions", + "label": [ + "51929080", + "64729616", + "154504017", + "150292731", + "146054899", + "2778357539", + "136764020", + "43126263" + ], + "author": [ + "1792257219", + "2169920116", + "2142366035", + "2113665458" + ], + "reference": [ + "84082018", + "90447038", + "91975816", + "188027806", + "215031598", + "1244965290", + "1490674333", + "1551431154", + "1560914538", + "1579838312", + "1608271177", + "1673239900", + "1968278615", + "1970607969", + "1984314602", + "1991435895", + "1994573369", + "2010595692", + "2010887646", + "2014577207", + "2016871293", + "2023312039", + "2062947167", + "2072384494", + "2077155146", + "2079729471", + "2082743655", + "2091990486", + "2097998348", + "2101105183", + "2103910676", + "2104364184", + "2111282827", + "2113157806", + "2113697305", + "2115998851", + "2116395532", + "2117642462", + "2123044578", + "2123586642", + "2126793110", + "2128990852", + "2130388803", + "2132103315", + "2133247167", + "2142403498", + "2143960295", + "2144517582", + "2148190602", + "2148854374", + "2150207557", + "2152474046", + "2153804780", + "2154285016", + "2154652894", + "2156050524", + "2156448859", + "2159739762", + "2166597811", + "2166667242", + "2170546198", + "2273599507", + "2312417930", + "2466101837", + "2950186769", + "2998461326", + "2998993395", + "3141465105", + "3149750418" + ], + "abstract": "every programmer has a characteristic style ranging from preferences about identifier naming to preferences about object relationships and design patterns coding conventions define a consistent syntactic style fostering readability and hence maintainability when 
collaborating programmers strive to obey a project s coding conventions however one third of reviews of changes contain feedback about coding conventions indicating that programmers do not always follow them and that project members care deeply about adherence unfortunately programmers are often unaware of coding conventions because inferring them requires a global view one that aggregates the many local decisions programmers make and identifies emergent consensus on style we present naturalize a framework that learns the style of a codebase and suggests revisions to improve stylistic consistency naturalize builds on recent work in applying statistical natural language processing to source code we apply naturalize to suggest natural identifier names and formatting conventions we present four tools focused on ensuring natural code during development and release management including code review naturalize achieves 94 accuracy in its top suggestions for identifier names we used naturalize to generate 18 patches for 5 open source projects 14 were accepted", + "title_raw": "Learning natural coding conventions", + "abstract_raw": "Every programmer has a characteristic style, ranging from preferences about identifier naming to preferences about object relationships and design patterns. Coding conventions define a consistent syntactic style, fostering readability and hence maintainability. When collaborating, programmers strive to obey a project\u2019s coding conventions. However, one third of reviews of changes contain feedback about coding conventions, indicating that programmers do not always follow them and that project members care deeply about adherence. Unfortunately, programmers are often unaware of coding conventions because inferring them requires a global view, one that aggregates the many local decisions programmers make and identifies emergent consensus on style. 
We present NATURALIZE, a framework that learns the style of a codebase, and suggests revisions to improve stylistic consistency. NATURALIZE builds on recent work in applying statistical natural language processing to source code. We apply NATURALIZE to suggest natural identifier names and formatting conventions. We present four tools focused on ensuring natural code during development and release management, including code review. NATURALIZE achieves 94 % accuracy in its top suggestions for identifier names. We used NATURALIZE to generate 18 patches for 5 open source projects: 14 were accepted.", + "link": "https://www.semanticscholar.org/paper/93ff001eb7ddd019c107879943126c74a973993b", + "scraped_abstract": null, + "citation_best": 377 + }, + { + "paper": "2041235686", + "venue": "1199533187", + "year": "2014", + "title": "ai a lightweight system for tolerating concurrency bugs", + "label": [ + "168065819", + "138101251", + "193702766", + "188045909", + "91587340", + "120314980", + "117447612" + ], + "author": [ + "2140025058", + "2118010187", + "2111557135", + "2130425262", + "2103704713", + "2137330571" + ], + "reference": [ + "1478599539", + "1582681289", + "1887412317", + "2049381173", + "2061504544", + "2084719450", + "2096155624", + "2101161997", + "2101785823", + "2104000753", + "2108637333", + "2110852599", + "2110908283", + "2113027907", + "2115015193", + "2119567442", + "2120027538", + "2120476011", + "2135395375", + "2135948849", + "2137270184", + "2138555106", + "2145021036", + "2148011508", + "2150290060", + "2150602145", + "2153185479", + "2153229512", + "2153723363", + "2159856414", + "2160983664", + "2164530016", + "2171008784", + "2251066406", + "3142284768", + "3145128584", + "3151125684" + ], + "abstract": "concurrency bugs are notoriously difficult to eradicate during software testing because of their non deterministic nature moreover fixing concurrency bugs is time consuming and error prone thus tolerating concurrency bugs during production 
runs is an attractive complementary approach to bug detection and testing unfortunately existing bug tolerating tools are usually either 1 constrained in types of bugs they can handle or 2 requiring roll back mechanism which can hitherto not be fully achieved efficiently without hardware supports this paper presents a novel program invariant called anticipating invariant ai which can help anticipate bugs before any irreversible changes are made benefiting from this ability of anticipating bugs beforehand our software only system is able to forestall the failures with a simple thread stalling technique which does not rely on execution roll back and hence has good performance experiments with 35 real world concurrency bugs demonstrate that ai is capable of detecting and tolerating most types of concurrency bugs including both atomicity and order violations two new bugs have been detected and confirmed by the corresponding developers performance evaluation with 6 representative parallel programs shows that ai incurs negligible overhead", + "title_raw": "AI: a lightweight system for tolerating concurrency bugs", + "abstract_raw": "Concurrency bugs are notoriously difficult to eradicate during software testing because of their non-deterministic nature. Moreover, fixing concurrency bugs is time-consuming and error-prone. Thus, tolerating concurrency bugs during production runs is an attractive complementary approach to bug detection and testing. Unfortunately, existing bug-tolerating tools are usually either 1) constrained in types of bugs they can handle or 2) requiring roll-back mechanism, which can hitherto not be fully achieved efficiently without hardware supports. This paper presents a novel program invariant, called Anticipating Invariant (AI), which can help anticipate bugs before any irreversible changes are made. 
Benefiting from this ability of anticipating bugs beforehand, our software-only system is able to forestall the failures with a simple thread stalling technique, which does not rely on execution roll-back and hence has good performance Experiments with 35 real-world concurrency bugs demonstrate that AI is capable of detecting and tolerating most types of concurrency bugs, including both atomicity and order violations. Two new bugs have been detected and confirmed by the corresponding developers. Performance evaluation with 6 representative parallel programs shows that AI incurs negligible overhead (", + "link": "https://www.semanticscholar.org/paper/e279996ed2edb1c7fcd6dd0b1ef6bc909b70a9ef", + "scraped_abstract": null, + "citation_best": 13 + }, + { + "paper": "2096146112", + "venue": "1199533187", + "year": "2014", + "title": "powering the static driver verifier using corral", + "label": [ + "2775980278", + "113775141", + "2779444136", + "134757568", + "48044578", + "33054407", + "199360897" + ], + "author": [ + "2123176980", + "1985363956" + ], + "reference": [ + "1507006488", + "1606177908", + "1606540187", + "2076285066", + "2105614525", + "2107002931", + "2109863363", + "2111050128", + "2123636373", + "2129538349", + "2134875273", + "2144552646", + "2218365969" + ], + "abstract": "the application of software verification technology towards building realistic bug finding tools requires working through several precision scalability tradeoffs for instance a critical aspect while dealing with c programs is to formally define the treatment of pointers and the heap a machine level modeling is often intractable whereas one that leverages high level information such as types can be inaccurate another tradeoff is modeling integer arithmetic ideally all arithmetic should be performed over bitvector representations whereas the current practice in most tools is to use mathematical integers for scalability a third tradeoff in the context of bounded program exploration is 
to choose a bound that ensures high coverage without overwhelming the analysis this paper works through these three tradeoffs when we applied corral an smt based verifier inside microsoft s static driver verifier sdv our decisions were guided by experimentation on a large set of drivers the total verification time exceeded well over a month we justify that each of our decisions were crucial in getting value out of corral and led to corral being accepted as the engine that powers sdv in the windows 8 1 release replacing the slam engine that had been used inside sdv for the past decade", + "title_raw": "Powering the static driver verifier using corral", + "abstract_raw": "The application of software-verification technology towards building realistic bug-finding tools requires working through several precision-scalability tradeoffs. For instance, a critical aspect while dealing with C programs is to formally define the treatment of pointers and the heap. A machine-level modeling is often intractable, whereas one that leverages high-level information (such as types) can be inaccurate. Another tradeoff is modeling integer arithmetic. Ideally, all arithmetic should be performed over bitvector representations whereas the current practice in most tools is to use mathematical integers for scalability. A third tradeoff, in the context of bounded program exploration, is to choose a bound that ensures high coverage without overwhelming the analysis. This paper works through these three tradeoffs when we applied Corral, an SMT-based verifier, inside Microsoft's Static Driver Verifier (SDV). Our decisions were guided by experimentation on a large set of drivers; the total verification time exceeded well over a month. 
We justify that each of our decisions were crucial in getting value out of Corral and led to Corral being accepted as the engine that powers SDV in the Windows 8.1 release, replacing the SLAM engine that had been used inside SDV for the past decade.", + "link": "https://www.semanticscholar.org/paper/ecf85c26d3a7dc1dffad51f06ccc8bbc0b8461f7", + "scraped_abstract": null, + "citation_best": 40 + }, + { + "paper": "159693449", + "venue": "1180662882", + "year": "2014", + "title": "understanding the limiting factors of topic modeling via posterior contraction analysis", + "label": [ + "119857082", + "171686336", + "500882744", + "136197465" + ], + "author": [ + "2586239148", + "2125701068", + "2298170834", + "2166036605", + "2618493248" + ], + "reference": [ + "1552178784", + "1880262756", + "1979809564", + "1982474113", + "1989611747", + "2001082470", + "2063904635", + "2098126593", + "2100858341", + "2103587173", + "2128521126", + "2130978632", + "2135790056", + "2137958601", + "2141056424", + "2141441158", + "2148111240", + "2152706498", + "2153383412", + "2168332560", + "2170798317", + "2171319841", + "3103438822" + ], + "abstract": "topic models such as the latent dirichlet allocation lda have become a standard staple in the modeling toolbox of machine learning they have been applied to a vast variety of data sets contexts and tasks to varying degrees of success however to date there is almost no formal theory explicating the lda s behavior and despite its familiarity there is very little systematic analysis of and guidance on the properties of the data that affect the inferential performance of the model this paper seeks to address this gap by providing a systematic analysis of factors which characterize the lda s performance we present theorems elucidating the posterior contraction rates of the topics as the amount of data increases and a thorough supporting empirical study using synthetic and real data sets including news and web based articles and tweet 
messages based on these results we provide practical guidance on how to identify suitable data sets for topic models and how to specify particular model parameters", + "title_raw": "Understanding the Limiting Factors of Topic Modeling via Posterior Contraction Analysis", + "abstract_raw": "Topic models such as the latent Dirichlet allocation (LDA) have become a standard staple in the modeling toolbox of machine learning. They have been applied to a vast variety of data sets, contexts, and tasks to varying degrees of success. However, to date there is almost no formal theory explicating the LDA's behavior, and despite its familiarity there is very little systematic analysis of and guidance on the properties of the data that affect the inferential performance of the model. This paper seeks to address this gap, by providing a systematic analysis of factors which characterize the LDA's performance. We present theorems elucidating the posterior contraction rates of the topics as the amount of data increases, and a thorough supporting empirical study using synthetic and real data sets, including news and web-based articles and tweet messages. 
Based on these results we provide practical guidance on how to identify suitable data sets for topic models, and how to specify particular model parameters.", + "link": "https://www.semanticscholar.org/paper/9975e9b3c1ab964f86bf4a553ce23cec43b567bc", + "scraped_abstract": null, + "citation_best": 166 + }, + { + "paper": "2056659466", + "venue": "1174403976", + "year": "2014", + "title": "a study and toolkit for asynchronous programming in c", + "label": [ + "152752567", + "167955471", + "8973012", + "199360897", + "173608175", + "2778361913", + "204495577", + "115903868", + "151319957" + ], + "author": [ + "2095944012", + "2659908903", + "602257659", + "2155442793" + ], + "reference": [ + "104529784", + "1587375298", + "1750514695", + "1991613282", + "2008512073", + "2028889016", + "2048960787", + "2054788588", + "2063989349", + "2091939412", + "2112939580", + "2144778294", + "2148189199", + "2154563336", + "2154705416" + ], + "abstract": "asynchronous programming is in demand today because responsiveness is increasingly important on all modern devices yet we know little about how developers use asynchronous programming in practice without such knowledge developers researchers language and library designers and tool providers can make wrong assumptions we present the first study that analyzes the usage of asynchronous programming in a large experiment we analyzed 1378 open source windows phone wp apps comprising 12m sloc produced by 3376 developers using this data we answer 2 research questions about use and misuse of asynchronous constructs inspired by these findings we developed i asyncifier an automated refactoring tool that converts callback based asynchronous code to use async await ii corrector a tool that finds and corrects common misuses of async await our empirical evaluation shows that these tools are i applicable and ii efficient developers accepted 314 patches generated by our tools", + "title_raw": "A study and toolkit for asynchronous programming in 
c", + "abstract_raw": "Asynchronous programming is in demand today, because responsiveness is increasingly important on all modern devices. Yet, we know little about how developers use asynchronous programming in practice. Without such knowledge, developers, researchers, language and library designers, and tool providers can make wrong assumptions. We present the first study that analyzes the usage of asynchronous programming in a large experiment. We analyzed 1378 open source Windows Phone (WP) apps, comprising 12M SLOC, produced by 3376 developers. Using this data, we answer 2 research questions about use and misuse of asynchronous constructs. Inspired by these findings, we developed (i) Asyncifier, an automated refactoring tool that converts callback-based asynchronous code to use async/await; (ii) Corrector, a tool that finds and corrects common misuses of async/await. Our empirical evaluation shows that these tools are (i) applicable and (ii) efficient. Developers accepted 314 patches generated by our tools.", + "link": "https://www.semanticscholar.org/paper/97775f514df60582b14e9473cd86a510ed941e56", + "scraped_abstract": null, + "citation_best": 54 + }, + { + "paper": "2080395944", + "venue": "1174403976", + "year": "2014", + "title": "cowboys ankle sprains and keepers of quality how is video game development different from software development", + "label": [ + "6907630", + "7591567", + "146508145", + "503285160", + "49774154", + "35674477", + "170828538", + "47187476", + "41826821", + "80646779", + "54276265" + ], + "author": [ + "2342860276", + "2014339847", + "2031695674" + ], + "reference": [ + "62945607", + "84082018", + "594814506", + "613315223", + "1557454735", + "1560914538", + "1864072592", + "1887412317", + "1967475102", + "1971137495", + "1975911483", + "1984108206", + "2000585051", + "2006537209", + "2020340052", + "2027312037", + "2040493112", + "2044166130", + "2052804452", + "2053692450", + "2085909931", + "2095938258", + "2097009687", + 
"2105678592", + "2110065044", + "2117852815", + "2118466151", + "2124469657", + "2134062730", + "2139974422", + "2141722816", + "2144846415", + "2161889293", + "2165136898", + "2169090130", + "2182002372", + "2316055893", + "2494418021", + "3121187712" + ], + "abstract": "video games make up an important part of the software industry yet the software engineering community rarely studies video games this imbalance is a problem if video game development differs from general software development as some game experts suggest in this paper we describe a study with 14 interviewees and 364 survey respondents the study elicited substantial differences between video game development and other software development for example in game development cowboy coders are necessary to cope with the continuous interplay between creative desires and technical constraints consequently game developers are hesitant to use automated testing because of these tests rapid obsolescence in the face of shifting creative desires of game designers these differences between game and non game development have implications for research industry and practice for instance as a starting point for impacting game development researchers could create testing tools that enable game developers to create tests that assert flexible behavior with little up front investment", + "title_raw": "Cowboys, ankle sprains, and keepers of quality: how is video game development different from software development?", + "abstract_raw": "Video games make up an important part of the software industry, yet the software engineering community rarely studies video games. This imbalance is a problem if video game development differs from general software development, as some game experts suggest. In this paper we describe a study with 14 interviewees and 364 survey respondents. The study elicited substantial differences between video game development and other software development. 
For example, in game development, \u201ccowboy coders\u201d are necessary to cope with the continuous interplay between creative desires and technical constraints. Consequently, game developers are hesitant to use automated testing because of these tests\u2019 rapid obsolescence in the face of shifting creative desires of game designers. These differences between game and non-game development have implications for research, industry, and practice. For instance, as a starting point for impacting game development, researchers could create testing tools that enable game developers to create tests that assert flexible behavior with little up-front investment.", + "link": "https://scholar.google.com/scholar?as_q=&num=10&btnG=Search+Scholar&as_epq=Cowboys,+Ankle+Sprains,+and+Keepers+of+Quality:+How+Is+Video+Game+Development+Different+from+Software+Development?&as_oq=&as_eq=&as_occt=any&as_sauthors=Murphy-Hill", + "scraped_abstract": null, + "citation_best": 170 + }, + { + "paper": "1970005004", + "venue": "1174403976", + "year": "2014", + "title": "enhancing symbolic execution with veritesting", + "label": [ + "23123167", + "53942775", + "46135064", + "164866538", + "2779639559", + "199360897", + "173608175", + "11219265", + "2777904410" + ], + "author": [ + "2164280241", + "1981989542", + "2282250597", + "2157466369" + ], + "reference": [ + "17195072", + "49922133", + "157156687", + "1480909796", + "1491178396", + "1497028280", + "1506510492", + "1544225867", + "1585877844", + "1591856965", + "1710734607", + "1966982815", + "1977902681", + "1979693894", + "1992431017", + "2000414759", + "2002479196", + "2009489720", + "2042033151", + "2082172430", + "2094382938", + "2095551048", + "2096449544", + "2101430044", + "2101512909", + "2107147876", + "2113864883", + "2121757479", + "2132897303", + "2133612077", + "2134633067", + "2137530017", + "2138788987", + "2140311411", + "2140809377", + "2151585646", + "2153185479", + "2154897437", + "2158798798", + "2164419371", + 
"2164836255", + "2171469152", + "2281785532" + ], + "abstract": "we present mergepoint a new binary only symbolic execution system for large scale and fully unassisted testing of commodity off the shelf cots software mergepoint introduces veritesting a new technique that employs static symbolic execution to amplify the effect of dynamic symbolic execution veritesting allows mergepoint to find twice as many bugs explore orders of magnitude more paths and achieve higher code coverage than previous dynamic symbolic execution systems mergepoint is currently running daily on a 100 node cluster analyzing 33 248 linux binaries has generated more than 15 billion smt queries 200 million test cases 2 347 420 crashes and found 11 687 bugs in 4 379 distinct applications", + "title_raw": "Enhancing symbolic execution with veritesting", + "abstract_raw": "We present MergePoint, a new binary-only symbolic execution system for large-scale and fully unassisted testing of commodity off-the-shelf (COTS) software. MergePoint introduces veritesting, a new technique that employs static symbolic execution to amplify the effect of dynamic symbolic execution. Veritesting allows MergePoint to find twice as many bugs, explore orders of magnitude more paths, and achieve higher code coverage than previous dynamic symbolic execution systems. 
MergePoint is currently running daily on a 100 node cluster analyzing 33,248 Linux binaries; has generated more than 15 billion SMT queries, 200 million test cases, 2,347,420 crashes, and found 11,687 bugs in 4,379 distinct applications.", + "link": "https://www.semanticscholar.org/paper/bb1c9607b9ec23de4b5ef22ae1b77c9a22f3f956", + "scraped_abstract": null, + "citation_best": 212 + }, + { + "paper": "2113610359", + "venue": "1174403976", + "year": "2014", + "title": "improving automated source code summarization via an eye tracking study of programmers", + "label": [ + "167955471", + "134714966", + "548217200", + "43126263", + "56461940", + "2778514511", + "199360897", + "170858558", + "2777561058" + ], + "author": [ + "2223398578", + "2101030568", + "2076036746", + "2122185516", + "2012641152" + ], + "reference": [ + "1553570763", + "1806889067", + "1818980659", + "1951734032", + "1959689011", + "1964679590", + "1965237437", + "1965976058", + "1967995512", + "1978755715", + "1982355305", + "1989526951", + "1990190154", + "1991514546", + "1993139624", + "1994683471", + "1995099952", + "2000743515", + "2001258366", + "2007247913", + "2010076961", + "2013788150", + "2018844270", + "2019528623", + "2021538299", + "2027752285", + "2031043538", + "2034688095", + "2046253855", + "2057049321", + "2061742702", + "2064081154", + "2070111972", + "2081749632", + "2082160726", + "2085710993", + "2088695619", + "2097001189", + "2097750323", + "2099628750", + "2099706779", + "2109125971", + "2112351052", + "2115501754", + "2115998851", + "2117228548", + "2120076005", + "2120101742", + "2133333349", + "2133890582", + "2139374478", + "2140644905", + "2142391150", + "2148357053", + "2150685905", + "2151996389", + "2161847731", + "2163745066", + "2166879716", + "2180107243", + "2284840260", + "2325343629" + ], + "abstract": "source code summarization is an emerging technology for automatically generating brief descriptions of code current summarization techniques work by selecting a 
subset of the statements and keywords from the code and then including information from those statements and keywords in the summary the quality of the summary depends heavily on the process of selecting the subset a high quality selection would contain the same statements and keywords that a programmer would choose unfortunately little evidence exists about the statements and keywords that programmers view as important when they summarize source code in this paper we present an eye tracking study of 10 professional java programmers in which the programmers read java methods and wrote english summaries of those methods we apply the findings to build a novel summarization tool then we evaluate this tool and provide evidence to support the development of source code summarization systems", + "title_raw": "Improving automated source code summarization via an eye-tracking study of programmers", + "abstract_raw": "Source Code Summarization is an emerging technology for automatically generating brief descriptions of code. Current summarization techniques work by selecting a subset of the statements and keywords from the code, and then including information from those statements and keywords in the summary. The quality of the summary depends heavily on the process of selecting the subset: a high-quality selection would contain the same statements and keywords that a programmer would choose. Unfortunately, little evidence exists about the statements and keywords that programmers view as important when they summarize source code. In this paper, we present an eye-tracking study of 10 professional Java programmers in which the programmers read Java methods and wrote English summaries of those methods. We apply the findings to build a novel summarization tool. 
Then, we evaluate this tool and provide evidence to support the development of source code summarization systems.", + "link": "https://www.semanticscholar.org/paper/67649d6e32fcf7573da91e685cfda173775708d5", + "scraped_abstract": null, + "citation_best": 182 + }, + { + "paper": "2086186328", + "venue": "1174403976", + "year": "2014", + "title": "trading robustness for maintainability an empirical study of evolving c programs", + "label": [ + "97970142", + "160713754", + "176359209", + "519991488", + "199360897", + "145428669", + "149091818", + "115903868", + "43126263", + "164691408", + "14185376" + ], + "author": [ + "2188408743", + "2543062629", + "2224566099", + "2225953042", + "2222162059", + "2256892753", + "2115007605", + "2101752092", + "2148604163" + ], + "reference": [ + "72910588", + "194952179", + "1491676566", + "1494508523", + "1530275272", + "1606900254", + "1644882639", + "1833620599", + "1856815419", + "1894242881", + "1966871552", + "1968054699", + "1976701842", + "1992602600", + "1993306785", + "1996428910", + "2006323725", + "2027806980", + "2033969230", + "2040612760", + "2045100674", + "2048038821", + "2054223060", + "2071136304", + "2096160393", + "2100945416", + "2104221251", + "2117174233", + "2129262850", + "2132290529", + "2140462040", + "2141670850", + "2143163255", + "2145071552", + "2151990727", + "2159427468", + "2164317885", + "2165688098", + "2165700256", + "2167116703", + "2895146318" + ], + "abstract": "mainstream programming languages provide built in exception handling mechanisms to support robust and maintainable implementation of exception handling in software systems most of these modern languages such as c ruby python and many others are often claimed to have more appropriated exception handling mechanisms they reduce programming constraints on exception handling to favor agile changes in the source code these languages provide what we call maintenance driven exception handling mechanisms it is expected that the adoption of 
these mechanisms improve software maintainability without hindering software robustness however there is still little empirical knowledge about the impact that adopting these mechanisms have on software robustness this paper addressed this gap by conducting an empirical study aimed at understanding the relationship between changes in c programs and their robustness in particular we evaluated how changes in the normal and exceptional code were related to exception handling faults we applied a change impact analysis and a control flow analysis in 119 versions of 16 c programs the results showed that i most of the problems hindering software robustness in those programs are caused by changes in the normal code ii many potential faults were introduced even when improving exception handling in c code and iii faults are often facilitated by the maintenance driven flexibility of the exception handling mechanism moreover we present a series of change scenarios that decrease the program robustness", + "title_raw": "Trading robustness for maintainability: an empirical study of evolving c# programs", + "abstract_raw": "Mainstream programming languages provide built-in exception handling mechanisms to support robust and maintainable implementation of exception handling in software systems. Most of these modern languages, such as C#, Ruby, Python and many others, are often claimed to have more appropriated exception handling mechanisms. They reduce programming constraints on exception handling to favor agile changes in the source code. These languages provide what we call maintenance-driven exception handling mechanisms. It is expected that the adoption of these mechanisms improve software maintainability without hindering software robustness. However, there is still little empirical knowledge about the impact that adopting these mechanisms have on software robustness. 
This paper addressed this gap by conducting an empirical study aimed at understanding the relationship between changes in C# programs and their robustness. In particular, we evaluated how changes in the normal and exceptional code were related to exception handling faults. We applied a change impact analysis and a control flow analysis in 119 versions of 16 C# programs. The results showed that: (i) most of the problems hindering software robustness in those programs are caused by changes in the normal code, (ii) many potential faults were introduced even when improving exception handling in C# code, and (iii) faults are often facilitated by the maintenance-driven flexibility of the exception handling mechanism. Moreover, we present a series of change scenarios that decrease the program robustness.", + "link": "https://www.semanticscholar.org/paper/84ee88f85463db40bce34a22d087d323b9044531", + "scraped_abstract": null, + "citation_best": 45 + }, + { + "paper": "2090907135", + "venue": "1174403976", + "year": "2014", + "title": "understanding javascript event based interactions", + "label": [ + "198240166", + "544833334", + "5366617", + "107457646", + "118643609", + "149091818", + "64073096", + "136764020", + "2777561058" + ], + "author": [ + "2229760896", + "2225466864", + "2022381637", + "2184603505" + ], + "reference": [ + "1483380114", + "1570215497", + "1595781181", + "1987647365", + "1992602600", + "1995923922", + "2001033929", + "2016263254", + "2033890725", + "2105482755", + "2105714739", + "2128090947", + "2137619003", + "2139627310", + "2154267672", + "2157943826", + "2172161420", + "2193288339", + "2915472497" + ], + "abstract": "web applications have become one of the fastest growing types of software systems today despite their popularity understanding the behaviour of modern web applications is still a challenging endeavour for developers during development and maintenance tasks the challenges mainly stem from the dynamic event driven and asynchronous 
nature of the javascript language we propose a generic technique for capturing low level event based interactions in a web application and mapping those to a higher level behavioural model this model is then transformed into an interactive visualization representing episodes of triggered causal and temporal events related javascript code executions and their impact on the dynamic dom state our approach implemented in a tool called clematis allows developers to easily understand the complex dynamic behaviour of their application at three different semantic levels of granularity the results of our industrial controlled experiment show that clematis is capable of improving the task accuracy by 61 while reducing the task completion time by 47", + "title_raw": "Understanding JavaScript event-based interactions", + "abstract_raw": "Web applications have become one of the fastest growing types of software systems today. Despite their popularity, understanding the behaviour of modern web applications is still a challenging endeavour for developers during development and maintenance tasks. The challenges mainly stem from the dynamic, event-driven, and asynchronous nature of the JavaScript language. We propose a generic technique for capturing low-level event-based interactions in a web application and mapping those to a higher-level behavioural model. This model is then transformed into an interactive visualization, representing episodes of triggered causal and temporal events, related JavaScript code executions, and their impact on the dynamic DOM state. Our approach, implemented in a tool called Clematis, allows developers to easily understand the complex dynamic behaviour of their application at three different semantic levels of granularity. 
The results of our industrial controlled experiment show that Clematis is capable of improving the task accuracy by 61%, while reducing the task completion time by 47%.", + "link": "https://www.semanticscholar.org/paper/b68a0ff4d5757ff99207d53a7e7de5e7c58c3566", + "scraped_abstract": null, + "citation_best": 64 + }, + { + "paper": "2050127001", + "venue": "1174403976", + "year": "2014", + "title": "unit test virtualization with vmvm", + "label": [ + "47878483", + "513985346", + "149635348", + "111919701", + "148027188", + "548217200", + "7435765", + "109852812", + "2777904410" + ], + "author": [ + "2156129476", + "2100592247" + ], + "reference": [ + "1514258760", + "1569962836", + "1668251704", + "1971137495", + "1993295335", + "1998393968", + "2014515160", + "2031965919", + "2040019420", + "2044445979", + "2079811826", + "2087275952", + "2088175657", + "2095710561", + "2097808034", + "2104420598", + "2105904466", + "2107500604", + "2109739361", + "2119861793", + "2120781477", + "2122156253", + "2123356060", + "2125776594", + "2125814238", + "2131053137", + "2131742774", + "2137433502", + "2140991542", + "2144927251", + "2149603369", + "2152870594", + "2152949369", + "2154656574", + "2155246571", + "2159614205", + "2165965482", + "2166616527", + "2166700159", + "3139291860" + ], + "abstract": "testing large software packages can become very time intensive to address this problem researchers have investigated techniques such as test suite minimization test suite minimization reduces the number of tests in a suite by removing tests that appear redundant at the risk of a reduction in fault finding ability since it can be difficult to identify which tests are truly redundant we take a completely different approach to solving the same problem of long running test suites by instead reducing the time needed to execute each test an approach that we call unit test virtualization with unit test virtualization we reduce the overhead of isolating each unit test with a 
lightweight virtualization container we describe the empirical analysis that grounds our approach and provide an implementation of unit test virtualization targeting java applications we evaluated our implementation vmvm using 20 real world java applications and found that it reduces test suite execution time by up to 97 on average 62 when compared to traditional unit test execution we also compared vmvm to a well known test suite minimization technique finding the reduction provided by vmvm to be four times greater while still executing every test with no loss of fault finding ability", + "title_raw": "Unit test virtualization with VMVM", + "abstract_raw": "Testing large software packages can become very time intensive. To address this problem, researchers have investigated techniques such as Test Suite Minimization. Test Suite Minimization reduces the number of tests in a suite by removing tests that appear redundant, at the risk of a reduction in fault-finding ability since it can be difficult to identify which tests are truly redundant. We take a completely different approach to solving the same problem of long running test suites by instead reducing the time needed to execute each test, an approach that we call Unit Test Virtualization. With Unit Test Virtualization, we reduce the overhead of isolating each unit test with a lightweight virtualization container. We describe the empirical analysis that grounds our approach and provide an implementation of Unit Test Virtualization targeting Java applications. We evaluated our implementation, VMVM, using 20 real-world Java applications and found that it reduces test suite execution time by up to 97% (on average, 62%) when compared to traditional unit test execution. 
We also compared VMVM to a well known Test Suite Minimization technique, finding the reduction provided by VMVM to be four times greater, while still executing every test with no loss of fault-finding ability.", + "link": "https://www.semanticscholar.org/paper/89305bd5500d5c67cc399c608e7674b814933472", + "scraped_abstract": null, + "citation_best": 80 + }, + { + "paper": "2052261215", + "venue": "1130985203", + "year": "2014", + "title": "reducing the sampling complexity of topic models", + "label": [ + "171686336", + "167966045", + "114289077", + "142417499", + "68339613", + "2776214188", + "80444323", + "61249035", + "51167844" + ], + "author": [ + "2500931535", + "2259645355", + "2590734359", + "1972291593" + ], + "reference": [ + "203054622", + "1866637071", + "1880262756", + "1915058860", + "2001082470", + "2041517243", + "2053725995", + "2065221212", + "2080972498", + "2087309226", + "2110591510", + "2115529864", + "2130416410", + "2145768976", + "2150286230", + "2150731624", + "2158266063", + "2161194249", + "2163021329", + "2187741934", + "2952905572", + "3120740533" + ], + "abstract": "inference in topic models typically involves a sampling step to associate latent variables with observations unfortunately the generative model loses sparsity as the amount of data increases requiring o k operations per word for k topics in this paper we propose an algorithm which scales linearly with the number of actually instantiated topics kd in the document for large document collections and in structured hierarchical models kd ll k this yields an order of magnitude speedup our method applies to a wide variety of statistical models such as pdp 16 4 and hdp 19 at its core is the idea that dense slowly changing distributions can be approximated efficiently by the combination of a metropolis hastings step use of sparsity and amortized constant time sampling via walker s alias method", + "title_raw": "Reducing the sampling complexity of topic models", + "abstract_raw": 
"Inference in topic models typically involves a sampling step to associate latent variables with observations. Unfortunately the generative model loses sparsity as the amount of data increases, requiring O(k) operations per word for k topics. In this paper we propose an algorithm which scales linearly with the number of actually instantiated topics kd in the document. For large document collections and in structured hierarchical models kd ll k. This yields an order of magnitude speedup. Our method applies to a wide variety of statistical models such as PDP [16,4] and HDP [19]. At its core is the idea that dense, slowly changing distributions can be approximated efficiently by the combination of a Metropolis-Hastings step, use of sparsity, and amortized constant time sampling via Walker's alias method.", + "link": "https://www.semanticscholar.org/paper/ff698613a241aef1f57715eb5a4dfc12a79d1975", + "scraped_abstract": null, + "citation_best": 228 + }, + { + "paper": "2069973845", + "venue": "1123349196", + "year": "2014", + "title": "tagoram real time tracking of mobile rfid tags to high precision using cots devices", + "label": [ + "2776459758", + "165696696", + "64729616", + "40140605", + "105446022", + "2779843651", + "2777884278", + "41904074", + "79403827" + ], + "author": [ + "2421180782", + "2222904846", + "2111415347", + "2784177086", + "2111754996", + "2130996983" + ], + "reference": [ + "1963654592", + "1976152410", + "2002093905", + "2003329422", + "2009008240", + "2048131511", + "2049036780", + "2078810600", + "2080797991", + "2084088942", + "2099461393", + "2108292920", + "2120910213", + "2122317600", + "2125111937", + "2129567471", + "2158090937", + "2160593627", + "2161455381", + "2162134046", + "2999214452" + ], + "abstract": "in many applications we have to identify an object and then locate the object to within high precision centimeter or millimeter level legacy systems that can provide such accuracy are either expensive or suffering from 
performance degradation resulting from various impacts e g occlusion for computer vision based approaches in this work we present an rfid based system tagoram for object localization and tracking using cots rfid tags and readers tracking mobile rfid tags in real time has been a daunting task especially challenging for achieving high precision our system achieves these three goals by leveraging the phase value of the backscattered signal provided by the cots rfid readers to estimate the location of the object in tagoram we exploit the tag s mobility to build a virtual antenna array by using readings from a few physical antennas over a time window to illustrate the basic idea of our system we firstly focus on a simple scenario where the tag is moving along a fixed track known to the system we propose differential augmented hologram dah which will facilitate the instant tracking of the mobile rfid tag to a high precision we then devise a comprehensive solution to accurately recover the tag s moving trajectories and its locations relaxing the assumption of knowing tag s track function in advance we have implemented the tagoram system using cots rfid tags and readers the system has been tested extensively in the lab environment and used for more than a year in real airline applications for lab environment we can track the mobile tags in real time with a millimeter accuracy to a median of 5mm and 7 29mm using linear and circular track respectively in our year long large scale baggage sortation systems deployed in two airports our results from real deployments show that tagoram can achieve a centimeter level accuracy to a median of 6 35cm in these real deployments", + "title_raw": "Tagoram: real-time tracking of mobile RFID tags to high precision using COTS devices", + "abstract_raw": "In many applications, we have to identify an object and then locate the object to within high precision (centimeter- or millimeter-level). 
Legacy systems that can provide such accuracy are either expensive or suffering from performance degradation resulting from various impacts, e.g., occlusion for computer vision based approaches. In this work, we present an RFID-based system, Tagoram, for object localization and tracking using COTS RFID tags and readers. Tracking mobile RFID tags in real time has been a daunting task, especially challenging for achieving high precision. Our system achieves these three goals by leveraging the phase value of the backscattered signal, provided by the COTS RFID readers, to estimate the location of the object. In Tagoram, we exploit the tag's mobility to build a virtual antenna array by using readings from a few physical antennas over a time window. To illustrate the basic idea of our system, we firstly focus on a simple scenario where the tag is moving along a fixed track known to the system. We propose Differential Augmented Hologram (DAH) which will facilitate the instant tracking of the mobile RFID tag to a high precision. We then devise a comprehensive solution to accurately recover the tag's moving trajectories and its locations, relaxing the assumption of knowing tag's track function in advance. We have implemented the Tagoram system using COTS RFID tags and readers. The system has been tested extensively in the lab environment and used for more than a year in real airline applications. For lab environment, we can track the mobile tags in real time with a millimeter accuracy to a median of 5mm and 7.29mm using linear and circular track respectively. 
In our year- long large scale baggage sortation systems deployed in two airports, our results from real deployments show that Tagoram can achieve a centimeter-level accuracy to a median of 6.35cm in these real deployments.", + "link": "https://www.semanticscholar.org/paper/d25f8103e30d158e4993bbe4f120c49fb0b7f9a5", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2963056065", + "venue": "1127325140", + "year": "2014", + "title": "asymmetric lsh alsh for sublinear time maximum inner product search mips", + "label": [ + "2776517306", + "11413529", + "99138194", + "2777036070", + "200632571", + "74270461" + ], + "author": [ + "2151457819", + "2721445647" + ], + "reference": [ + "107173025", + "192724328", + "1455310343", + "1541459201", + "1583707981", + "1620548055", + "1671551279", + "1736726159", + "1768336749", + "1858047604", + "1877138905", + "1893597444", + "1974627246", + "1978475816", + "1985123706", + "1986482242", + "1994389483", + "2007682403", + "2010416066", + "2010454887", + "2012833704", + "2029852131", + "2031248101", + "2041836310", + "2049898997", + "2054141820", + "2067952552", + "2081193615", + "2082612735", + "2085922539", + "2090836891", + "2097776316", + "2097865464", + "2105734423", + "2120031510", + "2122146326", + "2123427850", + "2126887541", + "2126907894", + "2128056631", + "2132069633", + "2143996849", + "2145349611", + "2147017814", + "2147717514", + "2150886314", + "2151596444", + "2152402969", + "2154660245", + "2162006472", + "2167029568", + "2168356304", + "2169054943", + "2251406078", + "2397770138", + "2962863202", + "2963436558", + "2963576795", + "2963593740", + "2963703787" + ], + "abstract": "we present the first provably sublinear time hashing algorithm for approximate maximum inner product search mips searching with un normalized inner product as the underlying similarity measure is a known difficult problem and finding hashing schemes for mips was considered hard while the existing locality sensitive 
hashing lsh framework is insufficient for solving mips in this paper we extend the lsh framework to allow asymmetric hashing schemes our proposal is based on a key observation that the problem of finding maximum inner products after independent asymmetric transformations can be converted into the problem of approximate near neighbor search in classical settings this key observation makes efficient sublinear hashing scheme for mips possible under the extended asymmetric lsh alsh framework this paper provides an example of explicit construction of provably fast hashing scheme for mips our proposed algorithm is simple and easy to implement the proposed hashing scheme leads to significant computational savings over the two popular conventional lsh schemes i sign random projection srp and ii hashing based on p stable distributions for l2 norm l2lsh in the collaborative filtering task of item recommendations on netflix and movielens 10m datasets", + "title_raw": "Asymmetric LSH (ALSH) for Sublinear Time Maximum Inner Product Search (MIPS)", + "abstract_raw": "We present the first provably sublinear time hashing algorithm for approximate Maximum Inner Product Search (MIPS). Searching with (un-normalized) inner product as the underlying similarity measure is a known difficult problem and finding hashing schemes for MIPS was considered hard. While the existing Locality Sensitive Hashing (LSH) framework is insufficient for solving MIPS, in this paper we extend the LSH framework to allow asymmetric hashing schemes. Our proposal is based on a key observation that the problem of finding maximum inner products, after independent asymmetric transformations, can be converted into the problem of approximate near neighbor search in classical settings. This key observation makes efficient sublinear hashing scheme for MIPS possible. Under the extended asymmetric LSH (ALSH) framework, this paper provides an example of explicit construction of provably fast hashing scheme for MIPS. 
Our proposed algorithm is simple and easy to implement. The proposed hashing scheme leads to significant computational savings over the two popular conventional LSH schemes: (i) Sign Random Projection (SRP) and (ii) hashing based on p-stable distributions for L2 norm (L2LSH), in the collaborative filtering task of item recommendations on Netflix and Movielens (10M) datasets.", + "link": "https://scholar.google.com/scholar?as_q=&num=10&btnG=Search+Scholar&as_epq=Asymmetric+LSH+(ALSH)+for+Sublinear+Time+Maximum+Inner+Product+Search+(MIPS)&as_oq=&as_eq=&as_occt=any&as_sauthors=Shrivastava", + "scraped_abstract": null, + "citation_best": 266 + }, + { + "paper": "2155216527", + "venue": "1158363782", + "year": "2014", + "title": "software dataplane verification", + "label": [ + "2779229675", + "22927095", + "103613024", + "161743704", + "33054407", + "2777884278", + "2777904410", + "186846655", + "120314980" + ], + "author": [ + "1893457898", + "305900328" + ], + "reference": [ + "158224344", + "186989516", + "207759855", + "1497028280", + "1515508441", + "1576549127", + "1675033504", + "1697123834", + "1710734607", + "1882012874", + "1979693894", + "2010365467", + "2018041465", + "2042804814", + "2065675749", + "2082000355", + "2096449544", + "2111648865", + "2111734949", + "2115526539", + "2117009500", + "2121021091", + "2124909257", + "2131195907", + "2132897303", + "2133612077", + "2134875273", + "2136310957", + "2140069682", + "2148994573", + "2150990339", + "2151062909", + "2162360270", + "2170736936", + "2171469152", + "2171999426", + "2202294430" + ], + "abstract": "software dataplanes are emerging as an alternative to traditional hardware switches and routers promising programmability and short time to market these advantages are set against the risk of disrupting the network with bugs unpredictable performance or security vulnerabilities we explore the feasibility of verifying software dataplanes to ensure smooth network operation for general programs 
verifiability and performance are competing goals we argue that software dataplanes are different we can write them in a way that enables verification and preserves performance we present a verification tool that takes as input a software dataplane written in a way that meets a given set of conditions and dis proves that the dataplane satisfies crash freedom bounded execution and filtering properties we evaluate our tool on stateless and simple stateful click pipelines we perform complete and sound verification of these pipelines within tens of minutes whereas a state of the art general purpose tool fails to complete the same task within several hours", + "title_raw": "Software dataplane verification", + "abstract_raw": "Software dataplanes are emerging as an alternative to traditional hardware switches and routers, promising programmability and short time to market. These advantages are set against the risk of disrupting the network with bugs, unpredictable performance, or security vulnerabilities. We explore the feasibility of verifying software dataplanes to ensure smooth network operation. For general programs, verifiability and performance are competing goals; we argue that software dataplanes are different--we can write them in a way that enables verification and preserves performance. We present a verification tool that takes as input a software dataplane, written in a way that meets a given set of conditions, and (dis)proves that the dataplane satisfies crash-freedom, bounded-execution, and filtering properties. 
We evaluate our tool on stateless and simple stateful Click pipelines; we perform complete and sound verification of these pipelines within tens of minutes, whereas a state-of-the-art general-purpose tool fails to complete the same task within several hours.", + "link": "https://www.semanticscholar.org/paper/af8d9569433f98d9f5743c5b141f47bdf0ca4ea2", + "scraped_abstract": null, + "citation_best": 40 + }, + { + "paper": "2096915479", + "venue": "1185109434", + "year": "2014", + "title": "arrakis the operating system is the control plane", + "label": [ + "59671392", + "149635348", + "111919701", + "90307666", + "553261973", + "572687", + "94127936", + "120763227", + "34590194" + ], + "author": [ + "2154122045", + "2105191977", + "2145632935", + "1513655371", + "1994080349", + "2088689873", + "2148869393", + "1981466420" + ], + "reference": [ + "183305829", + "1424350945", + "1532546444", + "1560399135", + "1966938284", + "1982063824", + "1992755462", + "1994618472", + "1995790197", + "2009072424", + "2009404871", + "2038572501", + "2045808511", + "2068975988", + "2081461624", + "2083469471", + "2096241833", + "2097174266", + "2100792372", + "2102248411", + "2105545278", + "2109841085", + "2111262474", + "2115412108", + "2120111796", + "2121160206", + "2125901106", + "2129988636", + "2131726714", + "2133119226", + "2139367291", + "2140973798", + "2143160644", + "2148114202", + "2154207621", + "2155066383", + "2156368296", + "2159890891", + "2162121187", + "2165832195", + "2168075869", + "2169414316", + "2178497290", + "2186588068", + "2205436351", + "2225993331" + ], + "abstract": "recent device hardware trends enable a new approach to the design of network server operating systems in a traditional operating system the kernel mediates access to device hardware by server applications to enforce process isolation as well as network and disk security we have designed and implemented a new operating system arrakis that splits the traditional role of the kernel in two 
applications have direct access to virtualized i o devices allowing most i o operations to skip the kernel entirely while the kernel is re engineered to provide network and disk protection without kernel mediation of every operation we describe the hardware and software changes needed to take advantage of this new abstraction and we illustrate its power by showing improvements of 2 5x in latency and 9x in throughput for a popular persistent nosql store relative to a well tuned linux implementation", + "title_raw": "Arrakis: the operating system is the control plane", + "abstract_raw": "Recent device hardware trends enable a new approach to the design of network server operating systems. In a traditional operating system, the kernel mediates access to device hardware by server applications, to enforce process isolation as well as network and disk security. We have designed and implemented a new operating system, Arrakis, that splits the traditional role of the kernel in two. Applications have direct access to virtualized I/O devices, allowing most I/O operations to skip the kernel entirely, while the kernel is re-engineered to provide network and disk protection without kernel mediation of every operation. 
We describe the hardware and software changes needed to take advantage of this new abstraction, and we illustrate its power by showing improvements of 2-5x in latency and 9x in throughput for a popular persistent NoSQL store relative to a well-tuned Linux implementation.", + "link": "https://www.semanticscholar.org/paper/f2886ad3e20fcda1ba8c171cce65ea240af9f738", + "scraped_abstract": null, + "citation_best": 222 + }, + { + "paper": "1852007091", + "venue": "1185109434", + "year": "2014", + "title": "shielding applications from an untrusted cloud with haven", + "label": [ + "167955471", + "111919701", + "38652104", + "105446022", + "67212190", + "2777904410", + "195917429", + "79974875", + "71745522" + ], + "author": [ + "2150586784", + "2123463638", + "2141074703" + ], + "reference": [ + "15883", + "13103650", + "104209573", + "157845512", + "1253017325", + "1434079718", + "1504669610", + "1545927878", + "1600255172", + "1704933903", + "1966938284", + "1978703818", + "2000375627", + "2001986196", + "2007537982", + "2012898464", + "2014517322", + "2029349492", + "2036329595", + "2036790532", + "2043366397", + "2043501224", + "2048525559", + "2062340141", + "2065207200", + "2076265406", + "2082222018", + "2092423386", + "2093460657", + "2103131117", + "2105528199", + "2108255910", + "2112735498", + "2128159601", + "2129342555", + "2130694829", + "2136310957", + "2138711012", + "2140972824", + "2143351500", + "2150615820", + "2150709728", + "2153553074", + "2154884316", + "2159915142", + "2164399967", + "2167512529", + "2167804035", + "2168601499", + "2176235539", + "2289716913", + "2401959250" + ], + "abstract": "today s cloud computing infrastructure requires substantial trust cloud users rely on both the provider s staff and its globally distributed software hardware platform not to expose any of their private data we introduce the notion of shielded execution which protects the confidentiality and integrity of a program and its data from the platform on which it 
runs i e the cloud operator s os vm and firmware our prototype haven is the first system to achieve shielded execution of unmodified legacy applications including sql server and apache on a commodity os windows and commodity hardware haven leverages the hardware protection of intel sgx to defend against privileged code and physical attacks such as memory probes but also addresses the dual challenges of executing unmodified legacy binaries and protecting them from a malicious host this work motivated recent changes in the sgx specification", + "title_raw": "Shielding applications from an untrusted cloud with Haven", + "abstract_raw": "Today's cloud computing infrastructure requires substantial trust. Cloud users rely on both the provider's staff and its globally-distributed software/hardware platform not to expose any of their private data.\r\n\r\nWe introduce the notion of shielded execution, which protects the confidentiality and integrity of a program and its data from the platform on which it runs (i.e., the cloud operator's OS, VM and firmware). Our prototype, Haven, is the first system to achieve shielded execution of unmodified legacy applications, including SQL Server and Apache, on a commodity OS (Windows) and commodity hardware. Haven leverages the hardware protection of Intel SGX to defend against privileged code and physical attacks such as memory probes, but also addresses the dual challenges of executing unmodified legacy binaries and protecting them from a malicious host. 
This work motivated recent changes in the SGX specification.", + "link": "https://www.semanticscholar.org/paper/17f19d9ec093ef82a10f1276fc53c10d4667836d", + "scraped_abstract": null, + "citation_best": 430 + }, + { + "paper": "2169414316", + "venue": "1185109434", + "year": "2014", + "title": "ix a protected dataplane operating system for high throughput and low latency", + "label": [ + "513985346", + "111919701", + "158379750", + "113200698", + "68793194", + "160403385", + "98980195" + ], + "author": [ + "2943084257", + "2461283553", + "2244492565", + "2559588932", + "713250648", + "2670407451" + ], + "reference": [ + "11812818", + "83339351", + "100679602", + "183305829", + "1214620947", + "1424350945", + "1532546444", + "1538104277", + "1553037668", + "1609755472", + "1772679534", + "1785664926", + "1982063824", + "2003597767", + "2006816934", + "2010365467", + "2014485836", + "2019066515", + "2020733012", + "2023209622", + "2031844067", + "2038572501", + "2045808511", + "2084226860", + "2090590366", + "2096915479", + "2104670257", + "2108914546", + "2109928164", + "2110946051", + "2120422789", + "2122960384", + "2125901106", + "2129554014", + "2133581580", + "2134807578", + "2134939854", + "2137755180", + "2139367291", + "2143677609", + "2144553551", + "2151062909", + "2151182669", + "2158733823", + "2159256070", + "2160121678", + "2163961697", + "2164740236", + "2164846625", + "2167756215", + "2168075869", + "2186588068", + "2978655370" + ], + "abstract": "the conventional wisdom is that aggressive networking requirements such as high packet rates for small messages and microsecond scale tail latency are best addressed outside the kernel in a user level networking stack we present ix a dataplane operating system that provides high i o performance while maintaining the key advantage of strong protection offered by existing kernels ix uses hardware virtualization to separate management and scheduling functions of the kernel control plane from network processing 
dataplane the data plane architecture builds upon a native zero copy api and optimizes for both bandwidth and latency by dedicating hardware threads and networking queues to data plane instances processing bounded batches of packets to completion and by eliminating coherence traffic and multi core synchronization we demonstrate that ix outperforms linux and state of the art user space network stacks significantly in both throughput and end to end latency moreover ix improves the throughput of a widely deployed key value store by up to 3 6x and reduces tail latency by more than 2x", + "title_raw": "IX: a protected dataplane operating system for high throughput and low latency", + "abstract_raw": "The conventional wisdom is that aggressive networking requirements, such as high packet rates for small messages and microsecond-scale tail latency, are best addressed outside the kernel, in a user-level networking stack. We present IX, a dataplane operating system that provides high I/O performance, while maintaining the key advantage of strong protection offered by existing kernels. IX uses hardware virtualization to separate management and scheduling functions of the kernel (control plane) from network processing (dataplane). The data-plane architecture builds upon a native, zero-copy API and optimizes for both bandwidth and latency by dedicating hardware threads and networking queues to data-plane instances, processing bounded batches of packets to completion, and by eliminating coherence traffic and multi-core synchronization. We demonstrate that IX outperforms Linux and state-of-the-art, user-space network stacks significantly in both throughput and end-to-end latency. 
Moreover, IX improves the throughput of a widely deployed, key-value store by up to 3.6x and reduces tail latency by more than 2x.", + "link": "https://www.semanticscholar.org/paper/567f633bc74ac40364a1961fad1b7fe80605b815", + "scraped_abstract": null, + "citation_best": 278 + }, + { + "paper": "2170737051", + "venue": "1127352206", + "year": "2014", + "title": "compiler validation via equivalence modulo inputs", + "label": [ + "169590947", + "2777332272", + "199360897", + "2778361913" + ], + "author": [ + "2430623256", + "2230992257", + "2102704429" + ], + "reference": [ + "109452506", + "190884830", + "1531203382", + "2040856861", + "2071952624", + "2085050643", + "2098456636", + "2108174561", + "2114012357", + "2129695855", + "2138474070", + "2141717815", + "2148662736", + "2150871888", + "2152397470", + "2155877593", + "2160145830", + "2162604396", + "2187646468", + "2295658119", + "2313783443", + "2597376266", + "2911450990" + ], + "abstract": "we introduce equivalence modulo inputs emi a simple widely applicable methodology for validating optimizing compilers our key insight is to exploit the close interplay between 1 dynamically executing a program on some test inputs and 2 statically compiling the program to work on all possible inputs indeed the test inputs induce a natural collection of the original program s emi variants which can help differentially test any compiler and specifically target the difficult to find miscompilations to create a practical implementation of emi for validating c compilers we profile a program s test executions and stochastically prune its unexecuted code our extensive testing in eleven months has led to 147 confirmed unique bug reports for gcc and llvm alone the majority of those bugs are miscompilations and more than 100 have already been fixed beyond testing compilers emi can be adapted to validate program transformation and analysis systems in general this work opens up this exciting new direction", + "title_raw": "Compiler 
validation via equivalence modulo inputs", + "abstract_raw": "We introduce equivalence modulo inputs (EMI), a simple, widely applicable methodology for validating optimizing compilers. Our key insight is to exploit the close interplay between (1) dynamically executing a program on some test inputs and (2) statically compiling the program to work on all possible inputs. Indeed, the test inputs induce a natural collection of the original program's EMI variants, which can help differentially test any compiler and specifically target the difficult-to-find miscompilations. To create a practical implementation of EMI for validating C compilers, we profile a program's test executions and stochastically prune its unexecuted code. Our extensive testing in eleven months has led to 147 confirmed, unique bug reports for GCC and LLVM alone. The majority of those bugs are miscompilations, and more than 100 have already been fixed. Beyond testing compilers, EMI can be adapted to validate program transformation and analysis systems in general. 
This work opens up this exciting, new direction.", + "link": "https://www.semanticscholar.org/paper/79bbd54d5bdfd20980e5f9a65480f5e127fc1221", + "scraped_abstract": null, + "citation_best": 174 + }, + { + "paper": "2057510141", + "venue": "1127352206", + "year": "2014", + "title": "improving javascript performance by deconstructing the type system", + "label": [ + "198240166", + "167955471", + "133162039", + "544833334", + "64729616", + "169590947", + "199360897", + "61423126", + "127431555" + ], + "author": [ + "2170240972", + "2504852526", + "2223684681", + "2240166792", + "2154776208" + ], + "reference": [ + "42576273", + "1777693579", + "1975608564", + "1993318777", + "1999753800", + "2039690203", + "2058639022", + "2107612966", + "2128457714", + "2134633067", + "2141293928", + "2144902933", + "2150650310", + "2164021361", + "3147685736" + ], + "abstract": "increased focus on javascript performance has resulted in vast performance improvements for many benchmarks however for actual code used in websites the attained improvements often lag far behind those for popular benchmarks this paper shows that the main reason behind this short fall is how the compiler understands types javascript has no concept of types but the compiler assigns types to objects anyway for ease of code generation we examine the way that the chrome v8 compiler defines types and identify two design decisions that are the main reasons for the lack of improvement 1 the inherited prototype object is part of the current object s type definition and 2 method bindings are also part of the type definition these requirements make types very unpredictable which hinders type specialization by the compiler hence we modify v8 to remove these requirements and use it to compile the javascript code assembled by jsbench from real websites on average we reduce the execution time of jsbench by 36 and the dynamic instruction count by 49", + "title_raw": "Improving JavaScript performance by deconstructing the 
type system", + "abstract_raw": "Increased focus on JavaScript performance has resulted in vast performance improvements for many benchmarks. However, for actual code used in websites, the attained improvements often lag far behind those for popular benchmarks. This paper shows that the main reason behind this short-fall is how the compiler understands types. JavaScript has no concept of types, but the compiler assigns types to objects anyway for ease of code generation. We examine the way that the Chrome V8 compiler defines types, and identify two design decisions that are the main reasons for the lack of improvement: (1) the inherited prototype object is part of the current object's type definition, and (2) method bindings are also part of the type definition. These requirements make types very unpredictable, which hinders type specialization by the compiler. Hence, we modify V8 to remove these requirements, and use it to compile the JavaScript code assembled by JSBench from real websites. 
On average, we reduce the execution time of JSBench by 36%, and the dynamic instruction count by 49%.", + "link": "https://www.semanticscholar.org/paper/a4cea00efa2314fe1b291aaa5b7c995573b101e8", + "scraped_abstract": null, + "citation_best": 14 + }, + { + "paper": "2050680750", + "venue": "1127352206", + "year": "2014", + "title": "on abstraction refinement for program analyses in datalog", + "label": [ + "98183937", + "148230440", + "2780542140", + "548217200", + "80444323", + "199360897", + "7263679", + "2778770139", + "6943359", + "130822934" + ], + "author": [ + "2616946442", + "2045343935", + "2119042880", + "2005763208", + "2125768396" + ], + "reference": [ + "17381159", + "74317754", + "96134206", + "1536007121", + "1593428110", + "1671718115", + "1775352280", + "1787074469", + "1969599528", + "1970357325", + "1971327145", + "2051527230", + "2080573945", + "2082000355", + "2083756938", + "2102178883", + "2112243500", + "2127574686", + "2132685964", + "2136503680", + "2151463894", + "2151562310", + "2152791745", + "2158600037", + "2163521620", + "2278445211" + ], + "abstract": "a central task for a program analysis concerns how to efficiently find a program abstraction that keeps only information relevant for proving properties of interest we present a new approach for finding such abstractions for program analyses written in datalog our approach is based on counterexample guided abstraction refinement when a datalog analysis run fails using an abstraction it seeks to generalize the cause of the failure to other abstractions and pick a new abstraction that avoids a similar failure our solution uses a boolean satisfiability formulation that is general complete and optimal it is independent of the datalog solver it generalizes the failure of an abstraction to as many other abstractions as possible and it identifies the cheapest refined abstraction to try next we show the performance of our approach on a pointer analysis and a typestate analysis on eight real 
world java benchmark programs", + "title_raw": "On abstraction refinement for program analyses in Datalog", + "abstract_raw": "A central task for a program analysis concerns how to efficiently find a program abstraction that keeps only information relevant for proving properties of interest. We present a new approach for finding such abstractions for program analyses written in Datalog. Our approach is based on counterexample-guided abstraction refinement: when a Datalog analysis run fails using an abstraction, it seeks to generalize the cause of the failure to other abstractions, and pick a new abstraction that avoids a similar failure. Our solution uses a boolean satisfiability formulation that is general, complete, and optimal: it is independent of the Datalog solver, it generalizes the failure of an abstraction to as many other abstractions as possible, and it identifies the cheapest refined abstraction to try next. We show the performance of our approach on a pointer analysis and a typestate analysis, on eight real-world Java benchmark programs.", + "link": "https://www.semanticscholar.org/paper/73d83c84e0cf2909351b01e9e22c312a9a09045c", + "scraped_abstract": null, + "citation_best": 79 + }, + { + "paper": "2077542434", + "venue": "1184151122", + "year": "2014", + "title": "weaker forms of monotonicity for declarative networking a more fine grained answer to the calm conjecture", + "label": [ + "70061542", + "148230440", + "121163568", + "2776235265", + "80444323" + ], + "author": [ + "181190241", + "297960948", + "1870307126", + "2466730297" + ], + "reference": [ + "167785927", + "193566757", + "1513049773", + "1529168989", + "1558832481", + "1565106395", + "1897022643", + "1984280364", + "2001995757", + "2008199814", + "2013984441", + "2023062809", + "2027438132", + "2028980140", + "2029591641", + "2033258621", + "2035165468", + "2040988073", + "2050489064", + "2058651804", + "2071588600", + "2073227282", + "2075620950", + "2091090133", + "2106355650", + 
"2110346977", + "2118612179", + "2129687134", + "2136575791", + "2163087774", + "2166197332", + "2293486499" + ], + "abstract": "the calm conjecture first stated by hellerstein 23 and proved in its revised form by ameloot et al 13 within the framework of relational transducer networks asserts that a query has a coordination free execution strategy if and only if the query is monotone zinn et al 32 extended the framework of relational transducer networks to allow for specific data distribution strategies and showed that the nonmonotone win move query is coordination free for domain guided data distributions in this paper we complete the story by equating increasingly larger classes of coordination free computations with increasingly weaker forms of monotonicity and make datalog variants explicit that capture each of these classes one such fragment is based on stratified datalog where rules are required to be connected with the exception of the last stratum in addition we characterize coordination freeness as those computations that do not require knowledge about all other nodes in the network and therefore can not globally coordinate the results in this paper can be interpreted as a more fine grained answer to the calm conjecture", + "title_raw": "Weaker forms of monotonicity for declarative networking: a more fine-grained answer to the calm-conjecture", + "abstract_raw": "The CALM-conjecture, first stated by Hellerstein [23] and proved in its revised form by Ameloot et al. [13] within the framework of relational transducer networks, asserts that a query has a coordination-free execution strategy if and only if the query is monotone. Zinn et al. [32] extended the framework of relational transducer networks to allow for specific data distribution strategies and showed that the nonmonotone win-move query is coordination-free for domain-guided data distributions. 
In this paper, we complete the story by equating increasingly larger classes of coordination-free computations with increasingly weaker forms of monotonicity and make Datalog variants explicit that capture each of these classes. One such fragment is based on stratified Datalog where rules are required to be connected with the exception of the last stratum. In addition, we characterize coordination-freeness as those computations that do not require knowledge about all other nodes in the network, and therefore, can not globally coordinate. The results in this paper can be interpreted as a more fine-grained answer to the CALM-conjecture.", + "link": "https://www.semanticscholar.org/paper/a976968dc1bfde2e220cda2ace0e22527916be52", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2031015560", + "venue": "1163618098", + "year": "2014", + "title": "secure multiparty computations on bitcoin", + "label": [ + "18396474", + "48220719", + "38652104", + "29808475", + "2780801425", + "178489894", + "139089976" + ], + "author": [ + "288136762", + "2072459584", + "2152820835", + "2227210915" + ], + "reference": [ + "111114747", + "179922057", + "1480225633", + "1488658290", + "1489883012", + "1520914943", + "1551392315", + "1595357546", + "1599549092", + "1601379374", + "1602159460", + "1635361314", + "1984680759", + "2013686672", + "2039275540", + "2073086835", + "2077734015", + "2137664544", + "2143783692", + "2151433956", + "2156001253", + "2168325212", + "2214897964", + "2337430557", + "2403437300", + "2913157813", + "2990399857", + "3142333173" + ], + "abstract": "bit coin is a decentralized digital currency introduced in 2008 that has recently gained noticeable popularity its main features are a it lacks a central authority that controls the transactions b the list of transactions is publicly available and c its syntax allows more advanced transactions than simply transferring the money the goal of this paper is to show how these properties of bit coin 
can be used in the area of secure multiparty computation protocols mpcs firstly we show that the bit coin system provides an attractive way to construct a version of timed commitments where the committer has to reveal his secret within a certain time frame or to pay a fine this in turn can be used to obtain fairness in some multiparty protocols secondly we introduce a concept of multiparty protocols that work directly on bit coin recall that the standard definition of the mpcs guarantees only that the protocol emulates the trusted third party hence ensuring that the inputs are correct and the outcome is respected is beyond the scope of the definition our observation is that the bit coin system can be used to go beyond the standard emulation based definition by constructing protocols that link their inputs and the outputs with the real bit coin transactions as an instantiation of this idea we construct protocols for secure multiparty lotteries using the bit coin currency without relying on a trusted authority one of these protocols uses the bit coin based timed commitments mentioned above our protocols guarantee fairness for the honest parties no matter how the loser behaves for example if one party interrupts the protocol then her money is transferred to the honest participants our protocols are practical to demonstrate it we performed their transactions in the actual bit coin system and can be used in real life as a replacement for the online gambling sites we think that this paradigm can have also other applications we discuss some of them", + "title_raw": "Secure Multiparty Computations on Bitcoin", + "abstract_raw": "Bit coin is a decentralized digital currency, introduced in 2008, that has recently gained noticeable popularity. Its main features are: (a) it lacks a central authority that controls the transactions, (b) the list of transactions is publicly available, and (c) its syntax allows more advanced transactions than simply transferring the money. 
The goal of this paper is to show how these properties of Bit coin can be used in the area of secure multiparty computation protocols (MPCs). Firstly, we show that the Bit coin system provides an attractive way to construct a version of \"timed commitments\", where the committer has to reveal his secret within a certain time frame, or to pay a fine. This, in turn, can be used to obtain fairness in some multiparty protocols. Secondly, we introduce a concept of multiparty protocols that work \"directly on Bit coin\". Recall that the standard definition of the MPCs guarantees only that the protocol \"emulates the trusted third party\". Hence ensuring that the inputs are correct, and the outcome is respected is beyond the scope of the definition. Our observation is that the Bit coin system can be used to go beyond the standard \"emulation-based\" definition, by constructing protocols that link their inputs and the outputs with the real Bit coin transactions. As an instantiation of this idea we construct protocols for secure multiparty lotteries using the Bit coin currency, without relying on a trusted authority (one of these protocols uses the Bit coin-based timed commitments mentioned above). Our protocols guarantee fairness for the honest parties no matter how the loser behaves. For example: if one party interrupts the protocol then her money is transferred to the honest participants. Our protocols are practical (to demonstrate it we performed their transactions in the actual Bit coin system), and can be used in real life as a replacement for the online gambling sites. We think that this paradigm can have also other applications. 
We discuss some of them.", + "link": "https://www.semanticscholar.org/paper/5fa7758d22fe61a16ef65706868d144c49950714", + "scraped_abstract": null, + "citation_best": 351 + }, + { + "paper": "2099619158", + "venue": "1152462849", + "year": "2015", + "title": "balancing accountability and privacy in the network", + "label": [ + "35341882", + "158379750", + "38652104", + "108827166", + "110875604" + ], + "author": [ + "2308141540", + "2223440356", + "3184847413" + ], + "reference": [ + "9205189", + "146048181", + "337355046", + "1494972982", + "1573705733", + "1615970822", + "1658748398", + "1664342854", + "1671218426", + "1755020405", + "1867219652", + "1949661937", + "1978884755", + "1989728020", + "2001968297", + "2005708967", + "2006435204", + "2011441851", + "2014952121", + "2035283704", + "2037107113", + "2087766914", + "2098786456", + "2103647628", + "2113780533", + "2116236383", + "2120065915", + "2129909248", + "2132976569", + "2155141181", + "2157525327", + "2162133150", + "2165210192", + "2165413796", + "2166065395", + "2282052748", + "2494059454", + "2607235567", + "2891107165", + "3160814718" + ], + "abstract": "though most would agree that accountability and privacy are both valuable today s internet provides little support for either previous efforts have explored ways to offer stronger guarantees for one of the two typically at the expense of the other indeed at first glance accountability and privacy appear mutually exclusive at the center of the tussle is the source address in an accountable internet source addresses undeniably link packets and senders so hosts can be punished for bad behavior in a privacy preserving internet source addresses are hidden as much as possible in this paper we argue that a balance is possible we introduce the accountable and private internet protocol apip which splits source addresses into two separate fields an accountability address and a return address and introduces independent mechanisms for managing each 
accountability addresses rather than pointing to hosts point to accountability delegates which agree to vouch for packets on their clients behalves taking appropriate action when misbehavior is reported with accountability handled by delegates senders are now free to mask their return addresses we discuss a few techniques for doing so", + "title_raw": "Balancing accountability and privacy in the network", + "abstract_raw": "Though most would agree that accountability and privacy are both valuable, today's Internet provides little support for either. Previous efforts have explored ways to offer stronger guarantees for one of the two, typically at the expense of the other; indeed, at first glance accountability and privacy appear mutually exclusive. At the center of the tussle is the source address: in an accountable Internet, source addresses undeniably link packets and senders so hosts can be punished for bad behavior. In a privacy-preserving Internet, source addresses are hidden as much as possible. In this paper, we argue that a balance is possible. We introduce the Accountable and Private Internet Protocol (APIP), which splits source addresses into two separate fields --- an accountability address and a return address --- and introduces independent mechanisms for managing each. Accountability addresses, rather than pointing to hosts, point to accountability delegates, which agree to vouch for packets on their clients' behalves, taking appropriate action when misbehavior is reported. 
With accountability handled by delegates, senders are now free to mask their return addresses; we discuss a few techniques for doing so.", + "link": "https://www.semanticscholar.org/paper/fbd8f6ff68cdc66b840d2de5a2332765a0e211a9", + "scraped_abstract": null, + "citation_best": 34 + }, + { + "paper": "2157990152", + "venue": "1152462849", + "year": "2015", + "title": "conga distributed congestion aware load balancing for datacenters", + "label": [ + "199845137", + "2779898492", + "31395832", + "31258907", + "119700423", + "139330139", + "157764524", + "120314980" + ], + "author": [ + "2309837544", + "2308806257", + "2073170117", + "2128564779", + "2231178289", + "2231752021", + "2229967665", + "2305739359", + "2562467048", + "2040462121" + ], + "reference": [ + "158766578", + "1503891749", + "1515106148", + "1543942291", + "1588306728", + "1698388015", + "1971663816", + "1978175770", + "1997996213", + "2030686380", + "2038718790", + "2045730089", + "2062362478", + "2062832101", + "2067348620", + "2088506653", + "2096655151", + "2096704096", + "2097568888", + "2097906305", + "2099421250", + "2099657323", + "2102090846", + "2102549685", + "2103110737", + "2103830109", + "2107983838", + "2112525087", + "2117884704", + "2125537511", + "2126822952", + "2126969025", + "2130531694", + "2133294820", + "2133581580", + "2142480021", + "2143508387", + "2147802358", + "2148983286", + "2151248167", + "2154965732", + "2157614013", + "2163404313", + "2164096531", + "2164740236", + "2168595508", + "2272018021", + "2281291499", + "2289132804", + "2339109141", + "2536663091", + "3161295396" + ], + "abstract": "we present the design implementation and evaluation of conga a network based distributed congestion aware load balancing mechanism for datacenters conga exploits recent trends including the use of regular clos topologies and overlays for network virtualization it splits tcp flows into flowlets estimates real time congestion on fabric paths and allocates flowlets to paths based 
on feedback from remote switches this enables conga to efficiently balance load and seamlessly handle asymmetry without requiring any tcp modifications conga has been implemented in custom asics as part of a new datacenter fabric in testbed experiments conga has 5x better flow completion times than ecmp even with a single link failure and achieves 2 8x better throughput than mptcp in incast scenarios further the price of anarchy for conga is provably small in leaf spine topologies hence conga is nearly as effective as a centralized scheduler while being able to react to congestion in microseconds our main thesis is that datacenter fabric load balancing is best done in the network and requires global schemes such as conga to handle asymmetry", + "title_raw": "CONGA: distributed congestion-aware load balancing for datacenters", + "abstract_raw": "We present the design, implementation, and evaluation of CONGA, a network-based distributed congestion-aware load balancing mechanism for datacenters. CONGA exploits recent trends including the use of regular Clos topologies and overlays for network virtualization. It splits TCP flows into flowlets, estimates real-time congestion on fabric paths, and allocates flowlets to paths based on feedback from remote switches. This enables CONGA to efficiently balance load and seamlessly handle asymmetry, without requiring any TCP modifications. CONGA has been implemented in custom ASICs as part of a new datacenter fabric. In testbed experiments, CONGA has 5x better flow completion times than ECMP even with a single link failure and achieves 2-8x better throughput than MPTCP in Incast scenarios. Further, the Price of Anarchy for CONGA is provably small in Leaf-Spine topologies; hence CONGA is nearly as effective as a centralized scheduler while being able to react to congestion in microseconds. 
Our main thesis is that datacenter fabric load balancing is best done in the network, and requires global schemes such as CONGA to handle asymmetry.", + "link": "https://www.semanticscholar.org/paper/025652412d507a8cf98ecacd8a44d32ce28995e1", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2089455813", + "venue": "1140684652", + "year": "2014", + "title": "partitioned elias fano indexes", + "label": [ + "2778044989", + "2777644182", + "73555534", + "162319229", + "11413529", + "80444323" + ], + "author": [ + "2153528897", + "2277909408" + ], + "reference": [ + "84082900", + "84899576", + "1507039213", + "1524501441", + "1532325895", + "1559631118", + "1791987072", + "1965473122", + "1985136582", + "1987007212", + "1987700483", + "2022292926", + "2025690557", + "2043909051", + "2057223122", + "2058150901", + "2063540853", + "2137339254", + "2138662031", + "2140453381", + "2152437528", + "2154610494", + "2154615738", + "2156000104", + "2157238134", + "2263798363", + "2621280964", + "2889395214", + "3145128584" + ], + "abstract": "the elias fano representation of monotone sequences has been recently applied to the compression of inverted indexes showing excellent query performance thanks to its efficient random access and search operations while its space occupancy is competitive with some state of the art methods such as gamma delta golomb codes and pfordelta it fails to exploit the local clustering that inverted lists usually exhibit namely the presence of long subsequences of close identifiers in this paper we describe a new representation based on partitioning the list into chunks and encoding both the chunks and their endpoints with elias fano hence forming a two level data structure this partitioning enables the encoding to better adapt to the local statistics of the chunk thus exploiting clustering and improving compression we present two partition strategies respectively with fixed and variable length chunks for the latter case we 
introduce a linear time optimization algorithm which identifies the minimum space partition up to an arbitrarily small approximation factor we show that our partitioned elias fano indexes offer significantly better compression than plain elias fano while preserving their query time efficiency furthermore compared with other state of the art compressed encodings our indexes exhibit the best compression ratio query time trade off", + "title_raw": "Partitioned Elias-Fano indexes", + "abstract_raw": "The Elias-Fano representation of monotone sequences has been recently applied to the compression of inverted indexes, showing excellent query performance thanks to its efficient random access and search operations. While its space occupancy is competitive with some state-of-the-art methods such as gamma-delta-Golomb codes and PForDelta, it fails to exploit the local clustering that inverted lists usually exhibit, namely the presence of long subsequences of close identifiers. In this paper we describe a new representation based on partitioning the list into chunks and encoding both the chunks and their endpoints with Elias-Fano, hence forming a two-level data structure. This partitioning enables the encoding to better adapt to the local statistics of the chunk, thus exploiting clustering and improving compression. We present two partition strategies, respectively with fixed and variable-length chunks. For the latter case we introduce a linear-time optimization algorithm which identifies the minimum-space partition up to an arbitrarily small approximation factor. We show that our partitioned Elias-Fano indexes offer significantly better compression than plain Elias-Fano, while preserving their query time efficiency. 
Furthermore, compared with other state-of-the-art compressed encodings, our indexes exhibit the best compression ratio/query time trade-off.", + "link": "https://www.semanticscholar.org/paper/04ebe19c8edf2a7588005900e23b5fadbd6e357a", + "scraped_abstract": null, + "citation_best": 127 + }, + { + "paper": "319976220", + "venue": "1131589359", + "year": "2014", + "title": "concave switching in single and multihop networks", + "label": [ + "174809319", + "29736862", + "31258907", + "2778306792", + "158379750", + "22684755", + "49605964", + "108037233", + "157764524", + "119404949", + "160403385", + "86127852", + "120314980" + ], + "author": [ + "2119600739" + ], + "reference": [ + "614186738", + "1623587138", + "1681209285", + "1973403707", + "1987497363", + "1999102639", + "2001821940", + "2003346154", + "2003436900", + "2010530686", + "2015207483", + "2022335993", + "2026976591", + "2053491613", + "2094055697", + "2096752169", + "2097058227", + "2097540519", + "2099111195", + "2103468752", + "2105177639", + "2106896172", + "2108327442", + "2122029049", + "2128142727", + "2129160848", + "2147524058", + "2147703203", + "2148825261", + "2155529449", + "2161616140", + "2161809383", + "2166933924", + "2167370536", + "2168055141", + "2171625164", + "2907497633", + "3020151382", + "3048468702", + "3104524955", + "3105861207" + ], + "abstract": "switched queueing networks model wireless networks input queued switches and numerous other networked communications systems for single hop networks we consider a g switch policy which combines the maxweight policies with bandwidth sharing networks a further well studied model of internet congestion we prove the maximum stability property for this class of randomized policies thus these policies have the same first order behavior as the maxweight policies however for multihop networks some of these generalized polices address a number of critical weakness of the maxweight backpressure policies for multihop networks with fixed 
routing we consider the proportional scheduler or 1 log policy in this setting the backpressure policy is maximum stable but must maintain a queue for every route destination which typically grows rapidly with a network s size however this proportionally fair policy only needs to maintain a queue for each outgoing link which is typically bounded in number as is common with internet routing by maintaining per link queueing each node only needs to know the next hop for each packet and not its entire route further in contrast to backpressure the proportional scheduler does not compare downstream queue lengths to determine weights only local link information is required this leads to greater potential for decomposed implementations of the policy through a reduction argument and an entropy argument we demonstrate that whilst maintaining substantially less queueing overhead the proportional scheduler achieves maximum throughput stability", + "title_raw": "Concave switching in single and multihop networks", + "abstract_raw": "Switched queueing networks model wireless networks, input queued switches and numerous other networked communications systems. For single-hop networks, we consider a (\u03b1,g)-switch policy} which combines the MaxWeight policies with bandwidth sharing networks -- a further well studied model of Internet congestion. We prove the maximum stability property for this class of randomized policies. Thus these policies have the same first order behavior as the MaxWeight policies. However, for multihop networks some of these generalized polices address a number of critical weakness of the MaxWeight/BackPressure policies. For multihop networks with fixed routing, we consider the Proportional Scheduler (or (1,log)-policy). In this setting, the BackPressure policy is maximum stable, but must maintain a queue for every route-destination, which typically grows rapidly with a network's size. 
However, this proportionally fair policy only needs to maintain a queue for each outgoing link, which is typically bounded in number. As is common with Internet routing, by maintaining per-link queueing each node only needs to know the next hop for each packet and not its entire route. Further, in contrast to BackPressure, the Proportional Scheduler does not compare downstream queue lengths to determine weights, only local link information is required. This leads to greater potential for decomposed implementations of the policy. Through a reduction argument and an entropy argument, we demonstrate that, whilst maintaining substantially less queueing overhead, the Proportional Scheduler achieves maximum throughput stability.", + "link": "https://www.semanticscholar.org/paper/39a4f2a84089545f24e009d18152b79b63de5819", + "scraped_abstract": null, + "citation_best": 22 + }, + { + "paper": "2099102906", + "venue": "1175089206", + "year": "2014", + "title": "materialization optimizations for feature selection workloads", + "label": [ + "146206909", + "114289077", + "510870499", + "79158427", + "1668388", + "177264268", + "124101348", + "148483581", + "58489278" + ], + "author": [ + "2776066402", + "3108633171", + "2156135343" + ], + "reference": [ + "53188351", + "205574054", + "1525878169", + "1554944419", + "1619226191", + "1922017469", + "1934084512", + "2006808488", + "2011039300", + "2014830756", + "2016182223", + "2030811966", + "2032775418", + "2044102377", + "2044849727", + "2060791232", + "2061902728", + "2090850279", + "2091703646", + "2102458936", + "2113651538", + "2117293168", + "2119479037", + "2134204499", + "2136195851", + "2138440835", + "2138542364", + "2146635036", + "2157219191", + "2164278908", + "2167411378", + "2181574135", + "2181914161", + "2184623761", + "2406996511", + "2491859986", + "2798766386", + "2998216295", + "3098031065" + ], + "abstract": "there is an arms race in the data management industry to support analytics in which one critical 
step is feature selection the process of selecting a feature set that will be used to build a statistical model analytics is one of the biggest topics in data management and feature selection is widely regarded as the most critical step of analytics thus we argue that managing the feature selection process is a pressing data management challenge we study this challenge by describing a feature selection language and a supporting prototype system that builds on top of current industrial r integration layers from our interactions with analysts we learned that feature selection is an interactive human in the loop process which means that feature selection workloads are rife with reuse opportunities thus we study how to materialize portions of this computation using not only classical database materialization optimizations but also methods that have not previously been used in database optimization including structural decomposition methods like qr factorization and warmstart these new methods have no analog in traditional sql systems but they may be interesting for array and scientific database applications on a diverse set of data sets and programs we find that traditional database style approaches that ignore these new opportunities are more than two orders of magnitude slower than an optimal plan in this new tradeoff space across multiple r backends furthermore we show that it is possible to build a simple cost based optimizer to automatically select a near optimal execution plan for feature selection", + "title_raw": "Materialization optimizations for feature selection workloads", + "abstract_raw": "There is an arms race in the data management industry to support analytics, in which one critical step is feature selection, the process of selecting a feature set that will be used to build a statistical model. 
Analytics is one of the biggest topics in data management, and feature selection is widely regarded as the most critical step of analytics; thus, we argue that managing the feature selection process is a pressing data management challenge. We study this challenge by describing a feature-selection language and a supporting prototype system that builds on top of current industrial, R-integration layers. From our interactions with analysts, we learned that feature selection is an interactive, human-in-the-loop process, which means that feature selection workloads are rife with reuse opportunities. Thus, we study how to materialize portions of this computation using not only classical database materialization optimizations but also methods that have not previously been used in database optimization, including structural decomposition methods (like QR factorization) and warmstart. These new methods have no analog in traditional SQL systems, but they may be interesting for array and scientific database applications. On a diverse set of data sets and programs, we find that traditional database-style approaches that ignore these new opportunities are more than two orders of magnitude slower than an optimal plan in this new tradeoff space across multiple R-backends. 
Furthermore, we show that it is possible to build a simple cost-based optimizer to automatically select a near-optimal execution plan for feature selection.", + "link": "https://www.semanticscholar.org/paper/92e87ab1cb445b93b900007d7e6fdeee4eda07cc", + "scraped_abstract": null, + "citation_best": 111 + }, + { + "paper": "2106941316", + "venue": "1166315290", + "year": "2014", + "title": "sensing techniques for tablet stylus interaction", + "label": [ + "164086593", + "107457646", + "9095184", + "207347870", + "121684516" + ], + "author": [ + "1560725665", + "1856233051", + "1886754024", + "1569396467", + "2289381742", + "213523633", + "2108826813", + "39539933", + "2889300199", + "2105571773" + ], + "reference": [ + "152129288", + "153256627", + "600399566", + "1514950410", + "1528943550", + "1576835806", + "1644895716", + "1965786395", + "1984575025", + "1985594972", + "1987220739", + "1992306856", + "1992326382", + "2003785947", + "2010813084", + "2022401626", + "2028716639", + "2038304148", + "2070885641", + "2077071444", + "2082743418", + "2104268896", + "2108147168", + "2109075207", + "2109830222", + "2111559205", + "2113628568", + "2116659773", + "2116691011", + "2119036170", + "2122424724", + "2123597365", + "2125147351", + "2125551414", + "2125691646", + "2126390890", + "2130306162", + "2139257564", + "2140706899", + "2144800932", + "2152153632", + "2155107494", + "2158272724", + "2161649348", + "2165149861", + "2169435375", + "2169732913", + "2407090010" + ], + "abstract": "we explore grip and motion sensing to afford new techniques that leverage how users naturally manipulate tablet and stylus devices during pen touch interaction we can detect whether the user holds the pen in a writing grip or tucked between his fingers we can distinguish bare handed inputs such as drag and pinch gestures produced by the nonpreferred hand from touch gestures produced by the hand holding the pen which necessarily impart a detectable motion signal to the stylus we can 
sense which hand grips the tablet and determine the screen s relative orientation to the pen by selectively combining these signals and using them to complement one another we can tailor interaction to the context such as by ignoring unintentional touch inputs while writing or supporting contextually appropriate tools such as a magnifier for detailed stroke work that appears when the user pinches with the pen tucked between his fingers these and other techniques can be used to impart new previously unanticipated subtleties to pen touch interaction on tablets", + "title_raw": "Sensing techniques for tablet+stylus interaction", + "abstract_raw": "We explore grip and motion sensing to afford new techniques that leverage how users naturally manipulate tablet and stylus devices during pen + touch interaction. We can detect whether the user holds the pen in a writing grip or tucked between his fingers. We can distinguish bare-handed inputs, such as drag and pinch gestures produced by the nonpreferred hand, from touch gestures produced by the hand holding the pen, which necessarily impart a detectable motion signal to the stylus. We can sense which hand grips the tablet, and determine the screen's relative orientation to the pen. By selectively combining these signals and using them to complement one another, we can tailor interaction to the context, such as by ignoring unintentional touch inputs while writing, or supporting contextually-appropriate tools such as a magnifier for detailed stroke work that appears when the user pinches with the pen tucked between his fingers. 
These and other techniques can be used to impart new, previously unanticipated subtleties to pen + touch interaction on tablets.", + "link": "https://www.semanticscholar.org/paper/f367dc182255c15343a0ce4c841d4d3598d09a23", + "scraped_abstract": null, + "citation_best": 64 + }, + { + "paper": "2028953510", + "venue": "1166315290", + "year": "2014", + "title": "expert crowdsourcing with flash teams", + "label": [ + "502989409", + "22414024", + "107457646", + "101468663", + "43521106", + "62230096" + ], + "author": [ + "2226569051", + "2230696987", + "2343034608", + "2052517607", + "2476187791", + "2120209517", + "1983424165", + "2498833676", + "1974803209" + ], + "reference": [ + "117998471", + "607150617", + "1667362966", + "1968326021", + "1972253352", + "1983508569", + "1985816302", + "1988511622", + "1989771824", + "2009976595", + "2017737164", + "2032843080", + "2041406299", + "2042061650", + "2050491563", + "2053859636", + "2055800664", + "2058556535", + "2059105030", + "2073338313", + "2080909714", + "2090048052", + "2096848877", + "2100922849", + "2108960318", + "2111298664", + "2112390589", + "2120396827", + "2127008633", + "2127031179", + "2133485007", + "2136240258", + "2138847321", + "2163986367", + "2164529201", + "2166145477", + "2167896490", + "2168290613", + "2168765606", + "2336115439", + "2402164836", + "2614710588", + "2911311425", + "3121257585" + ], + "abstract": "we introduce flash teams a framework for dynamically assembling and managing paid experts from the crowd flash teams advance a vision of expert crowd work that accomplishes complex interdependent goals such as engineering and design these teams consist of sequences of linked modular tasks and handoffs that can be computationally managed interactive systems reason about and manipulate these teams structures for example flash teams can be recombined to form larger organizations and authored automatically in response to a user s request flash teams can also hire more people elastically in 
reaction to task needs and pipeline intermediate output to accelerate completion times to enable flash teams we present foundry an end user authoring platform and runtime manager foundry allows users to author modular tasks then manages teams through handoffs of intermediate work we demonstrate that foundry and flash teams enable crowdsourcing of a broad class of goals including design prototyping course development and film animation in half the work time of traditional self managed teams", + "title_raw": "Expert crowdsourcing with flash teams", + "abstract_raw": "We introduce flash teams, a framework for dynamically assembling and managing paid experts from the crowd. Flash teams advance a vision of expert crowd work that accomplishes complex, interdependent goals such as engineering and design. These teams consist of sequences of linked modular tasks and handoffs that can be computationally managed. Interactive systems reason about and manipulate these teams' structures: for example, flash teams can be recombined to form larger organizations and authored automatically in response to a user's request. Flash teams can also hire more people elastically in reaction to task needs, and pipeline intermediate output to accelerate completion times. To enable flash teams, we present Foundry, an end-user authoring platform and runtime manager. Foundry allows users to author modular tasks, then manages teams through handoffs of intermediate work. 
We demonstrate that Foundry and flash teams enable crowdsourcing of a broad class of goals including design prototyping, course development, and film animation, in half the work time of traditional self-managed teams.", + "link": "https://www.semanticscholar.org/paper/ff48b661bd15089c1e0bd58bd9b708234113d35e", + "scraped_abstract": null, + "citation_best": 215 + }, + { + "paper": "2144575742", + "venue": "1166315290", + "year": "2014", + "title": "printscreen fabricating highly customizable thin film touch displays", + "label": [ + "25435620", + "78201319", + "172195944", + "150594956", + "121684516" + ], + "author": [ + "2003629216", + "2227324277", + "2098177638" + ], + "reference": [ + "562977455", + "1570857225", + "1887892688", + "1984740061", + "1988982607", + "2005198142", + "2011422734", + "2014137027", + "2022884931", + "2023513788", + "2040565638", + "2048912286", + "2054044780", + "2070583222", + "2071812850", + "2089787431", + "2096988419", + "2103339808", + "2123139789", + "2126138368", + "2131693996", + "2134249793", + "2147326417", + "2162286072", + "2163128030", + "2164219949", + "2164901194", + "2170342776", + "2171849507", + "2209868569", + "2462847126" + ], + "abstract": "printscreen is an enabling technology for digital fabrication of customized flexible displays using thin film electroluminescence tfel it enables inexpensive and rapid fabrication of highly customized displays in low volume in a simple lab environment print shop or even at home we show how to print ultra thin 120 m segmented and passive matrix displays in greyscale or multi color on a variety of deformable and rigid substrate materials including pet film office paper leather metal stone and wood the displays can have custom unconventional 2d shapes and can be bent rolled and folded to create 3d shapes we contribute a systematic overview of graphical display primitives for customized displays and show how to integrate them with static print and printed electronics furthermore we 
contribute a sensing framework which leverages the display itself for touch sensing to demonstrate the wide applicability of printscreen we present application examples from ubiquitous mobile and wearable computing", + "title_raw": "PrintScreen: fabricating highly customizable thin-film touch-displays", + "abstract_raw": "PrintScreen is an enabling technology for digital fabrication of customized flexible displays using thin-film electroluminescence (TFEL). It enables inexpensive and rapid fabrication of highly customized displays in low volume, in a simple lab environment, print shop or even at home. We show how to print ultra-thin (120 \u00b5m) segmented and passive matrix displays in greyscale or multi-color on a variety of deformable and rigid substrate materials, including PET film, office paper, leather, metal, stone, and wood. The displays can have custom, unconventional 2D shapes and can be bent, rolled and folded to create 3D shapes. We contribute a systematic overview of graphical display primitives for customized displays and show how to integrate them with static print and printed electronics. Furthermore, we contribute a sensing framework, which leverages the display itself for touch sensing. 
To demonstrate the wide applicability of PrintScreen, we present application examples from ubiquitous, mobile and wearable computing.", + "link": "https://www.semanticscholar.org/paper/d2386fc1e119b06eeeb353edc52b282103c4d076", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2243512312", + "venue": "1133523790", + "year": "2013", + "title": "the uncracked pieces in database cracking", + "label": [ + "166489589", + "75165309", + "77088390", + "124101348", + "108094655" + ], + "author": [ + "215310611", + "2114175654", + "2149801571" + ], + "reference": [ + "95569466", + "1970298499", + "1990965815", + "2030062409", + "2031227655", + "2075018787", + "2096229429", + "2110526107", + "2137156898", + "2151181047", + "2151224499", + "2155595639", + "2160404300", + "2167631575" + ], + "abstract": "database cracking has been an area of active research in recent years the core idea of database cracking is to create indexes adaptively and incrementally as a side product of query processing several works have proposed different cracking techniques for different aspects including updates tuple reconstruction convergence concurrency control and robustness however there is a lack of any comparative study of these different methods by an independent group in this paper we conduct an experimental study on database cracking our goal is to critically review several aspects identify the potential and propose promising directions in database cracking with this study we hope to expand the scope of database cracking and possibly leverage cracking in database engines other than monetdb we repeat several prior database cracking works including the core cracking algorithms as well as three other works on convergence hybrid cracking tuple reconstruction sideways cracking and robustness stochastic cracking respectively we evaluate these works and show possible directions to do even better we further test cracking under a variety of experimental settings including high 
selectivity queries low selectivity queries and multiple query access patterns finally we compare cracking against different sorting algorithms as well as against different main memory optimised indexes including the recently proposed adaptive radix tree art our results show that i the previously proposed cracking algorithms are repeatable ii there is still enough room to significantly improve the previously proposed cracking algorithms iii cracking depends heavily on query selectivity iv cracking needs to catch up with modern indexing trends and v different indexing algorithms have different indexing signatures", + "title_raw": "The uncracked pieces in database cracking", + "abstract_raw": "Database cracking has been an area of active research in recent years. The core idea of database cracking is to create indexes adaptively and incrementally as a side-product of query processing. Several works have proposed different cracking techniques for different aspects including updates, tuple-reconstruction, convergence, concurrency-control, and robustness. However, there is a lack of any comparative study of these different methods by an independent group. In this paper, we conduct an experimental study on database cracking. Our goal is to critically review several aspects, identify the potential, and propose promising directions in database cracking. With this study, we hope to expand the scope of database cracking and possibly leverage cracking in database engines other than MonetDB.\r\n\r\nWe repeat several prior database cracking works including the core cracking algorithms as well as three other works on convergence (hybrid cracking), tuple-reconstruction (sideways cracking), and robustness (stochastic cracking) respectively. We evaluate these works and show possible directions to do even better. We further test cracking under a variety of experimental settings, including high selectivity queries, low selectivity queries, and multiple query access patterns. 
Finally, we compare cracking against different sorting algorithms as well as against different main-memory optimised indexes, including the recently proposed Adaptive Radix Tree (ART). Our results show that: (i) the previously proposed cracking algorithms are repeatable, (ii) there is still enough room to significantly improve the previously proposed cracking algorithms, (iii) cracking depends heavily on query selectivity, (iv) cracking needs to catch up with modern indexing trends, and (v) different indexing algorithms have different indexing signatures.", + "link": "https://www.semanticscholar.org/paper/e363389d4c6cefa95b83f6433fffa8c0a5771a48", + "scraped_abstract": null, + "citation_best": 66 + }, + { + "paper": "1597017619", + "venue": "1133523790", + "year": "2014", + "title": "epic an extensible and scalable system for processing big data", + "label": [ + "75684735", + "2780870223", + "2781252014", + "150495011", + "9476365", + "173608175", + "40207289", + "138958017" + ], + "author": [ + "2119308123", + "2608476590", + "2037466936", + "2142476784", + "2117443739" + ], + "reference": [ + "1560473542", + "1854214752", + "1969970763", + "1973001156", + "1997020216", + "2000484009", + "2010279913", + "2013344760", + "2037168816", + "2052312648", + "2054693333", + "2060233189", + "2060280513", + "2083854694", + "2096765155", + "2100830825", + "2108510805", + "2110086534", + "2114303224", + "2119745055", + "2125520775", + "2125775320", + "2129603526", + "2131062488", + "2131975293", + "2134984950", + "2139445852", + "2140613126", + "2154894831", + "2160660844", + "2167331726", + "2170616854", + "2173213060", + "2200275386", + "2296349066", + "3138367763" + ], + "abstract": "the big data problem is characterized by the so called 3v features volume a huge amount of data velocity a high data ingestion rate and variety a mix of structured data semi structured data and unstructured data the state of the art solutions to the big data problem are largely based on the 
mapreduce framework aka its open source implementation hadoop although hadoop handles the data volume challenge successfully it does not deal with the data variety well since the programming interfaces and its associated data processing model is inconvenient and inefficient for handling structured data and graph data this paper presents epic an extensible system to tackle the big data s data variety challenge epic introduces a general actor like concurrent programming model independent of the data processing models for specifying parallel computations users process multi structured datasets with appropriate epic extensions the implementation of a data processing model best suited for the data type and auxiliary code for mapping that data processing model into epic s concurrent programming model like hadoop programs written in this way can be automatically parallelized and the runtime system takes care of fault tolerance and inter machine communications we present the design and implementation of epic s concurrent programming model we also present two customized data processing model an optimized mapreduce extension and a relational model on top of epic experiments demonstrate the effectiveness and efficiency of our proposed epic", + "title_raw": "epiC: an extensible and scalable system for processing big data", + "abstract_raw": "The Big Data problem is characterized by the so called 3V features: Volume - a huge amount of data, Velocity - a high data ingestion rate, and Variety - a mix of structured data, semi-structured data, and unstructured data. The state-of-the-art solutions to the Big Data problem are largely based on the MapReduce framework (aka its open source implementation Hadoop). 
Although Hadoop handles the data volume challenge successfully, it does not deal with the data variety well since the programming interfaces and its associated data processing model is inconvenient and inefficient for handling structured data and graph data.\r\n\r\nThis paper presents epiC, an extensible system to tackle the Big Data's data variety challenge. epiC introduces a general Actor-like concurrent programming model, independent of the data processing models, for specifying parallel computations. Users process multi-structured datasets with appropriate epiC extensions, the implementation of a data processing model best suited for the data type and auxiliary code for mapping that data processing model into epiC's concurrent programming model. Like Hadoop, programs written in this way can be automatically parallelized and the runtime system takes care of fault tolerance and inter-machine communications. We present the design and implementation of epiC's concurrent programming model. We also present two customized data processing model, an optimized MapReduce extension and a relational model, on top of epiC. 
Experiments demonstrate the effectiveness and efficiency of our proposed epiC.", + "link": "https://www.semanticscholar.org/paper/1a84663b77db30199e08640c01f4c18fd3995fe7", + "scraped_abstract": null, + "citation_best": 13 + }, + { + "paper": "2294510862", + "venue": "1133523790", + "year": "2014", + "title": "m4 a visualization oriented time series data aggregation", + "label": [ + "70518039", + "36464697", + "124101348", + "5655090" + ], + "author": [ + "2585713541", + "34755490", + "2122157342", + "1963964406" + ], + "reference": [ + "172626738", + "1488984669", + "1489091441", + "1491931038", + "1967295774", + "1969877208", + "1981934656", + "1988313157", + "2001103857", + "2025970444", + "2026346585", + "2040148652", + "2043097023", + "2063579540", + "2064784803", + "2081028405", + "2106595237", + "2122430112", + "2133665775", + "2142265837", + "2143545446", + "2167703429", + "2171330332", + "2182246517", + "2913251172", + "2917732394" + ], + "abstract": "visual analysis of high volume time series data is ubiquitous in many industries including finance banking and discrete manufacturing contemporary rdbms based systems for visualization of high volume time series data have difficulty to cope with the hard latency requirements and high ingestion rates of interactive visualizations existing solutions for lowering the volume of time series data disregard the semantics of visualizations and result in visualization errors in this work we introduce m4 an aggregation based time series dimensionality reduction technique that provides error free visualizations at high data reduction rates focusing on line charts as the predominant form of time series visualization we explain in detail the drawbacks of existing data reduction techniques and how our approach outperforms state of the art by respecting the process of line rasterization we describe how to incorporate aggregation based dimensionality reduction at the query level in a visualization driven query rewriting 
system our approach is generic and applicable to any visualization system that uses an rdbms as data source using real world data sets from high tech manufacturing stock markets and sports analytics domains we demonstrate that our visualization oriented data aggregation can reduce data volumes by up to two orders of magnitude while preserving perfect visualizations", + "title_raw": "M4: a visualization-oriented time series data aggregation", + "abstract_raw": "Visual analysis of high-volume time series data is ubiquitous in many industries, including finance, banking, and discrete manufacturing. Contemporary, RDBMS-based systems for visualization of high-volume time series data have difficulty to cope with the hard latency requirements and high ingestion rates of interactive visualizations. Existing solutions for lowering the volume of time series data disregard the semantics of visualizations and result in visualization errors.\r\n\r\nIn this work, we introduce M4, an aggregation-based time series dimensionality reduction technique that provides error-free visualizations at high data reduction rates. Focusing on line charts, as the predominant form of time series visualization, we explain in detail the drawbacks of existing data reduction techniques and how our approach outperforms state of the art, by respecting the process of line rasterization.\r\n\r\nWe describe how to incorporate aggregation-based dimensionality reduction at the query level in a visualization-driven query rewriting system. Our approach is generic and applicable to any visualization system that uses an RDBMS as data source. 
Using real world data sets from high tech manufacturing, stock markets, and sports analytics domains we demonstrate that our visualization-oriented data aggregation can reduce data volumes by up to two orders of magnitude, while preserving perfect visualizations.", + "link": "https://www.semanticscholar.org/paper/73a1116905643fad65c242c9f43e6b7fcc6b3aad", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2797202077", + "venue": "1175089206", + "year": "2018", + "title": "building efficient query engines in a high level language", + "label": [ + "100850083", + "133162039", + "109701466", + "169590947", + "137955351", + "199360897", + "135689500", + "16963264", + "19024347" + ], + "author": [ + "306879863", + "2226461695", + "2131931065" + ], + "reference": [ + "116654777", + "247055860", + "294835284", + "834377638", + "1491178396", + "1493893823", + "1506948396", + "1556604985", + "1557658176", + "1605782097", + "1650987719", + "1966981171", + "1969460277", + "1969877208", + "1970165929", + "1980590442", + "1981780124", + "1984005572", + "2004711550", + "2005112390", + "2008070495", + "2016943912", + "2026049208", + "2038412523", + "2039877226", + "2064514251", + "2074489032", + "2081192312", + "2081418732", + "2083355374", + "2086658561", + "2088675571", + "2093793584", + "2094455839", + "2100444372", + "2100542992", + "2101000001", + "2104242181", + "2105079611", + "2106771621", + "2106911865", + "2108290652", + "2110629752", + "2112491581", + "2112866468", + "2116094274", + "2117387696", + "2118578267", + "2119871735", + "2122315991", + "2122858224", + "2130642985", + "2135144788", + "2137259192", + "2138512691", + "2143932292", + "2148167495", + "2150101804", + "2150708135", + "2153185479", + "2153876009", + "2154697693", + "2155028447", + "2155070484", + "2156000708", + "2158543254", + "2159726625", + "2161818013", + "2185470498", + "2189465200", + "2217945669", + "2262876226", + "2266823300", + "2295831329", + "2340838390", + "2397097813", 
+ "2399620389", + "2404114915", + "2524165724", + "2565050701", + "2572804059", + "2584555500", + "2765206444", + "2962773216", + "3138135046", + "3139669840" + ], + "abstract": "abstraction without regret refers to the vision of using high level programming languages for systems development without experiencing a negative impact on performance a database system designed according to this vision offers both increased productivity and high performance instead of sacrificing the former for the latter as is the case with existing monolithic implementations that are hard to maintain and extend in this article we realize this vision in the domain of analytical query processing we present legobase a query engine written in the high level programming language scala the key technique to regain efficiency is to apply generative programming legobase performs source to source compilation and optimizes database systems code by converting the high level scala code to specialized low level c code we show how generative programming allows to easily implement a wide spectrum of optimizations such as introducing data partitioning or switching from a row to a column data layout which are difficult to achieve with existing low level query compilers that handle only queries we demonstrate that sufficiently powerful abstractions are essential for dealing with the complexity of the optimization effort shielding developers from compiler internals and decoupling individual optimizations from each other we evaluate our approach with the tpc h benchmark and show that a with all optimizations enabled our architecture significantly outperforms a commercial in memory database as well as an existing query compiler b programmers need to provide just a few hundred lines of high level code for implementing the optimizations instead of complicated low level code that is required by existing query compilation approaches c these optimizations may potentially come at the cost of using more system 
memory for improved performance d the compilation overhead is low compared to the overall execution time thus making our approach usable in practice for compiling query engines", + "title_raw": "Building Efficient Query Engines in a High-Level Language", + "abstract_raw": "Abstraction without regret refers to the vision of using high-level programming languages for systems development without experiencing a negative impact on performance. A database system designed according to this vision offers both increased productivity and high performance instead of sacrificing the former for the latter as is the case with existing, monolithic implementations that are hard to maintain and extend. In this article, we realize this vision in the domain of analytical query processing. We present LegoBase, a query engine written in the high-level programming language Scala. The key technique to regain efficiency is to apply generative programming: LegoBase performs source-to-source compilation and optimizes database systems code by converting the high-level Scala code to specialized, low-level C code. We show how generative programming allows to easily implement a wide spectrum of optimizations, such as introducing data partitioning or switching from a row to a column data layout, which are difficult to achieve with existing low-level query compilers that handle only queries. We demonstrate that sufficiently powerful abstractions are essential for dealing with the complexity of the optimization effort, shielding developers from compiler internals and decoupling individual optimizations from each other. We evaluate our approach with the TPC-H benchmark and show that (a) with all optimizations enabled, our architecture significantly outperforms a commercial in-memory database as well as an existing query compiler. 
(b) Programmers need to provide just a few hundred lines of high-level code for implementing the optimizations, instead of complicated low-level code that is required by existing query compilation approaches. (c) These optimizations may potentially come at the cost of using more system memory for improved performance. (d) The compilation overhead is low compared to the overall execution time, thus making our approach usable in practice for compiling query engines.", + "link": "https://www.semanticscholar.org/paper/2e68170fbe4af2231c12410829425eaa3fbfe476", + "scraped_abstract": null, + "citation_best": 100 + }, + { + "paper": "2229053133", + "venue": "1133523790", + "year": "2014", + "title": "on k path covers and their applications", + "label": [ + "146380142", + "119322782", + "162319229", + "80444323" + ], + "author": [ + "2149705402", + "2131441961", + "1877039390" + ], + "reference": [ + "30435042", + "83351988", + "85521454", + "122384246", + "282961531", + "833447691", + "1904636577", + "1920501755", + "2022671649", + "2029538739", + "2052790720", + "2055721472", + "2072485026", + "2083019227", + "2112513979", + "2120358419", + "2126021606", + "2137118456", + "2143083884", + "2262197264" + ], + "abstract": "for a directed graph g with vertex set v we call a subset c v a k all path cover if c contains a node from any path consisting of k nodes this paper considers the problem of constructing small k path covers in the context of road networks with millions of nodes and edges in many application scenarios the set c and its induced overlay graph constitute a very compact synopsis of g which is the basis for the currently fastest data structure for personalized shortest path queries visually pleasing overlays of subsampled paths and efficient reporting retrieval and aggregation of associated data in spatial network databases apart from a theoretical investigation of the problem we provide efficient algorithms that produce very small k path covers for large real 
world road networks with a posteriori guarantees via instance based lower bounds", + "title_raw": "On k-Path Covers and their applications", + "abstract_raw": "For a directed graph G with vertex set V we call a subset C \u2286 V a k-(All-)Path Cover if C contains a node from any path consisting of k nodes. This paper considers the problem of constructing small k-Path Covers in the context of road networks with millions of nodes and edges. In many application scenarios the set C and its induced overlay graph constitute a very compact synopsis of G which is the basis for the currently fastest data structure for personalized shortest path queries, visually pleasing overlays of subsampled paths, and efficient reporting, retrieval and aggregation of associated data in spatial network databases. Apart from a theoretical investigation of the problem, we provide efficient algorithms that produce very small k-Path Covers for large real-world road networks (with a posteriori guarantees via instance-based lower bounds).", + "link": "https://www.semanticscholar.org/paper/438ef7fd297571ded07e9eff98f68d834ada8a1f", + "scraped_abstract": null, + "citation_best": 52 + }, + { + "paper": "2060170830", + "venue": "1135342153", + "year": "2014", + "title": "efficient estimation for high similarities using odd sketches", + "label": [ + "89604369", + "21569690", + "203519979", + "177264268", + "193524817", + "80444323" + ], + "author": [ + "1988080645", + "1864519460", + "2162985535" + ], + "reference": [ + "107173025", + "1595409123", + "1785933978", + "1983704604", + "1984630650", + "2029852131", + "2048779798", + "2053377618", + "2061980234", + "2081193615", + "2085922539", + "2102221597", + "2113853816", + "2123427850", + "2126907894", + "2132069633", + "2140431670", + "2145349611", + "2152228468", + "2152565070", + "2320051939" + ], + "abstract": "estimating set similarity is a central problem in many computer applications in this paper we introduce the odd sketch a compact binary 
sketch for estimating the jaccard similarity of two sets the exclusive or of two sketches equals the sketch of the symmetric difference of the two sets this means that odd sketches provide a highly space efficient estimator for sets of high similarity which is relevant in applications such as web duplicate detection collaborative filtering and association rule learning the method extends to weighted jaccard similarity relevant e g for tf idf vector comparison we present a theoretical analysis of the quality of estimation to guarantee the reliability of odd sketch based estimators our experiments confirm this efficiency and demonstrate the efficiency of odd sketches in comparison with b bit minwise hashing schemes on association rule learning and web duplicate detection tasks", + "title_raw": "Efficient estimation for high similarities using odd sketches", + "abstract_raw": "Estimating set similarity is a central problem in many computer applications. In this paper we introduce the Odd Sketch, a compact binary sketch for estimating the Jaccard similarity of two sets. The exclusive-or of two sketches equals the sketch of the symmetric difference of the two sets. This means that Odd Sketches provide a highly space-efficient estimator for sets of high similarity, which is relevant in applications such as web duplicate detection, collaborative filtering, and association rule learning. The method extends to weighted Jaccard similarity, relevant e.g. for TF-IDF vector comparison. We present a theoretical analysis of the quality of estimation to guarantee the reliability of Odd Sketch-based estimators. 
Our experiments confirm this efficiency, and demonstrate the efficiency of Odd Sketches in comparison with $b$-bit minwise hashing schemes on association rule learning and web duplicate detection tasks.", + "link": "https://www.semanticscholar.org/paper/b8e7a87a72ad802643d7ffb57efdd59c5a2492cd", + "scraped_abstract": null, + "citation_best": 63 + }, + { + "paper": "2186424956", + "venue": "1184914352", + "year": "2013", + "title": "smile shuffled multiple instance learning", + "label": [ + "119857082", + "58973888", + "150921843", + "136389625", + "178980831", + "95623464" + ], + "author": [ + "2468618410", + "2440006205" + ], + "reference": [ + "24402856", + "1524926518", + "1534519302", + "1535599202", + "1540386283", + "1550821944", + "1557325668", + "1560331282", + "1929873255", + "2009074768", + "2101234009", + "2108745803", + "2110119381", + "2112076978", + "2128678390", + "2133288557", + "2162685317", + "2163474322", + "2426031434", + "2570764145", + "2912934387" + ], + "abstract": "resampling techniques such as bagging are often used in supervised learning to produce more accurate classifiers in this work we show that multiple instance learning admits a different form of resampling which we call shuffling in shuffling we resample instances in such a way that the resulting bags are likely to be correctly labeled we show that resampling results in both a reduction of bag label noise and a propagation of additional informative constraints to a multiple instance classifier we empirically evaluate shuffling in the context of multiple instance classification and multiple instance active learning and show that the approach leads to significant improvements in accuracy", + "title_raw": "SMILe: shuffled multiple-instance learning", + "abstract_raw": "Resampling techniques such as bagging are often used in supervised learning to produce more accurate classifiers. 
In this work, we show that multiple-instance learning admits a different form of resampling, which we call \"shuffling.\" In shuffling, we resample instances in such a way that the resulting bags are likely to be correctly labeled. We show that resampling results in both a reduction of bag label noise and a propagation of additional informative constraints to a multiple-instance classifier. We empirically evaluate shuffling in the context of multiple-instance classification and multiple-instance active learning and show that the approach leads to significant improvements in accuracy.", + "link": "https://scholar.google.com/scholar?as_q=&num=10&btnG=Search+Scholar&as_epq=SMILe:+Shuffled+Multiple-Instance+Learning&as_oq=&as_eq=&as_occt=any&as_sauthors=Doran", + "scraped_abstract": null, + "citation_best": 3 + }, + { + "paper": "2215753997", + "venue": "1184914352", + "year": "2013", + "title": "hc search learning heuristics and cost functions for structured prediction", + "label": [ + "119857082", + "100853971", + "184783062", + "137955351", + "124101348", + "22367795", + "50817715", + "127705205" + ], + "author": [ + "2074111126", + "2139785505", + "1993564419" + ], + "reference": [ + "225641137", + "1499397922", + "1564663916", + "2001587401", + "2041615247", + "2097191079", + "2105644991", + "2117224769", + "2125879217", + "2130325067", + "2142641780", + "2146022472", + "2146140624", + "2147880316", + "2150821861", + "2152966212", + "2153846128", + "2160218441", + "2169961240", + "2187805056", + "2429914308", + "2884843966", + "2949600092", + "2949654875", + "2962957031", + "3021452258" + ], + "abstract": "structured prediction is the problem of learning a function from structured inputs to structured outputs inspired by the recent successes of search based structured prediction we introduce a new framework for structured prediction called hc search given a structured input the framework uses a search procedure guided by a learned heuristic h to uncover high 
quality candidate outputs and then uses a separate learned cost function c to select a final prediction among those outputs we can decompose the regret of the overall approach into the loss due to h not leading to high quality outputs and the loss due to c not selecting the best among the generated outputs guided by this decomposition we minimize the overall regret in a greedy stagewise manner by first training h to quickly uncover high quality outputs via imitation learning and then training c to correctly rank the outputs generated via h according to their true losses experiments on several benchmark domains show that our approach significantly outperforms the state of the art methods", + "title_raw": "HC-search: learning heuristics and cost functions for structured prediction", + "abstract_raw": "Structured prediction is the problem of learning a function from structured inputs to structured outputs. Inspired by the recent successes of search-based structured prediction, we introduce a new framework for structured prediction called HC-Search. Given a structured input, the framework uses a search procedure guided by a learned heuristic H to uncover high quality candidate outputs and then uses a separate learned cost function C to select a final prediction among those outputs. We can decompose the regret of the overall approach into the loss due to H not leading to high quality outputs, and the loss due to C not selecting the best among the generated outputs. Guided by this decomposition, we minimize the overall regret in a greedy stagewise manner by first training H to quickly uncover high quality outputs via imitation learning, and then training C to correctly rank the outputs generated via H according to their true losses. 
Experiments on several benchmark domains show that our approach significantly outperforms the state-of-the-art methods.", + "link": "https://www.semanticscholar.org/paper/73d38c70efb076bed6a218b8290ef748cd738dda", + "scraped_abstract": null, + "citation_best": 12 + }, + { + "paper": "2136985729", + "venue": "1188739475", + "year": "2013", + "title": "grounded language learning from video described with sentences", + "label": [ + "5366617", + "2776397901", + "121934690", + "204321447", + "61249035" + ], + "author": [ + "2150512450", + "273196668" + ], + "reference": [ + "55357602", + "1481820510", + "1486632395", + "1557379068", + "1575431606", + "1910567995", + "1967956032", + "1987835821", + "1991133427", + "2002906034", + "2007321142", + "2036989445", + "2063153269", + "2066134726", + "2086699924", + "2109586012", + "2119232785", + "2122683098", + "2146221819", + "2149172860", + "2152239535", + "2154764394", + "2156050092", + "2160783091", + "2168356304", + "2963811219", + "2963882196" + ], + "abstract": "we present a method that learns representations for word meanings from short video clips paired with sentences unlike prior work on learning language from symbolic input our input consists of video of people interacting with multiple complex objects in outdoor environments unlike prior computer vision approaches that learn from videos with verb labels or images with noun labels our labels are sentences containing nouns verbs prepositions adjectives and adverbs the correspondence between words and concepts in the video is learned in an unsupervised fashion even when the video depicts simultaneous events described by multiple sentences or when different aspects of a single event are described with multiple sentences the learned word meanings can be subsequently used to automatically generate description of new video", + "title_raw": "Grounded Language Learning from Video Described with Sentences", + "abstract_raw": "We present a method that learns representations 
for word meanings from short video clips paired with sentences. Unlike prior work on learning language from symbolic input, our input consists of video of people interacting with multiple complex objects in outdoor environments. Unlike prior computer-vision approaches that learn from videos with verb labels or images with noun labels, our labels are sentences containing nouns, verbs, prepositions, adjectives, and adverbs. The correspondence between words and concepts in the video is learned in an unsupervised fashion, even when the video depicts simultaneous events described by multiple sentences or when different aspects of a single event are described with multiple sentences. The learned word meanings can be subsequently used to automatically generate description of new video.", + "link": "https://www.semanticscholar.org/paper/96a0320ef14877038906947b684011cf7378c440", + "scraped_abstract": null, + "citation_best": 126 + }, + { + "paper": "2052209137", + "venue": "1163450153", + "year": "2013", + "title": "weighted graph comparison techniques for brain connectivity analysis", + "label": [ + "119857082", + "185578843", + "80444323" + ], + "author": [ + "2171099181", + "2100961798", + "1879266857", + "1672749942", + "2154968417" + ], + "reference": [ + "1498317890", + "1522840240", + "1524145331", + "1570995586", + "1897665750", + "1974829834", + "1984711392", + "1987924998", + "1997522225", + "1999653836", + "2006312249", + "2009650955", + "2011541551", + "2021750866", + "2022244667", + "2030246490", + "2033689731", + "2046870216", + "2059212462", + "2059312295", + "2063404606", + "2085394612", + "2096776015", + "2103497397", + "2110098655", + "2112938311", + "2116856566", + "2116938231", + "2118693417", + "2119026951", + "2119485467", + "2123097685", + "2126838454", + "2131181615", + "2131729872", + "2141355100", + "2157931324", + "2159683823", + "2159929956", + "2160137030", + "2161102393", + "2162896254", + "2166291291", + "2167790496", + "2168396492", + 
"2473032827" + ], + "abstract": "the analysis of brain connectivity is a vast field in neuroscience with a frequent use of visual representations and an increasing need for visual analysis tools based on an in depth literature review and interviews with neuroscientists we explore high level brain connectivity analysis tasks that need to be supported by dedicated visual analysis tools a significant example of such a task is the comparison of different connectivity data in the form of weighted graphs several approaches have been suggested for graph comparison within information visualization but the comparison of weighted graphs has not been addressed we explored the design space of applicable visual representations and present augmented adjacency matrix and node link visualizations to assess which representation best support weighted graph comparison tasks we performed a controlled experiment our findings suggest that matrices support these tasks well outperforming node link diagrams these results have significant implications for the design of brain connectivity analysis tools that require weighted graph comparisons they can also inform the design of visual analysis tools in other domains e g comparison of weighted social networks or biological pathways", + "title_raw": "Weighted graph comparison techniques for brain connectivity analysis", + "abstract_raw": "The analysis of brain connectivity is a vast field in neuroscience with a frequent use of visual representations and an increasing need for visual analysis tools. Based on an in-depth literature review and interviews with neuroscientists, we explore high-level brain connectivity analysis tasks that need to be supported by dedicated visual analysis tools. A significant example of such a task is the comparison of different connectivity data in the form of weighted graphs. 
Several approaches have been suggested for graph comparison within information visualization, but the comparison of weighted graphs has not been addressed. We explored the design space of applicable visual representations and present augmented adjacency matrix and node-link visualizations. To assess which representation best support weighted graph comparison tasks, we performed a controlled experiment. Our findings suggest that matrices support these tasks well, outperforming node-link diagrams. These results have significant implications for the design of brain connectivity analysis tools that require weighted graph comparisons. They can also inform the design of visual analysis tools in other domains, e.g. comparison of weighted social networks or biological pathways.", + "link": "https://www.semanticscholar.org/paper/1aca75eadea464667e1b68fa3250640c43df84f4", + "scraped_abstract": null, + "citation_best": 165 + }, + { + "paper": "2158892938", + "venue": "1163450153", + "year": "2013", + "title": "analyzing user generated youtube videos to understand touchscreen use by people with motor impairments", + "label": [ + "170130773", + "149229913", + "71901391", + "49774154", + "2778539339" + ], + "author": [ + "2097351642", + "2231378976", + "2165969493" + ], + "reference": [ + "248730495", + "1773876643", + "1968311814", + "1969780871", + "1974199094", + "1991032455", + "1995969295", + "2004195178", + "2004970449", + "2013207568", + "2020402781", + "2023655891", + "2035675819", + "2043991818", + "2062657340", + "2074510345", + "2076656908", + "2077057201", + "2079660385", + "2081623546", + "2095241246", + "2116208741", + "2125641213", + "2130917910", + "2132210633", + "2132527807", + "2133382749", + "2134836914", + "2136711221", + "2152414382", + "2157675004", + "2158130108", + "2168842329", + "2289998958" + ], + "abstract": "most work on the usability of touchscreen interaction for people with motor impairments has focused on lab studies with relatively few 
participants and small cross sections of the population to develop a richer characterization of use we turned to a previously untapped source of data youtube videos we collected and analyzed 187 non commercial videos uploaded to youtube that depicted a person with a physical disability interacting with a mainstream mobile touchscreen device we coded the videos along a range of dimensions to characterize the interaction the challenges encountered and the adaptations being adopted in daily use to complement the video data we also invited the video uploaders to complete a survey on their ongoing use of touchscreen technology our findings show that while many people with motor impairments find these devices empowering accessibility issues still exist in addition to providing implications for more accessible touchscreen design we reflect on the application of user generated content to study user interface design", + "title_raw": "Analyzing user-generated youtube videos to understand touchscreen use by people with motor impairments", + "abstract_raw": "Most work on the usability of touchscreen interaction for people with motor impairments has focused on lab studies with relatively few participants and small cross-sections of the population. To develop a richer characterization of use, we turned to a previously untapped source of data: YouTube videos. We collected and analyzed 187 non-commercial videos uploaded to YouTube that depicted a person with a physical disability interacting with a mainstream mobile touchscreen device. We coded the videos along a range of dimensions to characterize the interaction, the challenges encountered, and the adaptations being adopted in daily use. To complement the video data, we also invited the video uploaders to complete a survey on their ongoing use of touchscreen technology. Our findings show that, while many people with motor impairments find these devices empowering, accessibility issues still exist. 
In addition to providing implications for more accessible touchscreen design, we reflect on the application of user-generated content to study user interface design.", + "link": "https://www.semanticscholar.org/paper/098713427827d5f3eff0f2216b64ce2e24ba2dbe", + "scraped_abstract": null, + "citation_best": 137 + }, + { + "paper": "2128640376", + "venue": "1163450153", + "year": "2013", + "title": "improving navigation based file retrieval", + "label": [ + "77088390", + "40350719", + "91396116", + "2225880", + "171730128", + "23123220" + ], + "author": [ + "2083559347", + "2101644873", + "2071700171" + ], + "reference": [ + "601097232", + "1485391494", + "1491739947", + "1500303984", + "1531523252", + "1604946996", + "1988470798", + "1991332152", + "2003482363", + "2007764133", + "2021409766", + "2021625970", + "2039440413", + "2044675247", + "2047711699", + "2065132166", + "2080160723", + "2083896568", + "2086853365", + "2097127516", + "2097298348", + "2122544819", + "2130025274", + "2142094977", + "2156492599", + "2157289187", + "2165103795", + "2165703420", + "2169785626", + "2179427518", + "2321470647" + ], + "abstract": "navigating through a file hierarchy is one of the most common methods for accessing files yet it can be slow and repetitive new algorithms that predict upcoming file accesses have the potential to improve navigation based file retrieval but it is unknown how best to present their predictions to users we present three design goals aiming to improve navigation based file retrieval interfaces minimise the time spent at each hierarchical level en route to the target file reduce the number of levels traversed by providing shortcuts and promote rehearsal of the retrieval mechanics to facilitate expertise we introduce three interfaces that augment standard file browsers based on each of these goals icon highlights give greater prominence to predicted items in the current folder hover menus provide shortcuts to predicted folder content and search 
directed navigation uses predictive highlighting to guide users through the hierarchy in response to query terms results from a user evaluation show that all three interfaces improve file retrieval times with icon highlights and hover menus best suited for frequently accessed items and search directed navigation best suited for infrequent ones we also show that the benefits are larger when folder content is spatially unstable finally we discuss how the interfaces could be combined and deployed in existing file browsers", + "title_raw": "Improving navigation-based file retrieval", + "abstract_raw": "Navigating through a file hierarchy is one of the most common methods for accessing files, yet it can be slow and repetitive. New algorithms that predict upcoming file accesses have the potential to improve navigation-based file retrieval, but it is unknown how best to present their predictions to users. We present three design goals aiming to improve navigation-based file retrieval interfaces: minimise the time spent at each hierarchical level en route to the target file; reduce the number of levels traversed by providing shortcuts; and promote rehearsal of the retrieval mechanics to facilitate expertise. We introduce three interfaces that augment standard file browsers based on each of these goals: Icon Highlights give greater prominence to predicted items in the current folder; Hover Menus provide shortcuts to predicted folder content; and Search Directed Navigation uses predictive highlighting to guide users through the hierarchy in response to query terms. Results from a user evaluation show that all three interfaces improve file retrieval times, with Icon Highlights and Hover Menus best suited for frequently accessed items and Search Directed Navigation best suited for infrequent ones. We also show that the benefits are larger when folder content is spatially unstable. 
Finally, we discuss how the interfaces could be combined and deployed in existing file browsers.", + "link": "https://www.semanticscholar.org/paper/a83ac039411188a72ecef1b70f275460daab0a0e", + "scraped_abstract": null, + "citation_best": 30 + }, + { + "paper": "2168214746", + "venue": "1163450153", + "year": "2013", + "title": "sprweb preserving subjective responses to website colour schemes through automatic recolouring", + "label": [ + "201025465", + "107457646", + "49774154", + "24493144" + ], + "author": [ + "2093858891", + "2136495853", + "2071700171", + "728065937" + ], + "reference": [ + "39969610", + "161807829", + "1546688855", + "1562544913", + "1622427076", + "1975227708", + "1990574563", + "2000141706", + "2001700175", + "2025481627", + "2043870933", + "2065040432", + "2080220571", + "2080761261", + "2088437294", + "2099270617", + "2101793629", + "2103801253", + "2105332709", + "2106639827", + "2116244810", + "2120833565", + "2125005457", + "2129112648", + "2149824933", + "2165232124", + "2169787065", + "3136903822" + ], + "abstract": "colours are an important part of user experiences on the web colour schemes influence not only the aesthetics but also our first impressions and long term engagement with websites e g figure 1 shows a warm website colour scheme however five percent of people perceive a subset of all colours because they have colour vision deficiency cvd resulting in an unequal and presumably less rich user experience on the web figure 2 traditionally people with cvd have been supported by recolouring tools that improve colour differentiability but do not consider the subjective properties of colour schemes while recolouring figure 3 shows figure 1 after standard recolouring it is now cool instead of warm to address this we developed sprweb a tool that recolours websites to preserve subjective responses and improve colour differentiability thus enabling users with cvd to have similar online experiences figure 4 shows figure 1 recoloured 
using sprweb it is once again warm to develop sprweb we extended existing models of non cvd subjective responses to people with cvd then used this extended model to steer the recolouring process in a lab study we found that sprweb did significantly better than a standard recolouring tool at preserving the temperature and naturalness of websites while achieving similar weight and differentiability preservation we also found that recolouring did not preserve activity and hypothesize that visual complexity influences activity more than colour sprweb is the first tool to automatically preserve the subjective and perceptual properties of website colour schemes thereby equalizing the colour based web experience for people with cvd", + "title_raw": "SPRWEB: preserving subjective responses to website colour schemes through automatic recolouring", + "abstract_raw": "Colours are an important part of user experiences on the Web. Colour schemes influence not only the aesthetics, but also our first impressions and long-term engagement with websites (e.g., Figure 1 shows a 'warm' website colour scheme). However, five percent of people perceive a subset of all colours because they have colour vision deficiency (CVD), resulting in an unequal and presumably less-rich user experience on the Web (Figure 2). Traditionally, people with CVD have been supported by recolouring tools that improve colour differentiability, but do not consider the subjective properties of colour schemes while recolouring (Figure 3 shows Figure 1 after standard recolouring; it is now 'cool' instead of 'warm'). To address this, we developed SPRWeb, a tool that recolours websites to preserve subjective responses and improve colour differentiability - thus enabling users with CVD to have similar online experiences (Figure 4 shows Figure 1 recoloured using SPRWeb; it is once again 'warm'). 
To develop SPRWeb, we extended existing models of non-CVD subjective responses to people with CVD, then used this extended model to steer the recolouring process. In a lab study, we found that SPRWeb did significantly better than a standard recolouring tool at preserving the temperature and naturalness of websites, while achieving similar weight and differentiability preservation. We also found that recolouring did not preserve activity, and hypothesize that visual complexity influences activity more than colour. SPRWeb is the first tool to automatically preserve the subjective and perceptual properties of website colour schemes thereby equalizing the colour-based web experience for people with CVD.", + "link": "https://www.semanticscholar.org/paper/85330fde8d13219bd2ecce46b918ac14a6a2b169", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "1986345088", + "venue": "1163450153", + "year": "2013", + "title": "the efficacy of human post editing for language translation", + "label": [ + "148526163", + "24687705", + "120012220", + "1813318", + "98199350", + "135784402", + "53893814", + "2986862884", + "130597682", + "110046852", + "51802942", + "204321447", + "203005215", + "39608478", + "123406163", + "83479923" + ], + "author": [ + "2169829658", + "2112690490", + "2149153931" + ], + "reference": [ + "115667701", + "125190730", + "144333925", + "214995755", + "293765883", + "358659425", + "570627446", + "575470992", + "652095014", + "1507460855", + "1606538849", + "1724972948", + "1972253352", + "1972620750", + "1979962431", + "1986450680", + "1986845215", + "2032175749", + "2032946087", + "2034224543", + "2048498434", + "2049774111", + "2057235967", + "2078575455", + "2087217001", + "2087735403", + "2090960387", + "2099737847", + "2101105183", + "2104798880", + "2106817091", + "2125001590", + "2132214281", + "2133102719", + "2143539737", + "2146707041", + "2148453313", + "2152065754", + "2156011021", + "2169128753", + "2250390011", + "2480550789", 
+ "2588877501", + "2783135427", + "3104922922" + ], + "abstract": "language translation is slow and expensive so various forms of machine assistance have been devised automatic machine translation systems process text quickly and cheaply but with quality far below that of skilled human translators to bridge this quality gap the translation industry has investigated post editing or the manual correction of machine output we present the first rigorous controlled analysis of post editing and find that post editing leads to reduced time and surprisingly improved quality for three diverse language pairs english to arabic french and german our statistical models and visualizations of experimental data indicate that some simple predictors like source text part of speech counts predict translation time and that post editing results in very different interaction patterns from these results we distill implications for the design of new language translation interfaces", + "title_raw": "The efficacy of human post-editing for language translation", + "abstract_raw": "Language translation is slow and expensive, so various forms of machine assistance have been devised. Automatic machine translation systems process text quickly and cheaply, but with quality far below that of skilled human translators. To bridge this quality gap, the translation industry has investigated post-editing, or the manual correction of machine output. We present the first rigorous, controlled analysis of post-editing and find that post-editing leads to reduced time and, surprisingly, improved quality for three diverse language pairs (English to Arabic, French, and German). Our statistical models and visualizations of experimental data indicate that some simple predictors (like source text part of speech counts) predict translation time, and that post-editing results in very different interaction patterns. 
From these results we distill implications for the design of new language translation interfaces.", + "link": "https://www.semanticscholar.org/paper/92f79d2ea248ab1ce963a0985472e6da9c626de5", + "scraped_abstract": null, + "citation_best": 185 + }, + { + "paper": "2147603330", + "venue": "1163450153", + "year": "2013", + "title": "turkopticon interrupting worker invisibility in amazon mechanical turk", + "label": [ + "50962388", + "108827166", + "2777972825" + ], + "author": [ + "743585245", + "2133528168" + ], + "reference": [ + "582177337", + "602224805", + "1572374622", + "1577841485", + "1591744892", + "1783594902", + "1964450454", + "1970381522", + "1985720804", + "1992945486", + "2002340287", + "2021370805", + "2023442792", + "2053522862", + "2064286166", + "2095270559", + "2101419153", + "2109688080", + "2114269021", + "2133369086", + "2138849909", + "2139464545", + "2140420369", + "2144328386", + "2145162117", + "2154825228", + "2160180108", + "2163284576", + "2168318626", + "2295054684", + "2327521297", + "2337002970", + "2340117730", + "2764540120", + "3124258878", + "3125321342" + ], + "abstract": "as hci researchers have explored the possibilities of human computation they have paid less attention to ethics and values of crowdwork this paper offers an analysis of amazon mechanical turk a popular human computation system as a site of technically mediated worker employer relations we argue that human computation currently relies on worker invisibility we then present turkopticon an activist system that allows workers to publicize and evaluate their relationships with employers as a common infrastructure turkopticon also enables workers to engage one another in mutual aid we conclude by discussing the potentials and challenges of sustaining activist technologies that intervene in large existing socio technical systems", + "title_raw": "Turkopticon: interrupting worker invisibility in amazon mechanical turk", + "abstract_raw": "As HCI researchers have 
explored the possibilities of human computation, they have paid less attention to ethics and values of crowdwork. This paper offers an analysis of Amazon Mechanical Turk, a popular human computation system, as a site of technically mediated worker-employer relations. We argue that human computation currently relies on worker invisibility. We then present Turkopticon, an activist system that allows workers to publicize and evaluate their relationships with employers. As a common infrastructure, Turkopticon also enables workers to engage one another in mutual aid. We conclude by discussing the potentials and challenges of sustaining activist technologies that intervene in large, existing socio-technical systems.", + "link": "https://www.semanticscholar.org/paper/4f041141d4e61bedea35e958a7431fa86eb59ff1", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2166713718", + "venue": "1163450153", + "year": "2013", + "title": "illumiroom peripheral projected illusions for interactive experiences", + "label": [ + "35173682", + "107457646", + "49774154", + "2777538892", + "153715457" + ], + "author": [ + "2111437582", + "1886754024", + "1794776656", + "2105571773" + ], + "reference": [ + "1482795035", + "1559983024", + "1977638664", + "1994877858", + "1995923922", + "2002010034", + "2014073269", + "2022732461", + "2026858852", + "2028542749", + "2043132474", + "2066513689", + "2079638262", + "2099254324", + "2101630196", + "2103058297", + "2108579561", + "2111751370", + "2112565255", + "2113696470", + "2127957107", + "2152792096", + "2169667342", + "2199841362", + "2237155658", + "2997934106" + ], + "abstract": "illumiroom is a proof of concept system that augments the area surrounding a television with projected visualizations to enhance traditional gaming experiences we investigate how projected visualizations in the periphery can negate include or augment the existing physical environment and complement the content displayed on the television screen 
peripheral projected illusions can change the appearance of the room induce apparent motion extend the field of view and enable entirely new physical gaming experiences our system is entirely self calibrating and is designed to work in any room we present a detailed exploration of the design space of peripheral projected illusions and we demonstrate ways to trigger and drive such illusions from gaming content we also contribute specific feedback from two groups of target users 10 gamers and 15 game designers providing insights for enhancing game experiences through peripheral projected illusions", + "title_raw": "IllumiRoom: peripheral projected illusions for interactive experiences", + "abstract_raw": "IllumiRoom is a proof-of-concept system that augments the area surrounding a television with projected visualizations to enhance traditional gaming experiences. We investigate how projected visualizations in the periphery can negate, include, or augment the existing physical environment and complement the content displayed on the television screen. Peripheral projected illusions can change the appearance of the room, induce apparent motion, extend the field of view, and enable entirely new physical gaming experiences. Our system is entirely self-calibrating and is designed to work in any room. We present a detailed exploration of the design space of peripheral projected illusions and we demonstrate ways to trigger and drive such illusions from gaming content. 
We also contribute specific feedback from two groups of target users (10 gamers and 15 game designers); providing insights for enhancing game experiences through peripheral projected illusions.", + "link": "https://www.semanticscholar.org/paper/f93863979a57541dabac0240b865a77584edafe3", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2007644286", + "venue": "1163450153", + "year": "2013", + "title": "webzeitgeist design mining the web", + "label": [ + "130436687", + "35578498", + "544335954", + "521306242", + "120567893", + "89198739", + "21959979", + "197046077", + "79373723", + "182321512", + "199595568", + "176775163", + "136764020", + "24733836", + "2522767166", + "61096286" + ], + "author": [ + "2226499346", + "2022576506", + "2161195778", + "2134747522", + "2423899502", + "261822931", + "2054455516" + ], + "reference": [ + "140569653", + "1515087027", + "1542671304", + "1566135517", + "1922832738", + "1989338554", + "1989993068", + "2017102965", + "2040939335", + "2052889856", + "2063771604", + "2066114190", + "2069003154", + "2075779758", + "2095587486", + "2114103606", + "2126135370", + "2130610812", + "2131627887", + "2141834868", + "2145990704", + "2147717514", + "2154583606", + "2158051716", + "2171011251", + "2185706522", + "2253807446", + "2917552140" + ], + "abstract": "advances in data mining and knowledge discovery have transformed the way web sites are designed however while visual presentation is an intrinsic part of the web traditional data mining techniques ignore render time page structures and their attributes this paper introduces design mining for the web using knowledge discovery techniques to understand design demographics automate design curation and support data driven design tools this idea is manifest in webzeitgeist a platform for large scale design mining comprising a repository of over 100 000 web pages and 100 million design elements this paper describes the principles driving design mining the implementation 
of the webzeitgeist architecture and the new class of data driven design applications it enables", + "title_raw": "Webzeitgeist: design mining the web", + "abstract_raw": "Advances in data mining and knowledge discovery have transformed the way Web sites are designed. However, while visual presentation is an intrinsic part of the Web, traditional data mining techniques ignore render-time page structures and their attributes. This paper introduces design mining for the Web: using knowledge discovery techniques to understand design demographics, automate design curation, and support data-driven design tools. This idea is manifest in Webzeitgeist, a platform for large-scale design mining comprising a repository of over 100,000 Web pages and 100 million design elements. This paper describes the principles driving design mining, the implementation of the Webzeitgeist architecture, and the new class of data-driven design applications it enables.", + "link": "https://www.semanticscholar.org/paper/929947ccb72906bcad12bca007e21f649328de55", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2057073649", + "venue": "1163450153", + "year": "2013", + "title": "laserorigami laser cutting 3d objects", + "label": [ + "98045186" + ], + "author": [ + "2130928247", + "896815381", + "2009751849" + ], + "reference": [ + "171240568", + "613471255", + "1974788076", + "1979767018", + "1985153681", + "2009501808", + "2020169247", + "2029780972", + "2048912286", + "2051196564", + "2057715932", + "2057913402", + "2083929644", + "2100910924", + "2102712675", + "2103339808", + "2105021915", + "2105817127", + "2116820576", + "2123741193", + "2132168989", + "2134202846", + "2140205964", + "2140316998", + "2147326417", + "2150582285", + "2162514427" + ], + "abstract": "we present laserorigami a rapid prototyping system that produces 3d objects using a laser cutter laserorigami is substantially faster than traditional 3d fabrication techniques such as 3d printing and unlike 
traditional laser cutting the resulting 3d objects require no manual assembly the key idea behind laserorigami is that it achieves three dimensionality by folding and stretching the workpiece rather than by placing joints thereby eliminating the need for manual assembly laserorigami achieves this by heating up selected regions of the workpiece until they become compliant and bend down under the force of gravity laserorigami administers the heat by defocusing the laser which distributes the laser s power across a larger surface laserorigami implements cutting and bending in a single integrated process by automatically moving the cutting table up and down when users take out the workpiece it is already fully assembled we present the three main design elements of laserorigami the bend the suspender and the stretch and demonstrate how to use them to fabricate a range of physical objects finally we demonstrate an interactive fabrication version of laserorigami a process in which user interaction and fabrication alternate step by step", + "title_raw": "LaserOrigami: laser-cutting 3D objects", + "abstract_raw": "We present LaserOrigami, a rapid prototyping system that produces 3D objects using a laser cutter. LaserOrigami is substantially faster than traditional 3D fabrication techniques such as 3D printing and unlike traditional laser cutting the resulting 3D objects require no manual assembly. The key idea behind LaserOrigami is that it achieves three-dimensionality by folding and stretching the workpiece, rather than by placing joints, thereby eliminating the need for manual assembly. LaserOrigami achieves this by heating up selected regions of the workpiece until they become compliant and bend down under the force of gravity. LaserOrigami administers the heat by defocusing the laser, which distributes the laser's power across a larger surface. 
LaserOrigami implements cutting and bending in a single integrated process by automatically moving the cutting table up and down--when users take out the workpiece, it is already fully assembled. We present the three main design elements of LaserOrigami: the bend, the suspender, and the stretch, and demonstrate how to use them to fabricate a range of physical objects. Finally, we demonstrate an interactive fabrication version of LaserOrigami, a process in which user interaction and fabrication alternate step-by-step.", + "link": "https://www.semanticscholar.org/paper/5fc0d3b84b4ba2df2e28fad5c764dc8478e4eec4", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2056225838", + "venue": "1163450153", + "year": "2013", + "title": "labor dynamics in a mobile micro task market", + "label": [ + "136764020", + "2777884278", + "2522767166", + "62230096" + ], + "author": [ + "319674129", + "1960698960" + ], + "reference": [ + "1577841485", + "2098865355", + "2113342230", + "2114269021", + "2116705992", + "2164017702", + "2188732456", + "2442717800", + "2735460460", + "3122670150", + "3124258878" + ], + "abstract": "the ubiquity of smartphones has led to the emergence of mobile crowdsourcing markets where smartphone users participate to perform tasks in the physical world mobile crowdsourcing markets are uniquely different from their online counterparts in that they require spatial mobility and are therefore impacted by geographic factors and constraints that are not present in the online case despite the emergence and importance of such mobile marketplaces little to none is known about the labor dynamics and mobility patterns of agents this paper provides an in depth exploration of labor dynamics in mobile task markets based on a year long dataset from a leading mobile crowdsourcing platform we find that a small core group of workers 80 generated in the market we find that these super agents are more efficient than other agents across several dimensions a 
they are willing to move longer distances to perform tasks yet they amortize travel across more tasks b they work and search for tasks more efficiently c they have higher data quality in terms of accepted submissions and d they improve in almost all of these efficiency measures over time we find that super agent efficiency stems from two simple optimizations they are 3x more likely than other agents to chain tasks and they pick fewer lower priced tasks than other agents we compare mobile and online micro task markets and discuss differences in demographics data quality and time of use as well as similarities in super agent behavior we conclude with a discussion of how a mobile micro task market might leverage some of our results to improve performance", + "title_raw": "Labor dynamics in a mobile micro-task market", + "abstract_raw": "The ubiquity of smartphones has led to the emergence of mobile crowdsourcing markets, where smartphone users participate to perform tasks in the physical world. Mobile crowdsourcing markets are uniquely different from their online counterparts in that they require spatial mobility, and are therefore impacted by geographic factors and constraints that are not present in the online case. Despite the emergence and importance of such mobile marketplaces, little to none is known about the labor dynamics and mobility patterns of agents. This paper provides an in-depth exploration of labor dynamics in mobile task markets based on a year-long dataset from a leading mobile crowdsourcing platform. We find that a small core group of workers ( 80%) generated in the market. 
We find that these super agents are more efficient than other agents across several dimensions: a) they are willing to move longer distances to perform tasks, yet they amortize travel across more tasks, b) they work and search for tasks more efficiently, c) they have higher data quality in terms of accepted submissions, and d) they improve in almost all of these efficiency measures over time. We find that super agent efficiency stems from two simple optimizations --- they are 3x more likely than other agents to chain tasks and they pick fewer lower priced tasks than other agents. We compare mobile and online micro-task markets, and discuss differences in demographics, data quality, and time of use, as well as similarities in super agent behavior. We conclude with a discussion of how a mobile micro-task market might leverage some of our results to improve performance.", + "link": "https://www.semanticscholar.org/paper/0a1f8b4e1f9e6cfaccc87fb6798eb6f108515a73", + "scraped_abstract": null, + "citation_best": 106 + }, + { + "paper": "2015143364", + "venue": "1163450153", + "year": "2013", + "title": "job opportunities through entertainment virally spread speech based services for low literate users", + "label": [ + "173853756", + "108827166", + "40140605", + "105339364", + "136764020" + ], + "author": [ + "2154945499", + "2223165294", + "2229175489", + "2231051563", + "2345206531", + "1937213890", + "2117063646" + ], + "reference": [ + "62983095", + "1974030833", + "1979212467", + "1985944252", + "1986958057", + "2099732761", + "2110911455", + "2111139064", + "2119728341", + "2119877162", + "2123715715", + "2129045997", + "2134058258", + "2142869276", + "2147395458", + "2165577970", + "2171349442", + "2281668644", + "3139726049", + "3142833126" + ], + "abstract": "we explore how telephone based services might be mass adopted by low literate users in the developing world we focus on speech and push button dialog systems requiring neither literacy nor training building 
on the success of polly a simple telephone based voice manipulation and forwarding system that was first tested in 2011 we report on its first large scale sustained deployment in 24 7 operation in pakistan since may 9 2012 as of mid september polly has spread to 85 000 users engaging them in 495 000 interactions and is continuing to spread to 1 000 new people daily it has also attracted 27 000 people to a job search service who in turn listened 279 000 times to job ads and forwarded them 22 000 times to their friends we report users activity over time and across demographics analyze user behavior within several randomized controlled trials and describe lessons learned regarding spread scalability and sustainability of telephone based speech based services", + "title_raw": "Job opportunities through entertainment: virally spread speech-based services for low-literate users", + "abstract_raw": "We explore how telephone-based services might be mass adopted by low-literate users in the developing world. We focus on speech and push-button dialog systems requiring neither literacy nor training. Building on the success of Polly, a simple telephone-based voice manipulation and forwarding system that was first tested in 2011, we report on its first large-scale sustained deployment. In 24/7 operation in Pakistan since May 9, 2012, as of mid-September Polly has spread to 85,000 users, engaging them in 495,000 interactions, and is continuing to spread to 1,000 new people daily. It has also attracted 27,000 people to a job search service, who in turn listened 279,000 times to job ads and forwarded them 22,000 times to their friends. 
We report users' activity over time and across demographics, analyze user behavior within several randomized controlled trials, and describe lessons learned regarding spread, scalability and sustainability of telephone-based speech-based services.", + "link": "https://www.semanticscholar.org/paper/a18924c4b2551f4636dbb58c5334ccc944c34cd4", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2109727442", + "venue": "1163450153", + "year": "2013", + "title": "screenfinity extending the perception area of content on very large public displays", + "label": [ + "34127721", + "49774154" + ], + "author": [ + "2131810727", + "2908656269", + "2223317084" + ], + "reference": [ + "1764826739", + "1823329909", + "1968069296", + "1976349890", + "1986002752", + "1997601670", + "2000250157", + "2009104953", + "2011741072", + "2050552147", + "2097041645", + "2105408445", + "2105453942", + "2106223989", + "2106351401", + "2118045032", + "2124670211", + "2129056423", + "2140871574", + "2141716420", + "2143445300", + "2149041888", + "2156435695", + "2164915998", + "2171844507" + ], + "abstract": "we propose and validate a model of the perception area of content on public displays in order to predict from where users can read from this model we derive screenfinity a technique to rotate translate and zoom content in order to enable reading while passing by very large displays screenfinity is comfortable to read when close supports different content for different users does not waste screen real estate and allows expert passers by to read content while walking a laboratory study shows that expert users are able to perceive content when it moves a field study evaluates the effect of screenfinity on novice users in an ecologically valid setting we find 1 first time users can read content without slowing down or stopping 2 passers by stopping did so to explore the technology users explore the interaction the limits of the system manipulate the technology and look behind 
the screen", + "title_raw": "Screenfinity: extending the perception area of content on very large public displays", + "abstract_raw": "We propose and validate a model of the perception area of content on public displays in order to predict from where users can read. From this model, we derive Screenfinity, a technique to rotate, translate, and zoom content in order to enable reading while passing by very large displays. Screenfinity is comfortable to read when close, supports different content for different users, does not waste screen real estate and allows expert passers-by to read content while walking. A laboratory study shows that expert users are able to perceive content when it moves. A field study evaluates the effect of Screenfinity on novice users in an ecologically valid setting. We find 1) first time users can read content without slowing down or stopping; 2) Passers-by stopping did so to explore the technology. Users explore the interaction, the limits of the system, manipulate the technology, and look behind the screen.", + "link": "https://www.semanticscholar.org/paper/799ca421904ed272e555f064f09a00f9f154be3a", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2025802550", + "venue": "1163450153", + "year": "2013", + "title": "reasons to question seven segment displays", + "label": [ + "2779694141", + "107457646", + "44154836" + ], + "author": [ + "2595122141" + ], + "reference": [ + "1526703211", + "1567512422", + "1573617269", + "1868823453", + "2007617851", + "2093813535", + "2123253818", + "2124517782", + "2152630790", + "2752853835" + ], + "abstract": "seven segment number displays are ubiquitous and popular they are simple and familiar they seem to make economic sense and with only seven segments they require little wiring and electronics to support they are cheap to buy and cheap to use they make seemingly effective and unproblematic products this paper illustrates many examples of problematic uses of seven segment displays 
that could have been avoided more generally the paper raises design questions and some solutions to be considered when designing numerical displays and certainly before uncritically using seven segment displays although there are markets and applications where cost may be an overriding consideration for safety critical and other dependable types of use including general purpose devices that may sometimes be used for critical tasks more legible alternatives than standard seven segment displays should be preferred", + "title_raw": "Reasons to question seven segment displays", + "abstract_raw": "Seven segment number displays are ubiquitous and popular. They are simple and familiar. They seem to make economic sense, and with only seven segments they require little wiring and electronics to support. They are cheap to buy and cheap to use; they make seemingly effective and unproblematic products. This paper illustrates many examples of problematic uses of seven segment displays that could have been avoided. More generally, the paper raises design questions and some solutions to be considered when designing numerical displays, and certainly before uncritically using seven segment displays. 
Although there are markets and applications where cost may be an overriding consideration, for safety critical and other dependable types of use (including general purpose devices that may sometimes be used for critical tasks) more legible alternatives than standard seven segment displays should be preferred.", + "link": "https://www.semanticscholar.org/paper/5597d094169837831caf1dd235109204232e56b8", + "scraped_abstract": null, + "citation_best": 19 + }, + { + "paper": "2122146326", + "venue": "1158167855", + "year": "2013", + "title": "fast accurate detection of 100 000 object classes on a single machine", + "label": [ + "75294576", + "154945302", + "64729616", + "152003226", + "2776151529", + "182521987", + "31972630", + "100515483", + "11413529", + "106131492", + "71681937", + "146849305", + "74270461" + ], + "author": [ + "2053515084", + "2286653976", + "2341620196", + "2343055381", + "2069966712", + "1510200549" + ], + "reference": [ + "25437484", + "1736726159", + "1787735662", + "2036989445", + "2037227137", + "2044313232", + "2045762391", + "2094728533", + "2110990863", + "2120419212", + "2128715914", + "2129305389", + "2141357020", + "2147717514", + "2155080527", + "2161198271", + "2161969291", + "2168356304", + "2397770138", + "2538008885", + "2618530766", + "3097096317" + ], + "abstract": "many object detection systems are constrained by the time required to convolve a target image with a bank of filters that code for different aspects of an object s appearance such as the presence of component parts we exploit locality sensitive hashing to replace the dot product kernel operator in the convolution with a fixed number of hash table probes that effectively sample all of the filter responses in time independent of the size of the filter bank to show the effectiveness of the technique we apply it to evaluate 100 000 deformable part models requiring over a million part filters on multiple scales of a target image in less than 20 seconds using a single multi 
core processor with 20gb of ram this represents a speed up of approximately 20 000 times four orders of magnitude when compared with performing the convolutions explicitly on the same hardware while mean average precision over the full set of 100 000 object classes is around 0 16 due in large part to the challenges in gathering training data and collecting ground truth for so many classes we achieve a map of at least 0 20 on a third of the classes and 0 30 or better on about 20 of the classes", + "title_raw": "Fast, Accurate Detection of 100,000 Object Classes on a Single Machine", + "abstract_raw": "Many object detection systems are constrained by the time required to convolve a target image with a bank of filters that code for different aspects of an object's appearance, such as the presence of component parts. We exploit locality-sensitive hashing to replace the dot-product kernel operator in the convolution with a fixed number of hash-table probes that effectively sample all of the filter responses in time independent of the size of the filter bank. To show the effectiveness of the technique, we apply it to evaluate 100,000 deformable-part models requiring over a million (part) filters on multiple scales of a target image in less than 20 seconds using a single multi-core processor with 20GB of RAM. This represents a speed-up of approximately 20,000 times - four orders of magnitude - when compared with performing the convolutions explicitly on the same hardware. 
While mean average precision over the full set of 100,000 object classes is around 0.16 due in large part to the challenges in gathering training data and collecting ground truth for so many classes, we achieve a mAP of at least 0.20 on a third of the classes and 0.30 or better on about 20% of the classes.", + "link": "https://www.semanticscholar.org/paper/774f67303ea4a3a94874f08cf9a9dacc69b40782", + "scraped_abstract": null, + "citation_best": 320 + }, + { + "paper": "2135166986", + "venue": "1164975091", + "year": "2013", + "title": "from large scale image categorization to entry level categories", + "label": [ + "189391414", + "64729616", + "1667742", + "121934690", + "94124525", + "199579030", + "204321447", + "195324797" + ], + "author": [ + "2152316929", + "2608828817", + "2133417374", + "2645848709", + "2125758124" + ], + "reference": [ + "1897761818", + "1959000896", + "1974907760", + "2007653981", + "2017814585", + "2031489346", + "2038721957", + "2066134726", + "2108598243", + "2109586012", + "2110764733", + "2127279985", + "2129305389", + "2143017621", + "2145607950", + "2149172860", + "2163605009", + "2168356304", + "2253807446", + "2950789693", + "3035258717", + "3070706509" + ], + "abstract": "entry level categories the labels people will use to name an object were originally defined and studied by psychologists in the 1980s in this paper we study entry level categories at a large scale and learn the first models for predicting entry level categories for images our models combine visual recognition predictions with proxies for word naturalness mined from the enormous amounts of text on the web we demonstrate the usefulness of our models for predicting nouns entry level words associated with images by people we also learn mappings between concepts predicted by existing visual recognition systems and entry level concepts that could be useful for improving human focused applications such as natural language image description or retrieval", + "title_raw": 
"From Large Scale Image Categorization to Entry-Level Categories", + "abstract_raw": "Entry level categories - the labels people will use to name an object - were originally defined and studied by psychologists in the 1980s. In this paper we study entry-level categories at a large scale and learn the first models for predicting entry-level categories for images. Our models combine visual recognition predictions with proxies for word \"naturalness\" mined from the enormous amounts of text on the web. We demonstrate the usefulness of our models for predicting nouns (entry-level words) associated with images by people. We also learn mappings between concepts predicted by existing visual recognition systems and entry-level concepts that could be useful for improving human-focused applications such as natural language image description or retrieval.", + "link": "https://www.semanticscholar.org/paper/3bfeecf2aa26efe211985e19a967b2cb28012482", + "scraped_abstract": null, + "citation_best": 107 + }, + { + "paper": "2129386590", + "venue": "1174403976", + "year": "2013", + "title": "data clone detection and visualization in spreadsheets", + "label": [ + "121050878", + "133237599", + "36464697", + "199360897", + "46503548", + "2778739878", + "172367668", + "117447612", + "23123220" + ], + "author": [ + "2112506875", + "2631709481", + "335874646", + "2155442793" + ], + "reference": [ + "1497542708", + "1512285202", + "1551326568", + "1558974199", + "1593203335", + "1644311900", + "1698439592", + "1808011207", + "1958332869", + "1970665021", + "1994462700", + "2019628947", + "2020168614", + "2072632642", + "2080982416", + "2090432523", + "2096491586", + "2098347799", + "2101832700", + "2109943392", + "2115534035", + "2119391892", + "2119887272", + "2123718358", + "2124991517", + "2128698639", + "2129723764", + "2134329651", + "2135473121", + "2138633813", + "2138756793", + "2140606722", + "2156400118", + "2157532207", + "2159433976", + "2169917840", + "2267503994" + ], + 
"abstract": "spreadsheets are widely used in industry it is estimated that end user programmers outnumber programmers by a factor 5 however spreadsheets are error prone numerous companies have lost money because of spreadsheet errors one of the causes for spreadsheet problems is the prevalence of copy pasting in this paper we study this cloning in spreadsheets based on existing text based clone detection algorithms we have developed an algorithm to detect data clones in spreadsheets formulas whose values are copied as plain text in a different location to evaluate the usefulness of the proposed approach we conducted two evaluations a quantitative evaluation in which we analyzed the euses corpus and a qualitative evaluation consisting of two case studies the results of the evaluation clearly indicate that 1 data clones are common 2 data clones pose threats to spreadsheet quality and 3 our approach supports users in finding and resolving data clones", + "title_raw": "Data clone detection and visualization in spreadsheets", + "abstract_raw": "Spreadsheets are widely used in industry: it is estimated that end-user programmers outnumber programmers by a factor 5. However, spreadsheets are error-prone, numerous companies have lost money because of spreadsheet errors. One of the causes for spreadsheet problems is the prevalence of copy-pasting. In this paper, we study this cloning in spreadsheets. Based on existing text-based clone detection algorithms, we have developed an algorithm to detect data clones in spreadsheets: formulas whose values are copied as plain text in a different location. To evaluate the usefulness of the proposed approach, we conducted two evaluations. A quantitative evaluation in which we analyzed the EUSES corpus and a qualitative evaluation consisting of two case studies. 
The results of the evaluation clearly indicate that 1) data clones are common, 2) data clones pose threats to spreadsheet quality and 3) our approach supports users in finding and resolving data clones.", + "link": "https://www.semanticscholar.org/paper/315312bff5a0ac1261450ce1b68637907194bfc7", + "scraped_abstract": null, + "citation_best": 29 + }, + { + "paper": "2163339495", + "venue": "1174403976", + "year": "2013", + "title": "interaction based test suite minimization", + "label": [ + "7374053" + ], + "author": [ + "2131215670", + "1966604299", + "2658649754", + "2118004490" + ], + "reference": [ + "1582328334", + "1992987499", + "2014515160", + "2046482086", + "2071585092", + "2094071977", + "2095710561", + "2100162598", + "2125814238", + "2128204165", + "2143431852", + "2152949369", + "2156411624", + "2168172448" + ], + "abstract": "combinatorial test design ctd is an effective test planning technique that reveals faults resulting from feature interactions in a system the standard application of ctd requires manual modeling of the test space including a precise definition of restrictions between the test space parameters and produces a test suite that corresponds to new test cases to be implemented from scratch in this work we propose to use interaction based test suite minimization itsm as a complementary approach to standard ctd itsm reduces a given test suite without impacting its coverage of feature interactions itsm requires much less modeling effort and does not require a definition of restrictions it is appealing where there has been a significant investment in an existing test suite where creating new tests is expensive and where restrictions are very complex we discuss the tradeoffs between standard ctd and itsm and suggest an efficient algorithm for solving the latter we also discuss the challenges and additional requirements that arise when applying itsm to real life test suites we introduce solutions to these challenges and demonstrate them 
through two real life case studies", + "title_raw": "Interaction-based test-suite minimization", + "abstract_raw": "Combinatorial Test Design (CTD) is an effective test planning technique that reveals faults resulting from feature interactions in a system. The standard application of CTD requires manual modeling of the test space, including a precise definition of restrictions between the test space parameters, and produces a test suite that corresponds to new test cases to be implemented from scratch. In this work, we propose to use Interaction-based Test-Suite Minimization (ITSM) as a complementary approach to standard CTD. ITSM reduces a given test suite without impacting its coverage of feature interactions. ITSM requires much less modeling effort, and does not require a definition of restrictions. It is appealing where there has been a significant investment in an existing test suite, where creating new tests is expensive, and where restrictions are very complex. We discuss the tradeoffs between standard CTD and ITSM, and suggest an efficient algorithm for solving the latter. We also discuss the challenges and additional requirements that arise when applying ITSM to real-life test suites. 
We introduce solutions to these challenges and demonstrate them through two real-life case studies.", + "link": "https://www.semanticscholar.org/paper/248cdfe56549342e85393a51d194ba138f70172c", + "scraped_abstract": null, + "citation_best": 26 + }, + { + "paper": "1974648336", + "venue": "1174403976", + "year": "2013", + "title": "assisting developers of big data analytics applications when deploying on hadoop clouds", + "label": [ + "75684735", + "77088390", + "168065819", + "128487930", + "50712370", + "105339364", + "79974875", + "2522767166", + "200749887" + ], + "author": [ + "2118619547", + "2112274628", + "2329129867", + "2396584173", + "2110155496", + "2621646800" + ], + "reference": [ + "1526857433", + "1539722230", + "1591888204", + "1598064945", + "1854214752", + "2039157918", + "2058614910", + "2079756993", + "2098935637", + "2100830825", + "2130776964", + "2134066998", + "2142031898", + "2149488143", + "2150990363", + "2156170433", + "2157943826", + "2170114245", + "2170311191", + "2173213060" + ], + "abstract": "big data analytics is the process of examining large amounts of data big data in an effort to uncover hidden patterns or unknown correlations big data analytics applications bda apps are a new type of software applications which analyze big data using massive parallel processing frameworks e g hadoop developers of such applications typically develop them using a small sample of data in a pseudo cloud environment afterwards they deploy the applications in a large scale cloud environment with considerably more processing power and larger input data reminiscent of the mainframe days working with bda app developers in industry over the past three years we noticed that the runtime analysis and debugging of such applications in the deployment phase cannot be easily addressed by traditional monitoring and debugging approaches in this paper as a first step in assisting developers of bda apps for cloud deployments we propose a lightweight approach for 
uncovering differences between pseudo and large scale cloud deployments our approach makes use of the readily available yet rarely used execution logs from these platforms our approach abstracts the execution logs recovers the execution sequences and compares the sequences between the pseudo and cloud deployments through a case study on three representative hadoop based bda apps we show that our approach can rapidly direct the attention of bda app developers to the major differences between the two deployments knowledge of such differences is essential in verifying bda apps when analyzing big data in the cloud using injected deployment faults we show that our approach not only significantly reduces the deployment verification effort but also provides very few false positives when identifying deployment failures", + "title_raw": "Assisting developers of big data analytics applications when deploying on hadoop clouds", + "abstract_raw": "Big data analytics is the process of examining large amounts of data (big data) in an effort to uncover hidden patterns or unknown correlations. Big Data Analytics Applications (BDA Apps) are a new type of software applications, which analyze big data using massive parallel processing frameworks (e.g., Hadoop). Developers of such applications typically develop them using a small sample of data in a pseudo-cloud environment. Afterwards, they deploy the applications in a large-scale cloud environment with considerably more processing power and larger input data (reminiscent of the mainframe days). Working with BDA App developers in industry over the past three years, we noticed that the runtime analysis and debugging of such applications in the deployment phase cannot be easily addressed by traditional monitoring and debugging approaches. In this paper, as a first step in assisting developers of BDA Apps for cloud deployments, we propose a lightweight approach for uncovering differences between pseudo and large-scale cloud deployments. 
Our approach makes use of the readily-available yet rarely used execution logs from these platforms. Our approach abstracts the execution logs, recovers the execution sequences, and compares the sequences between the pseudo and cloud deployments. Through a case study on three representative Hadoop-based BDA Apps, we show that our approach can rapidly direct the attention of BDA App developers to the major differences between the two deployments. Knowledge of such differences is essential in verifying BDA Apps when analyzing big data in the cloud. Using injected deployment faults, we show that our approach not only significantly reduces the deployment verification effort, but also provides very few false positives when identifying deployment failures.", + "link": "https://www.semanticscholar.org/paper/a50289628b9f2661413f37fd42b0bc57185aa6ea", + "scraped_abstract": null, + "citation_best": 116 + }, + { + "paper": "2050340005", + "venue": "1174403976", + "year": "2013", + "title": "uml in practice", + "label": [ + "41298492", + "52913732", + "146939238", + "65842339", + "46110900", + "529173508", + "161743704", + "182500959", + "509989072", + "74579156", + "39890963", + "86610423", + "174683762", + "2777904410", + "145644426", + "186846655", + "115903868", + "54534927" + ], + "author": [ + "2086609251" + ], + "reference": [ + "22686852", + "80695334", + "995734871", + "1480113763", + "1523197392", + "1552830686", + "1562934601", + "1574796195", + "1598896112", + "1754883368", + "1973119294", + "1974485040", + "1989677053", + "1990554927", + "1997164669", + "2000117114", + "2009807357", + "2013940065", + "2019163807", + "2056806931", + "2061643308", + "2077063507", + "2089881021", + "2089953986", + "2095741627", + "2099293367", + "2117900291", + "2119786457", + "2123863234", + "2129125345", + "2164366431", + "2167736435", + "2622427009", + "3144401811", + "3144878813" + ], + "abstract": "uml has been described by some as the lingua franca of software engineering 
evidence from industry does not necessarily support such endorsements how exactly is uml being used in industry if it is this paper presents a corpus of interviews with 50 professional software engineers in 50 companies and identifies 5 patterns of uml use", + "title_raw": "UML in practice", + "abstract_raw": "UML has been described by some as \"the lingua franca\" of software engineering. Evidence from industry does not necessarily support such endorsements. How exactly is UML being used in industry if it is? This paper presents a corpus of interviews with 50 professional software engineers in 50 companies and identifies 5 patterns of UML use.", + "link": "https://www.semanticscholar.org/paper/3befaef04ff0c435cb0a31e7a35d5229bc122c43", + "scraped_abstract": null, + "citation_best": 143 + }, + { + "paper": "1977242042", + "venue": "1174403976", + "year": "2013", + "title": "dual ecological measures of focus in software development", + "label": [ + "82214349", + "171981572", + "529173508", + "182500959", + "42669973", + "201515116", + "39890963", + "46110900", + "89567784", + "105339364", + "2777904410", + "186846655", + "56909552", + "117447612" + ], + "author": [ + "2154522258", + "2156647386", + "1481757371", + "2030124443" + ], + "reference": [ + "23559225", + "97757283", + "121927057", + "135187631", + "139013987", + "276233706", + "1486057548", + "1596771679", + "1971127608", + "1971937094", + "1975998971", + "1988510359", + "2004728046", + "2007705030", + "2030857403", + "2043662475", + "2082092506", + "2111557242", + "2120538915", + "2123817056", + "2136173752", + "2145574830", + "2155114705", + "2157353183", + "2158744032", + "2159401492", + "2161017889", + "2164403426", + "2164492163", + "2165929464", + "2167117640", + "3022734214", + "3124772273", + "3139818494" + ], + "abstract": "work practices vary among software developers some are highly focused on a few artifacts others make wide ranging contributions similarly some artifacts are mostly authored or 
owned by one or few developers others have very wide ownership focus and ownership are related but different phenomena both with strong effect on software quality prior studies have mostly targeted ownership the measures of ownership used have generally been based on either simple counts information theoretic views of ownership or social network views of contribution patterns we argue for a more general conceptual view that unifies developer focus and artifact ownership we analogize the developer artifact contribution network to a predator prey food web and draw upon ideas from ecology to produce a novel and conceptually unified view of measuring focus and ownership these measures relate to both cross entropy and kullback liebler divergence and simultaneously provide two normalized measures of focus from both the developer and artifact perspectives we argue that these measures are theoretically well founded and yield novel predictive conceptual and actionable value in software projects we find that more focused developers introduce fewer defects than defocused developers in contrast files that receive narrowly focused activity are more likely to contain defects than other files", + "title_raw": "Dual ecological measures of focus in software development", + "abstract_raw": "Work practices vary among software developers. Some are highly focused on a few artifacts; others make wide-ranging contributions. Similarly, some artifacts are mostly authored, or owned, by one or few developers; others have very wide ownership. Focus and ownership are related but different phenomena, both with strong effect on software quality. Prior studies have mostly targeted ownership; the measures of ownership used have generally been based on either simple counts, information-theoretic views of ownership, or social-network views of contribution patterns. We argue for a more general conceptual view that unifies developer focus and artifact ownership. 
We analogize the developer-artifact contribution network to a predator-prey food web, and draw upon ideas from ecology to produce a novel, and conceptually unified view of measuring focus and ownership. These measures relate to both cross-entropy and Kullback-Liebler divergence, and simultaneously provide two normalized measures of focus from both the developer and artifact perspectives. We argue that these measures are theoretically well-founded, and yield novel predictive, conceptual, and actionable value in software projects. We find that more focused developers introduce fewer defects than defocused developers. In contrast, files that receive narrowly focused activity are more likely to contain defects than other files.", + "link": "https://www.semanticscholar.org/paper/e237a1a8603cfec4913d803bed612743fef95e9a", + "scraped_abstract": null, + "citation_best": 63 + }, + { + "paper": "2250922733", + "venue": "1203999783", + "year": "2013", + "title": "flexibility and decoupling in the simple temporal problem", + "label": [ + "79403827", + "45374587", + "11413529" + ], + "author": [ + "2102240428", + "2054470058", + "2033214668", + "2022557996" + ], + "reference": [ + "1522982136", + "1821679604", + "1869082036", + "2000129601", + "2007572995", + "2130182605", + "2158638039", + "2158770349", + "2185157207", + "2249747735", + "2337098149" + ], + "abstract": "in this paper we concentrate on finding a suitable metric to determine the flexibility of a simple temporal problem stp after reviewing some flexibility metrics that have been proposed we conclude that these metrics fail to capture the correlation between events specified in the stp resulting in an overestimation of the available flexibility in the system we propose to use an intuitively more acceptable flexibility metric based upon uncorrelated time intervals for the allowed starting times of events in an stp this metric is shown to be computable in low polynomial time as a byproduct of the flexibility 
computation we get a decomposition of the stn almost for free for every possible k partitioning of the event space a decomposition can be computed in o k time even more importantly we show that contrary to popular belief such a decomposition does not affect the flexibility of the original stp", + "title_raw": "Flexibility and decoupling in the simple temporal problem", + "abstract_raw": "In this paper we concentrate on finding a suitable metric to determine the flexibility of a Simple Temporal Problem (STP). After reviewing some flexibility metrics that have been proposed, we conclude that these metrics fail to capture the correlation between events specified in the STP, resulting in an overestimation of the available flexibility in the system. We propose to use an intuitively more acceptable flexibility metric based upon uncorrelated time-intervals for the allowed starting times of events in an STP. This metric is shown to be computable in low-polynomial time. As a byproduct of the flexibility computation, we get a decomposition of the STN almost for free: for every possible k-partitioning of the event space, a decomposition can be computed in O(k)-time. 
Even more importantly, we show that contrary to popular belief, such a decomposition does not affect the flexibility of the original STP.", + "link": "https://www.semanticscholar.org/paper/c614441abecf5a20531585208bf2194bd1b8fbcd", + "scraped_abstract": null, + "citation_best": 7 + }, + { + "paper": "2141336889", + "venue": "1123349196", + "year": "2013", + "title": "whole home gesture recognition using wireless signals", + "label": [ + "28490314", + "193293595", + "177264268", + "159437735", + "207347870", + "555944384", + "118530786", + "31972630" + ], + "author": [ + "2122055518", + "2915322351", + "2039996270", + "2127286128" + ], + "reference": [ + "1527747735", + "1585879837", + "1979018848", + "2034042141", + "2038063001", + "2093639577", + "2099257422", + "2099800354", + "2100147865", + "2103331800", + "2111986491", + "2122125652", + "2129151684", + "2144916689", + "2151034334", + "2153200718", + "2164692160", + "2169709590", + "2170240475", + "2171375393", + "2172156083", + "2949737820", + "2949821273", + "3200950865" + ], + "abstract": "this paper presents wisee a novel gesture recognition system that leverages wireless signals e g wi fi to enable whole home sensing and recognition of human gestures since wireless signals do not require line of sight and can traverse through walls wisee can enable whole home gesture recognition using few wireless sources further it achieves this goal without requiring instrumentation of the human body with sensing devices we implement a proof of concept prototype of wisee using usrp n210s and evaluate it in both an office environment and a two bedroom apartment our results show that wisee can identify and classify a set of nine gestures with an average accuracy of 94", + "title_raw": "Whole-home gesture recognition using wireless signals", + "abstract_raw": "This paper presents WiSee, a novel gesture recognition system that leverages wireless signals (e.g., Wi-Fi) to enable whole-home sensing and recognition of human 
gestures. Since wireless signals do not require line-of-sight and can traverse through walls, WiSee can enable whole-home gesture recognition using few wireless sources. Further, it achieves this goal without requiring instrumentation of the human body with sensing devices. We implement a proof-of-concept prototype of WiSee using USRP-N210s and evaluate it in both an office environment and a two- bedroom apartment. Our results show that WiSee can identify and classify a set of nine gestures with an average accuracy of 94%.", + "link": "https://www.semanticscholar.org/paper/3bb76c2989cb4aeae3b20f42e619a862f0d871ca", + "scraped_abstract": null, + "citation_best": 980 + }, + { + "paper": "2126709939", + "venue": "1127325140", + "year": "2013", + "title": "scalable influence estimation in continuous time diffusion networks", + "label": [ + "45374587", + "48044578", + "177264268", + "128669082" + ], + "author": [ + "2224413342", + "2113868374", + "2279633593", + "2099091510" + ], + "reference": [ + "19838944", + "188609219", + "1680189815", + "1965996575", + "1984069252", + "1991635064", + "1996816151", + "2056609785", + "2061820396", + "2092418988", + "2096845327", + "2101645017", + "2108278206", + "2108858998", + "2109533003", + "2112681514", + "2127434196", + "2127492100", + "2128914432", + "2141403143", + "2142880609", + "2164067128", + "2164900957", + "2185197589", + "2949567784", + "2952347589", + "3118655244" + ], + "abstract": "if a piece of information is released from a media site can we predict whether it may spread to one million web pages in a month this influence estimation problem is very challenging since both the time sensitive nature of the task and the requirement of scalability need to be addressed simultaneously in this paper we propose a randomized algorithm for influence estimation in continuous time diffusion networks our algorithm can estimate the influence of every node in a network with v nodes and e edges to an accuracy of e using n o 1 e2 
randomizations and up to logarithmic factors o n e n v computations when used as a subroutine in a greedy influence maximization approach our proposed algorithm is guaranteed to find a set of c nodes with the influence of at least 1 1 e opt 2ce where opt is the optimal value experiments on both synthetic and real world data show that the proposed algorithm can easily scale up to networks of millions of nodes while significantly improves over previous state of the arts in terms of the accuracy of the estimated influence and the quality of the selected nodes in maximizing the influence", + "title_raw": "Scalable Influence Estimation in Continuous-Time Diffusion Networks", + "abstract_raw": "If a piece of information is released from a media site, can we predict whether it may spread to one million web pages, in a month ? This influence estimation problem is very challenging since both the time-sensitive nature of the task and the requirement of scalability need to be addressed simultaneously. In this paper, we propose a randomized algorithm for influence estimation in continuous-time diffusion networks. Our algorithm can estimate the influence of every node in a network with |V| nodes and |e| edges to an accuracy of e using n = O(1/e2) randomizations and up to logarithmic factors O(n|e| + n|V|) computations. When used as a subroutine in a greedy influence maximization approach, our proposed algorithm is guaranteed to find a set of C nodes with the influence of at least (1 - 1/e) OPT -2Ce, where OPT is the optimal value. 
Experiments on both synthetic and real-world data show that the proposed algorithm can easily scale up to networks of millions of nodes while significantly improves over previous state-of-the-arts in terms of the accuracy of the estimated influence and the quality of the selected nodes in maximizing the influence.", + "link": "https://scholar.google.com/scholar?as_q=&num=10&btnG=Search+Scholar&as_epq=Scalable+Influence+Estimation+in+Continuous-Time+Diffusion+Networks&as_oq=&as_eq=&as_occt=any&as_sauthors=Du", + "scraped_abstract": null, + "citation_best": 141 + }, + { + "paper": "73604409", + "venue": "1158363782", + "year": "2013", + "title": "embassies radically refactoring the web", + "label": [ + "152752567", + "38652104", + "147346212", + "184337299", + "118643609", + "25621077", + "197362993", + "136764020", + "79974875", + "115168132" + ], + "author": [ + "2128381254", + "1595681779", + "135218249" + ], + "reference": [ + "15883", + "118511894", + "118559593", + "120838261", + "169545057", + "203986211", + "1519889149", + "1566345534", + "1568223756", + "1601735929", + "1644882639", + "1705596515", + "1737016141", + "1824799905", + "1907897959", + "1969295446", + "1980800818", + "2006989942", + "2010608681", + "2028914809", + "2029349492", + "2032309817", + "2034120733", + "2056073317", + "2062340141", + "2067538969", + "2067580212", + "2072768743", + "2072978486", + "2089775132", + "2098698881", + "2103131117", + "2107252100", + "2107881300", + "2107906795", + "2116236383", + "2122847456", + "2124282422", + "2129630219", + "2149684006", + "2154884316", + "2159079348", + "2159890891", + "2160343382", + "2163615516", + "2216311525", + "2298330071", + "2530174753", + "2614344471" + ], + "abstract": "web browsers ostensibly provide strong isolation for the client side components of web applications unfortunately this isolation is weak in practice as browsers add increasingly rich apis to please developers these complex interfaces bloat the trusted computing 
base and erode cross app isolation boundaries we reenvision the web interface based on the notion of a pico datacenter the client side version of a shared server datacenter mutually untrusting vendors run their code on the user s computer in low level native code containers that communicate with the outside world only via ip just as in the cloud datacenter the simple semantics makes isolation tractable yet native code gives vendors the freedom to run any software stack since the datacenter model is designed to be robust to malicious tenants it is never dangerous for the user to click a link and invite a possibly hostile party onto the client", + "title_raw": "Embassies: radically refactoring the web", + "abstract_raw": "Web browsers ostensibly provide strong isolation for the client-side components of web applications. Unfortunately, this isolation is weak in practice; as browsers add increasingly rich APIs to please developers, these complex interfaces bloat the trusted computing base and erode cross-app isolation boundaries.\r\n\r\nWe reenvision the web interface based on the notion of a pico-datacenter, the client-side version of a shared server datacenter. Mutually untrusting vendors run their code on the user's computer in low-level native code containers that communicate with the outside world only via IP. Just as in the cloud datacenter, the simple semantics makes isolation tractable, yet native code gives vendors the freedom to run any software stack. 
Since the datacenter model is designed to be robust to malicious tenants, it is never dangerous for the user to click a link and invite a possibly-hostile party onto the client.", + "link": "https://www.semanticscholar.org/paper/5d54d4a3b505b2b214eac85d0b62d4162d1cac3b", + "scraped_abstract": null, + "citation_best": 32 + }, + { + "paper": "2157216158", + "venue": "1127352206", + "year": "2013", + "title": "a general constraint centric scheduling framework for spatial architectures", + "label": [ + "56086750", + "45374587", + "113200698", + "169590947", + "173608175", + "120314980", + "127705205" + ], + "author": [ + "1989328337", + "266250343", + "1972398383", + "1209998147", + "2631780626", + "284070397" + ], + "reference": [ + "58680384", + "1480909796", + "1491178396", + "1595885266", + "1841149090", + "1964084906", + "1968143987", + "1969529818", + "1970141743", + "1975489482", + "1992908040", + "1995270665", + "1997161306", + "2006312753", + "2016628444", + "2038509324", + "2040167141", + "2065439108", + "2072298242", + "2072344787", + "2074798342", + "2083868341", + "2094999871", + "2097699872", + "2099973165", + "2105884870", + "2112575418", + "2116556636", + "2119217094", + "2122171990", + "2125463025", + "2129207930", + "2130408605", + "2131081741", + "2131929304", + "2134094348", + "2134886942", + "2142547931", + "2144271506", + "2144327181", + "2147345262", + "2150871888", + "2153331583", + "2153882047", + "2159456929", + "2159742171", + "2162802294", + "2169339215", + "2187230075", + "2544732887" + ], + "abstract": "specialized execution using spatial architectures provides energy efficient computation but requires effective algorithms for spatially scheduling the computation generally this has been solved with architecture specific heuristics an approach which suffers from poor compiler architect productivity lack of insight on optimality and inhibits migration of techniques between architectures our goal is to develop a scheduling framework usable 
for all spatial architectures to this end we expresses spatial scheduling as a constraint satisfaction problem using integer linear programming ilp we observe that architecture primitives and scheduler responsibilities can be related through five abstractions placement of computation routing of data managing event timing managing resource utilization and forming the optimization objectives we encode these responsibilities as 20 general ilp constraints which are used to create schedulers for the disparate trips dyser and plug architectures our results show that a general declarative approach using ilp is implementable practical and typically matches or outperforms specialized schedulers", + "title_raw": "A general constraint-centric scheduling framework for spatial architectures", + "abstract_raw": "Specialized execution using spatial architectures provides energy efficient computation, but requires effective algorithms for spatially scheduling the computation. Generally, this has been solved with architecture-specific heuristics, an approach which suffers from poor compiler/architect productivity, lack of insight on optimality, and inhibits migration of techniques between architectures. Our goal is to develop a scheduling framework usable for all spatial architectures. To this end, we expresses spatial scheduling as a constraint satisfaction problem using Integer Linear Programming (ILP). We observe that architecture primitives and scheduler responsibilities can be related through five abstractions: placement of computation, routing of data, managing event timing, managing resource utilization, and forming the optimization objectives. We encode these responsibilities as 20 general ILP constraints, which are used to create schedulers for the disparate TRIPS, DySER, and PLUG architectures. 
Our results show that a general declarative approach using ILP is implementable, practical, and typically matches or outperforms specialized schedulers.", + "link": "https://www.semanticscholar.org/paper/a4f5a4296f29e1457b84b68c08d98e3f338145d6", + "scraped_abstract": null, + "citation_best": 65 + }, + { + "paper": "2047068447", + "venue": "1127352206", + "year": "2013", + "title": "clap recording local executions to reproduce concurrency failures", + "label": [ + "133875982", + "12186640", + "82029504", + "48044578", + "138101251", + "193702766", + "173608175", + "53833338" + ], + "author": [ + "2153449647", + "2189948379", + "2089081252" + ], + "reference": [ + "92576581", + "119716405", + "1480909796", + "1522334395", + "1581214603", + "1710734607", + "1887412317", + "1986463648", + "1986819805", + "2020021151", + "2035382792", + "2049381173", + "2057979783", + "2080869721", + "2084719450", + "2096527448", + "2097576663", + "2098643127", + "2100399943", + "2100627043", + "2100889285", + "2100894869", + "2101134669", + "2106471636", + "2108112890", + "2108806129", + "2115732097", + "2115855199", + "2120261600", + "2122170581", + "2127445923", + "2129487583", + "2130473288", + "2131623415", + "2133697637", + "2134540982", + "2135948849", + "2149051149", + "2152795747", + "2154698535", + "2157479538", + "2158449625", + "2185208298", + "3136721778", + "3147113554", + "3147275543" + ], + "abstract": "we present clap a new technique to reproduce concurrency bugs clap has two key steps first it logs thread local execution paths at runtime second offline it computes memory dependencies that accord with the logged execution and are able to reproduce the observed bug the second step works by combining constraints from the thread paths and constraints based on a memory model and computing an execution with a constraint solver clap has four major advantages first logging purely local execution of each thread is substantially cheaper than logging memory interactions which 
enables clap to be efficient compared to previous approaches second our logging does not require any synchronization and hence with no added memory barriers or fences this minimizes perturbation and missed bugs due to extra synchronizations foreclosing certain racy behaviors third since it uses no synchronization we extend clap to work on a range of relaxed memory models such as tso and pso in addition to sequential consistency fourth clap can compute a much simpler execution than the original one that reveals the bug with minimal thread context switches to mitigate the scalability issues we also present an approach to parallelize constraint solving which theoretically scales our technique to programs with arbitrary execution length experimental results on a variety of multithreaded benchmarks and real world concurrent applications validate these advantages by showing that our technique is effective in reproducing concurrency bugs even under relaxed memory models furthermore it is significantly more efficient than a state of the art technique that records shared memory dependencies reducing execution time overhead by 45 and log size by 88 on average", + "title_raw": "CLAP: recording local executions to reproduce concurrency failures", + "abstract_raw": "We present CLAP, a new technique to reproduce concurrency bugs. CLAP has two key steps. First, it logs thread local execution paths at runtime. Second, offline, it computes memory dependencies that accord with the logged execution and are able to reproduce the observed bug. The second step works by combining constraints from the thread paths and constraints based on a memory model, and computing an execution with a constraint solver. CLAP has four major advantages. First, logging purely local execution of each thread is substantially cheaper than logging memory interactions, which enables CLAP to be efficient compared to previous approaches. 
Second, our logging does not require any synchronization and hence with no added memory barriers or fences; this minimizes perturbation and missed bugs due to extra synchronizations foreclosing certain racy behaviors. Third, since it uses no synchronization, we extend CLAP to work on a range of relaxed memory models, such as TSO and PSO, in addition to sequential consistency. Fourth, CLAP can compute a much simpler execution than the original one, that reveals the bug with minimal thread context switches. To mitigate the scalability issues, we also present an approach to parallelize constraint solving, which theoretically scales our technique to programs with arbitrary execution length. Experimental results on a variety of multithreaded benchmarks and real world concurrent applications validate these advantages by showing that our technique is effective in reproducing concurrency bugs even under relaxed memory models; furthermore, it is significantly more efficient than a state-of-the-art technique that records shared memory dependencies, reducing execution time overhead by 45% and log size by 88% on average.", + "link": "https://www.semanticscholar.org/paper/8f1fd357b62c946f6b415f8d49c808bb6137bb97", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2048025009", + "venue": "1127352206", + "year": "2013", + "title": "static analysis for probabilistic programs inferring whole program properties from finitely many paths", + "label": [ + "49937458", + "55439883", + "45374587", + "2778865114", + "177264268", + "2779639559", + "80444323", + "97686452" + ], + "author": [ + "2142544221", + "190390398", + "310804771" + ], + "reference": [ + "32064870", + "41007281", + "88990729", + "182014611", + "1512921847", + "1527197079", + "1545062007", + "1569248486", + "1729037535", + "1734364899", + "1777846110", + "1791348790", + "1862398452", + "1884784351", + "1890754682", + "1975447428", + "1990880047", + "2013979082", + "2014894062", + "2024355065", + 
"2029405758", + "2035720425", + "2060573639", + "2067971887", + "2073787328", + "2098294295", + "2104044130", + "2109426455", + "2121382061", + "2130514924", + "2137536943", + "2169784622", + "2273879208", + "2295349525", + "2295428206", + "2797148637", + "2913788705", + "2952886755", + "3020882730", + "3030098241" + ], + "abstract": "we propose an approach for the static analysis of probabilistic programs that sense manipulate and control based on uncertain data examples include programs used in risk analysis medical decision making and cyber physical systems correctness properties of such programs take the form of queries that seek the probabilities of assertions over program variables we present a static analysis approach that provides guaranteed interval bounds on the values assertion probabilities of such queries first we observe that for probabilistic programs it is possible to conclude facts about the behavior of the entire program by choosing a finite adequate set of its paths we provide strategies for choosing such a set of paths and verifying its adequacy the queries are evaluated over each path by a combination of symbolic execution and probabilistic volume bound computations each path yields interval bounds that can be summed up with a coverage bound to yield an interval that encloses the probability of assertion for the program as a whole we demonstrate promising results on a suite of benchmarks from many different sources including robotic manipulators and medical decision making programs", + "title_raw": "Static analysis for probabilistic programs: inferring whole program properties from finitely many paths", + "abstract_raw": "We propose an approach for the static analysis of probabilistic programs that sense, manipulate, and control based on uncertain data. Examples include programs used in risk analysis, medical decision making and cyber-physical systems. 
Correctness properties of such programs take the form of queries that seek the probabilities of assertions over program variables. We present a static analysis approach that provides guaranteed interval bounds on the values (assertion probabilities) of such queries. First, we observe that for probabilistic programs, it is possible to conclude facts about the behavior of the entire program by choosing a finite, adequate set of its paths. We provide strategies for choosing such a set of paths and verifying its adequacy. The queries are evaluated over each path by a combination of symbolic execution and probabilistic volume-bound computations. Each path yields interval bounds that can be summed up with a \"coverage\" bound to yield an interval that encloses the probability of assertion for the program as a whole. We demonstrate promising results on a suite of benchmarks from many different sources including robotic manipulators and medical decision making programs.", + "link": "https://www.semanticscholar.org/paper/eb4ee4b44f88d941d0bb1f612b6eb4745833ef4a", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2063553958", + "venue": "1127352206", + "year": "2013", + "title": "reconciling exhaustive pattern matching with objects", + "label": [ + "83852419", + "68859911", + "164155591", + "40608802", + "548217200", + "169590947", + "199360897", + "42383842", + "152124472", + "80444323" + ], + "author": [ + "2223294179", + "2141746463" + ], + "reference": [ + "80361977", + "100631053", + "205696660", + "1480909796", + "1525860943", + "1586902756", + "1588812554", + "1606121052", + "1968796785", + "1982243747", + "2000616678", + "2011660287", + "2035731741", + "2057405511", + "2070482555", + "2105427466", + "2106348655", + "2109795737", + "2111151482", + "2114980032", + "2117534212", + "2128186675", + "2131052253", + "2133546079", + "2137781785", + "2138964563", + "2150470619", + "2160877060", + "2162315884", + "2162525339", + "2295917400", + "2500112877" 
+ ], + "abstract": "pattern matching an important feature of functional languages is in conflict with data abstraction and extensibility which are central to object oriented languages modal abstraction offers an integration of deep pattern matching and convenient iteration abstractions into an object oriented setting however because of data abstraction it is challenging for a compiler to statically verify properties such as exhaustiveness in this work we extend modal abstraction in the jmatch language to support static modular reasoning about exhaustiveness and redundancy new matching specifications allow these properties to be checked using an smt solver we also introduce expressive pattern matching constructs our evaluation shows that these new features enable more concise code and that the performance of checking exhaustiveness and redundancy is acceptable", + "title_raw": "Reconciling exhaustive pattern matching with objects", + "abstract_raw": "Pattern matching, an important feature of functional languages, is in conflict with data abstraction and extensibility, which are central to object-oriented languages. Modal abstraction offers an integration of deep pattern matching and convenient iteration abstractions into an object-oriented setting; however, because of data abstraction, it is challenging for a compiler to statically verify properties such as exhaustiveness. In this work, we extend modal abstraction in the JMatch language to support static, modular reasoning about exhaustiveness and redundancy. New matching specifications allow these properties to be checked using an SMT solver. We also introduce expressive pattern-matching constructs. 
Our evaluation shows that these new features enable more concise code and that the performance of checking exhaustiveness and redundancy is acceptable.", + "link": "https://www.semanticscholar.org/paper/c4add8d769fdddf16efd31027a9c9fa03c0381be", + "scraped_abstract": null, + "citation_best": 1 + }, + { + "paper": "2018746447", + "venue": "1163618098", + "year": "2013", + "title": "pinocchio nearly practical verifiable computation", + "label": [ + "55439883", + "33884865", + "45374587", + "80444323", + "200632571", + "111498074", + "178489894", + "203062551", + "2777062904", + "2776827251" + ], + "author": [ + "1595681779", + "2128381254", + "2339798781", + "2310902771" + ], + "reference": [ + "4180724", + "8137136", + "32654648", + "54526233", + "167928972", + "1480225633", + "1504669610", + "1506068270", + "1525737403", + "1538440442", + "1555566055", + "1557386445", + "1564691070", + "1566967335", + "1569229205", + "1576967699", + "1577653766", + "1584915320", + "1900771389", + "1905774212", + "1930567086", + "1970606468", + "1970808997", + "1971394347", + "1993673439", + "2019578639", + "2041068798", + "2045717693", + "2049982182", + "2051250911", + "2055456945", + "2059671515", + "2067047774", + "2067596507", + "2086042811", + "2089537103", + "2102632861", + "2103363198", + "2108255910", + "2110698954", + "2114579022", + "2116556172", + "2116653107", + "2117797270", + "2119948977", + "2122718825", + "2134679597", + "2141395382", + "2146099890", + "2148352980", + "2153553074", + "2169657197", + "2171960770", + "2192481898", + "2398448180", + "2401959250", + "2402869180", + "2526015247", + "2571694663", + "2963893667" + ], + "abstract": "to instill greater confidence in computations outsourced to the cloud clients should be able to verify the correctness of the results returned to this end we introduce pinocchio a built system for efficiently verifying general computations while relying only on cryptographic assumptions with pinocchio the client creates a public 
evaluation key to describe her computation this setup is proportional to evaluating the computation once the worker then evaluates the computation on a particular input and uses the evaluation key to produce a proof of correctness the proof is only 288 bytes regardless of the computation performed or the size of the inputs and outputs anyone can use a public verification key to check the proof crucially our evaluation on seven applications demonstrates that pinocchio is efficient in practice too pinocchio s verification time is typically 10ms 5 7 orders of magnitude less than previous work indeed pinocchio is the first general purpose system to demonstrate verification cheaper than native execution for some apps pinocchio also reduces the worker s proof effort by an additional 19 60x as an additional feature pinocchio generalizes to zero knowledge proofs at a negligible cost over the base protocol finally to aid development pinocchio provides an end to end toolchain that compiles a subset of c into programs that implement the verifiable computation protocol", + "title_raw": "Pinocchio: Nearly Practical Verifiable Computation", + "abstract_raw": "To instill greater confidence in computations outsourced to the cloud, clients should be able to verify the correctness of the results returned. To this end, we introduce Pinocchio, a built system for efficiently verifying general computations while relying only on cryptographic assumptions. With Pinocchio, the client creates a public evaluation key to describe her computation; this setup is proportional to evaluating the computation once. The worker then evaluates the computation on a particular input and uses the evaluation key to produce a proof of correctness. The proof is only 288 bytes, regardless of the computation performed or the size of the inputs and outputs. Anyone can use a public verification key to check the proof. 
Crucially, our evaluation on seven applications demonstrates that Pinocchio is efficient in practice too. Pinocchio's verification time is typically 10ms: 5-7 orders of magnitude less than previous work; indeed Pinocchio is the first general-purpose system to demonstrate verification cheaper than native execution (for some apps). Pinocchio also reduces the worker's proof effort by an additional 19-60x. As an additional feature, Pinocchio generalizes to zero-knowledge proofs at a negligible cost over the base protocol. Finally, to aid development, Pinocchio provides an end-to-end toolchain that compiles a subset of C into programs that implement the verifiable computation protocol.", + "link": "https://www.semanticscholar.org/paper/4b9f46104da1013651c1800ba307a0207799f176", + "scraped_abstract": null, + "citation_best": 774 + }, + { + "paper": "2099461393", + "venue": "1152462849", + "year": "2013", + "title": "ambient backscatter wireless communication out of thin air", + "label": [ + "96513508", + "101765175", + "555944384", + "98045186", + "76155785", + "74064498" + ], + "author": [ + "2062601765", + "2150791756", + "669839582", + "2039996270", + "2064270007", + "2154841660" + ], + "reference": [ + "1569638374", + "1966068407", + "1973794041", + "1975671167", + "1978451063", + "1991766833", + "2006578550", + "2010359062", + "2010759320", + "2019673433", + "2050164148", + "2088627138", + "2095581751", + "2097065647", + "2111652029", + "2116295576", + "2126470432", + "2133659287", + "2134179788", + "2140596192", + "2147946355", + "2148532538", + "2150689903", + "2159205185", + "2167547881", + "2167990963", + "2259610917", + "2340429540", + "2553603086", + "2583922584", + "2798333393" + ], + "abstract": "we present the design of a communication system that enables two devices to communicate using ambient rf as the only source of power our approach leverages existing tv and cellular transmissions to eliminate the need for wires and batteries thus enabling ubiquitous 
communication where devices can communicate among themselves at unprecedented scales and in locations that were previously inaccessible to achieve this we introduce ambient backscatter a new communication primitive where devices communicate by backscattering ambient rf signals our design avoids the expensive process of generating radio waves backscatter communication is orders of magnitude more power efficient than traditional radio communication further since it leverages the ambient rf signals that are already around us it does not require a dedicated power infrastructure as in traditional backscatter communication to show the feasibility of our design we prototype ambient backscatter devices in hardware and achieve information rates of 1 kbps over distances of 2 5 feet and 1 5 feet while operating outdoors and indoors respectively we use our hardware prototype to implement proof of concepts for two previously infeasible ubiquitous communication applications", + "title_raw": "Ambient backscatter: wireless communication out of thin air", + "abstract_raw": "We present the design of a communication system that enables two devices to communicate using ambient RF as the only source of power. Our approach leverages existing TV and cellular transmissions to eliminate the need for wires and batteries, thus enabling ubiquitous communication where devices can communicate among themselves at unprecedented scales and in locations that were previously inaccessible. To achieve this, we introduce ambient backscatter, a new communication primitive where devices communicate by backscattering ambient RF signals. Our design avoids the expensive process of generating radio waves; backscatter communication is orders of magnitude more power-efficient than traditional radio communication. Further, since it leverages the ambient RF signals that are already around us, it does not require a dedicated power infrastructure as in traditional backscatter communication. 
To show the feasibility of our design, we prototype ambient backscatter devices in hardware and achieve information rates of 1 kbps over distances of 2.5 feet and 1.5 feet, while operating outdoors and indoors respectively. We use our hardware prototype to implement proof-of-concepts for two previously infeasible ubiquitous communication applications.", + "link": "https://www.semanticscholar.org/paper/f8baf50e91121229e9ee2239d64231bc3b5e23c0", + "scraped_abstract": null, + "citation_best": 65 + }, + { + "paper": "2159205954", + "venue": "1140684652", + "year": "2013", + "title": "beliefs and biases in web search", + "label": [ + "97854310", + "521815418", + "86037889", + "23123220" + ], + "author": [ + "2096583854" + ], + "reference": [ + "141231646", + "158727920", + "612181259", + "1565693361", + "1583059097", + "1629317078", + "1787140601", + "1802760973", + "1911711584", + "1969340322", + "1970381522", + "1974360117", + "1975879668", + "1986776260", + "1992549066", + "2031160476", + "2035782089", + "2047221353", + "2059374265", + "2075585362", + "2093016541", + "2095627566", + "2099685860", + "2104677329", + "2113788004", + "2115281393", + "2119265954", + "2121354841", + "2122841972", + "2124371667", + "2125771191", + "2133156844", + "2134747565", + "2135500808", + "2137222670", + "2139450192", + "2148869009", + "2149490731", + "2153253904", + "2155355283", + "2155587858", + "2158450083", + "2164498220", + "2165612380", + "2165925342", + "2166743161", + "2168717408", + "2197919320", + "2290826340", + "3142744039" + ], + "abstract": "people s beliefs and unconscious biases that arise from those beliefs influence their judgment decision making and actions as is commonly accepted among psychologists biases can be observed in information retrieval in situations where searchers seek or are presented with information that significantly deviates from the truth there is little understanding of the impact of such biases in search in this paper we study search related 
biases via multiple probes an exploratory retrospective survey human labeling of the captions and results returned by a web search engine and a large scale log analysis of search behavior on that engine targeting yes no questions in the critical domain of health search we show that web searchers exhibit their own biases and are also subject to bias from the search engine we clearly observe searchers favoring positive information over negative and more than expected given base rates based on consensus answers from physicians we also show that search engines strongly favor a particular usually positive perspective irrespective of the truth importantly we show that these biases can be counterproductive and affect search outcomes in our study around half of the answers that searchers settled on were actually incorrect our findings have implications for search engine design including the development of ranking algorithms that con sider the desire to satisfy searchers by validating their beliefs and providing accurate answers and properly considering base rates incorporating likelihood information into search is particularly important for consequential tasks such as those with a medical focus", + "title_raw": "Beliefs and biases in web search", + "abstract_raw": "People's beliefs, and unconscious biases that arise from those beliefs, influence their judgment, decision making, and actions, as is commonly accepted among psychologists. Biases can be observed in information retrieval in situations where searchers seek or are presented with information that significantly deviates from the truth. There is little understanding of the impact of such biases in search. In this paper we study search-related biases via multiple probes: an exploratory retrospective survey, human labeling of the captions and results returned by a Web search engine, and a large-scale log analysis of search behavior on that engine. 
Targeting yes-no questions in the critical domain of health search, we show that Web searchers exhibit their own biases and are also subject to bias from the search engine. We clearly observe searchers favoring positive information over negative and more than expected given base rates based on consensus answers from physicians. We also show that search engines strongly favor a particular, usually positive, perspective, irrespective of the truth. Importantly, we show that these biases can be counterproductive and affect search outcomes; in our study, around half of the answers that searchers settled on were actually incorrect. Our findings have implications for search engine design, including the development of ranking algorithms that con-sider the desire to satisfy searchers (by validating their beliefs) and providing accurate answers and properly considering base rates. Incorporating likelihood information into search is particularly important for consequential tasks, such as those with a medical focus.", + "link": "https://www.semanticscholar.org/paper/2f278d1dab0f6e3939c747a2fc2a4cecdfc912b9", + "scraped_abstract": null, + "citation_best": 153 + }, + { + "paper": "2145432435", + "venue": "1131589359", + "year": "2013", + "title": "queueing system topologies with limited flexibility", + "label": [ + "199845137", + "22684755", + "113200698", + "167272206", + "33891772", + "160403385" + ], + "author": [ + "80220757", + "2697576974" + ], + "reference": [ + "1591180426", + "1746293134", + "1964781810", + "1974518363", + "1998145795", + "2003346154", + "2011786610", + "2021434280", + "2041726543", + "2043953242", + "2045657900", + "2062832101", + "2110240045", + "2119895774", + "2122320538", + "2125575238", + "2135263108", + "2136380306", + "2147064710", + "2150041832", + "2155393670", + "2164816212", + "2168980528", + "2170376751", + "2182521281" + ], + "abstract": "we study a multi server model with n flexible servers and rn queues connected through a fixed 
bipartite graph where the level of flexibility is captured by the average degree d n of the queues applications in content replication in data centers skill based routing in call centers and flexible supply chains are among our main motivations we focus on the scaling regime where the system size n tends to infinity while the overall traffic intensity stays fixed we show that a large capacity region robustness and diminishing queueing delay performance are jointly achievable even under very limited flexibility d n l n in particular when d n gg ln n a family of random graph based interconnection topologies is with high probability capable of stabilizing all admissible arrival rate vectors under a bounded support assumption while simultaneously ensuring a diminishing queueing delay of order ln n d n as n our analysis is centered around a new class of virtual queue based scheduling policies that rely on dynamically constructed partial matchings on the connectivity graph", + "title_raw": "Queueing system topologies with limited flexibility", + "abstract_raw": "We study a multi-server model with n flexible servers and rn queues, connected through a fixed bipartite graph, where the level of flexibility is captured by the average degree, d(n), of the queues. Applications in content replication in data centers, skill-based routing in call centers, and flexible supply chains are among our main motivations. We focus on the scaling regime where the system size n tends to infinity, while the overall traffic intensity stays fixed. We show that a large capacity region (robustness) and diminishing queueing delay (performance) are jointly achievable even under very limited flexibility (d(n) l n). 
In particular, when d(n) gg ln n , a family of random-graph-based interconnection topologies is (with high probability) capable of stabilizing all admissible arrival rate vectors (under a bounded support assumption), while simultaneously ensuring a diminishing queueing delay, of order ln n/ d(n), as n-> \u221e. Our analysis is centered around a new class of virtual-queue-based scheduling policies that rely on dynamically constructed partial matchings on the connectivity graph.", + "link": "https://www.semanticscholar.org/paper/464337b5392bbbb100074a7b4e13cae1e71685fc", + "scraped_abstract": null, + "citation_best": 32 + }, + { + "paper": "2002203222", + "venue": "1175089206", + "year": "2013", + "title": "massive graph triangulation", + "label": [ + "205711294", + "136134403", + "80444323" + ], + "author": [ + "2171908625", + "2131082813", + "2107685843" + ], + "reference": [ + "1027972153", + "1482680420", + "1489509891", + "1904294951", + "1979110895", + "1991858502", + "1998341696", + "2000979164", + "2010042187", + "2012720017", + "2013414394", + "2016311778", + "2019724001", + "2023536175", + "2026160696", + "2030088585", + "2038142281", + "2055245094", + "2068700117", + "2068871408", + "2106116249", + "2111787123", + "2112090702", + "2588458088", + "2950661867", + "3102961618" + ], + "abstract": "this paper studies i o efficient algorithms for settling the classic triangle listing problem whose solution is a basic operator in dealing with many other graph problems specifically given an undirected graph g the objective of triangle listing is to find all the cliques involving 3 vertices in g the problem has been well studied in internal memory but remains an urgent difficult challenge when g does not fit in memory rendering any algorithm to entail frequent i o accesses although previous research has attempted to tackle the challenge the state of the art solutions rely on a set of crippling assumptions to guarantee good performance motivated by this we develop a 
new algorithm that is provably i o and cpu efficient at the same time without making any assumption on the input g at all the algorithm uses ideas drastically different from all the previous approaches and outperformed the existing competitors by a factor over an order of magnitude in our extensive experimentation", + "title_raw": "Massive graph triangulation", + "abstract_raw": "This paper studies I/O-efficient algorithms for settling the classic triangle listing problem, whose solution is a basic operator in dealing with many other graph problems. Specifically, given an undirected graph G, the objective of triangle listing is to find all the cliques involving 3 vertices in G. The problem has been well studied in internal memory, but remains an urgent difficult challenge when G does not fit in memory, rendering any algorithm to entail frequent I/O accesses. Although previous research has attempted to tackle the challenge, the state-of-the-art solutions rely on a set of crippling assumptions to guarantee good performance. Motivated by this, we develop a new algorithm that is provably I/O and CPU efficient at the same time, without making any assumption on the input G at all. 
The algorithm uses ideas drastically different from all the previous approaches, and outperformed the existing competitors by a factor over an order of magnitude in our extensive experimentation.", + "link": "https://www.semanticscholar.org/paper/fbeea48196d128288c0e314ba544497a7ac18771", + "scraped_abstract": null, + "citation_best": 132 + }, + { + "paper": "2104670257", + "venue": "1171178643", + "year": "2013", + "title": "the scalable commutativity rule designing scalable software for multicore processors", + "label": [ + "111919701", + "48044578", + "532756234", + "2777904410", + "78766204", + "25621077", + "2778579508" + ], + "author": [ + "2171474616", + "2779384724", + "1150986126", + "2270424441", + "2114981089" + ], + "reference": [ + "149219117", + "190062532", + "1480909796", + "1567570559", + "1587553526", + "1710734607", + "1938553034", + "1963506672", + "1969794418", + "1978958605", + "1986009031", + "1988597627", + "1993505169", + "2001738739", + "2009489720", + "2064091303", + "2064372914", + "2073256416", + "2096449544", + "2100817684", + "2101939036", + "2104251622", + "2106782772", + "2119717320", + "2129458440", + "2130068507", + "2132897303", + "2157092502", + "2163121173", + "2165772200", + "2168075869", + "2173730676", + "2364800968", + "2584045800", + "2912966011" + ], + "abstract": "what fundamental opportunities for scalability are latent in interfaces such as system call apis can scalability opportunities be identified even before any implementation exists simply by considering interface specifications to answer these questions this paper introduces the following rule whenever interface operations commute they can be implemented in a way that scales this rule aids developers in building more scalable software starting from interface design and carrying on through implementation testing and evaluation to help developers apply the rule a new tool named commuter accepts high level interface models and generates tests of operations that 
commute and hence could scale using these tests commuter can evaluate the scalability of an implementation we apply commuter to 18 posix calls and use the results to guide the implementation of a new research operating system kernel called sv6 linux scales for 68 of the 13 664 tests generated by commuter for these calls and commuter finds many problems that have been observed to limit application scalability sv6 scales for 99 of the tests", + "title_raw": "The scalable commutativity rule: designing scalable software for multicore processors", + "abstract_raw": "What fundamental opportunities for scalability are latent in interfaces, such as system call APIs? Can scalability opportunities be identified even before any implementation exists, simply by considering interface specifications? To answer these questions this paper introduces the following rule: Whenever interface operations commute, they can be implemented in a way that scales. This rule aids developers in building more scalable software starting from interface design and carrying on through implementation, testing, and evaluation. To help developers apply the rule, a new tool named Commuter accepts high-level interface models and generates tests of operations that commute and hence could scale. Using these tests, Commuter can evaluate the scalability of an implementation. We apply Commuter to 18 POSIX calls and use the results to guide the implementation of a new research operating system kernel called sv6. Linux scales for 68% of the 13,664 tests generated by Commuter for these calls, and Commuter finds many problems that have been observed to limit application scalability. 
sv6 scales for 99% of the tests.", + "link": "https://www.semanticscholar.org/paper/af4c61c2bbe823d1013c2c695968b6af9584a102", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "1978364288", + "venue": "1171178643", + "year": "2013", + "title": "towards optimization safe systems analyzing the impact of undefined behavior", + "label": [ + "153083717", + "1009929", + "553261973", + "190902152", + "199360897", + "64156549", + "80444323" + ], + "author": [ + "2303948204", + "1150986126", + "2779384724", + "2166621873" + ], + "reference": [ + "183305829", + "1579437898", + "1709547319", + "1710734607", + "1775889152", + "1904404804", + "1965864973", + "1969064066", + "1976721395", + "2043811931", + "2086234010", + "2089294198", + "2104579430", + "2112472122", + "2114067856", + "2122868537", + "2135794045", + "2153185479", + "2162604396", + "2168096831", + "3005406813", + "3142903977" + ], + "abstract": "this paper studies an emerging class of software bugs called optimization unstable code code that is unexpectedly discarded by compiler optimizations due to undefined behavior in the program unstable code is present in many systems including the linux kernel and the postgres database the consequences of unstable code range from incorrect functionality to missing security checks to reason about unstable code this paper proposes a novel model which views unstable code in terms of optimizations that leverage undefined behavior using this model we introduce a new static checker called stack that precisely identifies unstable code applying stack to widely used systems has uncovered 160 new bugs that have been confirmed and fixed by developers", + "title_raw": "Towards optimization-safe systems: analyzing the impact of undefined behavior", + "abstract_raw": "This paper studies an emerging class of software bugs called optimization-unstable code: code that is unexpectedly discarded by compiler optimizations due to undefined behavior in the program. 
Unstable code is present in many systems, including the Linux kernel and the Postgres database. The consequences of unstable code range from incorrect functionality to missing security checks. To reason about unstable code, this paper proposes a novel model, which views unstable code in terms of optimizations that leverage undefined behavior. Using this model, we introduce a new static checker called Stack that precisely identifies unstable code. Applying Stack to widely used systems has uncovered 160 new bugs that have been confirmed and fixed by developers.", + "link": "https://www.semanticscholar.org/paper/e1d5153ebbc240858bac11fc0102b8976a33bb84", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2082171780", + "venue": "1171178643", + "year": "2013", + "title": "naiad a timely dataflow system", + "label": [ + "34165917", + "96324660", + "107027933", + "45374587", + "113954288", + "160713754", + "173608175", + "136134403", + "120314980" + ], + "author": [ + "2307210230", + "1967907825", + "2118758609", + "2004792601", + "2296249340", + "2162106674" + ], + "reference": [ + "22572007", + "78077100", + "193566757", + "206874040", + "1487337216", + "1493893823", + "1562189966", + "1652793671", + "1684942806", + "1845494277", + "1861377444", + "1976821017", + "1982003698", + "1982063824", + "2010365467", + "2027418104", + "2029467255", + "2045063824", + "2073904762", + "2100830825", + "2101196063", + "2105170773", + "2122465391", + "2124939717", + "2126022651", + "2130747448", + "2131975293", + "2133156458", + "2133941976", + "2135216003", + "2135727652", + "2136575791", + "2141711298", + "2148132761", + "2153972927", + "2156212081", + "2164740236", + "2165506510", + "2167845698", + "2170616854", + "2566979091", + "2772151590", + "3098257205" + ], + "abstract": "naiad is a distributed system for executing data parallel cyclic dataflow programs it offers the high throughput of batch processors the low latency of stream processors and the ability 
to perform iterative and incremental computations although existing systems offer some of these features applications that require all three have relied on multiple platforms at the expense of efficiency maintainability and simplicity naiad resolves the complexities of combining these features in one framework a new computational model timely dataflow underlies naiad and captures opportunities for parallelism across a wide class of algorithms this model enriches dataflow computation with timestamps that represent logical points in the computation and provide the basis for an efficient lightweight coordination mechanism we show that many powerful high level programming models can be built on naiad s low level primitives enabling such diverse tasks as streaming data analysis iterative machine learning and interactive graph mining naiad outperforms specialized systems in their target application domains and its unique features enable the development of new high performance applications", + "title_raw": "Naiad: a timely dataflow system", + "abstract_raw": "Naiad is a distributed system for executing data parallel, cyclic dataflow programs. It offers the high throughput of batch processors, the low latency of stream processors, and the ability to perform iterative and incremental computations. Although existing systems offer some of these features, applications that require all three have relied on multiple platforms, at the expense of efficiency, maintainability, and simplicity. Naiad resolves the complexities of combining these features in one framework. A new computational model, timely dataflow, underlies Naiad and captures opportunities for parallelism across a wide class of algorithms. This model enriches dataflow computation with timestamps that represent logical points in the computation and provide the basis for an efficient, lightweight coordination mechanism. 
We show that many powerful high-level programming models can be built on Naiad's low-level primitives, enabling such diverse tasks as streaming data analysis, iterative machine learning, and interactive graph mining. Naiad outperforms specialized systems in their target application domains, and its unique features enable the development of new high-performance applications.", + "link": "https://scholar.google.com/scholar?as_q=&num=10&btnG=Search+Scholar&as_epq=Naiad:+A+Timely+Dataflow+System&as_oq=&as_eq=&as_occt=any&as_sauthors=Murray", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2150549828", + "venue": "1166315290", + "year": "2013", + "title": "pneui pneumatically actuated soft composite materials for shape changing interfaces", + "label": [ + "2776058767" + ], + "author": [ + "2127501813", + "1664658768", + "2123988653", + "2069682576", + "2127473554", + "2101434678" + ], + "reference": [ + "422581908", + "1601950763", + "1980587781", + "1990259408", + "1999224599", + "2005198142", + "2014137027", + "2016443402", + "2017626220", + "2018648404", + "2025613329", + "2046035349", + "2047083535", + "2047255713", + "2048266952", + "2049774492", + "2053915132", + "2062182733", + "2070099904", + "2073442815", + "2082727067", + "2089787431", + "2097214940", + "2108854843", + "2109525705", + "2112374856", + "2113331157", + "2122682172", + "2124097354", + "2126469287", + "2135798695", + "2143477594", + "2149165496", + "2151386142", + "2154104679", + "2161121228", + "2166585449" + ], + "abstract": "this paper presents pneui an enabling technology to build shape changing interfaces through pneumatically actuated soft composite materials the composite materials integrate the capabilities of both input sensing and active shape output this is enabled by the composites multi layer structures with different mechanical or electrical properties the shape changing states are computationally controllable through pneumatics and pre defined structure we 
explore the design space of pneui through four applications height changing tangible phicons a shape changing mobile a transformable tablet case and a shape shifting lamp", + "title_raw": "PneUI: pneumatically actuated soft composite materials for shape changing interfaces", + "abstract_raw": "This paper presents PneUI, an enabling technology to build shape-changing interfaces through pneumatically-actuated soft composite materials. The composite materials integrate the capabilities of both input sensing and active shape output. This is enabled by the composites' multi-layer structures with different mechanical or electrical properties. The shape changing states are computationally controllable through pneumatics and pre-defined structure. We explore the design space of PneUI through four applications: height changing tangible phicons, a shape changing mobile, a transformable tablet case and a shape shifting lamp.", + "link": "https://www.semanticscholar.org/paper/5edc648630e4e855b5e640318a993e5e7a679594", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2109018459", + "venue": "1166315290", + "year": "2013", + "title": "touch activate adding interactivity to existing objects using active acoustic sensing", + "label": [ + "2778263558", + "207347870", + "31972630", + "144430266", + "171268870", + "12267149" + ], + "author": [ + "2227249554", + "271348325", + "2284288079" + ], + "reference": [ + "152129288", + "657160825", + "761648355", + "1520684205", + "1970334548", + "1979518809", + "1987220739", + "1994547327", + "1996433339", + "2006276005", + "2007210444", + "2010813084", + "2027064609", + "2034347411", + "2036530198", + "2038304148", + "2043581786", + "2048207755", + "2054517723", + "2058306407", + "2059006350", + "2060787432", + "2062658884", + "2070885641", + "2071963718", + "2077071444", + "2079196436", + "2082073215", + "2082476618", + "2088182082", + "2099940712", + "2102413118", + "2103339808", + "2109075207", + "2113628568", + 
"2116817356", + "2116839359", + "2117601224", + "2120093000", + "2124917042", + "2131740967", + "2133990837", + "2140982079", + "2145044381", + "2153200718", + "2153635508", + "2161996677", + "2169709590", + "2184131172", + "2913524657" + ], + "abstract": "in this paper we present a novel acoustic touch sensing technique called touch activate it recognizes a rich context of touches including grasp on existing objects by attaching only a vibration speaker and a piezo electric microphone paired as a sensor it provides easy hardware configuration for prototyping interactive objects that have touch input capability we conducted a controlled experiment to measure the accuracy and trade off between the accuracy and number of training rounds for our technique from its results per user recognition accuracies with five touch gestures for a plastic toy as a simple example and six hand postures for the posture recognition as a complex example were 99 6 and 86 3 respectively walk up user recognition accuracies for the two applications were 97 8 and 71 2 respectively since the results of our experiment showed a promising accuracy for the recognition of touch gestures and hand postures touch activate should be feasible for prototype interactive objects that have touch input capability", + "title_raw": "Touch & activate: adding interactivity to existing objects using active acoustic sensing", + "abstract_raw": "In this paper, we present a novel acoustic touch sensing technique called Touch & Activate. It recognizes a rich context of touches including grasp on existing objects by attaching only a vibration speaker and a piezo-electric microphone paired as a sensor. It provides easy hardware configuration for prototyping interactive objects that have touch input capability. We conducted a controlled experiment to measure the accuracy and trade-off between the accuracy and number of training rounds for our technique. 
From its results, per-user recognition accuracies with five touch gestures for a plastic toy as a simple example and six hand postures for the posture recognition as a complex example were 99.6% and 86.3%, respectively. Walk up user recognition accuracies for the two applications were 97.8% and 71.2%, respectively. Since the results of our experiment showed a promising accuracy for the recognition of touch gestures and hand postures, Touch & Activate should be feasible for prototype interactive objects that have touch input capability.", + "link": "https://www.semanticscholar.org/paper/3142ed568bfc357dc53c9cba9059029b130faedd", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2001716033", + "venue": "1166315290", + "year": "2013", + "title": "fiberio a touchscreen that senses fingerprints", + "label": [ + "188255373", + "78646695", + "761482", + "184297639", + "45235069", + "2778539339", + "49774154", + "31972630", + "2777826928" + ], + "author": [ + "2208258931", + "2009751849" + ], + "reference": [ + "197624350", + "1164102756", + "1483289014", + "1538460644", + "1583603951", + "1639795441", + "1986606504", + "1990617712", + "2008150314", + "2019988235", + "2069915259", + "2097332559", + "2100491474", + "2117105401", + "2119421579", + "2129146498", + "2130038307", + "2132854028", + "2140215318", + "2140982079", + "2141026766", + "2145044381", + "2149344674", + "2150115983", + "2152430089", + "2155461104", + "2158707444", + "2162878695", + "2168926782", + "2256093822", + "2282265123", + "2295007882", + "2539247512" + ], + "abstract": "we present fiberio a rear projected multitouch table that identifies users biometrically based on their fingerprints during each touch interaction fiberio accomplishes this using a new type of screen material a large fiber optic plate the plate diffuses light on transmission thereby allowing it to act as projection surface at the same time the plate reflects light specularly which produces the contrast required 
for fingerprint sensing in addition to offering all the functionality known from traditional diffused illumination systems fiberio is the first interactive tabletop system that authenticates users during touch interaction unobtrusively and securely using the biometric features of fingerprints which eliminates the need for users to carry any identification tokens", + "title_raw": "Fiberio: a touchscreen that senses fingerprints", + "abstract_raw": "We present Fiberio, a rear-projected multitouch table that identifies users biometrically based on their fingerprints during each touch interaction. Fiberio accomplishes this using a new type of screen material: a large fiber optic plate. The plate diffuses light on transmission, thereby allowing it to act as projection surface. At the same time, the plate reflects light specularly, which produces the contrast required for fingerprint sensing. In addition to offering all the functionality known from traditional diffused illumination systems, Fiberio is the first interactive tabletop system that authenticates users during touch interaction-unobtrusively and securely using the biometric features of fingerprints, which eliminates the need for users to carry any identification tokens.", + "link": "https://www.semanticscholar.org/paper/c7067ff53ab2018327ab041b0f6ff4174922dbe1", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2033201131", + "venue": "1133523790", + "year": "2012", + "title": "disc diversity result diversification based on dissimilarity and coverage", + "label": [ + "64729616", + "124101348", + "127705205" + ], + "author": [ + "1136363840", + "2242762518" + ], + "reference": [ + "299839057", + "1859467588", + "1976422444", + "1977586752", + "1993320088", + "1995913408", + "2011039300", + "2011806775", + "2019196387", + "2032518213", + "2049797265", + "2056749377", + "2063572899", + "2092808090", + "2094067618", + "2097860522", + "2097951507", + "2099799055", + "2103186657", + "2111336742", 
+ "2118466295", + "2126976589", + "2132314908", + "2146831356", + "2149177634", + "2149547418", + "2152228468", + "2155912844", + "2161547991", + "2166224193" + ], + "abstract": "recently result diversification has attracted a lot of attention as a means to improve the quality of results retrieved by user queries in this paper we propose a new intuitive definition of diversity called disc diversity a disc diverse subset of a query result contains objects such that each object in the result is represented by a similar object in the diverse subset and the objects in the diverse subset are dissimilar to each other we show that locating a minimum disc diverse subset is an np hard problem and provide heuristics for its approximation we also propose adapting disc diverse subsets to a different degree of diversification we call this operation zooming we present efficient implementations of our algorithms based on the m tree a spatial index structure and experimentally evaluate their performance", + "title_raw": "DisC diversity: result diversification based on dissimilarity and coverage", + "abstract_raw": "Recently, result diversification has attracted a lot of attention as a means to improve the quality of results retrieved by user queries. In this paper, we propose a new, intuitive definition of diversity called DisC diversity. A DisC diverse subset of a query result contains objects such that each object in the result is represented by a similar object in the diverse subset and the objects in the diverse subset are dissimilar to each other. We show that locating a minimum DisC diverse subset is an NP-hard problem and provide heuristics for its approximation. We also propose adapting DisC diverse subsets to a different degree of diversification. We call this operation zooming. 
We present efficient implementations of our algorithms based on the M-tree, a spatial index structure, and experimentally evaluate their performance.", + "link": "https://www.semanticscholar.org/paper/eae77b5282137bb1d3cf8e064895a16353f827d7", + "scraped_abstract": null, + "citation_best": 3 + }, + { + "paper": "2127411301", + "venue": "1135342153", + "year": "2013", + "title": "no country for old members user lifecycle and linguistic change in online communities", + "label": [ + "56739046", + "2777027219", + "67469775" + ], + "author": [ + "160157097", + "2984931003", + "2089131864", + "1878631932", + "2114426036" + ], + "reference": [ + "222053410", + "1233176854", + "1488989678", + "1510898630", + "1531752105", + "1664970667", + "1814109117", + "1854262109", + "1964137370", + "1993435668", + "2001259128", + "2017204136", + "2024324635", + "2036248440", + "2038631615", + "2056510402", + "2062783322", + "2071106879", + "2081317065", + "2090539334", + "2108614537", + "2109091500", + "2110930288", + "2114544578", + "2114589588", + "2118553383", + "2122710250", + "2124003719", + "2127471749", + "2131857427", + "2134237567", + "2136891251", + "2141141510", + "2143318268", + "2145446394", + "2147154374", + "2151078464", + "2151884917", + "2157127722", + "2160176417", + "2166754847", + "2166771689", + "2167057952", + "2170030781", + "2432978112", + "2467997928", + "2745806554", + "3017658320", + "3149829789" + ], + "abstract": "vibrant online communities are in constant flux as members join and depart the interactional norms evolve stimulating further changes to the membership and its social dynamics linguistic change in the sense of innovation that becomes accepted as the norm is essential to this dynamic process it both facilitates individual expression and fosters the emergence of a collective identity we propose a framework for tracking linguistic change as it happens and for understanding how specific users react to these evolving norms by applying this framework 
to two large online communities we show that users follow a determined two stage lifecycle with respect to their susceptibility to linguistic change a linguistically innovative learning phase in which users adopt the language of the community followed by a conservative phase in which users stop changing and the evolving community norms pass them by building on this observation we show how this framework can be used to detect early in a user s career how long she will stay active in the community thus this work has practical significance for those who design and maintain online communities it also yields new theoretical insights into the evolution of linguistic norms and the complex interplay between community level and individual level linguistic change", + "title_raw": "No country for old members: user lifecycle and linguistic change in online communities", + "abstract_raw": "Vibrant online communities are in constant flux. As members join and depart, the interactional norms evolve, stimulating further changes to the membership and its social dynamics. Linguistic change --- in the sense of innovation that becomes accepted as the norm --- is essential to this dynamic process: it both facilitates individual expression and fosters the emergence of a collective identity. We propose a framework for tracking linguistic change as it happens and for understanding how specific users react to these evolving norms. By applying this framework to two large online communities we show that users follow a determined two-stage lifecycle with respect to their susceptibility to linguistic change: a linguistically innovative learning phase in which users adopt the language of the community followed by a conservative phase in which users stop changing and the evolving community norms pass them by. Building on this observation, we show how this framework can be used to detect, early in a user's career, how long she will stay active in the community. 
Thus, this work has practical significance for those who design and maintain online communities. It also yields new theoretical insights into the evolution of linguistic norms and the complex interplay between community-level and individual-level linguistic change.", + "link": "https://www.semanticscholar.org/paper/3b3e2ba40f88f44a25dcfe346665211a57c4238c", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2293636571", + "venue": "1184914352", + "year": "2012", + "title": "learning svm classifiers with indefinite kernels", + "label": [ + "119857082", + "134517425", + "66905080", + "98234853", + "99018454", + "140417398", + "178980831", + "12267149", + "182335926", + "75866337", + "123860398" + ], + "author": [ + "2153811272", + "2256485367" + ], + "reference": [ + "91932901", + "1512669428", + "2015855658", + "2034726418", + "2100781145", + "2102579186", + "2104323334", + "2115933183", + "2118216287", + "2121950477", + "2123818990", + "2139999468", + "2148894497", + "2157656721", + "2172000360", + "2296319761", + "2798909945", + "3002694247" + ], + "abstract": "recently training support vector machines with indefinite kernels has attracted great attention in the machine learning community in this paper we tackle this problem by formulating a joint optimization model over svm classifications and kernel principal component analysis we first reformulate the kernel principal component analysis as a general kernel transformation framework and then incorporate it into the svm classification to formulate a joint optimization model the proposed model has the advantage of making consistent kernel transformations over training and test samples it can be used for both binary classification and multiclass classification problems our experimental results on both synthetic data sets and real world data sets show the proposed model can significantly outperform related approaches", + "title_raw": "Learning SVM classifiers with indefinite kernels", + 
"abstract_raw": "Recently, training support vector machines with indefinite kernels has attracted great attention in the machine learning community. In this paper, we tackle this problem by formulating a joint optimization model over SVM classifications and kernel principal component analysis. We first reformulate the kernel principal component analysis as a general kernel transformation framework, and then incorporate it into the SVM classification to formulate a joint optimization model. The proposed model has the advantage of making consistent kernel transformations over training and test samples. It can be used for both binary classification and multiclass classification problems. Our experimental results on both synthetic data sets and real world data sets show the proposed model can significantly outperform related approaches.", + "link": "https://www.semanticscholar.org/paper/535d35b1eb40e7c91af2e54850d565524d07f13d", + "scraped_abstract": null, + "citation_best": 27 + }, + { + "paper": "1512874001", + "venue": "1184914352", + "year": "2012", + "title": "document summarization based on data reconstruction", + "label": [ + "12713177", + "124101348", + "152124472", + "170858558" + ], + "author": [ + "2163761377", + "2151411149", + "2169876372", + "2293456513", + "2617370321", + "2141500565", + "2157503142" + ], + "reference": [ + "179757531", + "1580733178", + "1963959122", + "1964488740", + "1967082914", + "1971520389", + "1975579663", + "1976645892", + "1985710361", + "2000023271", + "2009134948", + "2012585029", + "2028781966", + "2029320853", + "2047028564", + "2054211469", + "2055150316", + "2066636486", + "2080370442", + "2081580037", + "2083778364", + "2089391273", + "2090462687", + "2102618089", + "2105542801", + "2108119513", + "2111619374", + "2114188922", + "2126709460", + "2135046866", + "2138621811", + "2140440594", + "2144933361", + "2150824314", + "2154652894", + "2166052814", + "2538492099" + ], + "abstract": "document summarization is of great 
value to many real world applications such as snippets generation for search results and news headlines generation traditionally document summarization is implemented by extracting sentences that cover the main topics of a document with a minimum redundancy in this paper we take a different perspective from data reconstruction and propose a novel framework named document summarization based on data reconstruction dsdr specifically our approach generates a summary which consist of those sentences that can best reconstruct the original document to model the relationship among sentences we introduce two objective functions 1 linear reconstruction which approximates the document by linear combinations of the selected sentences 2 nonnegative linear reconstruction which allows only additive not subtractive linear combinations in this framework the reconstruction error becomes a natural criterion for measuring the quality of the summary for each objective function we develop an efficient algorithm to solve the corresponding optimization problem extensive experiments on summarization benchmark data sets duc 2006 and duc 2007 demonstrate the effectiveness of our proposed approach", + "title_raw": "Document summarization based on data reconstruction", + "abstract_raw": "Document summarization is of great value to many real world applications, such as snippets generation for search results and news headlines generation. Traditionally, document summarization is implemented by extracting sentences that cover the main topics of a document with a minimum redundancy. In this paper, we take a different perspective from data reconstruction and propose a novel framework named Document Summarization based on Data Reconstruction (DSDR). Specifically, our approach generates a summary which consist of those sentences that can best reconstruct the original document. 
To model the relationship among sentences, we introduce two objective functions: (1) linear reconstruction, which approximates the document by linear combinations of the selected sentences; (2) nonnegative linear reconstruction, which allows only additive, not subtractive, linear combinations. In this framework, the reconstruction error becomes a natural criterion for measuring the quality of the summary. For each objective function, we develop an efficient algorithm to solve the corresponding optimization problem. Extensive experiments on summarization benchmark data sets DUC 2006 and DUC 2007 demonstrate the effectiveness of our proposed approach.", + "link": "https://www.semanticscholar.org/paper/d7c58e4f16504500329315e06eeba700c4b7abca", + "scraped_abstract": null, + "citation_best": 118 + }, + { + "paper": "2159250884", + "venue": "1188739475", + "year": "2012", + "title": "bayesian symbol refined tree substitution grammars for syntactic parsing", + "label": [ + "146810361", + "9432014", + "42560504", + "206134035", + "53893814", + "118364021", + "147547768", + "204321447", + "186644900" + ], + "author": [ + "2012524798", + "2036428500", + "2142558357", + "2102463429" + ], + "reference": [ + "1499470996", + "1540302538", + "1551104980", + "1551202288", + "1632114991", + "1892363745", + "1978470410", + "1982649163", + "1983728325", + "2005902041", + "2069429561", + "2087309226", + "2092654472", + "2097606805", + "2111500764", + "2115409255", + "2117126688", + "2120735855", + "2121380975", + "2125712079", + "2126449874", + "2128774237", + "2132726600", + "2134729743", + "2139621418", + "2152561660", + "2152600988", + "2154099718", + "2157874452", + "2164151151", + "2168959697" + ], + "abstract": "we propose symbol refined tree substitution grammars sr tsgs for syntactic parsing an sr tsg is an extension of the conventional tsg model where each nonterminal symbol can be refined subcategorized to fit the training data we aim to provide a unified model where tsg 
rules and symbol refinement are learned from training data in a fully automatic and consistent fashion we present a novel probabilistic sr tsg model based on the hierarchical pitman yor process to encode backoff smoothing from a fine grained sr tsg to simpler cfg rules and develop an efficient training method based on markov chain monte carlo mcmc sampling our sr tsg parser achieves an f1 score of 92 4 in the wall street journal wsj english penn treebank parsing task which is a 7 7 point improvement over a conventional bayesian tsg parser and better than state of the art discriminative reranking parsers", + "title_raw": "Bayesian Symbol-Refined Tree Substitution Grammars for Syntactic Parsing", + "abstract_raw": "We propose Symbol-Refined Tree Substitution Grammars (SR-TSGs) for syntactic parsing. An SR-TSG is an extension of the conventional TSG model where each nonterminal symbol can be refined (subcategorized) to fit the training data. We aim to provide a unified model where TSG rules and symbol refinement are learned from training data in a fully automatic and consistent fashion. We present a novel probabilistic SR-TSG model based on the hierarchical Pitman-Yor Process to encode backoff smoothing from a fine-grained SR-TSG to simpler CFG rules, and develop an efficient training method based on Markov Chain Monte Carlo (MCMC) sampling. 
Our SR-TSG parser achieves an F1 score of 92.4% in the Wall Street Journal (WSJ) English Penn Treebank parsing task, which is a 7.7 point improvement over a conventional Bayesian TSG parser, and better than state-of-the-art discriminative reranking parsers.", + "link": "https://www.semanticscholar.org/paper/2b920fe2d038571693bd96dafd3ed0dbadc4cb67", + "scraped_abstract": null, + "citation_best": 44 + }, + { + "paper": "2136345490", + "venue": "1163450153", + "year": "2012", + "title": "observational and experimental investigation of typing behaviour using virtual keyboards for mobile devices", + "label": [ + "40969351", + "557433098", + "107457646", + "2781209916", + "2780367331", + "186967261", + "2778539339", + "2777421447" + ], + "author": [ + "2150949379", + "74286286", + "2101137802" + ], + "reference": [ + "1569910286", + "1695216085", + "1999806644", + "2003849625", + "2004403975", + "2004694131", + "2018551689", + "2028618039", + "2036530198", + "2038710768", + "2083243042", + "2099287431", + "2108518773", + "2119429908", + "2139624348", + "2148748674", + "2156732889", + "2295007882", + "2519682392" + ], + "abstract": "with the rise of current smartphones virtual keyboards for touchscreens became the dominant mobile text entry technique we developed a typing game that records how users touch on the standard android keyboard to investigate users typing behaviour 47 770 625 keystrokes from 72 945 installations have been collected by publishing the game by visualizing the touch distribution we identified a systematic skew and derived a function that compensates this skew by shifting touch events by updating the game we conduct an experiment that investigates the effect of shifting touch events changing the keys labels and visualizing the touched position results based on 6 603 659 keystrokes and 13 013 installations show that visualizing the touched positions using a simple dot decreases the error rate of the android keyboard by 18 3 but also decreases the 
speed by 5 2 with no positive effect on learnability the android keyboard outperforms the control condition but the constructed shift function further improves the performance by 2 2 and decreases the error rate by 9 1 we argue that the shift function can improve existing keyboards at no costs", + "title_raw": "Observational and experimental investigation of typing behaviour using virtual keyboards for mobile devices", + "abstract_raw": "With the rise of current smartphones, virtual keyboards for touchscreens became the dominant mobile text entry technique. We developed a typing game that records how users touch on the standard Android keyboard to investigate users' typing behaviour. 47,770,625 keystrokes from 72,945 installations have been collected by publishing the game. By visualizing the touch distribution we identified a systematic skew and derived a function that compensates this skew by shifting touch events. By updating the game we conduct an experiment that investigates the effect of shifting touch events, changing the keys' labels, and visualizing the touched position. Results based on 6,603,659 keystrokes and 13,013 installations show that visualizing the touched positions using a simple dot decreases the error rate of the Android keyboard by 18.3% but also decreases the speed by 5.2% with no positive effect on learnability. The Android keyboard outperforms the control condition but the constructed shift function further improves the performance by 2.2% and decreases the error rate by 9.1%. 
We argue that the shift function can improve existing keyboards at no costs.", + "link": "https://scholar.google.com/scholar?as_q=&num=10&btnG=Search+Scholar&as_epq=Observational+and+Experimental+Investigation+of+Typing+Behaviour+using+Virtual+Keyboards+for+Mobile+Devices&as_oq=&as_eq=&as_occt=any&as_sauthors=Henze", + "scraped_abstract": null, + "citation_best": 96 + }, + { + "paper": "2126298837", + "venue": "1163450153", + "year": "2012", + "title": "the normal natural troubles of driving with gps", + "label": [ + "36301306", + "149635348", + "60229501", + "43472768", + "107457646", + "2778582051", + "87546605" + ], + "author": [ + "3037242985", + "2788376239" + ], + "reference": [ + "165187429", + "962894947", + "1489032534", + "1995467171", + "2012384339", + "2026645894", + "2038402307", + "2043745669", + "2053078227", + "2057696488", + "2066681138", + "2068408009", + "2069683636", + "2085170882", + "2099001635", + "2112868075", + "2121722567", + "2125484268", + "2138556657", + "2138683923", + "2144546006", + "2150396292", + "2152551784", + "2152982168", + "2158692512", + "2160702867", + "2294123372", + "2318928470", + "2498263534", + "2998695964", + "3145637508" + ], + "abstract": "in car gps based satellite navigation systems are now a common part of driving providing turn by turn navigation instructions on smartphones portable units or in car dashboard navigation systems this paper uses interactional analysis of video data from fifteen naturalistically recorded journeys with gps to understand the navigational practices deployed by drivers and passengers the paper documents five types of trouble where gps systems cause issues and confusion for drivers around destinations routes maps sensors timing and relevance and legality the paper argues that to design gps systems better we need to move beyond the notion of a docile driver who follows gps command blindly to a better understanding of how drivers passengers and gps systems work together we develop this in 
discussing how technology might better support instructed action", + "title_raw": "The normal natural troubles of driving with GPS", + "abstract_raw": "In-car GPS based satellite navigation systems are now a common part of driving, providing turn-by-turn navigation instructions on smartphones, portable units or in-car dashboard navigation systems. This paper uses interactional analysis of video data from fifteen naturalistically recorded journeys with GPS to understand the navigational practices deployed by drivers and passengers. The paper documents five types of 'trouble' where GPS systems cause issues and confusion for drivers around: destinations, routes, maps & sensors, timing and relevance and legality. The paper argues that to design GPS systems better we need to move beyond the notion of a docile driver who follows GPS command blindly, to a better understanding of how drivers, passengers and GPS systems work together. We develop this in discussing how technology might better support 'instructed action'.", + "link": "https://www.semanticscholar.org/paper/3f0f48280896d492c0e68093c8b82ed3df6d985d", + "scraped_abstract": null, + "citation_best": 105 + }, + { + "paper": "2060696172", + "venue": "1163450153", + "year": "2012", + "title": "improving command selection with commandmaps", + "label": [ + "107457646", + "108265739" + ], + "author": [ + "2234027993", + "2101644873", + "2071700171", + "2039874956" + ], + "reference": [ + "1500303984", + "1586098591", + "1601395896", + "1669686298", + "1879937569", + "1974170312", + "1990242729", + "1999538864", + "1999555653", + "2012249883", + "2014448832", + "2029469562", + "2034511689", + "2035715329", + "2036433269", + "2064406997", + "2067534690", + "2069324208", + "2090949033", + "2095753442", + "2097298348", + "2106311469", + "2112525093", + "2113725822", + "2127555702", + "2130025274", + "2157289187", + "2164620969", + "2179427518", + "2406072942", + "2496992222", + "2786157445" + ], + "abstract": "designers of 
gui applications typically arrange commands in hierarchical structures such as menus due to screen space limitations however hierarchical organisations are known to slow down expert users this paper proposes the use of spatial memory in combination with hierarchy flattening as a means of improving gui performance we demonstrate these concepts through the design of a command selection interface called commandmaps and analyse its theoretical performance characteristics we then describe two studies evaluating commandmaps against menus and microsoft s ribbon interface for both novice and experienced users results show that for novice users there is no significant performance difference between commandmaps and traditional interfaces but for experienced users commandmaps are significantly faster than both menus and the ribbon", + "title_raw": "Improving command selection with CommandMaps", + "abstract_raw": "Designers of GUI applications typically arrange commands in hierarchical structures, such as menus, due to screen space limitations. However, hierarchical organisations are known to slow down expert users. This paper proposes the use of spatial memory in combination with hierarchy flattening as a means of improving GUI performance. We demonstrate these concepts through the design of a command selection interface, called CommandMaps, and analyse its theoretical performance characteristics. We then describe two studies evaluating CommandMaps against menus and Microsoft's Ribbon interface for both novice and experienced users. 
Results show that for novice users, there is no significant performance difference between CommandMaps and traditional interfaces -- but for experienced users, CommandMaps are significantly faster than both menus and the Ribbon.", + "link": "https://www.semanticscholar.org/paper/cc6919db418ac78c59a291ba200b5ad796e47e4c", + "scraped_abstract": null, + "citation_best": 64 + }, + { + "paper": "2132962756", + "venue": "1163450153", + "year": "2012", + "title": "communitysourcing engaging local crowds to perform expert work via physical kiosks", + "label": [ + "2909262543", + "107457646", + "49774154", + "172195944", + "2777852691", + "116865082", + "62230096" + ], + "author": [ + "2077519821", + "308150686", + "2152765731", + "2325120967", + "2192055696" + ], + "reference": [ + "2008173", + "112112871", + "1523986167", + "1564762873", + "1569006877", + "1985543789", + "1985816302", + "2015337919", + "2023430268", + "2035683813", + "2036625304", + "2054475738", + "2058179030", + "2058556535", + "2090048052", + "2099769844", + "2114269021", + "2116119142", + "2117470435", + "2127008633", + "2128386057", + "2138965424", + "2140464265", + "2148479118", + "2151242492", + "2151401338", + "2160759300", + "2168131931", + "2170989440", + "3124258878" + ], + "abstract": "online labor markets such as amazon s mechanical turk have been used to crowdsource simple short tasks like image labeling and transcription however expert knowledge is often lacking in such markets making it impossible to complete certain classes of tasks in this work we introduce an alternative mechanism for crowdsourcing tasks that require specialized knowledge or skill communitysourcing the use of physical kiosks to elicit work from specific populations we investigate the potential of communitysourcing by designing implementing and evaluating umati the communitysourcing vending machine umati allows users to earn credits by performing tasks using a touchscreen attached to the machine physical rewards in this 
case snacks are dispensed through traditional vending mechanics we evaluated whether communitysourcing can accomplish expert work by using umati to grade computer science exams we placed umati in a university computer science building targeting students with grading tasks for snacks over one week 328 unique users 302 of whom were students completed 7771 tasks 7240 by students 80 of users had never participated in a crowdsourcing market before we found that umati was able to grade exams with 2 higher accuracy at the same price or at 33 lower cost at equivalent accuracy than traditional single expert grading mechanical turk workers had no success grading the same exams these results indicate that communitysourcing can successfully elicit high quality expert work from specific communities", + "title_raw": "CommunitySourcing: engaging local crowds to perform expert work via physical kiosks", + "abstract_raw": "Online labor markets, such as Amazon's Mechanical Turk, have been used to crowdsource simple, short tasks like image labeling and transcription. However, expert knowledge is often lacking in such markets, making it impossible to complete certain classes of tasks. In this work we introduce an alternative mechanism for crowdsourcing tasks that require specialized knowledge or skill: communitysourcing --- the use of physical kiosks to elicit work from specific populations. We investigate the potential of communitysourcing by designing, implementing and evaluating Umati: the communitysourcing vending machine. Umati allows users to earn credits by performing tasks using a touchscreen attached to the machine. Physical rewards (in this case, snacks) are dispensed through traditional vending mechanics. We evaluated whether communitysourcing can accomplish expert work by using Umati to grade Computer Science exams. We placed Umati in a university Computer Science building, targeting students with grading tasks for snacks. 
Over one week, 328 unique users (302 of whom were students) completed 7771 tasks (7240 by students). 80% of users had never participated in a crowdsourcing market before. We found that Umati was able to grade exams with 2% higher accuracy (at the same price) or at 33% lower cost (at equivalent accuracy) than traditional single-expert grading. Mechanical Turk workers had no success grading the same exams. These results indicate that communitysourcing can successfully elicit high-quality expert work from specific communities.", + "link": "https://www.semanticscholar.org/paper/b8a935bb46109e0451c07c3e6316f147336d67bf", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2088182082", + "venue": "1163450153", + "year": "2012", + "title": "touche enhancing touch interaction on humans screens liquids and everyday objects", + "label": [ + "5366617", + "193293595", + "207347870", + "31972630", + "186967261" + ], + "author": [ + "2168227675", + "2728936", + "2123491528" + ], + "reference": [ + "174061063", + "1967451823", + "1969037571", + "1979512319", + "1980224946", + "1993466468", + "2005198142", + "2005681075", + "2007210444", + "2008150314", + "2014788716", + "2017320085", + "2034664912", + "2041210843", + "2048207755", + "2050954805", + "2059006350", + "2063812706", + "2089198411", + "2091178526", + "2094577098", + "2095956339", + "2097672823", + "2109075207", + "2111424004", + "2112416127", + "2113628568", + "2114663654", + "2115949743", + "2119183201", + "2128026023", + "2130306162", + "2131740967", + "2133586326", + "2133990480", + "2138687233", + "2140395929", + "2141964067", + "2145550961", + "2151207336", + "2158707444", + "2165940897", + "2169709590", + "2295775608", + "2534521461", + "2906710874" + ], + "abstract": "touche proposes a novel swept frequency capacitive sensing technique that can not only detect a touch event but also recognize complex configurations of the human hands and body such contextual information significantly enhances 
touch interaction in a broad range of applications from conventional touchscreens to unique contexts and materials for example in our explorations we add touch and gesture sensitivity to the human body and liquids we demonstrate the rich capabilities of touche with five example setups from different application domains and conduct experimental studies that show gesture classification accuracies of 99 are achievable with our technology", + "title_raw": "Touch\u00e9: enhancing touch interaction on humans, screens, liquids, and everyday objects", + "abstract_raw": "Touche proposes a novel Swept Frequency Capacitive Sensing technique that can not only detect a touch event, but also recognize complex configurations of the human hands and body. Such contextual information significantly enhances touch interaction in a broad range of applications, from conventional touchscreens to unique contexts and materials. For example, in our explorations we add touch and gesture sensitivity to the human body and liquids. 
We demonstrate the rich capabilities of Touche with five example setups from different application domains and conduct experimental studies that show gesture classification accuracies of 99% are achievable with our technology.", + "link": "https://scholar.google.com/scholar?as_q=&num=10&btnG=Search+Scholar&as_epq=Touch\u00e9:+Enhancing+Touch+Interaction+on+Humans,+Screens,+Liquids,+and+Everyday+Objects&as_oq=&as_eq=&as_occt=any&as_sauthors=Sato", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2166132393", + "venue": "1163450153", + "year": "2012", + "title": "detecting error related negativity for interaction design", + "label": [ + "28490314", + "13854087", + "89505385", + "95623464" + ], + "author": [ + "2101395015", + "2251937356" + ], + "reference": [ + "77705130", + "1482124653", + "1503283999", + "1883664232", + "1967167074", + "1973638771", + "1981419162", + "1987830056", + "1987883337", + "1990592342", + "1999880904", + "2005288559", + "2035082563", + "2050218197", + "2095753442", + "2098100592", + "2101345011", + "2110274942", + "2110849321", + "2121934633", + "2125228090", + "2133214227", + "2135572147", + "2141088043", + "2165389751", + "2170565853", + "2187663530", + "2534890789", + "2906752831" + ], + "abstract": "this paper examines the ability to detect a characteristic brain potential called the error related negativity ern using off the shelf headsets and explores its applicability to hci ern is triggered when a user either makes a mistake or the application behaves differently from their expectation we first show that ern can be seen on signals captured by eeg headsets like emotiv when doing a typical multiple choice reaction time rt task flanker task we then present a single trial online ern algorithm that works by pre computing the coefficient matrix of a logistic regression classifier using some data from a multiple choice reaction time task and uses it to classify incoming signals of that task on a single trial of data 
we apply it to an interactive selection task that involved users selecting an object under time pressure furthermore the study was conducted in a typical office environment with ambient noise our results show that online single trial ern detection is possible using off the shelf headsets during tasks that are typical of interactive applications we then design a superflick experiment with an integrated module mimicking an ern detector to evaluate the accuracy of detecting ern in the context of assisting users in interactive tasks based on these results we discuss and present several hci scenarios for use of ern", + "title_raw": "Detecting error-related negativity for interaction design", + "abstract_raw": "This paper examines the ability to detect a characteristic brain potential called the Error-Related Negativity (ERN) using off-the-shelf headsets and explores its applicability to HCI. ERN is triggered when a user either makes a mistake or the application behaves differently from their expectation. We first show that ERN can be seen on signals captured by EEG headsets like Emotiv\u2122 when doing a typical multiple choice reaction time (RT) task -- Flanker task. We then present a single-trial online ERN algorithm that works by pre-computing the coefficient matrix of a logistic regression classifier using some data from a multiple choice reaction time task and uses it to classify incoming signals of that task on a single trial of data. We apply it to an interactive selection task that involved users selecting an object under time pressure. Furthermore the study was conducted in a typical office environment with ambient noise. Our results show that online single trial ERN detection is possible using off-the-shelf headsets during tasks that are typical of interactive applications. We then design a Superflick experiment with an integrated module mimicking an ERN detector to evaluate the accuracy of detecting ERN in the context of assisting users in interactive tasks. 
Based on these results we discuss and present several HCI scenarios for use of ERN.", + "link": "https://www.semanticscholar.org/paper/333adc43fe2b6b93d84a3a6a8ca5cad8e66686a8", + "scraped_abstract": null, + "citation_best": 63 + }, + { + "paper": "1974923332", + "venue": "1163450153", + "year": "2012", + "title": "using rhythmic patterns as an input method", + "label": [ + "28490314", + "2779730394", + "2778816558" + ], + "author": [ + "2243140582", + "2776349054", + "1947389838", + "166732564", + "2674390412" + ], + "reference": [ + "71263425", + "153748520", + "762344116", + "1516339862", + "1569085508", + "1969015291", + "1970294130", + "1973310077", + "1990451873", + "1992880878", + "1994330430", + "1998098156", + "1999587069", + "2015188905", + "2042409883", + "2067534690", + "2067870229", + "2071003439", + "2077023123", + "2079349304", + "2087361844", + "2092601877", + "2097374659", + "2114809517", + "2120805674", + "2125612136", + "2125811888", + "2132986671", + "2144007431", + "2146567024", + "2166153889", + "2504258391", + "2971331781" + ], + "abstract": "while interaction techniques that use the temporal dimension have been used for a long time such as multiple clicks or spring loaded widgets more advanced uses of rhythmic patterns have received little attention in hci using such temporal structures to convey information can be particularly useful in situations where the visual channel is overloaded or even not available in this paper we introduce rhythmic interaction as the use of rhythms for input we report the results of two experiments that show that i rhythmic patterns can be efficiently reproduced by novice users and recognized by computer algorithms and ii rhythmic patterns can be memorized as efficiently as traditional shortcuts when associating them with visual commands overall these results demonstrate the potential of rhythmic interaction and open the way to a richer repertoire of interaction techniques", + "title_raw": "Using rhythmic 
patterns as an input method", + "abstract_raw": "While interaction techniques that use the temporal dimension have been used for a long time, such as multiple clicks or spring-loaded widgets, more advanced uses of rhythmic patterns have received little attention in HCI. Using such temporal structures to convey information can be particularly useful in situations where the visual channel is overloaded or even not available. In this paper we introduce Rhythmic Interaction as the use of rhythms for input. We report the results of two experiments that show that (i) rhythmic patterns can be efficiently reproduced by novice users and recognized by computer algorithms, and (ii) rhythmic patterns can be memorized as efficiently as traditional shortcuts when associating them with visual commands. Overall, these results demonstrate the potential of Rhythmic Interaction and open the way to a richer repertoire of interaction techniques.", + "link": "https://www.semanticscholar.org/paper/e8673d1d7a3a18024db575fc1b364f7f6273eb8e", + "scraped_abstract": null, + "citation_best": 38 + }, + { + "paper": "2168953163", + "venue": "1163450153", + "year": "2012", + "title": "revisiting the jacquard loom threads of history and current patterns in hci", + "label": [ + "107457646", + "40458791", + "2780120335", + "13854087" + ], + "author": [ + "2264635623", + "2118748986", + "150241784" + ], + "reference": [ + "182386645", + "1504691351", + "1548118120", + "1558535499", + "1770006921", + "1966864622", + "1987398446", + "2001029127", + "2022360138", + "2034353103", + "2038107928", + "2050896993", + "2082324053", + "2100700169", + "2100953056", + "2104494713", + "2108870675", + "2109058912", + "2110378855", + "2116331343", + "2122928453", + "2123389807", + "2141385588", + "2149891956", + "2153058366", + "2154825228", + "2157937414", + "2159736876", + "2165962769", + "2167686873", + "2170000359", + "2170493449", + "2170749155", + "2293357609", + "2294102697", + "2500976406" + ], + 
"abstract": "in the recent developments of human computer interaction one central challenge has been to find and to explore alternatives to the legacy of the desktop computer paradigm for interaction design to investigate this issue further we have conducted an analysis on a fascinating piece of machinery often referred to as one of the predecessors of the modern day computer the jacquard loom in analysing the jacquard loom we look at qualities in design and interaction from some different perspectives how historical tools crafts and practices can inform interaction design the role of physicality materiality and full body interaction in order to rethink some current conceptions of interaction and design of computational devices", + "title_raw": "Revisiting the jacquard loom: threads of history and current patterns in HCI", + "abstract_raw": "In the recent developments of human computer interaction, one central challenge has been to find and to explore alternatives to the legacy of the desktop computer paradigm for interaction design. To investigate this issue further we have conducted an analysis on a fascinating piece of machinery often referred to as one of the predecessors of the modern day computer, the Jacquard loom. 
In analysing the Jacquard loom we look at qualities in design and interaction from some different perspectives: how historical tools, crafts, and practices can inform interaction design, the role of physicality, materiality, and full-body interaction in order to rethink some current conceptions of interaction and design of computational devices.", + "link": "https://www.semanticscholar.org/paper/3a543aafe595b234b2e9fc91e47aa6872a33559e", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2018499862", + "venue": "1163450153", + "year": "2012", + "title": "clayvision the elastic image of the city", + "label": [ + "115961682", + "2777891301", + "107457646", + "49774154", + "120588126", + "153715457", + "2780220136" + ], + "author": [ + "2704264972", + "1124562777" + ], + "reference": [ + "614853332", + "1484775731", + "1529253181", + "1572238170", + "1588957466", + "1594548792", + "1930774872", + "2023331673", + "2024585795", + "2037115209", + "2056269876", + "2061874819", + "2067779848", + "2082440131", + "2087191009", + "2096293924", + "2107353262", + "2109504916", + "2121053321", + "2122015342", + "2122122381", + "2123068464", + "2125873654", + "2128026023", + "2131800446", + "2139581114", + "2141341554", + "2151103935", + "2155843307", + "2156598602", + "2158705726", + "2293351204", + "2327525319", + "2546299398", + "2778205983" + ], + "abstract": "in this paper we describe clayvision a new quasi immersive urban navigation system that rethinks the design conventions of existing augmented reality ar applications by aggressively incorporating knowledge from non computer science fields namely information design and urban planning instead of the prevailing approach of pasting information bubbles onto the existing urban scenery clayvision communicates through real time 3d transformations of city elements in other words the system dynamically probes and reassembles the city into a better designed copy of the original that is both easier to navigate 
and tailored to suit the user s needs and preferences we provide extensive discussions that cover the technical details of the system the types of city morphing operations that can be effectively applied and what people s experiences will be in the newly elastic city", + "title_raw": "ClayVision: the (elastic) image of the city", + "abstract_raw": "In this paper we describe ClayVision, a new quasi-immersive urban navigation system that rethinks the design conventions of existing Augmented Reality (AR) applications, by aggressively incorporating knowledge from non-Computer Science fields - namely Information Design and Urban Planning. Instead of the prevailing approach of pasting \"information bubbles\" onto the existing urban scenery, ClayVision communicates through real-time 3D transformations of city elements. In other words, the system dynamically probes and reassembles the city into a better-designed copy of the original, that is both easier to navigate and tailored to suit the user's needs and preferences. 
We provide extensive discussions that cover the technical details of the system, the types of city-morphing operations that can be effectively applied, and what people's experiences will be in the newly \"elastic\" city.", + "link": "https://www.semanticscholar.org/paper/17adb108a140da2b0deb5dfde664d13a20ba1944", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2148787816", + "venue": "1199533187", + "year": "2012", + "title": "seeking the ground truth a retroactive study on the evolution and migration of software libraries", + "label": [ + "548217200", + "177264268", + "557471498", + "199360897", + "89187990", + "2777904410", + "99613125", + "115903868", + "10885421" + ], + "author": [ + "2225479283", + "2102868281" + ], + "reference": [ + "1565935466", + "1644882639", + "1737238207", + "1964973627", + "1989480206", + "1989571326", + "1991613282", + "2003630076", + "2045749853", + "2052982904", + "2072025944", + "2081757705", + "2099056153", + "2100310705", + "2103640219", + "2112847033", + "2118581948", + "2120167743", + "2127811329", + "2133363731", + "2133437259", + "2135742552", + "2150244020", + "2153887189", + "2155773669", + "2157836986", + "2294305189" + ], + "abstract": "application programming interfaces apis are a common and industrially relevant means for third party software developers to reuse external functionality several techniques have been proposed to help migrate client code between library versions with incompatible apis but it is not clear how well these perform in an absolute sense we present a retroactive study into the presence and nature of api incompatibilities between several versions of a set of java based software libraries for each we perform a detailed manual analysis to determine what the correct adaptations are to migrate from the older to the newer version in addition we investigate whether any of a set of adaptation recommender techniques is capable of identifying the correct adaptations for library 
migration we find that a given api incompatibility can typically be addressed by only one or two recommender techniques but sometimes none serve furthermore those techniques give correct recommendations on average in only about 20 of cases", + "title_raw": "Seeking the ground truth: a retroactive study on the evolution and migration of software libraries", + "abstract_raw": "Application programming interfaces (APIs) are a common and industrially-relevant means for third-party software developers to reuse external functionality. Several techniques have been proposed to help migrate client code between library versions with incompatible APIs, but it is not clear how well these perform in an absolute sense. We present a retroactive study into the presence and nature of API incompatibilities between several versions of a set of Java-based software libraries; for each, we perform a detailed, manual analysis to determine what the correct adaptations are to migrate from the older to the newer version. In addition, we investigate whether any of a set of adaptation recommender techniques is capable of identifying the correct adaptations for library migration. We find that a given API incompatibility can typically be addressed by only one or two recommender techniques, but sometimes none serve. 
Furthermore, those techniques give correct recommendations, on average, in only about 20% of cases.", + "link": "https://www.semanticscholar.org/paper/4f03b6d18071c1a5d1c6019f7fbc6de827a26d00", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "1965085982", + "venue": "1199533187", + "year": "2012", + "title": "scalable test data generation from multidimensional models", + "label": [ + "22414024", + "80444323", + "48044578", + "16910744", + "124101348", + "47980962", + "2776235265", + "2778770139", + "6943359", + "2777904410" + ], + "author": [ + "2344136120" + ], + "reference": [ + "584634945", + "1483804518", + "1570949702", + "1766020475", + "1895387792", + "1971509121", + "1973648822", + "1974153000", + "1979915683", + "1992730709", + "2003562044", + "2006315879", + "2010173932", + "2022431239", + "2023382806", + "2031754437", + "2034204026", + "2044399674", + "2050086297", + "2056119708", + "2072737285", + "2078842512", + "2086028392", + "2105749831", + "2111845378", + "2121084172", + "2126354234", + "2128363840", + "2132224358", + "2137370514", + "2138759702", + "2143401113", + "2144901544", + "2151583733", + "2152949134", + "2159114365", + "2160648174", + "2164636932", + "2169052472", + "2798588639", + "2966055625" + ], + "abstract": "multidimensional data models form the core of modern decision support software the need for this kind of software is significant and it continues to grow with the size and variety of datasets being collected today yet real multidimensional instances are often unavailable for testing and benchmarking and existing data generators can only produce a limited class of such structures in this paper we present a new framework for scalable generation of test data from a rich class of multidimensional models the framework provides a small expressive language for specifying such models and a novel solver for generating sample data from them while the satisfiability problem for the language is np hard we identify a 
polynomially solvable fragment that captures most practical modeling patterns given a model and optionally a statistical specification of the desired test dataset the solver detects and instantiates a maximal subset of the model within this fragment generating data that exhibits the desired statistical properties we use our framework to generate a variety of high quality test datasets from real industrial models which cannot be correctly instantiated by existing data generators or as effectively solved by general purpose constraint solvers", + "title_raw": "Scalable test data generation from multidimensional models", + "abstract_raw": "Multidimensional data models form the core of modern decision support software. The need for this kind of software is significant, and it continues to grow with the size and variety of datasets being collected today. Yet real multidimensional instances are often unavailable for testing and benchmarking, and existing data generators can only produce a limited class of such structures. In this paper, we present a new framework for scalable generation of test data from a rich class of multidimensional models. The framework provides a small, expressive language for specifying such models, and a novel solver for generating sample data from them. While the satisfiability problem for the language is NP-hard, we identify a polynomially solvable fragment that captures most practical modeling patterns. Given a model and, optionally, a statistical specification of the desired test dataset, the solver detects and instantiates a maximal subset of the model within this fragment, generating data that exhibits the desired statistical properties. 
We use our framework to generate a variety of high-quality test datasets from real industrial models, which cannot be correctly instantiated by existing data generators, or as effectively solved by general-purpose constraint solvers.", + "link": "https://www.semanticscholar.org/paper/d67fd08f401feefc0609e697ce4268529f8140c0", + "scraped_abstract": null, + "citation_best": 16 + }, + { + "paper": "2098278465", + "venue": "1174403976", + "year": "2012", + "title": "using dynamic analysis to discover polynomial and array invariants", + "label": [ + "152752567", + "98183937", + "168065819", + "80444323" + ], + "author": [ + "2328430926", + "2011146398", + "1977991679", + "2159603389" + ], + "reference": [ + "1480119827", + "1498946538", + "1507199453", + "1512563586", + "1521711401", + "1556604985", + "1611084195", + "1637866372", + "1863722042", + "1968482350", + "1971043610", + "1978367838", + "2031373197", + "2038053930", + "2064934273", + "2075913776", + "2080573945", + "2099866050", + "2110908283", + "2112925845", + "2116409384", + "2130711572", + "2138059712", + "2145913856", + "2149906774", + "2155330305", + "2160307932", + "2160363852", + "2162376048", + "2170736936", + "2350974888", + "3145693741" + ], + "abstract": "dynamic invariant analysis identifies likely properties over variables from observed program traces these properties can aid programmers in refactoring documenting and debugging tasks by making dynamic patterns visible statically two useful forms of invariants involve relations among polynomials over program variables and relations among array variables current dynamic analysis methods support such invariants in only very limited forms we combine mathematical techniques that have not previously been applied to this problem namely equation solving polyhedra construction and smt solving to bring new capabilities to dynamic invariant detection using these methods we show how to find equalities and inequalities among nonlinear polynomials over program 
variables and linear relations among array variables of multiple dimensions preliminary experiments on 24 mathematical algorithms and an implementation of aes encryption provide evidence that the approach is effective at finding these invariants", + "title_raw": "Using dynamic analysis to discover polynomial and array invariants", + "abstract_raw": "Dynamic invariant analysis identifies likely properties over variables from observed program traces. These properties can aid programmers in refactoring, documenting, and debugging tasks by making dynamic patterns visible statically. Two useful forms of invariants involve relations among polynomials over program variables and relations among array variables. Current dynamic analysis methods support such invariants in only very limited forms. We combine mathematical techniques that have not previously been applied to this problem, namely equation solving, polyhedra construction, and SMT solving, to bring new capabilities to dynamic invariant detection. Using these methods, we show how to find equalities and inequalities among nonlinear polynomials over program variables, and linear relations among array variables of multiple dimensions. 
Preliminary experiments on 24 mathematical algorithms and an implementation of AES encryption provide evidence that the approach is effective at finding these invariants.", + "link": "https://www.semanticscholar.org/paper/dc63fd57459d265ff10108790eb5db6cf4bf2724", + "scraped_abstract": null, + "citation_best": 70 + }, + { + "paper": "2131008594", + "venue": "1174403976", + "year": "2012", + "title": "automated detection of client state manipulation vulnerabilities", + "label": [ + "97686452", + "38652104", + "110875604", + "548217200", + "118643609", + "2778514511", + "137822555", + "22680326", + "59241245", + "2908560851", + "115903868", + "149672775" + ], + "author": [ + "2189404259", + "2137402680" + ], + "reference": [ + "33764377", + "109951691", + "119710065", + "142308502", + "338991206", + "1489243061", + "1505465226", + "1809751277", + "1861561811", + "1948712562", + "1975428729", + "1979931683", + "1980694458", + "1983142587", + "2001693166", + "2006591097", + "2008158744", + "2032417133", + "2054652127", + "2056740472", + "2088018315", + "2113709047", + "2117353399", + "2125357166", + "2129278597", + "2129362719", + "2134429122", + "2134646643", + "2145966957", + "2152225177", + "2155853355", + "2161688581", + "2162720432", + "2165698272", + "2404990348", + "2646621319", + "3019252013" + ], + "abstract": "web application programmers must be aware of a wide range of potential security risks although the most common pitfalls are well described and categorized in the literature it remains a challenging task to ensure that all guidelines are followed for this reason it is desirable to construct automated tools that can assist the programmers in the application development process by detecting weaknesses many vulnerabilities are related to web application code that stores references to application state in the generated html documents to work around the statelessness of the http protocol in this paper we show that such client state manipulation 
vulnerabilities are amenable to tool supported detection we present a static analysis for the widely used frameworks java servlets jsp and struts given a web application archive as input the analysis identifies occurrences of client state and infers the information flow between the client state and the shared application state on the server this makes it possible to check how client state manipulation performed by malicious users may affect the shared application state and cause leakage or modifications of sensitive information the warnings produced by the tool help the application programmer identify vulnerabilities moreover the inferred information can be applied to configure a security filter that automatically guards against attacks experiments on a collection of open source web applications indicate that the static analysis is able to effectively help the programmer prevent client state manipulation vulnerabilities", + "title_raw": "Automated detection of client-state manipulation vulnerabilities", + "abstract_raw": "Web application programmers must be aware of a wide range of potential security risks. Although the most common pitfalls are well described and categorized in the literature, it remains a challenging task to ensure that all guidelines are followed. For this reason, it is desirable to construct automated tools that can assist the programmers in the application development process by detecting weaknesses. Many vulnerabilities are related to web application code that stores references to application state in the generated HTML documents to work around the statelessness of the HTTP protocol. In this paper, we show that such client-state manipulation vulnerabilities are amenable to tool supported detection. We present a static analysis for the widely used frameworks Java Servlets, JSP, and Struts. 
Given a web application archive as input, the analysis identifies occurrences of client state and infers the information flow between the client state and the shared application state on the server. This makes it possible to check how client-state manipulation performed by malicious users may affect the shared application state and cause leakage or modifications of sensitive information. The warnings produced by the tool help the application programmer identify vulnerabilities. Moreover, the inferred information can be applied to configure a security filter that automatically guards against attacks. Experiments on a collection of open source web applications indicate that the static analysis is able to effectively help the programmer prevent client-state manipulation vulnerabilities.", + "link": "https://www.semanticscholar.org/paper/6eb9a3065a286eedc6f1e234c7640109e8fcad97", + "scraped_abstract": null, + "citation_best": 11 + }, + { + "paper": "2168096831", + "venue": "1174403976", + "year": "2012", + "title": "understanding integer overflow in c c", + "label": [ + "190902152", + "519991488", + "199360897", + "173608175", + "2778565505", + "64156549", + "48165567" + ], + "author": [ + "1975019471", + "2428971984", + "1993899960", + "1990061224" + ], + "reference": [ + "127238549", + "1506510492", + "1514171102", + "1536898727", + "1600776630", + "2100505193", + "2123527946", + "2153185479", + "2163010250", + "2332631626", + "3142903977" + ], + "abstract": "integer overflow bugs in c and c programs are difficult to track down and may lead to fatal errors or exploitable vulnerabilities although a number of tools for finding these bugs exist the situation is complicated because not all overflows are bugs better tools need to be constructed but a thorough understanding of the issues behind these errors does not yet exist we developed ioc a dynamic checking tool for integer overflows and used it to conduct the first detailed empirical study of the prevalence and 
patterns of occurrence of integer overflows in c and c code our results show that intentional uses of wraparound behaviors are more common than is widely believed for example there are over 200 distinct locations in the spec cint2000 benchmarks where overflow occurs although many overflows are intentional a large number of accidental overflows also occur orthogonal to programmers intent overflows are found in both well defined and undefined flavors applications executing undefined operations can be and have been broken by improvements in compiler optimizations looking beyond spec we found and reported undefined integer overflows in sqlite postgresql safeint gnu mpc and gmp firefox gcc llvm python bind and openssl many of these have since been fixed our results show that integer overflow issues in c and c are subtle and complex that they are common even in mature widely used programs and that they are widely misunderstood by developers", + "title_raw": "Understanding integer overflow in C/C++", + "abstract_raw": "Integer overflow bugs in C and C++ programs are difficult to track down and may lead to fatal errors or exploitable vulnerabilities. Although a number of tools for finding these bugs exist, the situation is complicated because not all overflows are bugs. Better tools need to be constructed---but a thorough understanding of the issues behind these errors does not yet exist. We developed IOC, a dynamic checking tool for integer overflows, and used it to conduct the first detailed empirical study of the prevalence and patterns of occurrence of integer overflows in C and C++ code. Our results show that intentional uses of wraparound behaviors are more common than is widely believed; for example, there are over 200 distinct locations in the SPEC CINT2000 benchmarks where overflow occurs. Although many overflows are intentional, a large number of accidental overflows also occur. 
Orthogonal to programmers' intent, overflows are found in both well-defined and undefined flavors. Applications executing undefined operations can be, and have been, broken by improvements in compiler optimizations. Looking beyond SPEC, we found and reported undefined integer overflows in SQLite, PostgreSQL, SafeInt, GNU MPC and GMP, Firefox, GCC, LLVM, Python, BIND, and OpenSSL; many of these have since been fixed. Our results show that integer overflow issues in C and C++ are subtle and complex, that they are common even in mature, widely used programs, and that they are widely misunderstood by developers.", + "link": "https://www.semanticscholar.org/paper/b5225204e0e999eb3e768da5d12006d66709c757", + "scraped_abstract": null, + "citation_best": 68 + }, + { + "paper": "2112351052", + "venue": "1174403976", + "year": "2012", + "title": "how do professional developers comprehend software", + "label": [ + "81587897", + "89505385", + "56666940", + "101317890", + "2777904410", + "56739046", + "91262260", + "2777561058" + ], + "author": [ + "2167615811", + "179358062", + "218731078", + "1079826502" + ], + "reference": [ + "1714723801", + "1964679590", + "1989526951", + "1997405881", + "1998265754", + "2030852377", + "2055821246", + "2080534028", + "2097750323", + "2099571428", + "2100894766", + "2109125971", + "2120704737", + "2128176227", + "2142190098", + "2148357053", + "2151996389", + "2157922094", + "2159470208", + "2160423649", + "2180107243" + ], + "abstract": "research in program comprehension has considerably evolved over the past two decades however only little is known about how developers practice program comprehension under time and project pressure and which methods and tools proposed by researchers are used in industry this paper reports on an observational study of 28 professional developers from seven companies investigating how developers comprehend software in particular we focus on the strategies followed information needed and tools used we found 
that developers put themselves in the role of end users by inspecting user interfaces they try to avoid program comprehension and employ recurring structured comprehension strategies depending on work context further we found that standards and experience facilitate comprehension program comprehension was considered a subtask of other maintenance tasks rather than a task by itself we also found that face to face communication is preferred to documentation overall our results show a gap between program comprehension research and practice as we did not observe any use of state of the art comprehension tools and developers seem to be unaware of them our findings call for further careful analysis and for reconsidering research agendas", + "title_raw": "How do professional developers comprehend software", + "abstract_raw": "Research in program comprehension has considerably evolved over the past two decades. However, only little is known about how developers practice program comprehension under time and project pressure, and which methods and tools proposed by researchers are used in industry. This paper reports on an observational study of 28 professional developers from seven companies, investigating how developers comprehend software. In particular we focus on the strategies followed, information needed, and tools used. We found that developers put themselves in the role of end users by inspecting user interfaces. They try to avoid program comprehension, and employ recurring, structured comprehension strategies depending on work context. Further, we found that standards and experience facilitate comprehension. Program comprehension was considered a subtask of other maintenance tasks rather than a task by itself. We also found that face-to-face communication is preferred to documentation. 
Overall, our results show a gap between program comprehension research and practice as we did not observe any use of state of the art comprehension tools and developers seem to be unaware of them. Our findings call for further careful analysis and for reconsidering research agendas.", + "link": "https://www.semanticscholar.org/paper/3170bb1affffeddfe9d3262c9a0787c0b29cb18a", + "scraped_abstract": null, + "citation_best": 149 + }, + { + "paper": "2121061989", + "venue": "1174403976", + "year": "2012", + "title": "partial models towards modeling and reasoning with uncertainty", + "label": [ + "119857082", + "529173508", + "2777904410", + "145644426", + "177803969" + ], + "author": [ + "511866907", + "1274701876", + "2032435099" + ], + "reference": [ + "99545732", + "187889024", + "204235972", + "1488121907", + "1494019345", + "1507387197", + "1536631555", + "1548957673", + "1555593767", + "1601820116", + "1983160424", + "1988506401", + "1989528416", + "2002662905", + "2058547748", + "2091968063", + "2129289644", + "2130195901", + "2143614054", + "2148106163", + "2155028447", + "2477378326", + "2760715935", + "3127803815", + "3152219479" + ], + "abstract": "models are good at expressing information about software but not as good at expressing modelers uncertainty about it the highly incremental and iterative nature of software development nonetheless requires the ability to express uncertainty and reason with models containing it in this paper we build on our earlier work on expressing uncertainty using partial models by elaborating an approach to reasoning with such models we evaluate our approach by experimentally comparing it to traditional strategies for dealing with uncertainty as well as by conducting a case study using open source software we conclude that we are able to reap the benefits of well managed uncertainty while incurring minimal additional cost", + "title_raw": "Partial models: towards modeling and reasoning with uncertainty", + "abstract_raw": 
"Models are good at expressing information about software but not as good at expressing modelers' uncertainty about it. The highly incremental and iterative nature of software development nonetheless requires the ability to express uncertainty and reason with models containing it. In this paper, we build on our earlier work on expressing uncertainty using partial models, by elaborating an approach to reasoning with such models. We evaluate our approach by experimentally comparing it to traditional strategies for dealing with uncertainty as well as by conducting a case study using open source software. We conclude that we are able to reap the benefits of well-managed uncertainty while incurring minimal additional cost.", + "link": "https://www.semanticscholar.org/paper/b71f199a657e2fe5dbabda8e803a0e3556eccc8b", + "scraped_abstract": null, + "citation_best": 84 + }, + { + "paper": "2099302229", + "venue": "1130985203", + "year": "2012", + "title": "searching and mining trillions of time series subsequences under dynamic time warping", + "label": [ + "119857082", + "89198739", + "120174047", + "116738811", + "124101348", + "2780513914", + "73555534", + "125583679", + "88516994" + ], + "author": [ + "141314290", + "2125777994", + "2083987245", + "2165222361", + "2586945262", + "2312006013", + "2129110089", + "2170070822" + ], + "reference": [ + "23356943", + "1514400885", + "1853995153", + "1858214555", + "1864972570", + "1968010112", + "1973222433", + "1981055269", + "1995504712", + "1999352753", + "2010478633", + "2012592962", + "2024081861", + "2029438113", + "2049120089", + "2051110057", + "2072708938", + "2076424822", + "2077720176", + "2080698135", + "2097248932", + "2098759488", + "2103932490", + "2125201599", + "2129330015", + "2139440613", + "2148682905", + "2151937625", + "2155919101", + "2159875404", + "2162193833", + "2168080440", + "2169004268", + "2288955986", + "2294644219", + "2545993457", + "2952321802", + "3125923948" + ], + "abstract": "most time 
series data mining algorithms use similarity search as a core subroutine and thus the time taken for similarity search is the bottleneck for virtually all time series data mining algorithms the difficulty of scaling search to large datasets largely explains why most academic work on time series data mining has plateaued at considering a few millions of time series objects while much of industry and science sits on billions of time series objects waiting to be explored in this work we show that by using a combination of four novel ideas we can search and mine truly massive time series for the first time we demonstrate the following extremely unintuitive fact in large datasets we can exactly search under dtw much more quickly than the current state of the art euclidean distance search algorithms we demonstrate our work on the largest set of time series experiments ever attempted in particular the largest dataset we consider is larger than the combined size of all of the time series datasets considered in all data mining papers ever published we show that our ideas allow us to solve higher level time series data mining problem such as motif discovery and clustering at scales that would otherwise be untenable in addition to mining massive datasets we will show that our ideas also have implications for real time monitoring of data streams allowing us to handle much faster arrival rates and or use cheaper and lower powered devices than are currently possible", + "title_raw": "Searching and mining trillions of time series subsequences under dynamic time warping", + "abstract_raw": "Most time series data mining algorithms use similarity search as a core subroutine, and thus the time taken for similarity search is the bottleneck for virtually all time series data mining algorithms. 
The difficulty of scaling search to large datasets largely explains why most academic work on time series data mining has plateaued at considering a few millions of time series objects, while much of industry and science sits on billions of time series objects waiting to be explored. In this work we show that by using a combination of four novel ideas we can search and mine truly massive time series for the first time. We demonstrate the following extremely unintuitive fact; in large datasets we can exactly search under DTW much more quickly than the current state-of-the-art Euclidean distance search algorithms. We demonstrate our work on the largest set of time series experiments ever attempted. In particular, the largest dataset we consider is larger than the combined size of all of the time series datasets considered in all data mining papers ever published. We show that our ideas allow us to solve higher-level time series data mining problem such as motif discovery and clustering at scales that would otherwise be untenable. 
In addition to mining massive datasets, we will show that our ideas also have implications for real-time monitoring of data streams, allowing us to handle much faster arrival rates and/or use cheaper and lower powered devices than are currently possible.", + "link": "https://www.semanticscholar.org/paper/84be8765d36ad7e33170442f16362181728bd998", + "scraped_abstract": null, + "citation_best": 982 + }, + { + "paper": "2131975293", + "venue": "1158363782", + "year": "2012", + "title": "resilient distributed datasets a fault tolerant abstraction for in memory cluster computing", + "label": [ + "63540848", + "34165917", + "133875982", + "48103436", + "29140674", + "91481028", + "120314980" + ], + "author": [ + "2009645378", + "2164646637", + "2098092316", + "2502187356", + "2807499900", + "2147215206", + "2523407221", + "719828399", + "2161479384" + ], + "reference": [ + "193620207", + "200298483", + "1480850417", + "1487337216", + "1493893823", + "1497290554", + "1554944419", + "1845494277", + "1867761151", + "1976860187", + "1978924650", + "2010929544", + "2013344760", + "2023186718", + "2033656974", + "2035168235", + "2045271686", + "2060204338", + "2066636486", + "2073065242", + "2074935284", + "2096092966", + "2096125134", + "2098935637", + "2100830825", + "2109722477", + "2140953143", + "2144002928", + "2144518192", + "2153811040", + "2163764145", + "2163961697", + "2167541073", + "2170616854", + "2173213060", + "2406836379", + "2607967384", + "2753710282", + "3098582471" + ], + "abstract": "we present resilient distributed datasets rdds a distributed memory abstraction that lets programmers perform in memory computations on large clusters in a fault tolerant manner rdds are motivated by two types of applications that current computing frameworks handle inefficiently iterative algorithms and interactive data mining tools in both cases keeping data in memory can improve performance by an order of magnitude to achieve fault tolerance efficiently rdds provide a 
restricted form of shared memory based on coarse grained transformations rather than fine grained updates to shared state however we show that rdds are expressive enough to capture a wide class of computations including recent specialized programming models for iterative jobs such as pregel and new applications that these models do not capture we have implemented rdds in a system called spark which we evaluate through a variety of user applications and benchmarks", + "title_raw": "Resilient distributed datasets: a fault-tolerant abstraction for in-memory cluster computing", + "abstract_raw": "We present Resilient Distributed Datasets (RDDs), a distributed memory abstraction that lets programmers perform in-memory computations on large clusters in a fault-tolerant manner. RDDs are motivated by two types of applications that current computing frameworks handle inefficiently: iterative algorithms and interactive data mining tools. In both cases, keeping data in memory can improve performance by an order of magnitude. To achieve fault tolerance efficiently, RDDs provide a restricted form of shared memory, based on coarse-grained transformations rather than fine-grained updates to shared state. However, we show that RDDs are expressive enough to capture a wide class of computations, including recent specialized programming models for iterative jobs, such as Pregel, and new applications that these models do not capture. 
We have implemented RDDs in a system called Spark, which we evaluate through a variety of user applications and benchmarks.", + "link": "https://www.semanticscholar.org/paper/798cc254a94dc6f72896bcdc518a3447fbc94c3d", + "scraped_abstract": null, + "citation_best": 3457 + }, + { + "paper": "2098575846", + "venue": "1127352206", + "year": "2012", + "title": "concurrent data representation synthesis", + "label": [ + "116409475", + "169590947", + "52723943", + "199360897", + "136134403", + "162319229" + ], + "author": [ + "2785283258", + "2067453598", + "2134374908", + "343541395", + "1345056057" + ], + "reference": [ + "152789412", + "166724137", + "1590443763", + "1991199257", + "2016049762", + "2030183445", + "2036984542", + "2040073555", + "2063050381", + "2089901765", + "2101939036", + "2106288302", + "2106703803", + "2124440085", + "2137824953", + "2138567239", + "2147760702", + "2149298504", + "2169611673", + "2293756656", + "2586398916", + "2769656678" + ], + "abstract": "we describe an approach for synthesizing data representations for concurrent programs our compiler takes as input a program written using concurrent relations and synthesizes a representation of the relations as sets of cooperating data structures as well as the placement and acquisition of locks to synchronize concurrent access to those data structures the resulting code is correct by construction individual relational operations are implemented correctly and the aggregate set of operations is serializable and deadlock free the relational specification also permits a high level optimizer to choose the best performing of many possible legal data representations and locking strategies which we demonstrate with an experiment autotuning a graph benchmark", + "title_raw": "Concurrent data representation synthesis", + "abstract_raw": "We describe an approach for synthesizing data representations for concurrent programs. 
Our compiler takes as input a program written using concurrent relations and synthesizes a representation of the relations as sets of cooperating data structures as well as the placement and acquisition of locks to synchronize concurrent access to those data structures. The resulting code is correct by construction: individual relational operations are implemented correctly and the aggregate set of operations is serializable and deadlock free. The relational specification also permits a high-level optimizer to choose the best performing of many possible legal data representations and locking strategies, which we demonstrate with an experiment autotuning a graph benchmark.", + "link": "https://www.semanticscholar.org/paper/798a40c40539a53a71c7f15c3ff90e15af6dcca4", + "scraped_abstract": null, + "citation_best": 63 + }, + { + "paper": "2095156129", + "venue": "1163618098", + "year": "2012", + "title": "don t trust satellite phones a security analysis of two satphone standards", + "label": [ + "101618186", + "31258907", + "38652104", + "92950451", + "59201141", + "2780221543", + "178489894", + "181149355", + "200632571", + "65302260", + "148730421" + ], + "author": [ + "2251637688", + "2035582089", + "2112524672", + "356053134", + "2568277420" + ], + "reference": [ + "126407768", + "172795436", + "1544956979", + "1553308705", + "1554090281", + "1563341929", + "1576797895", + "1581058907", + "1600415638", + "1603428681", + "1641639920", + "1755066991", + "1894646615", + "2063833036", + "2097140439", + "2102096888", + "2156453323", + "2406652305", + "2495326574" + ], + "abstract": "there is a rich body of work related to the security aspects of cellular mobile phones in particular with respect to the gsm and umts systems to the best of our knowledge however there has been no investigation of the security of satellite phones abbr sat phones even though a niche market compared to the g2 and g3 mobile systems there are several 100 000 sat phone subscribers worldwide given 
the sensitive nature of some of their application domains e g natural disaster areas or military campaigns security plays a particularly important role for sat phones in this paper we analyze the encryption systems used in the two existing and competing sat phone standards gmr 1 and gmr 2 the first main contribution is that we were able to completely reverse engineer the encryption algorithms employed both ciphers had not been publicly known previously we describe the details of the recovery of the two algorithms from freely available dsp firmware updates for sat phones which included the development of a custom disassembler and tools to analyze the code and extending prior work on binary analysis to efficiently identify cryptographic code we note that these steps had to be repeated for both systems because the available binaries were from two entirely different dsp processors perhaps somewhat surprisingly we found that the gmr 1 cipher can be considered a proprietary variant of the gsm a5 2 algorithm whereas the gmr 2 cipher is an entirely new design the second main contribution lies in the cryptanalysis of the two proprietary stream ciphers we were able to adopt known a5 2 cipher text only attacks to the gmr 1 algorithm with an average case complexity of 232 steps with respect to the gmr 2 cipher we developed a new attack which is powerful in a known plaintext setting in this situation the encryption key for one session i e one phone call can be recovered with approximately 50 65 bytes of key stream and a moderate computational complexity a major finding of our work is that the stream ciphers of the two existing satellite phone systems are considerably weaker than what is state of the art in symmetric cryptography", + "title_raw": "Don't Trust Satellite Phones: A Security Analysis of Two Satphone Standards", + "abstract_raw": "There is a rich body of work related to the security aspects of cellular mobile phones, in particular with respect to the GSM and UMTS 
systems. To the best of our knowledge, however, there has been no investigation of the security of satellite phones (abbr. sat phones). Even though a niche market compared to the G2 and G3 mobile systems, there are several 100,000 sat phone subscribers worldwide. Given the sensitive nature of some of their application domains (e.g., natural disaster areas or military campaigns), security plays a particularly important role for sat phones. In this paper, we analyze the encryption systems used in the two existing (and competing) sat phone standards, GMR-1 and GMR-2. The first main contribution is that we were able to completely reverse engineer the encryption algorithms employed. Both ciphers had not been publicly known previously. We describe the details of the recovery of the two algorithms from freely available DSP-firmware updates for sat phones, which included the development of a custom disassembler and tools to analyze the code, and extending prior work on binary analysis to efficiently identify cryptographic code. We note that these steps had to be repeated for both systems, because the available binaries were from two entirely different DSP processors. Perhaps somewhat surprisingly, we found that the GMR-1 cipher can be considered a proprietary variant of the GSM A5/2 algorithm, whereas the GMR-2 cipher is an entirely new design. The second main contribution lies in the cryptanalysis of the two proprietary stream ciphers. We were able to adopt known A5/2 cipher text-only attacks to the GMR-1 algorithm with an average case complexity of 232 steps. With respect to the GMR-2 cipher, we developed a new attack which is powerful in a known-plaintext setting. In this situation, the encryption key for one session, i.e., one phone call, can be recovered with approximately 50-65 bytes of key stream and a moderate computational complexity. 
A major finding of our work is that the stream ciphers of the two existing satellite phone systems are considerably weaker than what is state-of-the-art in symmetric cryptography.", + "link": "https://www.semanticscholar.org/paper/62da6562b1b3f7c4fc9592abf21880175a251132", + "scraped_abstract": null, + "citation_best": 66 + }, + { + "paper": "2113551315", + "venue": "1152462849", + "year": "2012", + "title": "multi resource fair queueing for packet processing", + "label": [ + "200651375", + "31258907", + "22684755", + "113200698", + "102486512", + "2780584376", + "143647952", + "86127852", + "2779581428", + "175893541", + "120314980" + ], + "author": [ + "734473377", + "2293201185", + "2009645378", + "2161479384" + ], + "reference": [ + "103955348", + "1867018545", + "1890643295", + "1969844906", + "1981800388", + "2010365467", + "2020183851", + "2027458924", + "2055816813", + "2073440460", + "2076516498", + "2097831241", + "2100892891", + "2102978741", + "2103068903", + "2104276806", + "2107187535", + "2117366456", + "2129700391", + "2136400262", + "2139475785", + "2146479949", + "2155053108", + "2161480401", + "2176566884", + "2202294430", + "3163054746", + "3163287424", + "3163372212" + ], + "abstract": "middleboxes are ubiquitous in today s networks and perform a variety of important functions including ids vpn firewalling and wan optimization these functions differ vastly in their requirements for hardware resources e g cpu cycles and memory bandwidth thus depending on the functions they go through different flows can consume different amounts of a middlebox s resources while there is much literature on weighted fair sharing of link bandwidth to isolate flows it is unclear how to schedule multiple resources in a middlebox to achieve similar guarantees in this paper we analyze several natural packet scheduling algorithms for multiple resources and show that they have undesirable properties we propose a new algorithm dominant resource fair queuing drfq that 
retains the attractive properties that fair sharing provides for one resource in doing so we generalize the concept of virtual time in classical fair queuing to multi resource settings the resulting algorithm is also applicable in other contexts where several resources need to be multiplexed in the time domain", + "title_raw": "Multi-resource fair queueing for packet processing", + "abstract_raw": "Middleboxes are ubiquitous in today's networks and perform a variety of important functions, including IDS, VPN, firewalling, and WAN optimization. These functions differ vastly in their requirements for hardware resources (e.g., CPU cycles and memory bandwidth). Thus, depending on the functions they go through, different flows can consume different amounts of a middlebox's resources. While there is much literature on weighted fair sharing of link bandwidth to isolate flows, it is unclear how to schedule multiple resources in a middlebox to achieve similar guarantees. In this paper, we analyze several natural packet scheduling algorithms for multiple resources and show that they have undesirable properties. We propose a new algorithm, Dominant Resource Fair Queuing (DRFQ), that retains the attractive properties that fair sharing provides for one resource. In doing so, we generalize the concept of virtual time in classical fair queuing to multi-resource settings. 
The resulting algorithm is also applicable in other contexts where several resources need to be multiplexed in the time domain.", + "link": "https://www.semanticscholar.org/paper/27ee3ba23f2562185efd5e111209412d5c9c04be", + "scraped_abstract": null, + "citation_best": 41 + }, + { + "paper": "2029009258", + "venue": "1140684652", + "year": "2012", + "title": "time based calibration of effectiveness measures", + "label": [ + "2778029271", + "2780009758", + "23123220" + ], + "author": [ + "1959697873", + "2098618034" + ], + "reference": [ + "126907439", + "132343450", + "194361697", + "1604317414", + "1965862077", + "1968927634", + "1973693034", + "1987500252", + "1988711048", + "1992411228", + "2000432130", + "2012430612", + "2014415866", + "2020584988", + "2026430213", + "2028009320", + "2031858317", + "2034173707", + "2035569891", + "2037377096", + "2046011778", + "2047860186", + "2058896506", + "2069870183", + "2070192299", + "2071156242", + "2077213532", + "2079701654", + "2090715008", + "2099048704", + "2102914327", + "2113640060", + "2119529697", + "2120966432", + "2130076000", + "2132069633", + "2134440707", + "2148545726", + "2152707065", + "2408005573", + "2900443855" + ], + "abstract": "many current effectiveness measures incorporate simplifying assumptions about user behavior these assumptions prevent the measures from reflecting aspects of the search process that directly impact the quality of retrieval results as experienced by the user in particular these measures implicitly model users as working down a list of retrieval results spending equal time assessing each document in reality even a careful user intending to identify as much relevant material as possible must spend longer on some documents than on others aspects such as document length duplicates and summaries all influence the time required in this paper we introduce a time biased gain measure which explicitly accommodates such aspects of the search process by conducting an appropriate user 
study we calibrate and validate the measure against the trec 2005 robust track test collection we examine properties of the measure contrasting it to traditional effectiveness measures and exploring its extension to other aspects and environments as its primary benefit the measure allows us to evaluate system performance in human terms while maintaining the simplicity and repeatability of system oriented tests overall we aim to achieve a clearer connection between user oriented studies and system oriented tests allowing us to better transfer insights and outcomes from one to the other", + "title_raw": "Time-based calibration of effectiveness measures", + "abstract_raw": "Many current effectiveness measures incorporate simplifying assumptions about user behavior. These assumptions prevent the measures from reflecting aspects of the search process that directly impact the quality of retrieval results as experienced by the user. In particular, these measures implicitly model users as working down a list of retrieval results, spending equal time assessing each document. In reality, even a careful user, intending to identify as much relevant material as possible, must spend longer on some documents than on others. Aspects such as document length, duplicates and summaries all influence the time required. In this paper, we introduce a time-biased gain measure, which explicitly accommodates such aspects of the search process. By conducting an appropriate user study, we calibrate and validate the measure against the TREC 2005 Robust Track test collection. We examine properties of the measure, contrasting it to traditional effectiveness measures, and exploring its extension to other aspects and environments. As its primary benefit, the measure allows us to evaluate system performance in human terms, while maintaining the simplicity and repeatability of system-oriented tests. 
Overall, we aim to achieve a clearer connection between user-oriented studies and system-oriented tests, allowing us to better transfer insights and outcomes from one to the other.", + "link": "https://www.semanticscholar.org/paper/aeda29c21d1f6eb89b1bed6f85e82aadfcc15d76", + "scraped_abstract": null, + "citation_best": 174 + }, + { + "paper": "2101221989", + "venue": "1131589359", + "year": "2012", + "title": "temperature management in data centers why some might like it hot", + "label": [ + "153740404", + "44154836", + "12302492" + ], + "author": [ + "2108091596", + "2231016475", + "1846380883", + "2226761501", + "2099243581" + ], + "reference": [ + "59613816", + "106162958", + "145805655", + "1411882273", + "1510445203", + "1558516248", + "2004340162", + "2022865998", + "2043719859", + "2055043387", + "2065395077", + "2075653320", + "2105321788", + "2105860728", + "2112137708", + "2114063275", + "2118222943", + "2120613668", + "2122249806", + "2125165932", + "2161234420", + "2163836105", + "2172016740" + ], + "abstract": "the energy consumed by data centers is starting to make up a significant fraction of the world s energy consumption and carbon emissions a large fraction of the consumed energy is spent on data center cooling which has motivated a large body of work on temperature management in data centers interestingly a key aspect of temperature management has not been well understood controlling the setpoint temperature at which to run a data center s cooling system most data centers set their thermostat based on conservative suggestions by manufacturers as there is limited understanding of how higher temperatures will affect the system at the same time studies suggest that increasing the temperature setpoint by just one degree could save 2 5 of the energy consumption this paper provides a multi faceted study of temperature management in data centers we use a large collection of field data from different production environments to study the impact of 
temperature on hardware reliability including the reliability of the storage subsystem the memory subsystem and server reliability as a whole we also use an experimental testbed based on a thermal chamber and a large array of benchmarks to study two other potential issues with higher data center temperatures the effect on server performance and power based on our findings we make recommendations for temperature management in data centers that create the potential for saving energy while limiting negative effects on system reliability and performance", + "title_raw": "Temperature management in data centers: why some (might) like it hot", + "abstract_raw": "The energy consumed by data centers is starting to make up a significant fraction of the world's energy consumption and carbon emissions. A large fraction of the consumed energy is spent on data center cooling, which has motivated a large body of work on temperature management in data centers. Interestingly, a key aspect of temperature management has not been well understood: controlling the setpoint temperature at which to run a data center's cooling system. Most data centers set their thermostat based on (conservative) suggestions by manufacturers, as there is limited understanding of how higher temperatures will affect the system. At the same time, studies suggest that increasing the temperature setpoint by just one degree could save 2-5% of the energy consumption. This paper provides a multi-faceted study of temperature management in data centers. We use a large collection of field data from different production environments to study the impact of temperature on hardware reliability, including the reliability of the storage subsystem, the memory subsystem and server reliability as a whole. We also use an experimental testbed based on a thermal chamber and a large array of benchmarks to study two other potential issues with higher data center temperatures: the effect on server performance and power. 
Based on our findings, we make recommendations for temperature management in data centers, that create the potential for saving energy, while limiting negative effects on system reliability and performance.", + "link": "https://www.semanticscholar.org/paper/239e046347d5075b3eeef5439050e9f2ca760b7b", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2123458705", + "venue": "1175089206", + "year": "2012", + "title": "high performance complex event processing over xml streams", + "label": [ + "77944639", + "34127721", + "123606473", + "2780213375", + "199360897", + "15845906", + "8797682", + "2385561", + "120314980" + ], + "author": [ + "2002600875", + "2157610843", + "2028117853" + ], + "reference": [ + "82970728", + "180382289", + "836144344", + "1492682035", + "1532788724", + "1561433714", + "1570105390", + "1603860915", + "1769286860", + "1964904001", + "1985108724", + "1996794063", + "2049273188", + "2064580935", + "2083308531", + "2112167310", + "2115295490", + "2127444179", + "2128710701", + "2133353243", + "2135611729", + "2144171837", + "2146886516", + "2151033407", + "2151886642", + "2157072592", + "2166604013", + "2170016254", + "2171037641", + "2230830246", + "2294034341", + "2913069824", + "3005762627" + ], + "abstract": "much research attention has been given to delivering high performance systems that are capable of complex event processing cep in a wide range of applications however many current cep systems focus on processing efficiently data having a simple structure and are otherwise limited in their ability to support efficiently complex continuous queries on structured or semi structured information however xml streams represent a very popular form of data exchange comprising large portions of social network and rss feeds financial records configuration files and similar applications requiring advanced cep queries in this paper we present the xseq language and system that support cep on xml streams via an extension of xpath 
that is both powerful and amenable to an efficient implementation specifically the xseq language extends xpath with natural operators to express sequential and kleene patterns over xml streams while remaining highly amenable to efficient implementation xseq is designed to take full advantage of recent advances in the field of automata on visibly pushdown automata vpa where higher expressive power can be achieved without compromising efficiency whereas the amenability to efficient implementation was not demonstrated in xpath extensions previously proposed we illustrate xseq s power for cep applications through examples from different domains and provide formal results on its expressiveness and complexity finally we present several optimization techniques for xseq queries our extensive experiments indicate that xseq brings outstanding performance to cep applications two orders of magnitude improvement are obtained over the same queries executed in general purpose xml engines", + "title_raw": "High-performance complex event processing over XML streams", + "abstract_raw": "Much research attention has been given to delivering high-performance systems that are capable of complex event processing (CEP) in a wide range of applications. However, many current CEP systems focus on processing efficiently data having a simple structure, and are otherwise limited in their ability to support efficiently complex continuous queries on structured or semi-structured information. However, XML streams represent a very popular form of data exchange, comprising large portions of social network and RSS feeds, financial records, configuration files, and similar applications requiring advanced CEP queries. In this paper, we present the XSeq language and system that support CEP on XML streams, via an extension of XPath that is both powerful and amenable to an efficient implementation. 
Specifically, the XSeq language extends XPath with natural operators to express sequential and Kleene-* patterns over XML streams, while remaining highly amenable to efficient implementation. XSeq is designed to take full advantage of recent advances in the field of automata on Visibly Pushdown Automata (VPA), where higher expressive power can be achieved without compromising efficiency (whereas the amenability to efficient implementation was not demonstrated in XPath extensions previously proposed). We illustrate XSeq's power for CEP applications through examples from different domains, and provide formal results on its expressiveness and complexity. Finally, we present several optimization techniques for XSeq queries. Our extensive experiments indicate that XSeq brings outstanding performance to CEP applications: two orders of magnitude improvement are obtained over the same queries executed in general-purpose XML engines.", + "link": "https://www.semanticscholar.org/paper/17f66d7a69bf20b29602d943069eedcd1c07abff", + "scraped_abstract": null, + "citation_best": 63 + }, + { + "paper": "2113331157", + "venue": "1166315290", + "year": "2012", + "title": "jamming user interfaces programmable particle stiffness and sensing for malleable and shape changing devices", + "label": [ + "44154836", + "206755178", + "89505385", + "186967261", + "91559456", + "152086174", + "194995250" + ], + "author": [ + "2069682576", + "1647524748", + "102229238", + "2150915362", + "2101434678" + ], + "reference": [ + "62668296", + "1811460226", + "1976144756", + "1981927657", + "1988982607", + "2004967763", + "2005198142", + "2007924815", + "2011422734", + "2015288399", + "2018085588", + "2026011478", + "2029744358", + "2033496303", + "2035084961", + "2036723211", + "2049298692", + "2054659059", + "2070099904", + "2089305039", + "2089787431", + "2093061150", + "2097214940", + "2102491248", + "2106310345", + "2107374986", + "2112374856", + "2113628568", + "2122261804", + "2126469287", + 
"2140395929", + "2146752178", + "2146947104", + "2147807099", + "2151386142", + "2153054042", + "2153975891", + "2161121228", + "2167339065", + "2256897725", + "2294698177" + ], + "abstract": "malleable and organic user interfaces have the potential to enable radically new forms of interactions and expressiveness through flexible free form and computationally controlled shapes and displays this work specifically focuses on particle jamming as a simple effective method for flexible shape changing user interfaces where programmatic control of material stiffness enables haptic feedback deformation tunable affordances and control gain we introduce a compact low power pneumatic jamming system suitable for mobile devices and a new hydraulic based technique with fast silent actuation and optical shape sensing we enable jamming structures to sense input and function as interaction devices through two contributed methods for high resolution shape sensing using 1 index matched particles and fluids and 2 capacitive and electric field sensing we explore the design space of malleable and organic user interfaces enabled by jamming through four motivational prototypes that highlight jamming s potential in hci including applications for tabletops tablets and for portable shape changing mobile devices", + "title_raw": "Jamming user interfaces: programmable particle stiffness and sensing for malleable and shape-changing devices", + "abstract_raw": "Malleable and organic user interfaces have the potential to enable radically new forms of interactions and expressiveness through flexible, free-form and computationally controlled shapes and displays. This work, specifically focuses on particle jamming as a simple, effective method for flexible, shape-changing user interfaces where programmatic control of material stiffness enables haptic feedback, deformation, tunable affordances and control gain. 
We introduce a compact, low-power pneumatic jamming system suitable for mobile devices, and a new hydraulic-based technique with fast, silent actuation and optical shape sensing. We enable jamming structures to sense input and function as interaction devices through two contributed methods for high-resolution shape sensing using: 1) index-matched particles and fluids, and 2) capacitive and electric field sensing. We explore the design space of malleable and organic user interfaces enabled by jamming through four motivational prototypes that highlight jamming's potential in HCI, including applications for tabletops, tablets and for portable shape-changing mobile devices.", + "link": "https://scholar.google.com/scholar?as_q=&num=10&btnG=Search+Scholar&as_epq=Jamming+User+Interfaces:+Programmable+Particle+Stiffness+and+Sensing+for+Malleable+and+Shape-Changing+Devices&as_oq=&as_eq=&as_occt=any&as_sauthors=Follmer", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2010127311", + "venue": "1166315290", + "year": "2012", + "title": "cliplets juxtaposing still and dynamic imagery", + "label": [ + "129315195", + "40140605", + "49774154", + "200632571", + "186967261", + "136197465", + "194995250" + ], + "author": [ + "2303746224", + "2410459323", + "14227721", + "2078800774", + "2206701583", + "2054776351", + "3049172691" + ], + "reference": [ + "1491719799", + "1812925652", + "1968589407", + "1975862670", + "2001933992", + "2022532533", + "2037143014", + "2043543539", + "2052452567", + "2068859278", + "2077786999", + "2081711411", + "2085261163", + "2098628853", + "2099447573", + "2113018061", + "2113079052", + "2116270009", + "2124386111", + "2153157788", + "2153709524", + "2163725052", + "2244837655", + "2294052718", + "2295660824", + "2750932871" + ], + "abstract": "we explore creating cliplets a form of visual media that juxtaposes still image and video segments both spatially and temporally to expressively abstract a moment much as in cinemagraphs 
the tension between static and dynamic elements in a cliplet reinforces both aspects strongly focusing the viewer s attention creating this type of imagery is challenging without professional tools and training we develop a set of idioms essentially spatiotemporal mappings that characterize cliplet elements and use these idioms in an interactive system to quickly compose a cliplet from ordinary handheld video one difficulty is to avoid artifacts in the cliplet composition without resorting to extensive manual input we address this with automatic alignment looping optimization and feathering simultaneous matting and compositing and laplacian blending a key user interface challenge is to provide affordances to define the parameters of the mappings from input time to output time while maintaining a focus on the cliplet being created we demonstrate the creation of a variety of cliplet types we also report on informal feedback as well as a more structured survey of users", + "title_raw": "Cliplets: juxtaposing still and dynamic imagery", + "abstract_raw": "We explore creating \"\"cliplets\"\", a form of visual media that juxtaposes still image and video segments, both spatially and temporally, to expressively abstract a moment. Much as in \"\"cinemagraphs\"\", the tension between static and dynamic elements in a cliplet reinforces both aspects, strongly focusing the viewer's attention. Creating this type of imagery is challenging without professional tools and training. We develop a set of idioms, essentially spatiotemporal mappings, that characterize cliplet elements, and use these idioms in an interactive system to quickly compose a cliplet from ordinary handheld video. One difficulty is to avoid artifacts in the cliplet composition without resorting to extensive manual input. We address this with automatic alignment, looping optimization and feathering, simultaneous matting and compositing, and Laplacian blending. 
A key user-interface challenge is to provide affordances to define the parameters of the mappings from input time to output time while maintaining a focus on the cliplet being created. We demonstrate the creation of a variety of cliplet types. We also report on informal feedback as well as a more structured survey of users.", + "link": "https://www.semanticscholar.org/paper/e5dde1fd7820e1eb2c535e75651dad0c6bec48be", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2167913131", + "venue": "1166315290", + "year": "2012", + "title": "crowdscape interactively visualizing user behavior and output", + "label": [ + "2775924081", + "107457646", + "36464697", + "64073096", + "62230096" + ], + "author": [ + "2311654231", + "2015015692" + ], + "reference": [ + "82775311", + "1970381522", + "1985816302", + "2034694694", + "2050491563", + "2059105030", + "2098865355", + "2104749423", + "2124994029", + "2127008633", + "2133485007", + "2134937538", + "2135415614", + "2143539737", + "2151401338", + "2153217804", + "2168765606", + "2996360111", + "3122111065" + ], + "abstract": "crowdsourcing has become a powerful paradigm for accomplishing work quickly and at scale but involves significant challenges in quality control researchers have developed algorithmic quality control approaches based on either worker outputs such as gold standards or worker agreement or worker behavior such as task fingerprinting but each approach has serious limitations especially for complex or creative work human evaluation addresses these limitations but does not scale well with increasing numbers of workers we present crowdscape a system that supports the human evaluation of complex crowd work through interactive visualization and mixed initiative machine learning the system combines information about worker behavior with worker outputs helping users to better understand and harness the crowd we describe the system and discuss its utility through grounded case studies we explore 
other contexts where crowdscape s visualizations might be useful such as in user studies", + "title_raw": "CrowdScape: interactively visualizing user behavior and output", + "abstract_raw": "Crowdsourcing has become a powerful paradigm for accomplishing work quickly and at scale, but involves significant challenges in quality control. Researchers have developed algorithmic quality control approaches based on either worker outputs (such as gold standards or worker agreement) or worker behavior (such as task fingerprinting), but each approach has serious limitations, especially for complex or creative work. Human evaluation addresses these limitations but does not scale well with increasing numbers of workers. We present CrowdScape, a system that supports the human evaluation of complex crowd work through interactive visualization and mixed initiative machine learning. The system combines information about worker behavior with worker outputs, helping users to better understand and harness the crowd. We describe the system and discuss its utility through grounded case studies. 
We explore other contexts where CrowdScape's visualizations might be useful, such as in user studies.", + "link": "https://www.semanticscholar.org/paper/da09e12a0a5114107bbc7fbf8bdf0bf68d10b2f9", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2005499394", + "venue": "1133523790", + "year": "2012", + "title": "dense subgraph maintenance under streaming edge weight updates for real time story identification", + "label": [ + "78646695", + "86256295", + "194145944", + "124101348", + "518677369", + "162307627", + "23123220" + ], + "author": [ + "2106511090", + "1147269944", + "335443309", + "2196778472" + ], + "reference": [ + "183656749", + "1497709229", + "1500512125", + "1530595269", + "1545030848", + "1558363837", + "1580818183", + "1586552957", + "1604983895", + "1612498707", + "1984374364", + "1984407615", + "1995913408", + "2000649160", + "2002576896", + "2007273639", + "2009723146", + "2014426991", + "2018165284", + "2027472851", + "2040420521", + "2064853889", + "2069318033", + "2080068757", + "2082603400", + "2096154584", + "2100832675", + "2114731047", + "2123297508", + "2150286230", + "2293546752", + "2913283679", + "2953102514", + "2997047297", + "3125953395" + ], + "abstract": "recent years have witnessed an unprecedented proliferation of social media people around the globe author every day millions of blog posts micro blog posts social network status updates etc this rich stream of information can be used to identify on an ongoing basis emerging stories and events that capture popular attention stories can be identified via groups of tightly coupled real world entities namely the people locations products etc that are involved in the story the sheer scale and rapid evolution of the data involved necessitate highly efficient techniques for identifying important stories at every point of time the main challenge in real time story identification is the maintenance of dense subgraphs corresponding to groups of tightly coupled entities 
under streaming edge weight updates resulting from a stream of user generated content this is the first work to study the efficient maintenance of dense subgraphs under such streaming edge weight updates for a wide range of definitions of density we derive theoretical results regarding the magnitude of change that a single edge weight update can cause based on these we propose a novel algorithm dyndens which outperforms adaptations of existing techniques to this setting and yields meaningful results our approach is validated by a thorough experimental evaluation on large scale real and synthetic datasets", + "title_raw": "Dense subgraph maintenance under streaming edge weight updates for real-time story identification", + "abstract_raw": "Recent years have witnessed an unprecedented proliferation of social media. People around the globe author, every day, millions of blog posts, micro-blog posts, social network status updates, etc. This rich stream of information can be used to identify, on an ongoing basis, emerging stories, and events that capture popular attention. Stories can be identified via groups of tightly-coupled real-world entities, namely the people, locations, products, etc., that are involved in the story. The sheer scale, and rapid evolution of the data involved necessitate highly efficient techniques for identifying important stories at every point of time.\r\n\r\nThe main challenge in real-time story identification is the maintenance of dense subgraphs (corresponding to groups of tightly-coupled entities) under streaming edge weight updates (resulting from a stream of user-generated content). This is the first work to study the efficient maintenance of dense subgraphs under such streaming edge weight updates. For a wide range of definitions of density, we derive theoretical results regarding the magnitude of change that a single edge weight update can cause. 
Based on these, we propose a novel algorithm, DynDens, which outperforms adaptations of existing techniques to this setting, and yields meaningful results. Our approach is validated by a thorough experimental evaluation on large-scale real and synthetic datasets.", + "link": "https://www.semanticscholar.org/paper/88f52892270974ce9673bdc57cb5bc3b33de7156", + "scraped_abstract": null, + "citation_best": 102 + }, + { + "paper": "1982139456", + "venue": "1135342153", + "year": "2012", + "title": "counting beyond a yottabyte or how sparql 1 1 property paths will prevent adoption of the standard", + "label": [ + "96956885", + "80444323", + "147497476", + "110893760", + "15657843", + "136134403", + "41009113" + ], + "author": [ + "2117861229", + "2225525137", + "2129336444" + ], + "reference": [ + "1809515864", + "1976580206", + "1984509356", + "1986058515", + "1989783863", + "2006912660", + "2008999657", + "2038368651", + "2088044688", + "2092512344", + "2101491706", + "2117657788", + "2126079271", + "2128478991", + "2131785201", + "2134826526", + "2135282325", + "2242459625", + "2294034341" + ], + "abstract": "sparql the standard query language for querying rdf provides only limited navigational functionalities although these features are of fundamental importance for graph data formats such as rdf this has led the w3c to include the property path feature in the upcoming version of the standard sparql 1 1 we tested several implementations of sparql 1 1 handling property path queries and we observed that their evaluation methods for this class of queries have a poor performance even in some very simple scenarios to formally explain this fact we conduct a theoretical study of the computational complexity of property paths evaluation our results imply that the poor performance of the tested implementations is not a problem of these particular systems but of the specification itself in fact we show that any implementation that adheres to the sparql 1 1 specification as of 
november 2011 is doomed to show the same behavior the key issue being the need for counting solutions imposed by the current specification we provide several intractability results that together with our empirical results provide strong evidence against the current semantics of sparql 1 1 property paths finally we put our results in perspective and propose a natural alternative semantics with tractable evaluation that we think may lead to a wide adoption of the language by practitioners developers and theoreticians", + "title_raw": "Counting beyond a Yottabyte, or how SPARQL 1.1 property paths will prevent adoption of the standard", + "abstract_raw": "SPARQL -the standard query language for querying RDF- provides only limited navigational functionalities, although these features are of fundamental importance for graph data formats such as RDF. This has led the W3C to include the property path feature in the upcoming version of the standard, SPARQL 1.1.We tested several implementations of SPARQL 1.1 handling property path queries, and we observed that their evaluation methods for this class of queries have a poor performance even in some very simple scenarios. To formally explain this fact, we conduct a theoretical study of the computational complexity of property paths evaluation. Our results imply that the poor performance of the tested implementations is not a problem of these particular systems, but of the specification itself. In fact, we show that any implementation that adheres to the SPARQL 1.1 specification (as of November 2011) is doomed to show the same behavior, the key issue being the need for counting solutions imposed by the current specification. We provide several intractability results, that together with our empirical results, provide strong evidence against the current semantics of SPARQL 1.1 property paths. 
Finally, we put our results in perspective, and propose a natural alternative semantics with tractable evaluation, that we think may lead to a wide adoption of the language by practitioners, developers and theoreticians.", + "link": "https://www.semanticscholar.org/paper/293700d335c1a842d29ecf1248dba0ea1569fcdd", + "scraped_abstract": null, + "citation_best": 123 + }, + { + "paper": "57159709", + "venue": "1184914352", + "year": "2011", + "title": "complexity of and algorithms for borda manipulation", + "label": [ + "154945302", + "179799912", + "87219788", + "11413529", + "80444323" + ], + "author": [ + "2250388414", + "1938023028", + "172841154", + "2156731892" + ], + "reference": [ + "155845106", + "1840860628", + "1972375916", + "1982864005", + "2000056145", + "2003296459", + "2035130077", + "2060522636", + "2063292248", + "2078664320", + "2100172689", + "2134628753", + "2963015243" + ], + "abstract": "we prove that it is np hard for a coalition of two manipulators to compute how to manipulate the borda voting rule this resolves one of the last open problems in the computational complexity of manipulating common voting rules because of this np hardness we treat computing a manipulation as an approximation problem where we try to minimize the number of manipulators based on ideas from bin packing and multiprocessor scheduling we propose two new approximation methods to compute manipulations of the borda rule experiments show that these methods significantly outperform the previous best known approximation method we are able to find optimal manipulations in almost all the randomly generated elections tested our results suggest that whilst computing a manipulation of the borda rule by a coalition is np hard computational complexity may provide only a weak barrier against manipulation in practice", + "title_raw": "Complexity of and algorithms for borda manipulation", + "abstract_raw": "We prove that it is NP-hard for a coalition of two manipulators to compute how 
to manipulate the Borda voting rule. This resolves one of the last open problems in the computational complexity of manipulating common voting rules. Because of this NP-hardness, we treat computing a manipulation as an approximation problem where we try to minimize the number of manipulators. Based on ideas from bin packing and multiprocessor scheduling, we propose two new approximation methods to compute manipulations of the Borda rule. Experiments show that these methods significantly outperform the previous best known approximation method. We are able to find optimal manipulations in almost all the randomly generated elections tested. Our results suggest that, whilst computing a manipulation of the Borda rule by a coalition is NP-hard, computational complexity may provide only a weak barrier against manipulation in practice.", + "link": "https://www.semanticscholar.org/paper/08d354d27463922ac5ae02f6f93f7eec98c40dd8", + "scraped_abstract": null, + "citation_best": 75 + }, + { + "paper": "2142523187", + "venue": "1188739475", + "year": "2011", + "title": "unsupervised part of speech tagging with bilingual graph based projections", + "label": [ + "28490314", + "136134403", + "204321447", + "2776960227", + "23224414" + ], + "author": [ + "2181087466", + "2344539050" + ], + "reference": [ + "22168010", + "1570013475", + "1598941623", + "1632114991", + "1709989312", + "1894835849", + "1909733559", + "2001283107", + "2006969979", + "2027979924", + "2038698865", + "2049633694", + "2051434435", + "2079442239", + "2091055880", + "2095758845", + "2102366626", + "2108622839", + "2131134557", + "2135843243", + "2139823104", + "2140460010", + "2151942821", + "2170986599", + "2569308312", + "2949259776" + ], + "abstract": "we describe a novel approach for inducing unsupervised part of speech taggers for languages that have no labeled training data but have translated text in a resource rich language our method does not assume any knowledge about the target language in 
particular no tagging dictionary is assumed making it applicable to a wide array of resource poor languages we use graph based label propagation for cross lingual knowledge transfer and use the projected labels as features in an unsupervised model berg kirkpatrick et al 2010 across eight european languages our approach results in an average absolute improvement of 10 4 over a state of the art baseline and 16 7 over vanilla hidden markov models induced with the expectation maximization algorithm", + "title_raw": "Unsupervised Part-of-Speech Tagging with Bilingual Graph-Based Projections", + "abstract_raw": "We describe a novel approach for inducing unsupervised part-of-speech taggers for languages that have no labeled training data, but have translated text in a resource-rich language. Our method does not assume any knowledge about the target language (in particular no tagging dictionary is assumed), making it applicable to a wide array of resource-poor languages. We use graph-based label propagation for cross-lingual knowledge transfer and use the projected labels as features in an unsupervised model (Berg-Kirkpatrick et al., 2010). 
Across eight European languages, our approach results in an average absolute improvement of 10.4% over a state-of-the-art baseline, and 16.7% over vanilla hidden Markov models induced with the Expectation Maximization algorithm.", + "link": "https://www.semanticscholar.org/paper/343733a063e491d234a36d3e1090a739318b3566", + "scraped_abstract": null, + "citation_best": 258 + }, + { + "paper": "2075779758", + "venue": "1163450153", + "year": "2011", + "title": "bricolage example based retargeting for web design", + "label": [ + "521306242", + "21959979", + "2780575108", + "107457646", + "22367795", + "136197465", + "130436687" + ], + "author": [ + "2226499346", + "2054455516", + "2423899502", + "261822931" + ], + "reference": [ + "296081679", + "632224533", + "1738124305", + "1803802947", + "1976373002", + "2008652694", + "2020254000", + "2053859636", + "2056708848", + "2071873073", + "2108730513", + "2115210820", + "2116787255", + "2117209866", + "2121600160", + "2122401044", + "2126135370", + "2133749427", + "2136384865", + "2143275903", + "2147152072", + "2151180068", + "2154583606", + "2161127713", + "2183671248", + "2338348343" + ], + "abstract": "the web provides a corpus of design examples unparalleled in human history however leveraging existing designs to produce new pages is often difficult this paper introduces the bricolage algorithm for transferring design and content between web pages bricolage employs a novel structured prediction technique that learns to create coherent mappings between pages by training on human generated exemplars the produced mappings are then used to automatically transfer the content from one page into the style and layout of another we show that bricolage can learn to accurately reproduce human page mappings and that it provides a general efficient and automatic technique for retargeting content between a variety of real web pages", + "title_raw": "Bricolage: example-based retargeting for web design", + "abstract_raw": "The Web 
provides a corpus of design examples unparalleled in human history. However, leveraging existing designs to produce new pages is often difficult. This paper introduces the Bricolage algorithm for transferring design and content between Web pages. Bricolage employs a novel, structured-prediction technique that learns to create coherent mappings between pages by training on human-generated exemplars. The produced mappings are then used to automatically transfer the content from one page into the style and layout of another. We show that Bricolage can learn to accurately reproduce human page mappings, and that it provides a general, efficient, and automatic technique for retargeting content between a variety of real Web pages.", + "link": "https://www.semanticscholar.org/paper/11254a1733077a6ffb11618d7eed259475e5bc76", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2022815033", + "venue": "1163450153", + "year": "2011", + "title": "mid air pan and zoom on wall sized displays", + "label": [ + "207347870", + "31972630", + "2777884278" + ], + "author": [ + "142005345", + "2222136494", + "36478446", + "166732564", + "1925449899" + ], + "reference": [ + "1552213365", + "1968367928", + "1973787390", + "1982673860", + "1993448335", + "1995571060", + "1995923922", + "2006284338", + "2009840077", + "2017763193", + "2025790386", + "2043935056", + "2058526973", + "2067870298", + "2069629500", + "2075809334", + "2090278973", + "2095956339", + "2096147482", + "2096269523", + "2110641362", + "2117043006", + "2124670211", + "2124781093", + "2128555886", + "2141293686", + "2142859885", + "2146269805", + "2151295100", + "2153818483", + "2156665908", + "2159262688", + "2159990547", + "2165500877", + "2166153889", + "2166172711" + ], + "abstract": "very high resolution wall sized displays offer new opportunities for interacting with large data sets while pointing on this type of display has been studied extensively higher level more complex tasks such as pan zoom 
navigation have received little attention it thus remains unclear which techniques are best suited to perform multiscale navigation in these environments building upon empirical data gathered from studies of pan and zoom on desktop computers and studies of remote pointing we identified three key factors for the design of mid air pan and zoom techniques uni vs bimanual interaction linear vs circular movements and level of guidance to accomplish the gestures in mid air after an extensive phase of iterative design and pilot testing we ran a controlled experiment aimed at better understanding the influence of these factors on task performance significant effects were obtained for all three factors bimanual interaction linear gestures and a high level of guidance resulted in significantly improved performance moreover the interaction effects among some of the dimensions suggest possible combinations for more complex real world tasks", + "title_raw": "Mid-air pan-and-zoom on wall-sized displays", + "abstract_raw": "Very-high-resolution wall-sized displays offer new opportunities for interacting with large data sets. While pointing on this type of display has been studied extensively, higher-level, more complex tasks such as pan-zoom navigation have received little attention. It thus remains unclear which techniques are best suited to perform multiscale navigation in these environments. Building upon empirical data gathered from studies of pan-and-zoom on desktop computers and studies of remote pointing, we identified three key factors for the design of mid-air pan-and-zoom techniques: uni- vs. bimanual interaction, linear vs. circular movements, and level of guidance to accomplish the gestures in mid-air. After an extensive phase of iterative design and pilot testing, we ran a controlled experiment aimed at better understanding the influence of these factors on task performance. 
Significant effects were obtained for all three factors: bimanual interaction, linear gestures and a high level of guidance resulted in significantly improved performance. Moreover, the interaction effects among some of the dimensions suggest possible combinations for more complex, real-world tasks.", + "link": "https://www.semanticscholar.org/paper/cc8a9f63d8ce9a8e0d7d7e995bc7648879f7fea3", + "scraped_abstract": null, + "citation_best": 202 + }, + { + "paper": "2153391061", + "venue": "1163450153", + "year": "2011", + "title": "why is my internet slow making network speeds visible", + "label": [ + "38652104", + "125599584", + "76155785", + "110875604", + "509933004" + ], + "author": [ + "2142378125", + "2627440545", + "2409146016", + "2227914243", + "2231863753", + "504046652" + ], + "reference": [ + "1560443422", + "1577622164", + "1658908529", + "1965480377", + "1968837709", + "1969593871", + "2020559432", + "2035125280", + "2067760434", + "2100195487", + "2104113460", + "2112387267", + "2118981997", + "2119791462", + "2139577842", + "2143712116", + "2150634379", + "2154825228", + "2161047461", + "2522273813" + ], + "abstract": "with widespread broadband adoption more households report experiencing sub optimal speeds not only are slow speeds frustrating they may indicate consumers are not receiving the services they are paying for from their internet service providers yet determining the speed and source of slow downs is difficult because few tools exist for broadband management we report on results of a field trial with 10 households using a visual network probe designed to address these problems we describe the results of the study and provide design implications for future tools more importantly we argue that tools like this can educate and empower consumers by making broadband speeds and sources of slow downs more visible", + "title_raw": "Why is my internet slow?: making network speeds visible", + "abstract_raw": "With widespread broadband adoption, more 
households report experiencing sub-optimal speeds. Not only are slow speeds frustrating, they may indicate consumers are not receiving the services they are paying for from their internet service providers. Yet, determining the speed and source of slow-downs is difficult because few tools exist for broadband management. We report on results of a field trial with 10 households using a visual network probe designed to address these problems. We describe the results of the study and provide design implications for future tools. More importantly, we argue that tools like this can educate and empower consumers by making broadband speeds and sources of slow-downs more visible.", + "link": "https://www.semanticscholar.org/paper/0c4302ac824cf7ad7b5eccfdd1aa03e3ec353e0f", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2146852151", + "venue": "1163450153", + "year": "2011", + "title": "synchronous interaction among hundreds an evaluation of a conference in an avatar based virtual environment", + "label": [ + "2777365542", + "25344961", + "107457646", + "49774154", + "178624890", + "2778477957" + ], + "author": [ + "2183602536", + "2285964534", + "2079158144", + "2214700285" + ], + "reference": [ + "106656158", + "1546427807", + "1595229305", + "1983688148", + "1985387291", + "1987235421", + "2000696692", + "2010744597", + "2015535370", + "2017737164", + "2030209391", + "2062728402", + "2064780509", + "2067589702", + "2078226637", + "2079644516", + "2080422783", + "2084548963", + "2100911854", + "2112937788", + "2566531461" + ], + "abstract": "this paper presents the first in depth evaluation of a large multi format virtual conference the conference took place in an avatar based 3d virtual world with spatialized audio and had keynote poster and social sessions we studied it by drawing on logs a survey and interviews with 30 participants we develop a model coalescence focused interaction remixing cofire of large synchronous interactions and use it to 
discuss how the technology supported or failed to support the interactions that are the raison d etre of conferences we conclude by discussing the prospects for such large virtual gatherings", + "title_raw": "Synchronous interaction among hundreds: an evaluation of a conference in an avatar-based virtual environment", + "abstract_raw": "This paper presents the first in-depth evaluation of a large multi-format virtual conference. The conference took place in an avatar-based 3D virtual world with spatialized audio, and had keynote, poster and social sessions. We studied it by drawing on logs, a survey and interviews with 30 participants. We develop a model - Coalescence, Focused Interaction, Remixing (CoFIRe) -- of large synchronous interactions, and use it to discuss how the technology supported, or failed to support, the interactions that are the raison d'etre of conferences. We conclude by discussing the prospects for such large virtual gatherings.", + "link": "https://www.semanticscholar.org/paper/852f2edd7a736da693c55d323e16b927367f3fe1", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2105697580", + "venue": "1163450153", + "year": "2011", + "title": "your noise is my command sensing gestures using the body as an antenna", + "label": [ + "153083717", + "107457646", + "207347870", + "113841659", + "21822782", + "186967261", + "45584529", + "2780226545" + ], + "author": [ + "2161192829", + "2105140892", + "2127286128", + "2168727892" + ], + "reference": [ + "1575597416", + "1879859518", + "1964357740", + "1969037571", + "1970070732", + "1972254508", + "2022981781", + "2058871194", + "2063812706", + "2098565684", + "2102597423", + "2114262264", + "2119766587", + "2122284941", + "2125547343", + "2130851966", + "2133990480", + "2141964067", + "2148513731", + "2158707444", + "2160281049", + "2161708246", + "2169709590", + "3128832010" + ], + "abstract": "touch sensing and computer vision have made human computer interaction possible in 
environments where keyboards mice or other handheld implements are not available or desirable however the high cost of instrumenting environments limits the ubiquity of these technologies particularly in home scenarios where cost constraints dominate installation decisions fortunately home environments frequently offer a signal that is unique to locations and objects within the home electromagnetic noise in this work we use the body as a receiving antenna and leverage this noise for gestural interaction we demonstrate that it is possible to robustly recognize touched locations on an uninstrumented home wall using no specialized sensors we conduct a series of experiments to explore the capabilities that this new sensing modality may offer specifically we show robust classification of gestures such as the position of discrete touches around light switches the particular light switch being touched which appliances are touched differentiation between hands as well as continuous proximity of hand to the switch among others we close by discussing opportunities limitations and future work", + "title_raw": "Your noise is my command: sensing gestures using the body as an antenna", + "abstract_raw": "Touch sensing and computer vision have made human-computer interaction possible in environments where keyboards, mice, or other handheld implements are not available or desirable. However, the high cost of instrumenting environments limits the ubiquity of these technologies, particularly in home scenarios where cost constraints dominate installation decisions. Fortunately, home environments frequently offer a signal that is unique to locations and objects within the home: electromagnetic noise. In this work, we use the body as a receiving antenna and leverage this noise for gestural interaction. We demonstrate that it is possible to robustly recognize touched locations on an uninstrumented home wall using no specialized sensors. 
We conduct a series of experiments to explore the capabilities that this new sensing modality may offer. Specifically, we show robust classification of gestures such as the position of discrete touches around light switches, the particular light switch being touched, which appliances are touched, differentiation between hands, as well as continuous proximity of hand to the switch, among others. We close by discussing opportunities, limitations, and future work.", + "link": "https://www.semanticscholar.org/paper/ca0c9bb3ccb54e7ad7bec1023cd1b9a28ab1791e", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2081964782", + "venue": "1163450153", + "year": "2011", + "title": "enhancing physicality in touch interaction with programmable friction", + "label": [ + "170130773", + "107457646", + "181321632", + "21442007", + "108265739", + "152086174", + "2778539339" + ], + "author": [ + "1990108045", + "2229007243", + "2105254560", + "2101644873", + "2215643027", + "2915492427", + "2045926442", + "253928778" + ], + "reference": [ + "1586442777", + "1993466468", + "2004403975", + "2004999150", + "2007321502", + "2014788716", + "2017881547", + "2018295629", + "2019922470", + "2020332181", + "2024421080", + "2055389565", + "2055465369", + "2056027583", + "2057243093", + "2082045139", + "2094361162", + "2097598502", + "2099305423", + "2104204062", + "2109580619", + "2111219801", + "2111644757", + "2122544819", + "2122909041", + "2125383398", + "2139015628", + "2141153660", + "2145364537", + "2161401111", + "2179427518", + "2737426679" + ], + "abstract": "touch interactions have refreshed some of the glowing enthusiasm of thirty years ago for direct manipulation interfaces however today s touch technologies whose interactions are supported by graphics sounds or crude clicks have a tactile sameness and gaps in usability we use a large area tactile pattern display latpad to examine design possibilities and outcomes when touch interactions are enhanced with 
variable surface friction in a series of four studies we first confirm that variable friction gives significant performance advantages in low level targeting activities we then explore the design space of variable friction interface controls and assess user reactions most importantly we demonstrate that variable friction can have a positive impact on the enjoyment engagement and sense of realism experienced by users of touch interfaces", + "title_raw": "Enhancing physicality in touch interaction with programmable friction", + "abstract_raw": "Touch interactions have refreshed some of the 'glowing enthusiasm' of thirty years ago for direct manipulation interfaces. However, today's touch technologies, whose interactions are supported by graphics, sounds or crude clicks, have a tactile sameness and gaps in usability. We use a Large Area Tactile Pattern Display (LATPaD) to examine design possibilities and outcomes when touch interactions are enhanced with variable surface friction. In a series of four studies, we first confirm that variable friction gives significant performance advantages in low-level targeting activities. We then explore the design space of variable friction interface controls and assess user reactions. 
Most importantly, we demonstrate that variable friction can have a positive impact on the enjoyment, engagement and sense of realism experienced by users of touch interfaces.", + "link": "https://www.semanticscholar.org/paper/00ab8e809434c381d986524cbe90e9cdedf6c60b", + "scraped_abstract": null, + "citation_best": 131 + }, + { + "paper": "2136691781", + "venue": "1163450153", + "year": "2011", + "title": "usable gestures for blind people understanding preference and performance", + "label": [ + "49774154", + "159437735", + "207347870" + ], + "author": [ + "2107871967", + "318516288", + "1505021651" + ], + "reference": [ + "45067439", + "119458596", + "1480985346", + "1514611945", + "1671946498", + "1697827404", + "1968311814", + "1974779790", + "1977874442", + "1987716396", + "1994610034", + "1999340665", + "2028251991", + "2041571037", + "2046254084", + "2067870229", + "2075965004", + "2092102827", + "2100019621", + "2111644757", + "2119036186", + "2122723787", + "2127639660", + "2128513346", + "2133382749", + "2133495565", + "2137664882", + "2160843249", + "2161304134" + ], + "abstract": "despite growing awareness of the accessibility issues surrounding touch screen use by blind people designers still face challenges when creating accessible touch screen interfaces one major stumbling block is a lack of understanding about how blind people actually use touch screens we conducted two user studies that compared how blind people and sighted people use touch screen gestures first we conducted a gesture elicitation study in which 10 blind and 10 sighted people invented gestures to perform common computing tasks on a tablet pc we found that blind people have different gesture preferences than sighted people including preferences for edge based gestures and gestures that involve tapping virtual keys on a keyboard second we conducted a performance study in which the same participants performed a set of reference gestures we found significant differences in the speed size 
and shape of gestures performed by blind people versus those performed by sighted people our results suggest new design guidelines for accessible touch screen interfaces", + "title_raw": "Usable gestures for blind people: understanding preference and performance", + "abstract_raw": "Despite growing awareness of the accessibility issues surrounding touch screen use by blind people, designers still face challenges when creating accessible touch screen interfaces. One major stumbling block is a lack of understanding about how blind people actually use touch screens. We conducted two user studies that compared how blind people and sighted people use touch screen gestures. First, we conducted a gesture elicitation study in which 10 blind and 10 sighted people invented gestures to perform common computing tasks on a tablet PC. We found that blind people have different gesture preferences than sighted people, including preferences for edge-based gestures and gestures that involve tapping virtual keys on a keyboard. Second, we conducted a performance study in which the same participants performed a set of reference gestures. We found significant differences in the speed, size, and shape of gestures performed by blind people versus those performed by sighted people. 
Our results suggest new design guidelines for accessible touch screen interfaces.", + "link": "https://www.semanticscholar.org/paper/9e93e21f6e300a80b870a31ed6c33e8c6ec2d81e", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2127473562", + "venue": "1163450153", + "year": "2011", + "title": "automics souvenir generating photoware for theme parks", + "label": [ + "201025465", + "49774154", + "33566652", + "186967261", + "136764020", + "2776321320" + ], + "author": [ + "2230371817", + "2080749038", + "2617703163", + "2088024608", + "2143204226", + "2038094089" + ], + "reference": [ + "17786220", + "79918094", + "142873208", + "1492599293", + "1556168886", + "1575662925", + "1982130100", + "1988345122", + "2016196091", + "2027003224", + "2040483337", + "2061130282", + "2094348137", + "2106314090", + "2111833643", + "2116783750", + "2132800627", + "2137923147", + "2155845839", + "2164303023", + "2164551073", + "2296297747" + ], + "abstract": "automics is a photo souvenir service which utilises mobile devices to support the capture sharing and annotation of digital images amongst groups of visitors to theme parks the prototype service mixes individual and group photo capture with existing in park on ride photo services to allow users to create printed photo stories herein we discuss initial fieldwork in theme parks that grounded the design of automics our development of the service prototype and its real world evaluation with theme park visitors we relate our findings on user experience of the service to a literature on mobile photoware finding implications for the design of souvenir services", + "title_raw": "Automics: souvenir generating photoware for theme parks", + "abstract_raw": "Automics is a photo-souvenir service which utilises mobile devices to support the capture, sharing and annotation of digital images amongst groups of visitors to theme parks. 
The prototype service mixes individual and group photo-capture with existing in-park, on-ride photo services, to allow users to create printed photo-stories. Herein we discuss initial fieldwork in theme parks that grounded the design of Automics, our development of the service prototype, and its real-world evaluation with theme park visitors. We relate our findings on user experience of the service to a literature on mobile photoware, finding implications for the design of souvenir services.", + "link": "https://www.semanticscholar.org/paper/e9ff89d18e904f8807a9906d66b10d5f169a9ccb", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2097331574", + "venue": "1163450153", + "year": "2011", + "title": "effects of community size and contact rate in synchronous social q a", + "label": [ + "173853756", + "136764020", + "31352089" + ], + "author": [ + "2096583854", + "2131647568", + "2309858515" + ], + "reference": [ + "8887994", + "106656158", + "1564939167", + "1673670951", + "1979549501", + "1983005620", + "1986074543", + "1995188862", + "2031484081", + "2037717074", + "2048089261", + "2051057301", + "2053371265", + "2058229508", + "2066590388", + "2071331323", + "2077802345", + "2083973186", + "2096158028", + "2096894832", + "2100359690", + "2105019053", + "2111141603", + "2116339812", + "2118553383", + "2126226055", + "2131019951", + "2135555017", + "2136056394", + "2144211451", + "2155048531", + "2158451634", + "2163881971" + ], + "abstract": "social question and answer q a involves the location of answers to questions through communication with people social q a systems such as mailing lists and web forums are popular but their asynchronous nature can lead to high answer latency synchronous q a systems facilitate real time dialog usually via instant messaging but face challenges with interruption costs and the availability of knowledgeable answerers at question time we ran a longitudinal study of a synchronous social q a system to investigate 
the effects of the rate with which potential answerers were contacted trading off time to answer against interruption cost and community size varying total number of members we found important differences in subjective and objective measures of system performance with these variations our findings help us understand the costs and benefits of varying contact rate and community size in synchronous social q a and inform system design for social q a", + "title_raw": "Effects of community size and contact rate in synchronous social q&a", + "abstract_raw": "Social question-and-answer (Q&A) involves the location of answers to questions through communication with people. Social Q&A systems, such as mailing lists and Web forums are popular, but their asynchronous nature can lead to high answer latency. Synchronous Q&A systems facilitate real-time dialog, usually via instant messaging, but face challenges with interruption costs and the availability of knowledgeable answerers at question time. We ran a longitudinal study of a synchronous social Q&A system to investigate the effects of the rate with which potential answerers were contacted (trading off time-to-answer against interruption cost) and community size (varying total number of members). We found important differences in subjective and objective measures of system performance with these variations. 
Our findings help us understand the costs and benefits of varying contact rate and community size in synchronous social Q&A, and inform system design for social Q&A.", + "link": "https://www.semanticscholar.org/paper/c33e210f9708cf32215386246c416acea6a41c48", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2153207410", + "venue": "1163450153", + "year": "2011", + "title": "review spotlight a user interface for summarizing user generated reviews using adjective noun word pairs", + "label": [ + "2777683214", + "21959979", + "136764020", + "89505385", + "121934690", + "170858558", + "23123220" + ], + "author": [ + "1932678941", + "2084699435", + "1973515586", + "2144340954" + ], + "reference": [ + "38739846", + "1612272928", + "1973406306", + "1989994111", + "1993032259", + "2027038623", + "2044429219", + "2050023720", + "2066595497", + "2090314594", + "2108105891", + "2114103606", + "2114461832", + "2115023510", + "2115035636", + "2115234940", + "2141631351", + "2150206717", + "2152448680", + "2155328222", + "2158512572", + "2160660844", + "2166706824", + "2167797769", + "3146306708" + ], + "abstract": "many people read online reviews written by other users to learn more about a product or venue however the overwhelming amount of user generated reviews and variance in length detail and quality across the reviews make it difficult to glean useful information in this paper we present the iterative design of our system called review spotlight it provides a brief overview of reviews using adjective noun word pairs and allows the user to quickly explore the reviews in greater detail through a laboratory user study which required participants to perform decision making tasks we showed that participants could form detailed impressions about restaurants and decide between two options significantly faster with review spotlight than with traditional review webpages", + "title_raw": "Review spotlight: a user interface for summarizing user-generated reviews 
using adjective-noun word pairs", + "abstract_raw": "Many people read online reviews written by other users to learn more about a product or venue. However, the overwhelming amount of user-generated reviews and variance in length, detail and quality across the reviews make it difficult to glean useful information. In this paper, we present the iterative design of our system, called Review Spotlight. It provides a brief overview of reviews using adjective-noun word pairs, and allows the user to quickly explore the reviews in greater detail. Through a laboratory user study which required participants to perform decision making tasks, we showed that participants could form detailed impressions about restaurants and decide between two options significantly faster with Review Spotlight than with traditional review webpages.", + "link": "/", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "1986692274", + "venue": "1163450153", + "year": "2011", + "title": "ease of juggling studying the effects of manual multitasking", + "label": [ + "170130773", + "107457646", + "186967261", + "133489148" + ], + "author": [ + "2969454835", + "120606590" + ], + "reference": [ + "20790251", + "46830337", + "58804484", + "1878594119", + "1964077143", + "1964656041", + "1980513660", + "1993448335", + "1997226678", + "2006380619", + "2027139744", + "2043143027", + "2052942348", + "2066467781", + "2067870298", + "2077773212", + "2078155644", + "2094267093", + "2096125945", + "2099287431", + "2099913748", + "2101445173", + "2118755902", + "2120076005", + "2124781093", + "2133855304", + "2149504281", + "2156665908", + "2162163122" + ], + "abstract": "everyday activities often involve using an interactive device while one is handling various other physical objects wallets bags doors pens mugs etc this paper presents the manual multitasking test a test with 12 conditions emulating manual demands of everyday multitasking situations it allows experimenters to expose the effects 
of design on manual flexibility users ability to reconfigure the sensorimotor control of arms hands and fingers in order to regain the high performance levels they experience when using the device on its own the test was deployed for pointing devices on laptops and qwerty keyboards of mobile devices in these studies we identified facilitative design features whose absence explains for example why the mouse and stylus function poorly in multi object performance the issue deserves more attention because interfaces that are nominally similar e g one handed input can vary dramatically in terms of ease of juggling", + "title_raw": "Ease of juggling: studying the effects of manual multitasking", + "abstract_raw": "Everyday activities often involve using an interactive device while one is handling various other physical objects (wallets, bags, doors, pens, mugs, etc.). This paper presents the Manual Multitasking Test, a test with 12 conditions emulating manual demands of everyday multitasking situations. It allows experimenters to expose the effects of design on \"manual flexibility\": users' ability to reconfigure the sensorimotor control of arms, hands, and fingers in order to regain the high performance levels they experience when using the device on its own. The test was deployed for pointing devices on laptops and Qwerty keyboards of mobile devices. In these studies, we identified facilitative design features whose absence explains, for example, why the mouse and stylus function poorly in multi-object performance. 
The issue deserves more attention, because interfaces that are nominally similar (e.g., \"one-handed input\") can vary dramatically in terms of \"ease of juggling\".", + "link": "https://www.semanticscholar.org/paper/1d285021a58b9636de54bf83a2c60d5d82b2540d", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2069121074", + "venue": "1150208541", + "year": "2011", + "title": "a polylogarithmic competitive algorithm for the k server problem", + "label": [ + "22684755", + "93996380", + "106516650", + "80444323", + "128669082" + ], + "author": [ + "2145779519", + "2306984392", + "2137713989", + "2125762281" + ], + "reference": [ + "1998843748", + "2012286291", + "2025874591", + "2033156334", + "2041571997", + "2041645394", + "2051069559", + "2054033098", + "2069255833", + "2070951974", + "2082099352", + "2082353536", + "2083392624", + "2086989193", + "2089998595", + "2114493937", + "2119565978", + "2134666295", + "2147541332", + "2150495379", + "2157650361", + "2161029651", + "2168823865", + "2571257893" + ], + "abstract": "we give the first polylogarithmic competitive randomized algorithm for the k server problem on an arbitrary finite metric space in particular our algorithm achieves a competitive ratio of o log 3 n log 2 k for any metric space on n points this improves upon the 2k 1 competitive algorithm of koutsoupias and papadimitriou j acm 1995 whenever n is sub exponential in k", + "title_raw": "A Polylogarithmic-Competitive Algorithm for the k-Server Problem", + "abstract_raw": "We give the first polylogarithmic-competitive randomized algorithm for the k-server problem on an arbitrary finite metric space. In particular, our algorithm achieves a competitive ratio of O~(log^3 n log^2 k) for any metric space on n points. This improves upon the (2k-1)-competitive algorithm of Koutsoupias and Papadimitriou (J. 
ACM 1995) whenever n is sub-exponential in k.", + "link": "https://www.semanticscholar.org/paper/cc5336660ac2f8e03a203ac9a1e3898cb8678e15", + "scraped_abstract": null, + "citation_best": 65 + }, + { + "paper": "2107220315", + "venue": "1199533187", + "year": "2011", + "title": "proving programs robust", + "label": [ + "138958017", + "98183937", + "97970142", + "23130292", + "160191386", + "11413529", + "97686452" + ], + "author": [ + "2153285360", + "310804771", + "38701517", + "2720023829" + ], + "reference": [ + "82594748", + "1511668635", + "1526045365", + "1526328753", + "1531018690", + "1552535138", + "1579164394", + "1602616602", + "1606406979", + "1611084195", + "1723675912", + "1791348790", + "2016524761", + "2097061283", + "2112391699", + "2114052185", + "2114423474", + "2114614628", + "2114703523", + "2115612093", + "2127261008", + "2131275552", + "2132661148", + "2133209493", + "2142883190", + "2144743033", + "2158681729", + "2165448367", + "2169239645", + "2505324119", + "2911978475" + ], + "abstract": "we present a program analysis for verifying quantitative robustness properties of programs stated generally as if the inputs of a program are perturbed by an arbitrary amount epsilon then its outputs change at most by k epsilon where k can depend on the size of the input but not its value robustness properties generalize the analytic notion of continuity e g while the function ex is continuous it is not robust our problem is to verify the robustness of a function p that is coded as an imperative program and can use diverse data types and features such as branches and loops our approach to the problem soundly decomposes it into two subproblems a verifying that the smallest possible perturbations to the inputs of p do not change the corresponding outputs significantly even if control now flows along a different control path and b verifying the robustness of the computation along each control flow path of p to solve the former subproblem we build on an 
existing method for verifying that a program encodes a continuous function 5 the latter is solved using a static analysis that bounds the magnitude of the slope of any function computed by a control flow path of p the outcome is a sound program analysis for robustness that uses proof obligations which do not refer to epsilon changes and can often be fully automated using off the shelf smt solvers we identify three application domains for our analysis first our analysis can be used to guarantee the predictable execution of embedded control software whose inputs come from physical sources and can suffer from error and uncertainty a guarantee of robustness ensures that the system does not react disproportionately to such uncertainty second our analysis is directly applicable to approximate computation and can be used to provide foundations for a recently proposed program approximation scheme called loop perforation a third application is in database privacy proofs of robustness of queries are essential to differential privacy the most popular notion of privacy for statistical databases", + "title_raw": "Proving programs robust", + "abstract_raw": "We present a program analysis for verifying quantitative robustness properties of programs, stated generally as: \"If the inputs of a program are perturbed by an arbitrary amount epsilon, then its outputs change at most by (K . epsilon), where K can depend on the size of the input but not its value.\" Robustness properties generalize the analytic notion of continuity---e.g., while the function ex is continuous, it is not robust. Our problem is to verify the robustness of a function P that is coded as an imperative program, and can use diverse data types and features such as branches and loops. 
Our approach to the problem soundly decomposes it into two subproblems: (a) verifying that the smallest possible perturbations to the inputs of P do not change the corresponding outputs significantly, even if control now flows along a different control path; and (b) verifying the robustness of the computation along each control-flow path of P. To solve the former subproblem, we build on an existing method for verifying that a program encodes a continuous function [5]. The latter is solved using a static analysis that bounds the magnitude of the slope of any function computed by a control flow path of P. The outcome is a sound program analysis for robustness that uses proof obligations which do not refer to epsilon-changes and can often be fully automated using off-the-shelf SMT-solvers. We identify three application domains for our analysis. First, our analysis can be used to guarantee the predictable execution of embedded control software, whose inputs come from physical sources and can suffer from error and uncertainty. A guarantee of robustness ensures that the system does not react disproportionately to such uncertainty. Second, our analysis is directly applicable to approximate computation, and can be used to provide foundations for a recently-proposed program approximation scheme called {loop perforation}. 
A third application is in database privacy: proofs of robustness of queries are essential to differential privacy, the most popular notion of privacy for statistical databases.", + "link": "https://www.semanticscholar.org/paper/df219af33c5d4f41f28513570176692cadaef1ad", + "scraped_abstract": null, + "citation_best": 139 + }, + { + "paper": "2167626029", + "venue": "1199533187", + "year": "2011", + "title": "proactive detection of collaboration conflicts", + "label": [ + "2775924081", + "38652104", + "199519371", + "2781285689" + ], + "author": [ + "2108853658", + "2260674364", + "2235702021", + "477007165" + ], + "reference": [ + "1579672335", + "1974532567", + "1997493115", + "2001907377", + "2019817607", + "2028168446", + "2031627901", + "2036332715", + "2049756221", + "2057092702", + "2061456028", + "2064625489", + "2088663656", + "2096800101", + "2100144432", + "2100849134", + "2105300539", + "2116525104", + "2116821290", + "2121668868", + "2122230059", + "2123802152", + "2124666592", + "2125981604", + "2126101048", + "2128465105", + "2135295137", + "2138482003", + "2147018965", + "2147397672", + "2149008088", + "2153956429", + "2154007683", + "2161946348", + "2183088835", + "3083067256" + ], + "abstract": "collaborative development can be hampered when conflicts arise because developers have inconsistent copies of a shared project we present an approach to help developers identify and resolve conflicts early before those conflicts become severe and before relevant changes fade away in the developers memories this paper presents three results first a study of open source systems establishes that conflicts are frequent persistent and appear not only as overlapping textual edits but also as subsequent build and test failures the study spans nine open source systems totaling 3 4 million lines of code our conflict data is derived from 550 000 development versions of the systems second using previously unexploited information we precisely diagnose important classes 
of conflicts using the novel technique of speculative analysis over version control operations third we describe the design of crystal a publicly available tool that uses speculative analysis to make concrete advice unobtrusively available to developers helping them identify manage and prevent conflicts", + "title_raw": "Proactive detection of collaboration conflicts", + "abstract_raw": "Collaborative development can be hampered when conflicts arise because developers have inconsistent copies of a shared project. We present an approach to help developers identify and resolve conflicts early, before those conflicts become severe and before relevant changes fade away in the developers' memories. This paper presents three results. First, a study of open-source systems establishes that conflicts are frequent, persistent, and appear not only as overlapping textual edits but also as subsequent build and test failures. The study spans nine open-source systems totaling 3.4 million lines of code; our conflict data is derived from 550,000 development versions of the systems. Second, using previously-unexploited information, we precisely diagnose important classes of conflicts using the novel technique of speculative analysis over version control operations. 
Third, we describe the design of Crystal, a publicly-available tool that uses speculative analysis to make concrete advice unobtrusively available to developers, helping them identify, manage, and prevent conflicts.", + "link": "https://www.semanticscholar.org/paper/8d99452a2d4212cede2fbd630bb3d3869c565de0", + "scraped_abstract": null, + "citation_best": 202 + }, + { + "paper": "2294130536", + "venue": "1164975091", + "year": "2011", + "title": "relative attributes", + "label": [ + "119857082", + "189430467", + "12692103", + "167966045", + "64729616", + "5274069", + "197927960" + ], + "author": [ + "2223275083", + "2239304286" + ], + "reference": [ + "1566135517", + "1992454046", + "1996309403", + "1998197760", + "2047221353", + "2051834357", + "2076291620", + "2080171500", + "2084435358", + "2098411764", + "2108862644", + "2129156852", + "2137675680", + "2147898188", + "2149427297", + "2534114659", + "2536626143", + "3143107425" + ], + "abstract": "human nameable visual attributes can benefit various recognition tasks however existing techniques restrict these properties to categorical labels for example a person is smiling or not a scene is dry or not and thus fail to capture more general semantic relationships we propose to model relative attributes given training data stating how object scene categories relate according to different attributes we learn a ranking function per attribute the learned ranking functions predict the relative strength of each property in novel images we then build a generative model over the joint space of attribute ranking outputs and propose a novel form of zero shot learning in which the supervisor relates the unseen object category to previously seen objects via attributes for example bears are furrier than giraffes we further show how the proposed relative attributes enable richer textual descriptions for new images which in practice are more precise for human interpretation we demonstrate the approach on datasets of faces and 
natural scenes and show its clear advantages over traditional binary attribute prediction for these new tasks", + "title_raw": "Relative attributes", + "abstract_raw": "Human-nameable visual \u201cattributes\u201d can benefit various recognition tasks. However, existing techniques restrict these properties to categorical labels (for example, a person is \u2018smiling\u2019 or not, a scene is \u2018dry\u2019 or not), and thus fail to capture more general semantic relationships. We propose to model relative attributes. Given training data stating how object/scene categories relate according to different attributes, we learn a ranking function per attribute. The learned ranking functions predict the relative strength of each property in novel images. We then build a generative model over the joint space of attribute ranking outputs, and propose a novel form of zero-shot learning in which the supervisor relates the unseen object category to previously seen objects via attributes (for example, \u2018bears are furrier than giraffes\u2019). We further show how the proposed relative attributes enable richer textual descriptions for new images, which in practice are more precise for human interpretation. 
We demonstrate the approach on datasets of faces and natural scenes, and show its clear advantages over traditional binary attribute prediction for these new tasks.", + "link": "https://www.semanticscholar.org/paper/23e568fcf0192e4ff5e6bed7507ee5b9e6c43598", + "scraped_abstract": null, + "citation_best": 903 + }, + { + "paper": "2963584844", + "venue": "1180662882", + "year": "2011", + "title": "computational rationalization the inverse equilibrium problem", + "label": [ + "9679016", + "115988155", + "50817715" + ], + "author": [ + "2116032864", + "2074347517", + "217392467" + ], + "reference": [ + "1590693676", + "1963649406", + "1975463331", + "1978907431", + "1990199628", + "1999874108", + "2032558547", + "2061562262", + "2069317438", + "2082764616", + "2097160297", + "2098774185", + "2109100253", + "2115450697", + "2132339352", + "2147544021", + "2160842254", + "2169498096", + "2605898369", + "3122590067" + ], + "abstract": "modeling the purposeful behavior of imperfect agents from a small number of observations is a challenging task when restricted to the single agent decision theoretic setting inverse optimal control techniques assume that observed behavior is an approximately optimal solution to an unknown decision problem these techniques learn a utility function that explains the example behavior and can then be used to accurately predict or imitate future behavior in similar observed or unobserved situations in this work we consider similar tasks in competitive and cooperative multi agent domains here unlike single agent settings a player cannot myopically maximize its reward it must speculate on how the other agents may act to influence the game s outcome employing the game theoretic notion of regret and the principle of maximum entropy we introduce a technique for predicting and generalizing behavior as well as recovering a reward function in these domains", + "title_raw": "Computational Rationalization: The Inverse Equilibrium Problem", + 
"abstract_raw": "Modeling the purposeful behavior of imperfect agents from a small number of observations is a challenging task. When restricted to the single-agent decision-theoretic setting, inverse optimal control techniques assume that observed behavior is an approximately optimal solution to an unknown decision problem. These techniques learn a utility function that explains the example behavior and can then be used to accurately predict or imitate future behavior in similar observed or unobserved situations.\r\n\r\nIn this work, we consider similar tasks in competitive and cooperative multi-agent domains. Here, unlike single-agent settings, a player cannot myopically maximize its reward \u2014 it must speculate on how the other agents may act to influence the game's outcome. Employing the game-theoretic notion of regret and the principle of maximum entropy, we introduce a technique for predicting and generalizing behavior, as well as recovering a reward function in these domains.", + "link": "https://www.semanticscholar.org/paper/85402ce171bc0ab960b0e50d9735ddb1cfcce64a", + "scraped_abstract": null, + "citation_best": 20 + }, + { + "paper": "2111765806", + "venue": "1174403976", + "year": "2011", + "title": "run time efficient probabilistic model checking", + "label": [ + "49937458", + "110251889", + "40398212", + "111498074", + "149091818", + "2777904410", + "117447612", + "30888246" + ], + "author": [ + "977474451", + "2233149120", + "299671335" + ], + "reference": [ + "125598877", + "1498432697", + "1516256348", + "1600556058", + "1821563272", + "1947095868", + "1965673742", + "1972203711", + "1972332180", + "1977832770", + "1994840943", + "2010798306", + "2015699956", + "2071616717", + "2075813176", + "2102904625", + "2120549308", + "2133859873", + "2134991157", + "2138178819", + "2146416787", + "2148918736", + "2149614712", + "2161330625", + "2164096878", + "2164501290", + "2410243683", + "2583833536", + "2751555667", + "2799087070", + "2913459036" + ], 
+ "abstract": "unpredictable changes continuously affect software systems and may have a severe impact on their quality of service potentially jeopardizing the system s ability to meet the desired requirements changes may occur in critical components of the system clients operational profiles requirements or deployment environments the adoption of software models and model checking techniques at run time may support automatic reasoning about such changes detect harmful configurations and potentially enable appropriate self reactions however traditional model checking techniques and tools may not be simply applied as they are at run time since they hardly meet the constraints imposed by on the fly analysis in terms of execution time and memory occupation this paper precisely addresses this issue and focuses on reliability models given in terms of discrete time markov chains and probabilistic model checking it develops a mathematical framework for run time probabilistic model checking that given a reliability model and a set of requirements statically generates a set of expressions which can be efficiently used at run time to verify system requirements an experimental comparison of our approach with existing probabilistic model checkers shows its practical applicability in run time verification", + "title_raw": "Run-time efficient probabilistic model checking", + "abstract_raw": "Unpredictable changes continuously affect software systems and may have a severe impact on their quality of service, potentially jeopardizing the system's ability to meet the desired requirements. Changes may occur in critical components of the system, clients' operational profiles, requirements, or deployment environments. The adoption of software models and model checking techniques at run time may support automatic reasoning about such changes, detect harmful configurations, and potentially enable appropriate (self-)reactions. 
However, traditional model checking techniques and tools may not be simply applied as they are at run time, since they hardly meet the constraints imposed by on-the-fly analysis, in terms of execution time and memory occupation. This paper precisely addresses this issue and focuses on reliability models, given in terms of Discrete Time Markov Chains, and probabilistic model checking. It develops a mathematical framework for run-time probabilistic model checking that, given a reliability model and a set of requirements, statically generates a set of expressions, which can be efficiently used at run-time to verify system requirements. An experimental comparison of our approach with existing probabilistic model checkers shows its practical applicability in run-time verification.", + "link": "https://www.semanticscholar.org/paper/9b1a1cadafef8450c0e5667b280375d2bd748f6a", + "scraped_abstract": null, + "citation_best": 191 + }, + { + "paper": "2119142490", + "venue": "1174403976", + "year": "2011", + "title": "verifying multi threaded software using smt based context bounded model checking", + "label": [ + "110251889", + "41138395", + "164155591", + "152062344", + "10784920", + "111498074", + "199360897", + "173608175" + ], + "author": [ + "2117275544", + "2616912264" + ], + "reference": [ + "64150172", + "125598877", + "301824129", + "1480909796", + "1493367105", + "1498432697", + "1502963625", + "1549714654", + "1560743762", + "1562679818", + "1782479956", + "1836483134", + "1901085887", + "1981298033", + "2054903937", + "2084330491", + "2087046173", + "2087771409", + "2106892818", + "2117587600", + "2128453996", + "2129538349", + "2135948849", + "2143502475", + "2162339514", + "2163477945", + "2401003083", + "3145128584" + ], + "abstract": "we describe and evaluate three approaches to model check multi threaded software with shared variables and locks using bounded model checking based on satisfiability modulo theories smt and our modelling of the synchronization 
primitives of the pthread library in the lazy approach we generate all possible interleavings and call the smt solver on each of them individually until we either find a bug or have systematically explored all interleavings in the schedule recording approach we encode all possible interleavings into one single formula and then exploit the high speed of the smt solvers in the underapproximation and widening approach we reduce the state space by abstracting the number of interleavings from the proofs of unsatisfiability generated by the smt solvers in all three approaches we bound the number of context switches allowed among threads in order to reduce the number of interleavings explored we implemented these approaches in esbmc our smt based bounded model checker for ansi c programs our experiments show that esbmc can analyze larger problems and substantially reduce the verification time compared to state of the art techniques that use iterative context bounding algorithms or counter example guided abstraction refinement", + "title_raw": "Verifying multi-threaded software using smt-based context-bounded model checking", + "abstract_raw": "We describe and evaluate three approaches to model check multi-threaded software with shared variables and locks using bounded model checking based on Satisfiability Modulo Theories (SMT) and our modelling of the synchronization primitives of the Pthread library. In the lazy approach, we generate all possible interleavings and call the SMT solver on each of them individually, until we either find a bug, or have systematically explored all interleavings. In the schedule recording approach, we encode all possible interleavings into one single formula and then exploit the high speed of the SMT solvers. In the underapproximation and widening approach, we reduce the state space by abstracting the number of interleavings from the proofs of unsatisfiability generated by the SMT solvers. 
In all three approaches, we bound the number of context switches allowed among threads in order to reduce the number of interleavings explored. We implemented these approaches in ESBMC, our SMT-based bounded model checker for ANSI-C programs. Our experiments show that ESBMC can analyze larger problems and substantially reduce the verification time compared to state-of-the-art techniques that use iterative context-bounding algorithms or counter-example guided abstraction refinement.", + "link": "https://www.semanticscholar.org/paper/2c8a30dc6f3ac66522609349c40542a7fb4afdb8", + "scraped_abstract": null, + "citation_best": 131 + }, + { + "paper": "2106860490", + "venue": "1174403976", + "year": "2011", + "title": "programs tests and oracles the foundations of testing revisited", + "label": [ + "2777904410", + "55166926" + ], + "author": [ + "2227343783", + "1950713449", + "2001238367" + ], + "reference": [ + "97502223", + "1508279398", + "1527688737", + "1583551519", + "1885303164", + "1975788482", + "1980574120", + "1999785779", + "2006159725", + "2023730632", + "2027106228", + "2033140592", + "2041027758", + "2042492244", + "2045824418", + "2049695835", + "2055647675", + "2090606154", + "2093850391", + "2095710561", + "2099855131", + "2110441383", + "2111038766", + "2112687552", + "2114503251", + "2117963349", + "2119249385", + "2125831829", + "2126627822", + "2134691366", + "2136043644", + "2143251912", + "2143712427", + "2145071552", + "2145867204", + "2150288151", + "2151549581", + "2152949369", + "2154897437", + "2155680523", + "2156313607", + "2157755550", + "2159901505", + "2169249706", + "2171441042", + "2172253171", + "2601354821", + "3151223194" + ], + "abstract": "in previous decades researchers have explored the formal foundations of program testing by exploring the foundations of testing largely separate from any specific method of testing these researchers provided a general discussion of the testing process including the goals the underlying problems 
and the limitations of testing unfortunately a common rigorous foundation has not been widely adopted in empirical software testing research making it difficult to generalize and compare empirical research we continue this foundational work providing a framework intended to serve as a guide for future discussions and empirical studies concerning software testing specifically we extend gourlay s functional description of testing with the notion of a test oracle an aspect of testing largely overlooked in previous foundational work and only lightly explored in general we argue additional work exploring the interrelationship between programs tests and oracles should be performed and use our extension to clarify concepts presented in previous work present new concepts related to test oracles and demonstrate that oracle selection must be considered when discussing the efficacy of a testing process", + "title_raw": "Programs, tests, and oracles: the foundations of testing revisited", + "abstract_raw": "In previous decades, researchers have explored the formal foundations of program testing. By exploring the foundations of testing largely separate from any specific method of testing, these researchers provided a general discussion of the testing process, including the goals, the underlying problems, and the limitations of testing. Unfortunately, a common, rigorous foundation has not been widely adopted in empirical software testing research, making it difficult to generalize and compare empirical research. We continue this foundational work, providing a framework intended to serve as a guide for future discussions and empirical studies concerning software testing. Specifically, we extend Gourlay's functional description of testing with the notion of a test oracle, an aspect of testing largely overlooked in previous foundational work and only lightly explored in general. 
We argue additional work exploring the interrelationship between programs, tests, and oracles should be performed, and use our extension to clarify concepts presented in previous work, present new concepts related to test oracles, and demonstrate that oracle selection must be considered when discussing the efficacy of a testing process.", + "link": "https://www.semanticscholar.org/paper/2dbb1ed731459c1c2511dadc976941c16665d767", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2116907335", + "venue": "1174403976", + "year": "2011", + "title": "on demand feature recommendations derived from mining public product descriptions", + "label": [ + "101814296", + "49937458", + "40608802", + "557471498", + "71472368", + "124101348", + "73555534", + "52622490", + "2777904410", + "193524817", + "15708719" + ], + "author": [ + "2498412313", + "2030548913", + "2058044039", + "231137837", + "1892801027", + "1240528779", + "1075776992" + ], + "reference": [ + "153250912", + "281665770", + "1484413656", + "1506285740", + "1530276735", + "1571254960", + "1574241439", + "1576674196", + "1577699866", + "1587775262", + "1790954942", + "1971040550", + "1980122825", + "1996283866", + "2040810008", + "2049961680", + "2054141820", + "2057293921", + "2078663894", + "2099213660", + "2100742239", + "2109820570", + "2114353347", + "2125247206", + "2145360759", + "2151883549", + "2157356427", + "2163351999", + "2166559705", + "2167700889" + ], + "abstract": "we present a recommender system that models and recommends product features for a given domain our approach mines product descriptions from publicly available online specifications utilizes text mining and a novel incremental diffusive clustering algorithm to discover domain specific features generates a probabilistic feature model that represents commonalities variants and cross category features and then uses association rule mining and the k nearest neighbor machine learning strategy to generate product specific 
feature recommendations our recommender system supports the relatively labor intensive task of domain analysis potentially increasing opportunities for re use reducing time to market and delivering more competitive software products the approach is empirically validated against 20 different product categories using thousands of product descriptions mined from a repository of free software applications", + "title_raw": "On-demand feature recommendations derived from mining public product descriptions", + "abstract_raw": "We present a recommender system that models and recommends product features for a given domain. Our approach mines product descriptions from publicly available online specifications, utilizes text mining and a novel incremental diffusive clustering algorithm to discover domain-specific features, generates a probabilistic feature model that represents commonalities, variants, and cross-category features, and then uses association rule mining and the k-Nearest-Neighbor machine learning strategy to generate product specific feature recommendations. Our recommender system supports the relatively labor-intensive task of domain analysis, potentially increasing opportunities for re-use, reducing time-to-market, and delivering more competitive software products. 
The approach is empirically validated against 20 different product categories using thousands of product descriptions mined from a repository of free software applications.", + "link": "https://www.semanticscholar.org/paper/7327ce62fa3e2f1def67c9ad2e17eff1d105fec2", + "scraped_abstract": null, + "citation_best": 142 + }, + { + "paper": "2162439064", + "venue": "1174403976", + "year": "2011", + "title": "configuring global software teams a multi company analysis of project productivity quality and profits", + "label": [ + "36183442", + "2777904410", + "56739046" + ], + "author": [ + "1765021856", + "2169789338", + "2403481355", + "202682525" + ], + "reference": [ + "99742719", + "1487725643", + "1529921276", + "1972160098", + "2026414324", + "2037106942", + "2045571829", + "2046434103", + "2052937628", + "2067254283", + "2070527651", + "2091020207", + "2102234277", + "2112389050", + "2115918232", + "2117369957", + "2127369273", + "2134906666", + "2141883955", + "2143087273", + "2163643791", + "2167514255", + "2325530759", + "2911311425", + "3012413987", + "3121143339", + "3125342462" + ], + "abstract": "in this paper we examined the impact of project level configurational choices of globally distributed software teams on project productivity quality and profits our analysis used data from 362 projects of four different firms these projects spanned a wide range of programming languages application domain process choices and development sites spread over 15 countries and 5 continents our analysis revealed fundamental tradeoffs in choosing configurational choices that are optimized for productivity quality and or profits in particular achieving higher levels of productivity and quality require diametrically opposed configurational choices in addition creating imbalances in the expertise and personnel distribution of project teams significantly helps increase profit margins however a profit oriented imbalance could also significantly affect productivity and or quality 
outcomes analyzing these complex tradeoffs we provide actionable managerial insights that can help software firms and their clients choose configurations that achieve desired project outcomes in globally distributed software development", + "title_raw": "Configuring global software teams: a multi-company analysis of project productivity, quality, and profits", + "abstract_raw": "In this paper, we examined the impact of project-level configurational choices of globally distributed software teams on project productivity, quality, and profits. Our analysis used data from 362 projects of four different firms. These projects spanned a wide range of programming languages, application domain, process choices, and development sites spread over 15 countries and 5 continents. Our analysis revealed fundamental tradeoffs in choosing configurational choices that are optimized for productivity, quality, and/or profits. In particular, achieving higher levels of productivity and quality require diametrically opposed configurational choices. In addition, creating imbalances in the expertise and personnel distribution of project teams significantly helps increase profit margins. However, a profit-oriented imbalance could also significantly affect productivity and/or quality outcomes. 
Analyzing these complex tradeoffs, we provide actionable managerial insights that can help software firms and their clients choose configurations that achieve desired project outcomes in globally distributed software development.", + "link": "https://scholar.google.com/scholar?as_q=&num=10&btnG=Search+Scholar&as_epq=Configuring+Global+Software+Teams:+A+Multi-Company+Analysis+of+Project+Productivity,+Quality,+and+Profits&as_oq=&as_eq=&as_occt=any&as_sauthors=Ramasubbu", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "89766480", + "venue": "1203999783", + "year": "2011", + "title": "nested rollout policy adaptation for monte carlo tree search", + "label": [ + "101056560", + "46149586", + "137955351", + "137836250", + "153258448" + ], + "author": [ + "2303400247" + ], + "reference": [ + "164549", + "26839411", + "43710935", + "84074851", + "1500868819", + "1540706608", + "1588577747", + "1824805769", + "1954898856", + "2020135152", + "2026747854", + "2028315154", + "2108464053", + "2128547596", + "2135997697", + "2137509429", + "2154291838", + "2171084228", + "2414365832", + "2914215521" + ], + "abstract": "monte carlo tree search mcts methods have had recent success in games planning and optimization mcts uses results from rollouts to guide search a rollout is a path that descends the tree with a randomized decision at each ply until reaching a leaf mcts results can be strongly influenced by the choice of appropriate policy to bias the rollouts most previous work on mcts uses static uniform random or domain specific policies we describe a new mcts method that dynamically adapts the rollout policy during search in deterministic optimization problems our starting point is cazenave s original nested monte carlo search nmcs but rather than navigating the tree directly we instead use gradient ascent on the rollout policy at each level of the nested search we benchmark this new nested rollout policy adaptation nrpa algorithm and examine its behavior 
our test problems are instances of crossword puzzle construction and morpion solitaire over moderate time scales nrpa can substantially improve search efficiency compared to nmcs and over longer time scales nrpa improves upon all previous published solutions for the test problems results include a new morpion solitaire solution that improves upon the previous human generated record that had stood for over 30 years", + "title_raw": "Nested rollout policy adaptation for Monte Carlo tree search", + "abstract_raw": "Monte Carlo tree search (MCTS) methods have had recent success in games, planning, and optimization. MCTS uses results from rollouts to guide search; a rollout is a path that descends the tree with a randomized decision at each ply until reaching a leaf. MCTS results can be strongly influenced by the choice of appropriate policy to bias the rollouts. Most previous work on MCTS uses static uniform random or domain-specific policies. We describe a new MCTS method that dynamically adapts the rollout policy during search, in deterministic optimization problems. Our starting point is Cazenave's original Nested Monte Carlo Search (NMCS), but rather than navigating the tree directly we instead use gradient ascent on the rollout policy at each level of the nested search. We benchmark this new Nested Rollout Policy Adaptation (NRPA) algorithm and examine its behavior. Our test problems are instances of Crossword Puzzle Construction and Morpion Solitaire. Over moderate time scales NRPA can substantially improve search efficiency compared to NMCS, and over longer time scales NRPA improves upon all previous published solutions for the test problems. 
Results include a new Morpion Solitaire solution that improves upon the previous human-generated record that had stood for over 30 years.", + "link": "https://www.semanticscholar.org/paper/64004549770b2dad4478331c8555ec25e9589ad1", + "scraped_abstract": null, + "citation_best": 100 + }, + { + "paper": "2044879407", + "venue": "1130985203", + "year": "2011", + "title": "leakage in data mining formulation detection and avoidance", + "label": [ + "1668388", + "124101348", + "2522767166" + ], + "author": [ + "2156462086", + "2129867074", + "164824025" + ], + "reference": [ + "46790137", + "124884934", + "1501359788", + "1506509530", + "1530010412", + "1554944419", + "1976645306", + "2022775778", + "2063014187", + "2073404525", + "2099085089", + "2111322878", + "2126371452", + "2140937627", + "2144620969", + "2319794630", + "2951118224", + "3122118888" + ], + "abstract": "deemed one of the top ten data mining mistakes leakage is essentially the introduction of information about the data mining target which should not be legitimately available to mine from in addition to our own industry experience with real life projects controversies around several major public data mining competitions held recently such as the informs 2010 data mining challenge and the ijcnn 2011 social network challenge are evidence that this issue is as relevant today as it has ever been while acknowledging the importance and prevalence of leakage in both synthetic competitions and real life data mining projects existing literature has largely left this idea unexplored what little has been said turns out not to be broad enough to cover more complex cases of leakage such as those where the classical i i d assumption is violated that have been recently documented in our new approach these cases and others are explained by explicitly defining modeling goals and analyzing the broader framework of the data mining problem the resulting definition enables us to derive general methodology for dealing with 
the issue we show that it is possible to avoid leakage with a simple specific approach to data management followed by what we call a learn predict separation and present several ways of detecting leakage when the modeler has no control over how the data have been collected", + "title_raw": "Leakage in data mining: formulation, detection, and avoidance", + "abstract_raw": "Deemed \"one of the top ten data mining mistakes\", leakage is essentially the introduction of information about the data mining target, which should not be legitimately available to mine from. In addition to our own industry experience with real-life projects, controversies around several major public data mining competitions held recently such as the INFORMS 2010 Data Mining Challenge and the IJCNN 2011 Social Network Challenge are evidence that this issue is as relevant today as it has ever been. While acknowledging the importance and prevalence of leakage in both synthetic competitions and real-life data mining projects, existing literature has largely left this idea unexplored. What little has been said turns out not to be broad enough to cover more complex cases of leakage, such as those where the classical i.i.d. assumption is violated, that have been recently documented. In our new approach, these cases and others are explained by explicitly defining modeling goals and analyzing the broader framework of the data mining problem. The resulting definition enables us to derive general methodology for dealing with the issue. 
We show that it is possible to avoid leakage with a simple specific approach to data management followed by what we call a learn-predict separation, and present several ways of detecting leakage when the modeler has no control over how the data have been collected.", + "link": "https://www.semanticscholar.org/paper/381de4becac0910d1a74c905a3d579dda3571dbd", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2138961375", + "venue": "1123349196", + "year": "2011", + "title": "e mili energy minimizing idle listening in wireless networks", + "label": [ + "31258907", + "158379750", + "192220659", + "171115542", + "113200698", + "108037233", + "79403827" + ], + "author": [ + "2240635727", + "2160067404" + ], + "reference": [ + "1502707958", + "1545693521", + "1573422257", + "1583390654", + "1591310741", + "1964678824", + "1974910195", + "2044993311", + "2057923864", + "2077687428", + "2089712705", + "2096286101", + "2102975076", + "2106048836", + "2125562505", + "2126495708", + "2128356550", + "2133339421", + "2135505702", + "2137096913", + "2137572197", + "2150039965", + "2150135189", + "2154967595", + "2156920786", + "2157936671", + "2163742434", + "2167330318" + ], + "abstract": "wifi interface is known to be a primary energy consumer in mobile devices and idle listening il is the dominant source of energy consumption in wifi most existing protocols such as the 802 11 power saving mode psm attempt to reduce the time spent in il by sleep scheduling however through an extensive analysis of real world traffic we found more than 60 of energy is consumed in il even with psm enabled to remedy this problem we propose e mili energy minimizing idle listening that reduces the power consumption in il given that the time spent in il has already been optimized by sleep scheduling observing that radio power consumption decreases proportionally to its clock rate e mili adaptively downclocks the radio during il and reverts to full clock rate when an incoming packet 
is detected or a packet has to be transmitted e mili incorporates sampling rate invariant detection ensuring accurate packet detection and address filtering even when the receiver s sampling clock rate is much lower than the signal bandwidth further it employs an opportunistic downclocking mechanism to optimize the efficiency of switching clock rate based on a simple interface to existing mac layer scheduling protocols we have implemented e mili on the usrp software radio platform our experimental evaluation shows that e mili can detect packets with close to 100 accuracy even with downclocking by a factor of 16 when integrated with 802 11 e mili can reduce energy consumption by around 44 for 92 of users in real world wireless networks", + "title_raw": "E-MiLi: energy-minimizing idle listening in wireless networks", + "abstract_raw": "WiFi interface is known to be a primary energy consumer in mobile devices, and idle listening (IL) is the dominant source of energy consumption in WiFi. Most existing protocols, such as the 802.11 power-saving mode (PSM), attempt to reduce the time spent in IL by sleep scheduling. However, through an extensive analysis of real-world traffic, we found more than 60% of energy is consumed in IL, even with PSM enabled. To remedy this problem, we propose E-MiLi (Energy-Minimizing idle Listening) that reduces the power consumption in IL, given that the time spent in IL has already been optimized by sleep scheduling. Observing that radio power consumption decreases proportionally to its clock-rate, E-MiLi adaptively downclocks the radio during IL, and reverts to full clock-rate when an incoming packet is detected or a packet has to be transmitted. E-MiLi incorporates sampling rate invariant detection, ensuring accurate packet detection and address filtering even when the receiver's sampling clock-rate is much lower than the signal bandwidth. 
Further, it employs an opportunistic downclocking mechanism to optimize the efficiency of switching clock-rate, based on a simple interface to existing MAC-layer scheduling protocols. We have implemented E-MiLi on the USRP software radio platform. Our experimental evaluation shows that E-MiLi can detect packets with close to 100% accuracy even with downclocking by a factor of 16. When integrated with 802.11, E-MiLi can reduce energy consumption by around 44% for 92% of users in real-world wireless networks.", + "link": "https://www.semanticscholar.org/paper/9d9426b91759c1e0faf963fe10405694f436e21d", + "scraped_abstract": null, + "citation_best": 136 + }, + { + "paper": "2126541908", + "venue": "1123349196", + "year": "2011", + "title": "detecting driver phone use leveraging car speakers", + "label": [ + "28490314", + "546215728", + "140779682", + "24493144", + "79403827", + "104267543", + "108265739", + "2777421447" + ], + "author": [ + "2126101459", + "2159223301", + "1553880687", + "2113796895", + "2617529812", + "2227752088", + "2163180513", + "593974664", + "2170939009" + ], + "reference": [ + "1550852443", + "1971243715", + "1985120006", + "2018292842", + "2018319586", + "2019993146", + "2028526753", + "2043843997", + "2044535354", + "2045586743", + "2051903196", + "2052770734", + "2079797201", + "2098309986", + "2100096365", + "2106126246", + "2110457540", + "2112737587", + "2118602640", + "2129855378", + "2136153466", + "2148443935", + "2158882008", + "2162024818", + "2165158100", + "2188292175", + "2992892880" + ], + "abstract": "this work addresses the fundamental problem of distinguishing between a driver and passenger using a mobile phone which is the critical input to enable numerous safety and interface enhancements our detection system leverages the existing car stereo infrastructure in particular the speakers and bluetooth network our acoustic approach has the phone send a series of customized high frequency beeps via the car stereo the beeps are 
spaced in time across the left right and if available front and rear speakers after sampling the beeps we use a sequential change point detection scheme to time their arrival and then use a differential approach to estimate the phone s distance from the car s center from these differences a passenger or driver classification can be made to validate our approach we experimented with two kinds of phones and in two different cars we found that our customized beeps were imperceptible to most users yet still playable and recordable in both cars our customized beeps were also robust to background sounds such as music and wind and we found the signal processing did not require excessive computational resources in spite of the cars heavy multi path environment our approach had a classification accuracy of over 90 and around 95 with some calibrations we also found we have a low false positive rate on the order of a few percent", + "title_raw": "Detecting driver phone use leveraging car speakers", + "abstract_raw": "This work addresses the fundamental problem of distinguishing between a driver and passenger using a mobile phone, which is the critical input to enable numerous safety and interface enhancements. Our detection system leverages the existing car stereo infrastructure, in particular the speakers and Bluetooth network. Our acoustic approach has the phone send a series of customized high frequency beeps via the car stereo. The beeps are spaced in time across the left, right, and if available, front and rear speakers. After sampling the beeps, we use a sequential change-point detection scheme to time their arrival, and then use a differential approach to estimate the phone's distance from the car's center. From these differences a passenger or driver classification can be made. To validate our approach, we experimented with two kinds of phones and in two different cars. 
We found that our customized beeps were imperceptible to most users, yet still playable and recordable in both cars. Our customized beeps were also robust to background sounds such as music and wind, and we found the signal processing did not require excessive computational resources. In spite of the cars' heavy multi-path environment, our approach had a classification accuracy of over 90%, and around 95% with some calibrations. We also found we have a low false positive rate, on the order of a few percent.", + "link": "https://www.semanticscholar.org/paper/42fdd0a678a9a0d8359d6284f2e7f583ddd0a9ac", + "scraped_abstract": null, + "citation_best": 206 + }, + { + "paper": "174578849", + "venue": "1158363782", + "year": "2011", + "title": "serverswitch a programmable and high performance platform for data center networks", + "label": [ + "10597312", + "31258907", + "149635348", + "2776767758", + "120317029", + "195563490", + "192697461", + "44010500", + "119404949", + "2779581428" + ], + "author": [ + "2099565375", + "2279302698", + "2407389540", + "2718484649", + "2396858663", + "2105028595", + "2154795548", + "2778651595", + "2236138046" + ], + "reference": [ + "1539914100", + "1698388015", + "2003838150", + "2010365467", + "2044454514", + "2097882016", + "2099283075", + "2104910040", + "2111734949", + "2123016589", + "2126210439", + "2130182604", + "2130531694", + "2143065961", + "2151062909", + "2157614013", + "2162597606", + "2165699948", + "2170660950", + "2294246362" + ], + "abstract": "as one of the fundamental infrastructures for cloud computing data center networks dcn have recently been studied extensively we currently use pure software based systems fpga based platforms e g netfpga or openflow switches to implement and evaluate various dcn designs including topology design control plane and routing and congestion control however software based approaches suffer from high cpu overhead and processing latency fpga based platforms are difficult to program and 
incur high cost and openflow focuses on control plane functions at present in this paper we design a serverswitch to address the above problems serverswitch is motivated by the observation that commodity ethernet switching chips are becoming programmable and that the pci e interface provides high throughput and low latency between the server cpu and i o subsystem serverswitch uses a commodity switching chip for various customized packet forwarding and leverages the server cpu for control and data plane packet processing due to the low latency and high throughput between the switching chip and server cpu we have built our serverswitch at low cost our experiments demonstrate that serverswitch is fully programmable and achieves high performance specifically we have implemented various forwarding schemes including source routing in hardware our in network caching experiment showed high throughput and flexible data processing our qcn quantized congestion notification implementation further demonstrated that serverswitch can react to network congestions in 23us", + "title_raw": "ServerSwitch: a programmable and high performance platform for data center networks", + "abstract_raw": "As one of the fundamental infrastructures for cloud computing, data center networks (DCN) have recently been studied extensively. We currently use pure software-based systems, FPGA based platforms, e.g., NetFPGA, or OpenFlow switches, to implement and evaluate various DCN designs including topology design, control plane and routing, and congestion control. However, software-based approaches suffer from high CPU overhead and processing latency; FPGA based platforms are difficult to program and incur high cost; and OpenFlow focuses on control plane functions at present.\r\n\r\nIn this paper, we design a ServerSwitch to address the above problems. 
ServerSwitch is motivated by the observation that commodity Ethernet switching chips are becoming programmable and that the PCI-E interface provides high throughput and low latency between the server CPU and I/O subsystem. ServerSwitch uses a commodity switching chip for various customized packet forwarding, and leverages the server CPU for control and data plane packet processing, due to the low latency and high throughput between the switching chip and server CPU.\r\n\r\nWe have built our ServerSwitch at low cost. Our experiments demonstrate that ServerSwitch is fully programmable and achieves high performance. Specifically, we have implemented various forwarding schemes including source routing in hardware. Our in-network caching experiment showed high throughput and flexible data processing. Our QCN (Quantized Congestion Notification) implementation further demonstrated that ServerSwitch can react to network congestions in 23us.", + "link": "https://www.semanticscholar.org/paper/d02a022ce4061a2dfafb93ee9cc1fa4c19dfbd1c", + "scraped_abstract": null, + "citation_best": 129 + }, + { + "paper": "1515106148", + "venue": "1158363782", + "year": "2011", + "title": "design implementation and evaluation of congestion control for multipath tcp", + "label": [ + "31258907", + "2779166880", + "65714651", + "30305156", + "195563490", + "157764524", + "167677733", + "93996380", + "161218011", + "120314980" + ], + "author": [ + "2018183519", + "223441212", + "2157300389", + "2281506998" + ], + "reference": [ + "239964209", + "1518389375", + "1562069137", + "1698388015", + "1823805418", + "1966867318", + "2011222041", + "2022844530", + "2065770510", + "2067764765", + "2089118315", + "2089118982", + "2109055312", + "2109679596", + "2111202423", + "2118442492", + "2126210439", + "2130531694", + "2143163492", + "2156771316", + "2161624011", + "2163038493", + "2532475208", + "2993706756" + ], + "abstract": "multipath tcp as proposed by the ietf working group mptcp allows a single 
data stream to be split across multiple paths this has obvious benefits for reliability and it can also lead to more efficient use of networked resources we describe the design of a multipath congestion control algorithm we implement it in linux and we evaluate it for multihomed servers data centers and mobile clients we show that some obvious solutions for multipath congestion control can be harmful but that our algorithm improves throughput and fairness compared to single path tcp our algorithmis a drop in replacement for tcp and we believe it is safe to deploy", + "title_raw": "Design, implementation and evaluation of congestion control for multipath TCP", + "abstract_raw": "Multipath TCP, as proposed by the IETF working group mptcp, allows a single data stream to be split across multiple paths. This has obvious benefits for reliability, and it can also lead to more efficient use of networked resources. We describe the design of a multipath congestion control algorithm, we implement it in Linux, and we evaluate it for multihomed servers, data centers and mobile clients. We show that some 'obvious' solutions for multipath congestion control can be harmful, but that our algorithm improves throughput and fairness compared to single-path TCP. 
Our algorithmis a drop-in replacement for TCP, and we believe it is safe to deploy.", + "link": "https://www.semanticscholar.org/paper/eca709edf2c6cd05ded2728d9c2215d90ecabd94", + "scraped_abstract": null, + "citation_best": 582 + }, + { + "paper": "2137824953", + "venue": "1127352206", + "year": "2011", + "title": "data representation synthesis", + "label": [ + "95916125", + "167955471", + "116409475", + "169590947", + "199360897", + "162319229", + "26320393", + "80444323", + "175971053" + ], + "author": [ + "2785283258", + "2067453598", + "2134374908", + "343541395", + "1345056057" + ], + "reference": [ + "117718117", + "166724137", + "1511185067", + "1528617049", + "1530314682", + "1538707437", + "1541825597", + "1550291575", + "1833029986", + "1851390469", + "1991050134", + "2021028301", + "2029214188", + "2036984542", + "2040073555", + "2053154567", + "2088675571", + "2089901765", + "2098935637", + "2104376684", + "2110272713", + "2124438843", + "2129671708", + "2135275954", + "2137628566", + "2147650421", + "2152686702", + "2171313158", + "2186456727", + "2295622790", + "2912166712" + ], + "abstract": "we consider the problem of specifying combinations of data structures with complex sharing in a manner that is both declarative and results in provably correct code in our approach abstract data types are specified using relational algebra and functional dependencies we describe a language of decompositions that permit the user to specify different concrete representations for relations and show that operations on concrete representations soundly implement their relational specification it is easy to incorporate data representations synthesized by our compiler into existing systems leading to code that is simpler correct by construction and comparable in performance to the code it replaces", + "title_raw": "Data representation synthesis", + "abstract_raw": "We consider the problem of specifying combinations of data structures with complex sharing in a manner 
that is both declarative and results in provably correct code. In our approach, abstract data types are specified using relational algebra and functional dependencies. We describe a language of decompositions that permit the user to specify different concrete representations for relations, and show that operations on concrete representations soundly implement their relational specification. It is easy to incorporate data representations synthesized by our compiler into existing systems, leading to code that is simpler, correct by construction, and comparable in performance to the code it replaces.", + "link": "https://www.semanticscholar.org/paper/c06790296e39061ef9e17c1011a85a1a0314d287", + "scraped_abstract": null, + "citation_best": 5 + }, + { + "paper": "2095609152", + "venue": "1184151122", + "year": "2011", + "title": "data exchange beyond complete data", + "label": [ + "2779489174", + "97970142", + "192028432", + "199360897", + "15845906", + "80444323", + "72634772" + ], + "author": [ + "2117861229", + "2129336444", + "1990856936" + ], + "reference": [ + "629861355", + "1203261677", + "1558832481", + "1965776893", + "1974717772", + "1980103601", + "1980368539", + "1985581502", + "1989783863", + "1990391007", + "1993268751", + "2016785840", + "2020228538", + "2023757326", + "2024796520", + "2028222220", + "2050171146", + "2058277150", + "2072393481", + "2088044688", + "2089546742", + "2095395853", + "2102729564", + "2116929168", + "2122816361", + "2124627636", + "2125822162", + "2126004201", + "2126225192", + "2130678865", + "2140957972", + "2142864156", + "2151322578", + "2162806915", + "2163009148", + "2163668347", + "2167685423", + "2169602474", + "2174018523", + "2293299776", + "2406474465", + "2408255337", + "2902200930", + "2952395685", + "2997050146" + ], + "abstract": "in the traditional data exchange setting source instances are restricted to be complete in the sense that every fact is either true or false in these instances although natural for a 
typical database translation scenario this restriction is gradually becoming an impediment to the development of a wide range of applications that need to exchange objects that admit several interpretations in particular we are motivated by two specific applications that go beyond the usual data exchange scenario exchanging incomplete information and exchanging knowledge bases in this paper we propose a general framework for data exchange that can deal with these two applications more specifically we address the problem of exchanging information given by representation systems which are essentially finite descriptions of possibly infinite sets of complete instances we make use of the classical semantics of mappings specified by sets of logical sentences to give a meaningful semantics to the notion of exchanging representatives from which the standard notions of solution space of solutions and universal solution naturally arise we also introduce the notion of strong representation system for a class of mappings that resembles the concept of strong representation system for a query language we show the robustness of our proposal by applying it to the two applications mentioned above exchanging incomplete information and exchanging knowledge bases which are both instantiations of the exchanging problem for representation systems we study these two applications in detail presenting results regarding expressiveness query answering and complexity of computing solutions and also algorithms to materialize solutions", + "title_raw": "Data exchange beyond complete data", + "abstract_raw": "In the traditional data exchange setting, source instances are restricted to be complete in the sense that every fact is either true or false in these instances. 
Although natural for a typical database translation scenario, this restriction is gradually becoming an impediment to the development of a wide range of applications that need to exchange objects that admit several interpretations. In particular, we are motivated by two specific applications that go beyond the usual data exchange scenario: exchanging incomplete information and exchanging knowledge bases. In this paper, we propose a general framework for data exchange that can deal with these two applications. More specifically, we address the problem of exchanging information given by representation systems, which are essentially finite descriptions of (possibly infinite) sets of complete instances. We make use of the classical semantics of mappings specified by sets of logical sentences to give a meaningful semantics to the notion of exchanging representatives, from which the standard notions of solution, space of solutions, and universal solution naturally arise. We also introduce the notion of strong representation system for a class of mappings, that resembles the concept of strong representation system for a query language. We show the robustness of our proposal by applying it to the two applications mentioned above: exchanging incomplete information and exchanging knowledge bases, which are both instantiations of the exchanging problem for representation systems. 
We study these two applications in detail, presenting results regarding expressiveness, query answering and complexity of computing solutions, and also algorithms to materialize solutions.", + "link": "https://www.semanticscholar.org/paper/756076c6fff32c2f3cc6f71cf555ed3b62240e0c", + "scraped_abstract": null, + "citation_best": 30 + }, + { + "paper": "2099129595", + "venue": "1163618098", + "year": "2011", + "title": "phonotactic reconstruction of encrypted voip conversations hookt on fon iks", + "label": [ + "28490314", + "158379750", + "182590292", + "178489894", + "171246234", + "148730421" + ], + "author": [ + "2342595267", + "2164133841", + "2159745255", + "126037460" + ], + "reference": [ + "90499676", + "105475431", + "202255868", + "606759049", + "1481277647", + "1481628066", + "1508165687", + "1557074680", + "1579838312", + "1603200341", + "1608222975", + "1631260214", + "1773803948", + "1916902331", + "1955645522", + "1969844544", + "1972682612", + "2002276107", + "2009570821", + "2024561884", + "2032558547", + "2045157414", + "2048702750", + "2051434435", + "2054662815", + "2070150502", + "2086699924", + "2093537029", + "2096175520", + "2097042314", + "2097496174", + "2100100366", + "2101284612", + "2116762892", + "2117325844", + "2120321299", + "2122226347", + "2131700150", + "2134632326", + "2135088779", + "2146211060", + "2148472326", + "2151626637", + "2157349061", + "2157427027", + "2163735027", + "2166402529", + "2167266744", + "2174992124", + "2291367281", + "2399048812", + "2953320089", + "3127686677" + ], + "abstract": "in this work we unveil new privacy threats against voice over ip voip communications although prior work has shown that the interaction of variable bit rate codecs and length preserving stream ciphers leaks information we show that the threat is more serious than previously thought in particular we derive approximate transcripts of encrypted voip conversations by segmenting an observed packet stream into subsequences representing 
individual phonemes and classifying those subsequences by the phonemes they encode drawing on insights from the computational linguistics and speech recognition communities we apply novel techniques for unmasking parts of the conversation we believe our ability to do so underscores the importance of designing secure yet efficient ways to protect the confidentiality of voip conversations", + "title_raw": "Phonotactic Reconstruction of Encrypted VoIP Conversations: Hookt on Fon-iks", + "abstract_raw": "In this work, we unveil new privacy threats against Voice-over-IP (VoIP) communications. Although prior work has shown that the interaction of variable bit-rate codecs and length-preserving stream ciphers leaks information, we show that the threat is more serious than previously thought. In particular, we derive approximate transcripts of encrypted VoIP conversations by segmenting an observed packet stream into subsequences representing individual phonemes and classifying those subsequences by the phonemes they encode. Drawing on insights from the computational linguistics and speech recognition communities, we apply novel techniques for unmasking parts of the conversation. 
We believe our ability to do so underscores the importance of designing secure (yet efficient) ways to protect the confidentiality of VoIP conversations.", + "link": "https://www.semanticscholar.org/paper/a5d6544de55fb0ea3c91904ff4d021804efd53bd", + "scraped_abstract": null, + "citation_best": 117 + }, + { + "paper": "2008251177", + "venue": "1140684652", + "year": "2011", + "title": "find it if you can a game for modeling different types of web search success using interaction data", + "label": [ + "166423231", + "14838553", + "164120249", + "48044578", + "97854310", + "62230096", + "23123220" + ], + "author": [ + "2030298875", + "2142973983", + "826397576", + "2283615530" + ], + "reference": [ + "219090214", + "1497326661", + "1508509952", + "2007750197", + "2015596206", + "2027382829", + "2034826792", + "2035569891", + "2038231053", + "2043930359", + "2044072332", + "2054725756", + "2058473041", + "2064522604", + "2095627566", + "2096946253", + "2113455164", + "2116008435", + "2124318441", + "2126146218", + "2132314908", + "2144664841", + "2145734075", + "2147880316", + "2153783077", + "2154724067", + "2163987313", + "2169783907" + ], + "abstract": "a better understanding of strategies and behavior of successful searchers is crucial for improving the experience of all searchers however research of search behavior has been struggling with the tension between the relatively small scale but controlled lab studies and the large scale log based studies where the searcher intent and many other important factors have to be inferred we present our solution for performing controlled yet realistic scalable and reproducible studies of searcher behavior we focus on difficult informational tasks which tend to frustrate many users of the current web search technology first we propose a principled formalization of different types of success for informational search which encapsulate and sharpen previously proposed models second we present a scalable game like infrastructure 
for crowdsourcing search behavior studies specifically targeted towards capturing and evaluating successful search strategies on informational tasks with known intent third we report our analysis of search success using these data which confirm and extends previous findings finally we demonstrate that our model can predict search success more effectively than the existing state of the art methods on both our data and on a different set of log data collected from regular search engine sessions together our search success models the data collection infrastructure and the associated behavior analysis techniques significantly advance the study of success in web search", + "title_raw": "Find it if you can: a game for modeling different types of web search success using interaction data", + "abstract_raw": "A better understanding of strategies and behavior of successful searchers is crucial for improving the experience of all searchers. However, research of search behavior has been struggling with the tension between the relatively small-scale, but controlled lab studies, and the large-scale log-based studies where the searcher intent and many other important factors have to be inferred. We present our solution for performing controlled, yet realistic, scalable, and reproducible studies of searcher behavior. We focus on difficult informational tasks, which tend to frustrate many users of the current web search technology. First, we propose a principled formalization of different types of \"success\" for informational search, which encapsulate and sharpen previously proposed models. Second, we present a scalable game-like infrastructure for crowdsourcing search behavior studies, specifically targeted towards capturing and evaluating successful search strategies on informational tasks with known intent. Third, we report our analysis of search success using these data, which confirm and extends previous findings. 
Finally, we demonstrate that our model can predict search success more effectively than the existing state-of-the-art methods, on both our data and on a different set of log data collected from regular search engine sessions. Together, our search success models, the data collection infrastructure, and the associated behavior analysis techniques, significantly advance the study of success in web search.", + "link": "https://www.semanticscholar.org/paper/d2f20cd0d58e60c8bfdebf424d354f24e98aad0c", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2136110891", + "venue": "1131589359", + "year": "2011", + "title": "topology discovery of sparse random graphs with few participants", + "label": [ + "74172769", + "44359876" + ], + "author": [ + "2101964938", + "340644367", + "1205259972" + ], + "reference": [ + "62833799", + "160062981", + "568057840", + "1501398344", + "1532569127", + "1735349781", + "1922394927", + "1963699033", + "1973911938", + "1975844474", + "1985977780", + "1987268277", + "1991531041", + "1995615099", + "1996816151", + "2004671457", + "2009570821", + "2017274979", + "2026014023", + "2028891984", + "2042959539", + "2044807855", + "2050935509", + "2056711111", + "2066327035", + "2068871408", + "2073415627", + "2096320713", + "2098825044", + "2098865491", + "2099111195", + "2101267973", + "2106034715", + "2117825463", + "2120340025", + "2122759946", + "2124165698", + "2126986904", + "2128020980", + "2129094430", + "2140536181", + "2141353180", + "2148662748", + "2150105124", + "2159080219", + "2164355806", + "2165084894", + "2166692257", + "2905110430", + "2949567784", + "3102641634", + "3103572255", + "3121562466" + ], + "abstract": "we consider the task of topology discovery of sparse random graphs using end to end random measurements e g delay between a subset of nodes referred to as the participants the rest of the nodes are hidden and do not provide any information for topology discovery we consider topology discovery under two 
routing models a the participants exchange messages along the shortest paths and obtain end to end measurements and b additionally the participants exchange messages along the second shortest path for scenario a our proposed algorithm results in a sub linear edit distance guarantee using a sub linear number of uniformly selected participants for scenario b we obtain a much stronger result and show that we can achieve consistent reconstruction when a sub linear number of uniformly selected nodes participate this implies that accurate discovery of sparse random graphs is tractable using an extremely small number of participants we finally obtain a lower bound on the number of participants required by any algorithm to reconstruct the original random graph up to a given edit distance we also demonstrate that while consistent discovery is tractable for sparse random graphs using a small number of participants in general there are graphs which cannot be discovered by any algorithm even with a significant number of participants and with the availability of end to end information along all the paths between the participants", + "title_raw": "Topology discovery of sparse random graphs with few participants", + "abstract_raw": "We consider the task of topology discovery of sparse random graphs using end-to-end random measurements (e.g., delay) between a subset of nodes, referred to as the participants. The rest of the nodes are hidden, and do not provide any information for topology discovery. We consider topology discovery under two routing models: (a) the participants exchange messages along the shortest paths and obtain end-to-end measurements, and (b) additionally, the participants exchange messages along the second shortest path. For scenario (a), our proposed algorithm results in a sub-linear edit-distance guarantee using a sub-linear number of uniformly selected participants. 
For scenario (b), we obtain a much stronger result, and show that we can achieve consistent reconstruction when a sub-linear number of uniformly selected nodes participate. This implies that accurate discovery of sparse random graphs is tractable using an extremely small number of participants. We finally obtain a lower bound on the number of participants required by any algorithm to reconstruct the original random graph up to a given edit distance. We also demonstrate that while consistent discovery is tractable for sparse random graphs using a small number of participants, in general, there are graphs which cannot be discovered by any algorithm even with a significant number of participants, and with the availability of end-to-end information along all the paths between the participants.", + "link": "https://www.semanticscholar.org/paper/4fb296fe001c6c0710a11850e166731deda99ad0", + "scraped_abstract": null, + "citation_best": 38 + }, + { + "paper": "2115799249", + "venue": "1175089206", + "year": "2011", + "title": "entangled queries enabling declarative data driven coordination", + "label": [ + "120314980", + "19754495", + "1668388", + "118643609", + "61455927", + "2780440489", + "200632571", + "80444323", + "510870499" + ], + "author": [ + "3011764793", + "2111514808", + "2144056259", + "2225732371", + "2083845045", + "2131931065" + ], + "reference": [ + "114470091", + "316061934", + "1495087637", + "1555797260", + "1603799276", + "1847254111", + "1999704832", + "2114811697", + "2129674434", + "2162414492", + "2296715995", + "2337098149", + "2955325419", + "3023179637" + ], + "abstract": "many data driven social and web applications involve collaboration and coordination the vision of declarative data driven coordination d3c proposed in 9 is to support coordination in the spirit of data management to make it data centric and to specify it using convenient declarative languages this paper introduces entangled queries a language that extends sql by constraints 
that allow for the coordinated choice of result tuples across queries originating from different users or applications it is nontrivial to define a declarative coordination formalism without arriving at the general np complete constraint satisfaction problem from ai in this paper we propose an efficiently enforcible syntactic safety condition that we argue is at the sweet spot where interesting declarative power meets applicability in large scale data management systems and applications the key computational problem of d3c is to match entangled queries to achieve coordination we present an efficient matching algorithm which statically analyzes query workloads and merges coordinating entangled queries into compound sql queries these can be sent to a standard database system and return only coordinated results we present the overall architecture of an implemented system that contains our evaluation algorithm we also evaluate the performance of the matching algorithm experimentally on realistic coordination workloads", + "title_raw": "Entangled queries: enabling declarative data-driven coordination", + "abstract_raw": "Many data-driven social and Web applications involve collaboration and coordination. The vision of declarative data-driven coordination (D3C), proposed in [9], is to support coordination in the spirit of data management: to make it data-centric and to specify it using convenient declarative languages. This paper introduces entangled queries, a language that extends SQL by constraints that allow for the coordinated choice of result tuples across queries originating from different users or applications. It is nontrivial to define a declarative coordination formalism without arriving at the general (NP-complete) Constraint Satisfaction Problem from AI. 
In this paper, we propose an efficiently enforcible syntactic safety condition that we argue is at the sweet spot where interesting declarative power meets applicability in large scale data management systems and applications. The key computational problem of D3C is to match entangled queries to achieve coordination. We present an efficient matching algorithm which statically analyzes query workloads and merges coordinating entangled queries into compound SQL queries. These can be sent to a standard database system and return only coordinated results. We present the overall architecture of an implemented system that contains our evaluation algorithm; we also evaluate the performance of the matching algorithm experimentally on realistic coordination workloads.", + "link": "https://www.semanticscholar.org/paper/2f51f197f01d3e8fd4fc6d13618f6a1238ec5d15", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2066383384", + "venue": "1171178643", + "year": "2011", + "title": "cells a virtual mobile smartphone architecture", + "label": [ + "25344961", + "513985346", + "557433098", + "47878483", + "111919701", + "149635348", + "195358072", + "66629338" + ], + "author": [ + "2100318932", + "2101933218", + "2230035587", + "92865957", + "2241681790" + ], + "reference": [ + "111672816", + "179248534", + "1829813581", + "1982132112", + "2003789895", + "2028823272", + "2070093900", + "2117923448", + "2121542813", + "2124365587", + "2124910854", + "2144838723", + "2157801087", + "2160895517" + ], + "abstract": "smartphones are increasingly ubiquitous and many users carry multiple phones to accommodate work personal and geographic mobility needs we present cells a virtualization architecture for enabling multiple virtual smartphones to run simultaneously on the same physical cellphone in an isolated secure manner cells introduces a usage model of having one foreground virtual phone and multiple background virtual phones this model enables a new device namespace 
mechanism and novel device proxies that integrate with lightweight operating system virtualization to multiplex phone hardware across multiple virtual phones while providing native hardware device performance cells virtual phone features include fully accelerated 3d graphics complete power management features and full telephony functionality with separately assignable telephone numbers and caller id support we have implemented a prototype of cells that supports multiple android virtual phones on the same phone our performance results demonstrate that cells imposes only modest runtime and memory overhead works seamlessly across multiple hardware devices including google nexus 1 and nexus s phones and transparently runs android applications at native speed without any modifications", + "title_raw": "Cells: a virtual mobile smartphone architecture", + "abstract_raw": "Smartphones are increasingly ubiquitous, and many users carry multiple phones to accommodate work, personal, and geographic mobility needs. We present Cells, a virtualization architecture for enabling multiple virtual smartphones to run simultaneously on the same physical cellphone in an isolated, secure manner. Cells introduces a usage model of having one foreground virtual phone and multiple background virtual phones. This model enables a new device namespace mechanism and novel device proxies that integrate with lightweight operating system virtualization to multiplex phone hardware across multiple virtual phones while providing native hardware device performance. Cells virtual phone features include fully accelerated 3D graphics, complete power, management features, and full telephony functionality with separately assignable telephone numbers and caller ID support. We have implemented a prototype of Cells that supports multiple Android virtual phones on the same phone. 
Our performance results demonstrate that Cells imposes only modest runtime and memory overhead, works seamlessly across multiple hardware devices including Google Nexus 1 and Nexus S phones, and transparently runs Android applications at native speed without any modifications.", + "link": "https://www.semanticscholar.org/paper/60278c1f10bf1138f7f387e76e160cf88e599d2f", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "1995790197", + "venue": "1171178643", + "year": "2011", + "title": "a file is not a file understanding the i o behavior of apple desktop applications", + "label": [ + "18484406", + "111919701", + "77088390", + "88520388", + "26656859", + "58861120", + "21729314", + "82820731", + "193769178", + "166807848" + ], + "author": [ + "2139651645", + "917262090", + "2424066054", + "1979388515", + "74145742" + ], + "reference": [ + "28403229", + "135447215", + "148956775", + "303190792", + "1540779330", + "1677118421", + "1967141605", + "2002044167", + "2005373714", + "2007415020", + "2035379912", + "2065077642", + "2070834743", + "2101021990", + "2104641431", + "2114167330", + "2114904741", + "2115600841", + "2117289367", + "2119565742", + "2124288146", + "2125788329", + "2135652458", + "2140070224", + "2143730413", + "2145021036", + "2147504831", + "2150864656", + "2153704625", + "2157762234", + "2160127232", + "2171796885", + "2205436351" + ], + "abstract": "we analyze the i o behavior of ibench a new collection of productivity and multimedia application workloads our analysis reveals a number of differences between ibench and typical file system workload studies including the complex organization of modern files the lack of pure sequential access the influence of underlying frameworks on i o patterns the widespread use of file synchronization and atomic operations and the prevalence of threads our results have strong ramifications for the design of next generation local and cloud based storage systems", + "title_raw": "A file is not a 
file: understanding the I/O behavior of Apple desktop applications", + "abstract_raw": "We analyze the I/O behavior of iBench, a new collection of productivity and multimedia application workloads. Our analysis reveals a number of differences between iBench and typical file-system workload studies, including the complex organization of modern files, the lack of pure sequential access, the influence of underlying frameworks on I/O patterns, the widespread use of file synchronization and atomic operations, and the prevalence of threads. Our results have strong ramifications for the design of next generation local and cloud-based storage systems.", + "link": "https://www.semanticscholar.org/paper/e7a405c76a186427c12f51122a795b12db1cd002", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2166493117", + "venue": "1166315290", + "year": "2011", + "title": "sidebyside ad hoc multi user interaction with handheld projectors", + "label": [ + "2778002699", + "2779346582", + "118530786", + "173974348", + "186967261", + "25343380", + "121684516", + "41904074" + ], + "author": [ + "2119399829", + "2728936", + "2171298838", + "2138501373" + ], + "reference": [ + "80567811", + "101662713", + "131977530", + "204764415", + "1559983024", + "1968315234", + "1971616954", + "1979919432", + "1980640125", + "1997556709", + "1999013213", + "2002690207", + "2006438786", + "2042456435", + "2054009460", + "2058796629", + "2081048174", + "2101102258", + "2107845924", + "2108715885", + "2127931334", + "2139490115", + "2139800043", + "2142620629", + "2148819007", + "2729161127", + "2797816625", + "2913524657" + ], + "abstract": "we introduce sidebyside a system designed for ad hoc multi user interaction with handheld projectors sidebyside uses device mounted cameras and hybrid visible infrared light projectors to track multiple independent projected images in relation to one another this is accomplished by projecting invisible fiducial markers in the near infrared spectrum 
our system is completely self contained and can be deployed as a handheld device without instrumentation of the environment we present the design and implementation of our system including a hybrid handheld projector to project visible and infrared light and techniques for tracking projected fiducial markers that move and overlap we introduce a range of example applications that demonstrate the applicability of our system to real world scenarios such as mobile content exchange gaming and education", + "title_raw": "SideBySide: ad-hoc multi-user interaction with handheld projectors", + "abstract_raw": "We introduce SideBySide, a system designed for ad-hoc multi-user interaction with handheld projectors. SideBySide uses device-mounted cameras and hybrid visible/infrared light projectors to track multiple independent projected images in relation to one another. This is accomplished by projecting invisible fiducial markers in the near-infrared spectrum. Our system is completely self-contained and can be deployed as a handheld device without instrumentation of the environment. We present the design and implementation of our system including a hybrid handheld projector to project visible and infrared light, and techniques for tracking projected fiducial markers that move and overlap. 
We introduce a range of example applications that demonstrate the applicability of our system to real-world scenarios such as mobile content exchange, gaming, and education.", + "link": "https://www.semanticscholar.org/paper/7b83536c5ad86051ae3aa892e30b9044a65170c4", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2130846554", + "venue": "1133523790", + "year": "2013", + "title": "remusdb transparent high availability for database systems", + "label": [ + "25344961", + "100850083", + "65813073", + "513985346", + "111919701", + "77088390", + "109751979", + "70440993" + ], + "author": [ + "323964158", + "2112165111", + "2101202752", + "2066833720", + "2157592790", + "2106730373" + ], + "reference": [ + "8329419", + "187271159", + "1515932031", + "1549404527", + "1572904055", + "1967155922", + "1978714022", + "1989492148", + "1997269120", + "2052915895", + "2085463701", + "2100889285", + "2104954161", + "2108806129", + "2110765100", + "2111586962", + "2131726714", + "2142849519", + "2142892618", + "2150348590", + "2154698535", + "2161351872", + "2166470299", + "2171956059", + "2758694497", + "3204551882" + ], + "abstract": "in this paper we present a technique for building a high availability ha database management system dbms the proposed technique can be applied to any dbms with little or no customization and with reasonable performance overhead our approach is based on remus a commodity ha solution implemented in the virtualization layer that uses asynchronous virtual machine state replication to provide transparent ha and failover capabilities we show that while remus and similar systems can protect a dbms database workloads incur a performance overhead of up to 32 as compared to an unprotected dbms we identify the sources of this overhead and develop optimizations that mitigate the problems we present an experimental evaluation using two popular database systems and industry standard benchmarks showing that for certain workloads our 
optimized approach provides fast failover le 3 s of downtime with low performance overhead when compared to an unprotected dbms our approach provides a practical means for existing deployed database systems to be made more reliable with a minimum of risk cost and effort furthermore this paper invites new discussion about whether the complexity of ha is best implemented within the dbms or as a service by the infrastructure below it", + "title_raw": "RemusDB: transparent high availability for database systems", + "abstract_raw": "In this paper, we present a technique for building a high-availability (HA) database management system (DBMS). The proposed technique can be applied to any DBMS with little or no customization, and with reasonable performance overhead. Our approach is based on Remus, a commodity HA solution implemented in the virtualization layer, that uses asynchronous virtual machine state replication to provide transparent HA and failover capabilities. We show that while Remus and similar systems can protect a DBMS, database workloads incur a performance overhead of up to 32 % as compared to an unprotected DBMS. We identify the sources of this overhead and develop optimizations that mitigate the problems. We present an experimental evaluation using two popular database systems and industry standard benchmarks showing that for certain workloads, our optimized approach provides fast failover ( $$\\le $$ 3 s of downtime) with low performance overhead when compared to an unprotected DBMS. Our approach provides a practical means for existing, deployed database systems to be made more reliable with a minimum of risk, cost, and effort. 
Furthermore, this paper invites new discussion about whether the complexity of HA is best implemented within the DBMS, or as a service by the infrastructure below it.", + "link": "https://www.semanticscholar.org/paper/5c2efd16718cc5b0c8c37bba026f57c6d592d1e0", + "scraped_abstract": null, + "citation_best": 46 + }, + { + "paper": "2164173709", + "venue": "1135342153", + "year": "2011", + "title": "towards a theory model for product search", + "label": [ + "2522767166", + "77088390", + "171089853", + "97854310", + "71472368", + "197927960" + ], + "author": [ + "2096413958", + "2095776928", + "94049422" + ], + "reference": [ + "1550784417", + "1550812296", + "1832817320", + "1978907431", + "2012327977", + "2028995298", + "2029636682", + "2051688880", + "2093714429", + "2097726431", + "2102372499", + "2106774319", + "2110040748", + "2118322263", + "2127480961", + "2128541531", + "2130706307", + "2137344397", + "2140552474", + "2147709600", + "2152119282", + "2154334393", + "2171074980", + "2171960770", + "2752710160", + "3122724846", + "3124946654" + ], + "abstract": "with the growing pervasiveness of the internet online search for products and services is constantly increasing most product search engines are based on adaptations of theoretical models devised for information retrieval however the decision mechanism that underlies the process of buying a product is different than the process of locating relevant documents or objects we propose a theory model for product search based on expected utility theory from economics specifically we propose a ranking technique in which we rank highest the products that generate the highest surplus after the purchase in a sense the top ranked products are the best value for money for a specific user our approach builds on research on demand estimation from economics and presents a solid theoretical foundation on which further research can build on we build algorithms that take into account consumer demographics heterogeneity of 
consumer preferences and also account for the varying price of the products we show how to achieve this without knowing the demographics or purchasing histories of individual consumers but by using aggregate demand data we evaluate our work by applying the techniques on hotel search our extensive user studies using more than 15 000 user provided ranking comparisons demonstrate an overwhelming preference for the rankings generated by our techniques compared to a large number of existing strong state of the art baselines", + "title_raw": "Towards a theory model for product search", + "abstract_raw": "With the growing pervasiveness of the Internet, online search for products and services is constantly increasing. Most product search engines are based on adaptations of theoretical models devised for information retrieval. However, the decision mechanism that underlies the process of buying a product is different than the process of locating relevant documents or objects. We propose a theory model for product search based on expected utility theory from economics. Specifically, we propose a ranking technique in which we rank highest the products that generate the highest surplus, after the purchase. In a sense, the top ranked products are the \"best value for money\" for a specific user. Our approach builds on research on \"demand estimation\" from economics and presents a solid theoretical foundation on which further research can build on. We build algorithms that take into account consumer demographics, heterogeneity of consumer preferences, and also account for the varying price of the products. We show how to achieve this without knowing the demographics or purchasing histories of individual consumers but by using aggregate demand data. We evaluate our work, by applying the techniques on hotel search. 
Our extensive user studies, using more than 15,000 user-provided ranking comparisons, demonstrate an overwhelming preference for the rankings generated by our techniques, compared to a large number of existing strong state-of-the-art baselines.", + "link": "https://www.semanticscholar.org/paper/ce45b08b95e7f67658fdd0e797431f30c4d74802", + "scraped_abstract": null, + "citation_best": 59 + }, + { + "paper": "1538864647", + "venue": "1184914352", + "year": "2010", + "title": "a novel transition based encoding scheme for planning as satisfiability", + "label": [ + "168773769", + "11413529", + "200925200", + "80444323" + ], + "author": [ + "2149586599", + "2295009377", + "2138370184" + ], + "reference": [ + "319070645", + "1569757069", + "1593059365", + "1973898358", + "1986006371", + "2025460523", + "2045526507", + "2053990123", + "2146220671", + "2161414194", + "2164322797", + "2912242901" + ], + "abstract": "planning as satisfiability is a principal approach to planning with many eminent advantages the existing planning as satisfiability techniques usually use encodings compiled from the strips formalism we introduce a novel sat encoding scheme based on the sas formalism it exploits the structural information in the sas formalism resulting in more compact sat instances and reducing the number of clauses by up to 50 fold our results show that this encoding scheme improves upon the strips based encoding in terms of both time and memory efficiency", + "title_raw": "A novel transition based encoding scheme for planning as satisfiability", + "abstract_raw": "Planning as satisfiability is a principal approach to planning with many eminent advantages. The existing planning as satisfiability techniques usually use encodings compiled from the STRIPS formalism. We introduce a novel SAT encoding scheme based on the SAS+formalism. 
It exploits the structural information in the SAS+ formalism, resulting in more compact SAT instances and reducing the number of clauses by up to 50 fold. Our results show that this encoding scheme improves upon the STRIPS-based encoding, in terms of both time and memory efficiency.", + "link": "https://www.semanticscholar.org/paper/8d6c0fef0afd9f7654394a8050ae74faaece5bb1", + "scraped_abstract": null, + "citation_best": 54 + }, + { + "paper": "2468755302", + "venue": "1184914352", + "year": "2010", + "title": "how incomplete is your semantic web reasoner systematic analysis of the completeness of query answering systems", + "label": [ + "166724064", + "164120249", + "65647387", + "118689300", + "2129575", + "61673122", + "192028432", + "9616225", + "157692150", + "23123220" + ], + "author": [ + "170620773", + "2117709049", + "2010142017" + ], + "reference": [ + "62327733", + "1481314305", + "1482134990", + "1523017397", + "1555563750", + "1589560471", + "1989009626", + "2135109168", + "2137862151", + "2248098515", + "2261395231", + "2590474552" + ], + "abstract": "conjunctive query answering is a key reasoning service for many ontology based applications in order to improve scalability many semantic web query answering systems give up completeness i e they do not guarantee to return all query answers it may be useful or even critical to the designers and users of such systems to understand how much and what kind of information is potentially being lost we present a method for generating test data that can be used to provide at least partial answers to these questions a purpose for which existing benchmarks are not well suited in addition to developing a general framework that formalises the problem we describe practical data generation algorithms for some popular ontology languages and present some very encouraging results from our preliminary evaluation", + "title_raw": "How incomplete is your semantic web reasoner? 
systematic analysis of the completeness of query answering systems", + "abstract_raw": "Conjunctive query answering is a key reasoning service for many ontology-based applications. In order to improve scalability, many Semantic Web query answering systems give up completeness (i.e., they do not guarantee to return all query answers). It may be useful or even critical to the designers and users of such systems to understand how much and what kind of information is (potentially) being lost. We present a method for generating test data that can be used to provide at least partial answers to these questions, a purpose for which existing benchmarks are not well suited. In addition to developing a general framework that formalises the problem, we describe practical data generation algorithms for some popular ontology languages, and present some very encouraging results from our preliminary evaluation.", + "link": "https://scholar.google.com/scholar?as_q=&num=10&btnG=Search+Scholar&as_epq=How+Incomplete+Is+Your+Semantic+Web+Reasoner?+Systematic+Analysis+of+the+Completeness+of+Query+Answering+Systems&as_oq=&as_eq=&as_occt=any&as_sauthors=Stoilos", + "scraped_abstract": null, + "citation_best": 5 + }, + { + "paper": "2108788053", + "venue": "1188739475", + "year": "2010", + "title": "beyond nombank a study of implicit arguments for nominal predicates", + "label": [ + "40738166", + "124101348", + "109364899", + "204321447" + ], + "author": [ + "2121066561", + "2144944523" + ], + "reference": [ + "69597389", + "1579035156", + "1632114991", + "1970849810", + "1986664011", + "1988912276", + "1995945562", + "2000654059", + "2014915963", + "2035305552", + "2038324640", + "2038721957", + "2053154970", + "2058008886", + "2098132066", + "2118585731", + "2126851059", + "2136842867", + "2142222368", + "2151295812", + "2183246134" + ], + "abstract": "despite its substantial coverage nombank does not account for all within sentence arguments and ignores extra sentential arguments 
altogether these arguments which we call implicit are important to semantic processing and their recovery could potentially benefit many nlp applications we present a study of implicit arguments for a select group of frequent nominal predicates we show that implicit arguments are pervasive for these predicates adding 65 to the coverage of nombank we demonstrate the feasibility of recovering implicit arguments with a supervised classification model our results and analyses provide a baseline for future work on this emerging task", + "title_raw": "Beyond NomBank: A Study of Implicit Arguments for Nominal Predicates", + "abstract_raw": "Despite its substantial coverage, NomBank does not account for all within-sentence arguments and ignores extra-sentential arguments altogether. These arguments, which we call implicit, are important to semantic processing, and their recovery could potentially benefit many NLP applications. We present a study of implicit arguments for a select group of frequent nominal predicates. We show that implicit arguments are pervasive for these predicates, adding 65% to the coverage of NomBank. We demonstrate the feasibility of recovering implicit arguments with a supervised classification model. 
Our results and analyses provide a baseline for future work on this emerging task.", + "link": "https://www.semanticscholar.org/paper/d8259bcbe9cb0cf5bad6ea25645f4407fc544a1c", + "scraped_abstract": null, + "citation_best": 120 + }, + { + "paper": "2124318441", + "venue": "1163450153", + "year": "2010", + "title": "how does search behavior change as search becomes more difficult", + "label": [ + "166423231", + "14838553", + "107457646", + "97854310", + "139979381", + "136764020" + ], + "author": [ + "2133697036", + "2336495264", + "2496663904" + ], + "reference": [ + "69461734", + "1244079914", + "1488961089", + "1590622490", + "1982889956", + "1995101231", + "2068912592", + "2073853190", + "2098407172", + "2104217798", + "2124090256", + "2124449410", + "2126146218", + "2131805863", + "2139336883", + "2140999867", + "2145734075", + "2149246474", + "2149600263", + "2151592910", + "2152679188", + "2153828680", + "2154724067", + "2158450083", + "2171593626", + "2569629236" + ], + "abstract": "search engines make it easy to check facts online but finding some specific kinds of information sometimes proves to be difficult we studied the behavioral signals that suggest that a user is having trouble in a search task first we ran a lab study with 23 users to gain a preliminary understanding on how users behavior changes when they struggle finding the information they re looking for the observations were then tested with 179 participants who all completed an average of 22 3 tasks from a pool of 100 tasks the large scale study provided quantitative support for our qualitative observations from the lab study when having difficulty in finding information users start to formulate more diverse queries they use advanced operators more and they spend a longer time on the search result page as compared to the successful tasks the results complement the existing body of research focusing on successful search strategies", + "title_raw": "How does search behavior change as search 
becomes more difficult", + "abstract_raw": "Search engines make it easy to check facts online, but finding some specific kinds of information sometimes proves to be difficult. We studied the behavioral signals that suggest that a user is having trouble in a search task. First, we ran a lab study with 23 users to gain a preliminary understanding on how users' behavior changes when they struggle finding the information they're looking for. The observations were then tested with 179 participants who all completed an average of 22.3 tasks from a pool of 100 tasks. The large-scale study provided quantitative support for our qualitative observations from the lab study. When having difficulty in finding information, users start to formulate more diverse queries, they use advanced operators more, and they spend a longer time on the search result page as compared to the successful tasks. The results complement the existing body of research focusing on successful search strategies.", + "link": "https://www.semanticscholar.org/paper/ef5a85e8e57046b027db5f0971fe918310b44ed3", + "scraped_abstract": null, + "citation_best": 202 + }, + { + "paper": "2119188105", + "venue": "1163450153", + "year": "2010", + "title": "the tower of babel meets web 2 0 user generated content and its applications in a multilingual context", + "label": [ + "173862523", + "101293273", + "130318100", + "136699151", + "136764020" + ], + "author": [ + "1985391146", + "2043473106" + ], + "reference": [ + "14574270", + "50119190", + "58646613", + "316763119", + "1535555350", + "1546895418", + "1549343721", + "1578948434", + "1631016350", + "1964209958", + "2007232175", + "2026051344", + "2065100127", + "2067438047", + "2081157767", + "2084377579", + "2094064503", + "2099938389", + "2100341149", + "2103318667", + "2106836779", + "2114558668", + "2120779048", + "2136930489", + "2143308546", + "2144668267", + "2146707041", + "2168943018", + "2169495811" + ], + "abstract": "this study explores language s 
fragmenting effect on user generated content by examining the diversity of knowledge representations across 25 different wikipedia language editions this diversity is measured at two levels the concepts that are included in each edition and the ways in which these concepts are described we demonstrate that the diversity present is greater than has been presumed in the literature and has a significant influence on applications that use wikipedia as a source of world knowledge we close by explicating how knowledge diversity can be beneficially leveraged to create culturally aware applications and hyperlingual applications", + "title_raw": "The tower of Babel meets web 2.0: user-generated content and its applications in a multilingual context", + "abstract_raw": "This study explores language's fragmenting effect on user-generated content by examining the diversity of knowledge representations across 25 different Wikipedia language editions. This diversity is measured at two levels: the concepts that are included in each edition and the ways in which these concepts are described. We demonstrate that the diversity present is greater than has been presumed in the literature and has a significant influence on applications that use Wikipedia as a source of world knowledge. 
We close by explicating how knowledge diversity can be beneficially leveraged to create \"culturally-aware applications\" and \"hyperlingual applications\".", + "link": "https://www.semanticscholar.org/paper/7a3c5ea6e902a9b46bba2a7f91add956dce7f16c", + "scraped_abstract": null, + "citation_best": 33 + }, + { + "paper": "2155385126", + "venue": "1163450153", + "year": "2010", + "title": "occlusion aware interfaces", + "label": [ + "17777890", + "9417928", + "31972630", + "121684516", + "112972136" + ], + "author": [ + "2154794983", + "2130130894" + ], + "reference": [ + "1548751954", + "1801329930", + "1972062093", + "1991648877", + "1994304205", + "2001606898", + "2007321502", + "2033288247", + "2050099917", + "2055465369", + "2094567668", + "2096986574", + "2108518773", + "2111559205", + "2124783468", + "2128039206", + "2141716420", + "2148884993", + "2150337334", + "2151685857", + "2165500877" + ], + "abstract": "we define occlusion aware interfaces as interaction techniques which know what area of the display is currently occluded and use this knowledge to counteract potential problems and or utilize the hidden area as a case study we describe the occlusion aware viewer which identifies important regions hidden beneath the hand and displays them in a non occluded area using a bubble like callout to determine what is important we use an application agnostic image processing layer for the occluded area we use a user configurable real time version of vogel et al s 21 geometric model in an evaluation with a simultaneous monitoring task we find the technique can successfully mitigate the effects of occlusion although issues with ambiguity and stability suggest further refinements finally we present designs for three other occlusion aware techniques for pop ups dragging and a hidden widget", + "title_raw": "Occlusion-aware interfaces", + "abstract_raw": "We define occlusion-aware interfaces as interaction techniques which know what area of the display is currently 
occluded, and use this knowledge to counteract potential problems and/or utilize the hidden area. As a case study, we describe the Occlusion-Aware Viewer, which identifies important regions hidden beneath the hand and displays them in a non-occluded area using a bubble-like callout. To determine what is important, we use an application agnostic image processing layer. For the occluded area, we use a user configurable, real-time version of Vogel et al.'s [21] geometric model. In an evaluation with a simultaneous monitoring task, we find the technique can successfully mitigate the effects of occlusion, although issues with ambiguity and stability suggest further refinements. Finally, we present designs for three other occlusion-aware techniques for pop-ups, dragging, and a hidden widget.", + "link": "https://www.semanticscholar.org/paper/1cb38f550a94b66e56c9e0558d567b8c40aec060", + "scraped_abstract": null, + "citation_best": 67 + }, + { + "paper": "2169709590", + "venue": "1163450153", + "year": "2010", + "title": "skinput appropriating the body as an input surface", + "label": [ + "44154836", + "207347870" + ], + "author": [ + "2123491528", + "2168727892", + "2105140892" + ], + "reference": [ + "1570448133", + "1970334548", + "1999880904", + "2015302753", + "2083742331", + "2084397606", + "2102413118", + "2104326601", + "2105018998", + "2106887634", + "2107188506", + "2111995343", + "2113019943", + "2124917042", + "2125228090", + "2134418753", + "2137940226", + "2138745909", + "2139212933", + "2139670260", + "2145080451", + "2148819007", + "2152528000", + "2155207172", + "2156577008", + "2163097095", + "2216075424", + "2532441771", + "2566492183", + "2966207845", + "3172478362", + "3193477162" + ], + "abstract": "we present skinput a technology that appropriates the human body for acoustic transmission allowing the skin to be used as an input surface in particular we resolve the location of finger taps on the arm and hand by analyzing mechanical vibrations that 
propagate through the body we collect these signals using a novel array of sensors worn as an armband this approach provides an always available naturally portable and on body finger input system we assess the capabilities accuracy and limitations of our technique through a two part twenty participant user study to further illustrate the utility of our approach we conclude with several proof of concept applications we developed", + "title_raw": "Skinput: appropriating the body as an input surface", + "abstract_raw": "We present Skinput, a technology that appropriates the human body for acoustic transmission, allowing the skin to be used as an input surface. In particular, we resolve the location of finger taps on the arm and hand by analyzing mechanical vibrations that propagate through the body. We collect these signals using a novel array of sensors worn as an armband. This approach provides an always available, naturally portable, and on-body finger input system. We assess the capabilities, accuracy and limitations of our technique through a two-part, twenty-participant user study. 
To further illustrate the utility of our approach, we conclude with several proof-of-concept applications we developed.", + "link": "https://www.semanticscholar.org/paper/ba7d6b7c0fec3468085d927151695cd563428638", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2129146498", + "venue": "1163450153", + "year": "2010", + "title": "lumino tangible blocks for tabletop computers based on glass fiber bundles", + "label": [ + "19453392", + "2779550310", + "45235069", + "89992363", + "121684516" + ], + "author": [ + "2009751849", + "2230933009", + "2222537081" + ], + "reference": [ + "1495709212", + "1578102512", + "1985007221", + "2010353889", + "2017231306", + "2017525862", + "2037703416", + "2053232640", + "2082142738", + "2090110089", + "2101577246", + "2103033574", + "2107460475", + "2107472300", + "2117425857", + "2121564061", + "2122909041", + "2131417962", + "2132854028", + "2142137111", + "2143450383", + "2149891956", + "2155037366", + "2163251699", + "2164139712", + "2167023109", + "2167686873", + "3182768564" + ], + "abstract": "tabletop computers based on diffuse illumination can track fiducial markers placed on the table s surface in this paper we demonstrate how to do the same with objects arranged in a three dimensional structure without modifying the table we present lumino a system of building blocks in addition to a marker each block contains a glass fiber bundle the bundle optically guides the light reflected off markers in the higher levels down to the table surface where the table s built in camera reads it while guiding marker images down the bundle optically scales and rearranges them it thereby fits the images of an entire vertical arrangement of markers into the horizontal space usually occupied by a single 2d marker we present six classes of blocks and matching marker designs each of which is optimized for different requirements we show three demo applications one of them is a construction kit that logs and critiques 
constructions the presented blocks are unpowered and maintenance free keeping larger numbers of blocks manageable", + "title_raw": "Lumino: tangible blocks for tabletop computers based on glass fiber bundles", + "abstract_raw": "Tabletop computers based on diffuse illumination can track fiducial markers placed on the table's surface. In this paper, we demonstrate how to do the same with objects arranged in a three-dimensional structure without modifying the table. We present lumino, a system of building blocks. In addition to a marker, each block contains a glass fiber bundle. The bundle optically guides the light reflected off markers in the higher levels down to the table surface, where the table's built-in camera reads it. While guiding marker images down, the bundle optically scales and rearranges them. It thereby fits the images of an entire vertical arrangement of markers into the horizontal space usually occupied by a single 2D marker. We present six classes of blocks and matching marker designs, each of which is optimized for different requirements. We show three demo applications. One of them is a construction kit that logs and critiques constructions. 
The presented blocks are unpowered and maintenance-free, keeping larger numbers of blocks manageable.", + "link": "https://www.semanticscholar.org/paper/8178c77ed13c798a8dcaf56ec006024a127ab01f", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2158447593", + "venue": "1163450153", + "year": "2010", + "title": "prefab implementing advanced behaviors using pixel based reverse engineering of interface structure", + "label": [ + "37789001", + "163847158", + "107457646", + "187482481", + "89505385", + "25621077", + "207850805", + "66153210" + ], + "author": [ + "2230909889", + "2158378096" + ], + "reference": [ + "1966756247", + "1990597096", + "2012633374", + "2034079000", + "2039425613", + "2040244898", + "2044054519", + "2051593684", + "2054658115", + "2055380454", + "2062049807", + "2077057201", + "2077770823", + "2078091018", + "2101096043", + "2109676148", + "2112103637", + "2122410182", + "2130917910", + "2134816385", + "2135345119", + "2141125339", + "2148384385", + "2148644727", + "2151180068", + "2341994895", + "2399954834" + ], + "abstract": "current chasms between applications implemented with different user interface toolkits make it difficult to implement and explore potentially important interaction techniques in new and existing applications limiting the progress and impact of human computer interaction research we examine an approach based in the single most common characteristic of all graphical user interface toolkits that they ultimately paint pixels to a display we present prefab a system for implementing advanced behaviors through the reverse engineering of the pixels in graphical interfaces informed by how user interface toolkits paint interfaces prefab features a separation of the modeling of widget layout from the recognition of widget appearance we validate prefab in implementations of three applications target aware pointing techniques phosphor transitions and side views parameter spectrums working only from pixels we 
demonstrate a single implementation of these enhancements in complex existing applications created in different user interface toolkits running on different windowing systems", + "title_raw": "Prefab: implementing advanced behaviors using pixel-based reverse engineering of interface structure", + "abstract_raw": "Current chasms between applications implemented with different user interface toolkits make it difficult to implement and explore potentially important interaction techniques in new and existing applications, limiting the progress and impact of human-computer interaction research. We examine an approach based in the single most common characteristic of all graphical user interface toolkits, that they ultimately paint pixels to a display. We present Prefab, a system for implementing advanced behaviors through the reverse engineering of the pixels in graphical interfaces. Informed by how user interface toolkits paint interfaces, Prefab features a separation of the modeling of widget layout from the recognition of widget appearance. We validate Prefab in implementations of three applications: target-aware pointing techniques, Phosphor transitions, and Side Views parameter spectrums. 
Working only from pixels, we demonstrate a single implementation of these enhancements in complex existing applications created in different user interface toolkits running on different windowing systems.", + "link": "https://www.semanticscholar.org/paper/8bf4a568c7e4e8a7bd712fd872317d36c42a8149", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2120326355", + "venue": "1163450153", + "year": "2010", + "title": "useful junk the effects of visual embellishment on comprehension and memorability of charts", + "label": [ + "511192102", + "49774154", + "12656906", + "527412718", + "2776063143" + ], + "author": [ + "2152464095", + "2063803523", + "2071700171", + "2112001010", + "1963794123", + "2351090313" + ], + "reference": [ + "4985056", + "74355995", + "594050221", + "1565253051", + "1721597202", + "1823329909", + "1977164212", + "1984022835", + "2002299766", + "2008293781", + "2008983212", + "2010067467", + "2017574778", + "2021682498", + "2073796368", + "2073900861", + "2105219043", + "2114208428", + "2121564094", + "2155843307", + "2234058859", + "2264140324", + "2327525319", + "2480153073", + "2610685016", + "2965936197" + ], + "abstract": "guidelines for designing information charts such as bar charts often state that the presentation should reduce or remove chart junk visual embellishments that are not essential to understanding the data in contrast some popular chart designers wrap the presented data in detailed and elaborate imagery raising the questions of whether this imagery is really as detrimental to understanding as has been proposed and whether the visual embellishment may have other benefits to investigate these issues we conducted an experiment that compared embellished charts with plain ones and measured both interpretation accuracy and long term recall we found that people s accuracy in describing the embellished charts was no worse than for plain charts and that their recall after a two to three week gap was significantly 
better although we are cautious about recommending that all charts be produced in this style our results question some of the premises of the minimalist approach to chart design", + "title_raw": "Useful junk?: the effects of visual embellishment on comprehension and memorability of charts", + "abstract_raw": "Guidelines for designing information charts (such as bar charts) often state that the presentation should reduce or remove 'chart junk' - visual embellishments that are not essential to understanding the data. In contrast, some popular chart designers wrap the presented data in detailed and elaborate imagery, raising the questions of whether this imagery is really as detrimental to understanding as has been proposed, and whether the visual embellishment may have other benefits. To investigate these issues, we conducted an experiment that compared embellished charts with plain ones, and measured both interpretation accuracy and long-term recall. We found that people's accuracy in describing the embellished charts was no worse than for plain charts, and that their recall after a two-to-three-week gap was significantly better. 
Although we are cautious about recommending that all charts be produced in this style, our results question some of the premises of the minimalist approach to chart design.", + "link": "https://www.semanticscholar.org/paper/dafef547aacaef628f71ca76d821f2e4fdd2086b", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2056637671", + "venue": "1199533187", + "year": "2010", + "title": "staged concurrent program analysis", + "label": [ + "98183937", + "164155591", + "89288958", + "82029504", + "48044578", + "138101251", + "193702766", + "40422974", + "199360897" + ], + "author": [ + "2632594056", + "2854535502" + ], + "reference": [ + "1480909796", + "1493367105", + "1498760855", + "1519361875", + "1522334395", + "1545371423", + "1560743762", + "1562915062", + "1568729458", + "1580380563", + "1606540187", + "1782479956", + "1819989006", + "1965662337", + "1989884524", + "2010609147", + "2029601347", + "2080573945", + "2080703024", + "2086460553", + "2087046173", + "2088139180", + "2090551028", + "2101512909", + "2111050128", + "2114067856", + "2125967324", + "2131135493", + "2135948849", + "2154713600", + "2160781056", + "2164496154", + "2166089338", + "2169870841", + "2340735175", + "2735509417", + "2913459036", + "3199565109" + ], + "abstract": "concurrent program verification is challenging because it involves exploring a large number of possible thread interleavings together with complex sequential reasoning as a result concurrent program verifiers resort to bi modal reasoning which alternates between reasoning over intra thread sequential semantics and inter thread concurrent semantics such reasoning often involves repeated intra thread reasoning for exploring each interleaving inter thread reasoning and leads to inefficiency in this paper we present a new two stage analysis which completely separates intra and inter thread reasoning the first stage uses sequential program semantics to obtain a precise summary of each thread in terms of the 
global accesses made by the thread the second stage performs inter thread reasoning by composing these thread modular summaries using the notion of sequential consistency assertion violations and other concurrency errors are then checked in this composition with the help of an off the shelf smt solver we have implemented our approach in the fusion framework for checking concurrent c programs shows that avoiding redundant bi modal reasoning makes the analysis more scalable", + "title_raw": "Staged concurrent program analysis", + "abstract_raw": "Concurrent program verification is challenging because it involves exploring a large number of possible thread interleavings together with complex sequential reasoning. As a result, concurrent program verifiers resort to bi-modal reasoning, which alternates between reasoning over intra-thread (sequential) semantics and inter-thread (concurrent) semantics. Such reasoning often involves repeated intra-thread reasoning for exploring each interleaving (inter-thread reasoning) and leads to inefficiency. In this paper, we present a new two-stage analysis which completely separates intra- and inter-thread reasoning. The first stage uses sequential program semantics to obtain a precise summary of each thread in terms of the global accesses made by the thread. The second stage performs inter-thread reasoning by composing these thread-modular summaries using the notion of sequential consistency. Assertion violations and other concurrency errors are then checked in this composition with the help of an off-the-shelf SMT solver. 
We have implemented our approach in the FUSION framework for checking concurrent C programs shows that avoiding redundant bi-modal reasoning makes the analysis more scalable.", + "link": "https://www.semanticscholar.org/paper/c72b573109198eca6d9b01ae16236d11e29b6310", + "scraped_abstract": null, + "citation_best": 53 + }, + { + "paper": "2082638814", + "venue": "1199533187", + "year": "2010", + "title": "developer fluency achieving true mastery in software projects", + "label": [ + "2777904410", + "56739046" + ], + "author": [ + "2168675810", + "702655443" + ], + "reference": [ + "1526188771", + "1583467731", + "1622527855", + "1761828760", + "1965422262", + "1989532282", + "2015440464", + "2030920007", + "2044906492", + "2046411892", + "2051098322", + "2099332975", + "2099372171", + "2116199508", + "2116484544", + "2120244029", + "2123945507", + "2151278638", + "2154236932", + "2155223492", + "2162815086", + "2167514255", + "2784146925", + "2797583072", + "2980298611", + "3014310718", + "3124560715", + "3148527572" + ], + "abstract": "outsourcing and offshoring lead to a rapid influx of new developers in software projects that in turn manifests in lower productivity and project delays to address this common problem we study how the developers become fluent in software projects we found that developer productivity in terms of number of tasks per month increases with project tenure and plateaus within a few months in three small and medium projects and it takes up to 12 months in a large project when adjusted for the task difficulty developer productivity did not plateau but continued to increase over the entire three year measurement interval we also discovered that tasks vary according to their importance centrality to a project the increase in task centrality along four dimensions customer system wide team and future impact was approximately linear over the entire period by studying developer fluency we contribute by determining dimensions along which developer 
expertise is acquired finding ways to measure them and quantifying the trajectories of developer learning", + "title_raw": "Developer fluency: achieving true mastery in software projects", + "abstract_raw": "Outsourcing and offshoring lead to a rapid influx of new developers in software projects. That, in turn, manifests in lower productivity and project delays. To address this common problem we study how the developers become fluent in software projects. We found that developer productivity in terms of number of tasks per month increases with project tenure and plateaus within a few months in three small and medium projects and it takes up to 12 months in a large project. When adjusted for the task difficulty, developer productivity did not plateau but continued to increase over the entire three year measurement interval. We also discovered that tasks vary according to their importance(centrality) to a project. The increase in task centrality along four dimensions: customer, system-wide, team, and future impact was approximately linear over the entire period. 
By studying developer fluency we contribute by determining dimensions along which developer expertise is acquired, finding ways to measure them, and quantifying the trajectories of developer learning.", + "link": "https://www.semanticscholar.org/paper/618605ffbf80e42171aa1595c33a89d4c416ab38", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2166271660", + "venue": "1199533187", + "year": "2010", + "title": "creating and evolving developer documentation understanding the decisions of open source contributors", + "label": [ + "140396857", + "189348574", + "81587897", + "25688753", + "56666940", + "56739046", + "136764020", + "117447612" + ], + "author": [ + "2047072155", + "2136878537" + ], + "reference": [ + "90447038", + "1658908529", + "1901723754", + "1969429349", + "1984402149", + "1984529712", + "2008107570", + "2021436267", + "2079338956", + "2083582553", + "2096061896", + "2109125971", + "2117329005", + "2118435948", + "2123945507", + "2153273777", + "2333291448", + "2900509361" + ], + "abstract": "developer documentation helps developers learn frameworks and libraries to better understand how documentation in open source projects is created and maintained we performed a qualitative study in which we interviewed core contributors who wrote developer documentation and developers who read documentation in addition we studied the evolution of 19 documents by analyzing more than 1500 document revisions we identified the decisions that contributors make the factors influencing these decisions and the consequences for the project among many findings we observed how working on the documentation could improve the code quality and how constant interaction with the projects community positively impacted the documentation", + "title_raw": "Creating and evolving developer documentation: understanding the decisions of open source contributors", + "abstract_raw": "Developer documentation helps developers learn frameworks and libraries. 
To better understand how documentation in open source projects is created and maintained, we performed a qualitative study in which we interviewed core contributors who wrote developer documentation and developers who read documentation. In addition, we studied the evolution of 19 documents by analyzing more than 1500 document revisions. We identified the decisions that contributors make, the factors influencing these decisions and the consequences for the project. Among many findings, we observed how working on the documentation could improve the code quality and how constant interaction with the projects' community positively impacted the documentation.", + "link": "https://www.semanticscholar.org/paper/a01c6b8cc13c0e168f2014701ed5556248c36571", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2169970157", + "venue": "1174403976", + "year": "2010", + "title": "collaborative reliability prediction of service oriented systems", + "label": [ + "35578498", + "37836645", + "554579003", + "57041688", + "76518257", + "110875604", + "117447612" + ], + "author": [ + "2945622244", + "2227744130" + ], + "reference": [ + "124052719", + "156721372", + "239964209", + "281665770", + "1554758995", + "1573340842", + "1764952320", + "1947095868", + "1965810726", + "1983936084", + "2015699956", + "2018136058", + "2042281163", + "2071427291", + "2096762662", + "2101267836", + "2102599712", + "2106788000", + "2110325612", + "2114821336", + "2115305047", + "2123476762", + "2124172342", + "2124591829", + "2126087831", + "2130551617", + "2138178819", + "2141880113", + "2142449826", + "2145572818", + "2146444209", + "2149614712", + "2155870701", + "2155911755", + "2166549471", + "2166751683", + "3093577056" + ], + "abstract": "service oriented architecture soa is becoming a major software framework for building complex distributed systems reliability of the service oriented systems heavily depends on the remote web services as well as the unpredictable internet 
designing effective and accurate reliability prediction approaches for the service oriented systems has become an important research issue in this paper we propose a collaborative reliability prediction approach which employs the past failure data of other similar users to predict the web service reliability for the current user without requiring real world web service invocations we also present a user collaborative failure data sharing mechanism and a reliability composition model for the service oriented systems large scale real world experiments are conducted and the experimental results show that our collaborative reliability prediction approach obtains better reliability prediction accuracy than other approaches", + "title_raw": "Collaborative reliability prediction of service-oriented systems", + "abstract_raw": "Service-oriented architecture (SOA) is becoming a major software framework for building complex distributed systems. Reliability of the service-oriented systems heavily depends on the remote Web services as well as the unpredictable Internet. Designing effective and accurate reliability prediction approaches for the service-oriented systems has become an important research issue. In this paper, we propose a collaborative reliability prediction approach, which employs the past failure data of other similar users to predict the Web service reliability for the current user, without requiring real-world Web service invocations. We also present a user-collaborative failure data sharing mechanism and a reliability composition model for the service-oriented systems. 
Large-scale real-world experiments are conducted and the experimental results show that our collaborative reliability prediction approach obtains better reliability prediction accuracy than other approaches.", + "link": "https://www.semanticscholar.org/paper/3acdce3baa47c6296e7730730c1170b56fb31133", + "scraped_abstract": null, + "citation_best": 245 + }, + { + "paper": "2127190390", + "venue": "1174403976", + "year": "2010", + "title": "a degree of knowledge model to capture source code familiarity", + "label": [ + "137287247", + "51929080", + "140396857", + "529173508", + "548217200", + "150292731", + "149091818", + "2777904410", + "121957198", + "115903868", + "43126263" + ], + "author": [ + "3187116255", + "2232004635", + "2100357700", + "2342860276" + ], + "reference": [ + "1576040511", + "2039142846", + "2050187629", + "2055821246", + "2070321219", + "2083973186", + "2096624252", + "2099332975", + "2104342335", + "2110957405", + "2113422810", + "2130344546", + "2134876531", + "2142685591", + "3148527572" + ], + "abstract": "the size and high rate of change of source code comprising a software system make it difficult for software developers to keep up with who on the team knows about particular parts of the code existing approaches to this problem are based solely on authorship of code in this paper we present data from two professional software development teams to show that both authorship and interaction information about how a developer interacts with the code are important in characterizing a developer s knowledge of code we introduce the degree of knowledge model that computes automatically a real value for each source code element based on both authorship and interaction information we show that the degree of knowledge model can provide better results than an existing expertise finding approach and also report on case studies of the use of the model to support knowledge transfer and to identify changes of interest", + "title_raw": "A 
degree-of-knowledge model to capture source code familiarity", + "abstract_raw": "The size and high rate of change of source code comprising a software system make it difficult for software developers to keep up with who on the team knows about particular parts of the code. Existing approaches to this problem are based solely on authorship of code. In this paper, we present data from two professional software development teams to show that both authorship and interaction information about how a developer interacts with the code are important in characterizing a developer's knowledge of code. We introduce the degree-of-knowledge model that computes automatically a real value for each source code element based on both authorship and interaction information. We show that the degree-of-knowledge model can provide better results than an existing expertise finding approach and also report on case studies of the use of the model to support knowledge transfer and to identify changes of interest.", + "link": "https://www.semanticscholar.org/paper/ee81a50eae3a2d1b62ee5951aeb20f9118ae962e", + "scraped_abstract": null, + "citation_best": 123 + }, + { + "paper": "2122987719", + "venue": "1174403976", + "year": "2010", + "title": "a machine learning approach for tracing regulatory codes to product specific requirements", + "label": [ + "152752567", + "138673069", + "63000827", + "101317890", + "2777904410", + "115903868", + "153876917" + ], + "author": [ + "231137837", + "2092735656", + "2030548913", + "2230458712" + ], + "reference": [ + "49486804", + "1491587310", + "1833785989", + "1965177473", + "1969353325", + "1991334006", + "2034927834", + "2045895677", + "2098876286", + "2099175080", + "2102029756", + "2103107119", + "2107930672", + "2108155297", + "2110008837", + "2118202700", + "2127626360", + "2129559874", + "2142809450", + "2148484494", + "2149072143", + "2163960678", + "2166528905", + "2166568068", + "2167498005", + "2168278514", + "2506353248", + "3108602244" + ], 
+ "abstract": "regulatory standards designed to protect the safety security and privacy of the public govern numerous areas of software intensive systems project personnel must therefore demonstrate that an as built system meets all relevant regulatory codes current methods for demonstrating compliance rely either on after the fact audits which can lead to significant refactoring when regulations are not met or else require analysts to construct and use traceability matrices to demonstrate compliance manual tracing can be prohibitively time consuming however automated trace retrieval methods are not very effective due to the vocabulary mismatches that often occur between regulatory codes and product level requirements this paper introduces and evaluates two machine learning methods designed to improve the quality of traces generated between regulatory codes and product level requirements the first approach uses manually created traceability matrices to train a trace classifier while the second approach uses web mining techniques to reconstruct the original trace query the techniques were evaluated against security regulations from the usa government s health insurance privacy and portability act hipaa traced against ten healthcare related requirements specifications results demonstrated improvements for the subset of hipaa regulations that exhibited high fan out behavior across the requirements datasets", + "title_raw": "A machine learning approach for tracing regulatory codes to product specific requirements", + "abstract_raw": "Regulatory standards, designed to protect the safety, security, and privacy of the public, govern numerous areas of software intensive systems. Project personnel must therefore demonstrate that an as-built system meets all relevant regulatory codes. 
Current methods for demonstrating compliance rely either on after-the-fact audits, which can lead to significant refactoring when regulations are not met, or else require analysts to construct and use traceability matrices to demonstrate compliance. Manual tracing can be prohibitively time-consuming; however automated trace retrieval methods are not very effective due to the vocabulary mismatches that often occur between regulatory codes and product level requirements. This paper introduces and evaluates two machine-learning methods, designed to improve the quality of traces generated between regulatory codes and product level requirements. The first approach uses manually created traceability matrices to train a trace classifier, while the second approach uses web-mining techniques to reconstruct the original trace query. The techniques were evaluated against security regulations from the USA government's Health Insurance Privacy and Portability Act (HIPAA) traced against ten healthcare related requirements specifications. 
Results demonstrated improvements for the subset of HIPAA regulations that exhibited high fan-out behavior across the requirements datasets.", + "link": "https://www.semanticscholar.org/paper/c95edc23729a5d6dafd989eca89c33c26a7248da", + "scraped_abstract": null, + "citation_best": 180 + }, + { + "paper": "2109039735", + "venue": "1174403976", + "year": "2010", + "title": "a cut off approach for bounded verification of parameterized systems", + "label": [ + "106663253", + "165464430", + "136643341", + "55439883", + "110251889", + "111498074", + "80444323" + ], + "author": [ + "2144454568", + "2111054303" + ], + "reference": [ + "1488858096", + "1497571013", + "1509819701", + "1561710889", + "1561867409", + "1589760516", + "1854314632", + "1855789069", + "1913196744", + "1967174286", + "1982900911", + "2000210358", + "2036526834", + "2038221635", + "2039812824", + "2042087007", + "2051020320", + "2055081868", + "2106001218", + "2123910665", + "2129073086", + "2147269402", + "2152599613" + ], + "abstract": "the features in multi threaded programs such as recursion dynamic creation and communication pose a great challenge to formal verification a widely adopted strategy is to verify tentatively a system with a smaller size by limiting the depth of recursion or the number of replicated processes to find errors without ensuring the full correctness the model checking of parameterized systems a parametric infinite family of systems is to decide if a property holds in every size instance there has been a quest for finding cut offs for the verification of parameterized systems the basic idea is to find a cut off on the number of replicated processes or on the maximum length of paths needed to prove a property standing a chance of improving verification efficiency substantially if one can come up with small or modest cut offs in this paper a novel approach called forward bounded reachability analysis fbra based upon the cut off on the maximum lengths of paths is proposed for 
the verification of parameterized systems experimental results show that verification efficiency has been significantly improved as a result of the introduction of our new cut offs", + "title_raw": "A cut-off approach for bounded verification of parameterized systems", + "abstract_raw": "The features in multi-threaded programs, such as recursion, dynamic creation and communication, pose a great challenge to formal verification. A widely adopted strategy is to verify tentatively a system with a smaller size, by limiting the depth of recursion or the number of replicated processes, to find errors without ensuring the full correctness. The model checking of parameterized systems, a parametric infinite family of systems, is to decide if a property holds in every size instance. There has been a quest for finding cut-offs for the verification of parameterized systems. The basic idea is to find a cut-off on the number of replicated processes or on the maximum length of paths needed to prove a property, standing a chance of improving verification efficiency substantially if one can come up with small or modest cut-offs. In this paper, a novel approach, called Forward Bounded Reachability Analysis (FBRA), based upon the cut-off on the maximum lengths of paths is proposed for the verification of parameterized systems. 
Experimental results show that verification efficiency has been significantly improved as a result of the introduction of our new cut-offs.", + "link": "https://www.semanticscholar.org/paper/03a8db633a8dc7663f156f277a90b0ee46aadc32", + "scraped_abstract": null, + "citation_best": 18 + }, + { + "paper": "2118155805", + "venue": "1174403976", + "year": "2010", + "title": "test generation through programming in udita", + "label": [ + "152752567", + "548217200", + "2779639559", + "199360897", + "162319229", + "25621077", + "128099668", + "117447612" + ], + "author": [ + "1986514872", + "195336981", + "2081947341", + "2103685797", + "37798284", + "1702668839" + ], + "reference": [ + "109452506", + "116108159", + "157156687", + "202942141", + "1480909796", + "1490038304", + "1501207415", + "1517165545", + "1535443236", + "1547793878", + "1551588008", + "1591601664", + "1608825581", + "1710734607", + "1720848645", + "1847665756", + "1920006546", + "1979812412", + "1982395839", + "1992105245", + "2009489720", + "2012997183", + "2058224907", + "2065948900", + "2074500093", + "2074888021", + "2083559580", + "2096449544", + "2101512909", + "2103072691", + "2104993088", + "2108792311", + "2110311336", + "2117058582", + "2119717320", + "2128012547", + "2130729525", + "2132210624", + "2132897303", + "2134291258", + "2139498310", + "2146178292", + "2147647812", + "2152949134", + "2153887189", + "2158248180", + "2159933174", + "2160140538", + "2162120832", + "2163499368", + "2166439419", + "2170682382", + "2171480813", + "2171683519", + "3004540582" + ], + "abstract": "we present an approach for describing tests using non deterministic test generation programs to write such programs we introduce udita a java based language with non deterministic choice operators and an interface for generating linked structures we also describe new algorithms that generate concrete tests by efficiently exploring the space of all executions of non deterministic udita programs we implemented our 
approach and incorporated it into the official publicly available repository of java pathfinder jpf a popular tool for verifying java programs we evaluate our technique by generating tests for data structures refactoring engines and jpf itself our experiments show that test generation using udita is faster and leads to test descriptions that are easier to write than in previous frameworks moreover the novel execution mechanism of udita is essential for making test generation feasible using udita we have discovered a number of bugs in eclipse netbeans sun javac and jpf", + "title_raw": "Test generation through programming in UDITA", + "abstract_raw": "We present an approach for describing tests using non-deterministic test generation programs. To write such programs, we introduce UDITA, a Java-based language with non-deterministic choice operators and an interface for generating linked structures. We also describe new algorithms that generate concrete tests by efficiently exploring the space of all executions of non-deterministic UDITA programs. We implemented our approach and incorporated it into the official, publicly available repository of Java PathFinder (JPF), a popular tool for verifying Java programs. We evaluate our technique by generating tests for data structures, refactoring engines, and JPF itself. Our experiments show that test generation using UDITA is faster and leads to test descriptions that are easier to write than in previous frameworks. Moreover, the novel execution mechanism of UDITA is essential for making test generation feasible. 
Using UDITA, we have discovered a number of bugs in Eclipse, NetBeans, Sun javac, and JPF.", + "link": "https://www.semanticscholar.org/paper/6ce09046328ede245bbedb340988123d083c12db", + "scraped_abstract": null, + "citation_best": 160 + }, + { + "paper": "169464374", + "venue": "1203999783", + "year": "2011", + "title": "connecting the dots between news articles", + "label": [ + "40140605", + "16963264", + "136764020" + ], + "author": [ + "1275334909", + "1988556028" + ], + "reference": [ + "600389053", + "1968532429", + "2040466507", + "2061820396", + "2066636486", + "2079234336", + "2089185211", + "2134634502", + "2138621811", + "2147674242", + "2291530570" + ], + "abstract": "the process of extracting useful knowledge from large datasets has become one of the most pressing problems in today s society the problem spans entire sectors from scientists to intelligence analysts and web users all of whom are constantly struggling to keep up with the larger and larger amounts of content published every day with this much data it is often easy to miss the big picture in this paper we investigate methods for automatically connecting the dots providing a structured easy way to navigate within a new topic and discover hidden connections we focus on the news domain given two news articles our system automatically finds a coherent chain linking them together for example it can recover the chain of events leading from the decline of home prices 2007 to the health care debate 2009 we formalize the characteristics of a good chain and provide efficient algorithms to connect two fixed endpoints we incorporate user feedback into our framework allowing the stories to be refined and personalized finally we evaluate our algorithm over real news data our user studies demonstrate the algorithm s effectiveness in helping users understanding the news", + "title_raw": "Connecting the dots between news articles", + "abstract_raw": "The process of extracting useful knowledge from large 
datasets has become one of the most pressing problems in today's society. The problem spans entire sectors, from scientists to intelligence analysts and web users, all of whom are constantly struggling to keep up with the larger and larger amounts of content published every day. With this much data, it is often easy to miss the big picture.\r\n\r\nIn this paper, we investigate methods for automatically connecting the dots - providing a structured, easy way to navigate within a new topic and discover hidden connections. We focus on the news domain: given two news articles, our system automatically finds a coherent chain linking them together. For example, it can recover the chain of events leading from the decline of home prices (2007) to the health-care debate (2009).\r\n\r\nWe formalize the characteristics of a good chain and provide efficient algorithms to connect two fixed endpoints. We incorporate user feedback into our framework, allowing the stories to be refined and personalized. Finally, we evaluate our algorithm over real news data. 
Our user studies demonstrate the algorithm's effectiveness in helping users understanding the news.", + "link": "https://www.semanticscholar.org/paper/a351b59f062e49d6a97a13c90df4fe88bace14d0", + "scraped_abstract": null, + "citation_best": 3 + }, + { + "paper": "2180590495", + "venue": "1203999783", + "year": "2011", + "title": "large linear classification when data cannot fit in memory", + "label": [ + "152896618", + "139532973", + "124101348", + "2778770139", + "2780479914", + "98045186", + "58489278" + ], + "author": [ + "2149528706", + "2148022289", + "2137603918", + "2168176072" + ], + "reference": [ + "101697532", + "2030188598", + "2035720976", + "2099262739", + "2118585731", + "2139224857", + "2142623206", + "2153635508", + "2165966284", + "2166706236", + "2912934387" + ], + "abstract": "linear classification is a useful tool for dealing with large scale data in applications such as document classification and natural language processing recent developments of linear classification have shown that the training process can be efficiently conducted however when the data size exceeds the memory capacity most training methods suffer from very slow convergence due to the severe disk swapping although some methods have attempted to handle such a situation they are usually too complicated to support some important functions such as parameter selection in this paper we introduce a block minimization framework for data larger than memory under the framework a solver splits data into blocks and stores them into separate files then at each time the solver trains a data block loaded from disk although the framework is simple the experimental results show that it effectively handles a data set 20 times larger than the memory capacity", + "title_raw": "Large linear classification when data cannot fit in memory", + "abstract_raw": "Linear classification is a useful tool for dealing with large-scale data in applications such as document classification and natural language 
processing. Recent developments of linear classification have shown that the training process can be efficiently conducted. However, when the data size exceeds the memory capacity, most training methods suffer from very slow convergence due to the severe disk swapping. Although some methods have attempted to handle such a situation, they are usually too complicated to support some important functions such as parameter selection. In this paper, we introduce a block minimization framework for data larger than memory. Under the framework, a solver splits data into blocks and stores them into separate files. Then, at each time, the solver trains a data block loaded from disk. Although the framework is simple, the experimental results show that it effectively handles a data set 20 times larger than the memory capacity.", + "link": "https://www.semanticscholar.org/paper/157f07382e056055167ae7204fb10985c3bf18e2", + "scraped_abstract": null, + "citation_best": 3 + }, + { + "paper": "2911611915", + "venue": "1158363782", + "year": "2010", + "title": "reverse traceroute", + "label": [ + "147494362", + "31258907", + "2780300890", + "22041718", + "2775896111", + "157497606", + "98980195", + "93996380", + "110875604" + ], + "author": [ + "7409492", + "1227894141", + "1959489330", + "2194348062", + "2115429334", + "2914639568", + "2148869393", + "2088689873" + ], + "reference": [ + "130289478", + "1512040076", + "1539009946", + "1542881316", + "1574986862", + "1576815670", + "1938118830", + "1971529969", + "1985977780", + "2031094253", + "2075548827", + "2090223505", + "2098421656", + "2107754621", + "2110980583", + "2115789261", + "2115819049", + "2119127927", + "2122838521", + "2123491545", + "2127904496", + "2130384722", + "2133656166", + "2138194734", + "2139542924", + "2145327498", + "2145343762", + "2146934567", + "2161117629", + "2167868809", + "2295430786", + "2295569936", + "2893848229" + ], + "abstract": "traceroute is the most widely used internet diagnostic tool 
today network operators use it to help identify routing failures poor performance and router misconfigurations researchers use it to map the internet predict performance geolocate routers and classify the performance of isps however traceroute has a fundamental limitation that affects all these applications it does not provide reverse path information although various public traceroute servers across the internet provide some visibility no general method exists for determining a reverse path from an arbitrary destination in this paper we address this longstanding limitation by building a reverse traceroute system our system provides the same information as traceroute but for the reverse path and it works in the same case as traceroute when the user may lack control of the destination we use a variety of measurement techniques to incrementally piece together the path from the destination back to the source we deploy our system on planetlab and compare reverse traceroute paths with traceroutes issued from the destinations in the median case our tool finds 87 of the hops seen in a directly measured traceroute along the same path versus only 38 if one simply assumes the path is symmetric a common fallback given the lack of available tools we then illustrate how we can use our reverse traceroute system to study previously unmeasurable aspects of the internet we present a case study of how a content provider could use our tool to troubleshoot poor path performance we uncover more than a thousand peer to peer as links invisible to current topology mapping efforts and we measure the latency of individual backbone links with average error under a millisecond", + "title_raw": "Reverse traceroute", + "abstract_raw": "Traceroute is the most widely used Internet diagnostic tool today. Network operators use it to help identify routing failures, poor performance, and router misconfigurations. 
Researchers use it to map the Internet, predict performance, geolocate routers, and classify the performance of ISPs. However, traceroute has a fundamental limitation that affects all these applications: it does not provide reverse path information. Although various public traceroute servers across the Internet provide some visibility, no general method exists for determining a reverse path from an arbitrary destination.\r\n\r\nIn this paper, we address this longstanding limitation by building a reverse traceroute system. Our system provides the same information as traceroute, but for the reverse path, and it works in the same case as traceroute, when the user may lack control of the destination. We use a variety of measurement techniques to incrementally piece together the path from the destination back to the source. We deploy our system on PlanetLab and compare reverse traceroute paths with traceroutes issued from the destinations. In the median case our tool finds 87% of the hops seen in a directly measured traceroute along the same path, versus only 38% if one simply assumes the path is symmetric, a common fallback given the lack of available tools. 
We then illustrate how we can use our reverse traceroute system to study previously unmeasurable aspects of the Internet: we present a case study of how a content provider could use our tool to troubleshoot poor path performance, we uncover more than a thousand peer-to-peer AS links invisible to current topology mapping efforts, and we measure the latency of individual backbone links with average error under a millisecond.", + "link": "https://www.semanticscholar.org/paper/8dd85bfa879820641cca911d052ed8814d145b98", + "scraped_abstract": null, + "citation_best": 133 + }, + { + "paper": "2145467766", + "venue": "1185109434", + "year": "2010", + "title": "efficient system enforced deterministic parallelism", + "label": [ + "63540848", + "2777127024", + "111919701", + "2779991843", + "168065819", + "176181172", + "204156049", + "78766204", + "120314980", + "137364921" + ], + "author": [ + "2097463922", + "2122125467", + "2151976910", + "2347146009" + ], + "reference": [ + "190062532", + "1500546894", + "1522250664", + "1597755753", + "1605226475", + "1858287302", + "1877496576", + "1887412317", + "1931688030", + "1965462925", + "1979117305", + "1983587324", + "2002915275", + "2048384964", + "2054739713", + "2055116083", + "2073720444", + "2073961002", + "2078775767", + "2096403564", + "2101161997", + "2101431901", + "2104413677", + "2113751407", + "2114488210", + "2116021422", + "2117260615", + "2118783093", + "2119636671", + "2122532513", + "2122960384", + "2126087831", + "2133373086", + "2134440791", + "2138180780", + "2141253292", + "2142892618", + "2145021036", + "2145087641", + "2149984854", + "2152465173", + "2155750598", + "2158794422", + "2164945245", + "2167756215", + "2169875292", + "2171956059", + "2176864362", + "2281433966", + "2293881818", + "2769656678" + ], + "abstract": "deterministic execution offers many benefits for debugging fault tolerance and security current methods of executing parallel programs deterministically however often incur high costs 
allow misbehaved software to defeat repeatability and transform time dependent races into input or path dependent races without eliminating them we introduce a new parallel programming model addressing these issues and use determinator a proof of concept os to demonstrate the model s practicality determinator s microkernel api provides only shared nothing address spaces and deterministic interprocess communication primitives to make execution of all unprivileged code well behaved or not precisely repeatable atop this microkernel determinator s user level runtime adapts optimistic replication techniques to offer a private workspace model for both thread level and process level parallel programing this model avoids the introduction of read write data races and converts write write races into reliably detected conflicts coarse grained parallel benchmarks perform and scale comparably to nondeterministic systems on both multicore pcs and across nodes in a distributed cluster", + "title_raw": "Efficient system-enforced deterministic parallelism", + "abstract_raw": "Deterministic execution offers many benefits for debugging, fault tolerance, and security. Current methods of executing parallel programs deterministically, however, often incur high costs, allow misbehaved software to defeat repeatability, and transform time-dependent races into input- or path-dependent races without eliminating them. We introduce a new parallel programming model addressing these issues, and use Determinator, a proof-of-concept OS, to demonstrate the model's practicality. Determinator's microkernel API provides only \"shared-nothing\" address spaces and deterministic interprocess communication primitives to make execution of all unprivileged code--well-behaved or not-- precisely repeatable. Atop this microkernel, Determinator's user-level runtime adapts optimistic replication techniques to offer a private workspace model for both thread-level and process-level parallel programing. 
This model avoids the introduction of read/write data races, and converts write/write races into reliably-detected conflicts. Coarse-grained parallel benchmarks perform and scale comparably to nondeterministic systems, on both multicore PCs and across nodes in a distributed cluster.", + "link": "https://www.semanticscholar.org/paper/ffb0eff13d21eefe6406a837c48cee9b3711a6b7", + "scraped_abstract": null, + "citation_best": 1 + }, + { + "paper": "1893312510", + "venue": "1185109434", + "year": "2010", + "title": "the turtles project design and implementation of nested virtualization", + "label": [ + "13062989", + "112904061", + "47878483", + "513985346", + "25344961", + "111919701", + "67925016", + "68793194", + "34760210" + ], + "author": [ + "2015078445", + "2963487603", + "2471417143", + "2079346440", + "2161112051", + "2294519711", + "2274293006", + "2085787179", + "775818885" + ], + "reference": [ + "23711711", + "41776504", + "103986934", + "1485906286", + "1492484099", + "1522250664", + "1604135736", + "1604656800", + "1641762327", + "1756783600", + "1829813581", + "1908051842", + "1971739358", + "1986740168", + "1989821390", + "1997269120", + "2003309944", + "2012752667", + "2029224396", + "2032086014", + "2053443819", + "2053891347", + "2089661946", + "2090076638", + "2096165352", + "2112177563", + "2117648703", + "2120715680", + "2121542813", + "2123022344", + "2125895608", + "2131726714", + "2135384752", + "2141253292", + "2149886445", + "2149911573", + "2151182669", + "2152132676", + "2156000710", + "2156368296", + "2165890524", + "2167759172", + "2168075869", + "2170332890", + "2219028763", + "2978655370", + "3005899844" + ], + "abstract": "in classical machine virtualization a hypervisor runs multiple operating systems simultaneously each on its own virtual machine in nested virtualization a hypervisor can run multiple other hypervisors with their associated virtual machines as operating systems gain hypervisor functionality microsoft windows 7 already 
runs windows xp in a virtual machine nested virtualization will become necessary in hypervisors that wish to host them we present the design implementation analysis and evaluation of high performance nested virtualization on intel x86 based systems the turtles project which is part of the linux kvm hypervisor runs multiple unmodified hypervisors e g kvm and vmware and operating systems e g linux and windows despite the lack of architectural support for nested virtualization in the x86 architecture it can achieve performance that is within 6 8 of single level non nested virtualization for common workloads through multi dimensional paging for mmu virtualization and multi level device assignment for i o virtualization", + "title_raw": "The turtles project: design and implementation of nested virtualization", + "abstract_raw": "In classical machine virtualization, a hypervisor runs multiple operating systems simultaneously, each on its own virtual machine. In nested virtualization, a hypervisor can run multiple other hypervisors with their associated virtual machines. As operating systems gain hypervisor functionality--Microsoft Windows 7 already runs Windows XP in a virtual machine--nested virtualization will become necessary in hypervisors that wish to host them. We present the design, implementation, analysis, and evaluation of high-performance nested virtualization on Intel x86-based systems. The Turtles project, which is part of the Linux/KVM hypervisor, runs multiple unmodified hypervisors (e.g., KVM and VMware) and operating systems (e.g., Linux and Windows). 
Despite the lack of architectural support for nested virtualization in the x86 architecture, it can achieve performance that is within 6-8% of single-level (non-nested) virtualization for common workloads, through multi-dimensional paging for MMU virtualization and multi-level device assignment for I/O virtualization.", + "link": "https://www.semanticscholar.org/paper/0abc3e83ccd6e685f8d0299f24f03ae28f4c2459", + "scraped_abstract": null, + "citation_best": 239 + }, + { + "paper": "2141729404", + "venue": "1127352206", + "year": "2010", + "title": "safe to the last instruction automated verification of a type safe operating system", + "label": [ + "104949639", + "50831359", + "55439883", + "111919701", + "105122174", + "41661131", + "28180684", + "138101251", + "44779574", + "199360897", + "2775836774", + "504728807", + "2779960034" + ], + "author": [ + "2119417256", + "2277508068" + ], + "reference": [ + "1480909796", + "1965760540", + "2020136418", + "2083469471", + "2084715426", + "2087832144", + "2097166325", + "2097889836", + "2103960969", + "2104963439", + "2106115112", + "2108923990", + "2122142025", + "2131238817", + "2136310957", + "2137186143", + "2137484994", + "2150401476", + "2168075869", + "2504100651", + "3010549432" + ], + "abstract": "typed assembly language tal and hoare logic can verify the absence of many kinds of errors in low level code we use tal and hoare logic to achieve highly automated static verification of the safety of a new operating system called verve our techniques and tools mechanically verify the safety of every assembly language instruction in the operating system run time system drivers and applications in fact every part of the system software except the boot loader verve consists of a nucleus that provides primitive access to hardware and memory a kernel that builds services on top of the nucleus and applications that run on top of the kernel the nucleus written in verified assembly language implements allocation garbage 
collection multiple stacks interrupt handling and device access the kernel written in c and compiled to tal builds higher level services such as preemptive threads on top of the nucleus a tal checker verifies the safety of the kernel and applications a hoare style verifier with an automated theorem prover verifies both the safety and correctness of the nucleus verve is to the best of our knowledge the first operating system mechanically verified to guarantee both type and memory safety more generally verve s approach demonstrates a practical way to mix high level typed code with low level untyped code in a verifiably safe manner", + "title_raw": "Safe to the last instruction: automated verification of a type-safe operating system", + "abstract_raw": "Typed assembly language (TAL) and Hoare logic can verify the absence of many kinds of errors in low-level code. We use TAL and Hoare logic to achieve highly automated, static verification of the safety of a new operating system called Verve. Our techniques and tools mechanically verify the safety of every assembly language instruction in the operating system, run-time system, drivers, and applications (in fact, every part of the system software except the boot loader). Verve consists of a \"Nucleus\" that provides primitive access to hardware and memory, a kernel that builds services on top of the Nucleus, and applications that run on top of the kernel. The Nucleus, written in verified assembly language, implements allocation, garbage collection, multiple stacks, interrupt handling, and device access. The kernel, written in C# and compiled to TAL, builds higher-level services, such as preemptive threads, on top of the Nucleus. A TAL checker verifies the safety of the kernel and applications. A Hoare-style verifier with an automated theorem prover verifies both the safety and correctness of the Nucleus. 
Verve is, to the best of our knowledge, the first operating system mechanically verified to guarantee both type and memory safety. More generally, Verve's approach demonstrates a practical way to mix high-level typed code with low-level untyped code in a verifiably safe manner.", + "link": "https://www.semanticscholar.org/paper/7b6b5834f926c9994ca56aeafd859c93451c72c8", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2103126020", + "venue": "1184151122", + "year": "2010", + "title": "an optimal algorithm for the distinct elements problem", + "label": [ + "199845137", + "174809319", + "2778484313", + "11413529", + "157692150" + ], + "author": [ + "2114289750", + "2132089697", + "2142501412" + ], + "reference": [ + "3397180", + "127380945", + "1522055873", + "1525949272", + "1571874655", + "1577072106", + "1785933978", + "1965996575", + "1984566373", + "1992363839", + "1997988698", + "1998272044", + "2002576896", + "2008159385", + "2025051251", + "2047085757", + "2052207834", + "2057529271", + "2071179368", + "2074286662", + "2080745194", + "2084757499", + "2089066317", + "2111678491", + "2112400233", + "2118224498", + "2126105096", + "2131709403", + "2132822431", + "2134786002", + "2139276812", + "2144697110", + "2144982963", + "2153329411", + "2160681854", + "2295428206" + ], + "abstract": "we give the first optimal algorithm for estimating the number of distinct elements in a data stream closing a long line of theoretical research on this problem begun by flajolet and martin in their seminal paper in focs 1983 this problem has applications to query optimization internet routing network topology and data mining for a stream of indices in 1 n our algorithm computes a 1 e approximation using an optimal o 1 e 2 log n bits of space with 2 3 success probability where 0 we also give an algorithm to estimate the hamming norm of a stream a generalization of the number of distinct elements which is useful in data cleaning packet tracing and database 
auditing our algorithm uses nearly optimal space and has optimal o 1 update and reporting times", + "title_raw": "An optimal algorithm for the distinct elements problem", + "abstract_raw": "We give the first optimal algorithm for estimating the number of distinct elements in a data stream, closing a long line of theoretical research on this problem begun by Flajolet and Martin in their seminal paper in FOCS 1983. This problem has applications to query optimization, Internet routing, network topology, and data mining. For a stream of indices in {1,...,n}, our algorithm computes a (1 \u00b1 e)-approximation using an optimal O(1/e-2 + log(n)) bits of space with 2/3 success probability, where 0 We also give an algorithm to estimate the Hamming norm of a stream, a generalization of the number of distinct elements, which is useful in data cleaning, packet tracing, and database auditing. Our algorithm uses nearly optimal space, and has optimal O(1) update and reporting times.", + "link": "https://www.semanticscholar.org/paper/e67aed9b0ebc982d7657a8401058adccc4743217", + "scraped_abstract": null, + "citation_best": 330 + }, + { + "paper": "1985225657", + "venue": "1163618098", + "year": "2010", + "title": "scifi a system for secure face identification", + "label": [ + "18396474", + "33884865", + "38652104", + "13652956", + "31510193", + "178489894", + "123201435", + "79403827" + ], + "author": [ + "219502362", + "259477981", + "2058581896", + "299608087" + ], + "reference": [ + "62644768", + "179458199", + "1488338708", + "1527581375", + "1895164037", + "2006793117", + "2013623332", + "2023951105", + "2033419168", + "2088492763", + "2103958416", + "2107002953", + "2110132168", + "2118014004", + "2118179522", + "2118774738", + "2125587922", + "2132172731", + "2133203592", + "2138451337", + "2144143728", + "2144596760", + "2146813141", + "2151103935", + "2158054470", + "2163808566", + "2164598857", + "2168829737", + "2169622093", + "2169740160", + "2175582831", + 
"2179426874", + "2233233025", + "2535446614", + "2616465717", + "2911784370", + "3141353839" + ], + "abstract": "we introduce scifi a system for secure computation of face identification the system performs face identification which compares faces of subjects with a database of registered faces the identification is done in a secure way which protects both the privacy of the subjects and the confidentiality of the database a specific application of scifi is reducing the privacy impact of camera based surveillance in that scenario scifi would be used in a setting which contains a server which has a set of faces of suspects and client machines which might be cameras acquiring images in public places the system runs a secure computation of a face recognition algorithm which identifies if an image acquired by a client matches one of the suspects but otherwise reveals no information to neither of the parties our work includes multiple contributions in different areas a new face identification algorithm which is unique in having been specifically designed for usage in secure computation nonetheless the algorithm has face recognition performance comparable to that of state of the art algorithms we ran experiments which show the algorithm to be robust to different viewing conditions such as illumination occlusions and changes in appearance like wearing glasses a secure protocol for computing the new face recognition algorithm in addition since our goal is to run an actual system considerable effort was made to optimize the protocol and minimize its online latency a system scifi which implements a secure computation of the face identification protocol experiments which show that the entire system can run in near real time the secure computation protocol performs a preprocessing of all public key cryptographic operations its online performance therefore mainly depends on the speed of data communication and our experiments show it to be extremely efficient", + "title_raw": 
"SCiFI - A System for Secure Face Identification", + "abstract_raw": "We introduce SCiFI, a system for Secure Computation of Face Identification. The system performs face identification which compares faces of subjects with a database of registered faces. The identification is done in a secure way which protects both the privacy of the subjects and the confidentiality of the database. A specific application of SCiFI is reducing the privacy impact of camera based surveillance. In that scenario, SCiFI would be used in a setting which contains a server which has a set of faces of suspects, and client machines which might be cameras acquiring images in public places. The system runs a secure computation of a face recognition algorithm, which identifies if an image acquired by a client matches one of the suspects, but otherwise reveals no information to neither of the parties. Our work includes multiple contributions in different areas: A new face identification algorithm which is unique in having been specifically designed for usage in secure computation. Nonetheless, the algorithm has face recognition performance comparable to that of state of the art algorithms. We ran experiments which show the algorithm to be robust to different viewing conditions, such as illumination, occlusions, and changes in appearance (like wearing glasses). A secure protocol for computing the new face recognition algorithm. In addition, since our goal is to run an actual system, considerable effort was made to optimize the protocol and minimize its online latency. A system - SCiFI, which implements a secure computation of the face identification protocol. Experiments which show that the entire system can run in near real-time: The secure computation protocol performs a preprocessing of all public-key cryptographic operations. 
Its online performance therefore mainly depends on the speed of data communication, and our experiments show it to be extremely efficient.", + "link": "https://www.semanticscholar.org/paper/b0b6346104cbf878a072da93d49ad6e9f65befaf", + "scraped_abstract": null, + "citation_best": 231 + }, + { + "paper": "2097460929", + "venue": "1152462849", + "year": "2010", + "title": "efficient error estimating coding feasibility and applications", + "label": [ + "153083717", + "158379750", + "56296756", + "108037233", + "76862118", + "152124472", + "83702630", + "79403827" + ], + "author": [ + "2171994361", + "2098692649", + "2171844908", + "2138265396" + ], + "reference": [ + "108253759", + "390075540", + "1517466511", + "1809034209", + "2003531160", + "2006163103", + "2006813606", + "2010813610", + "2011994290", + "2022828605", + "2026391990", + "2041248965", + "2077661079", + "2102373785", + "2108481447", + "2108890528", + "2114909350", + "2125982870", + "2129807739", + "2130306966", + "2141492231", + "2145834330", + "2146076199", + "2147749157", + "2149481390", + "2152121970", + "2155729921", + "2155862841", + "2157936671", + "2158850753", + "2160568005", + "2161024386", + "2161054371", + "2161342511", + "2161822490", + "2293000460", + "2296125569", + "2473845128", + "2914980053", + "2942537621" + ], + "abstract": "motivated by recent emerging systems that can leverage partially correct packets in wireless networks this paper investigates the novel concept of error estimating codes eec without correcting the errors in the packet eec enables the receiver of the packet to estimate the packet s bit error rate which is perhaps the most important meta information of a partially correct packet our eec algorithm provides provable estimation quality with rather low redundancy and computational overhead to demonstrate the utility of eec we exploit and implement eec in two wireless network applications wi fi rate adaptation and real time video streaming our real world experiments show 
that these applications can significantly benefit from eec", + "title_raw": "Efficient error estimating coding: feasibility and applications", + "abstract_raw": "Motivated by recent emerging systems that can leverage partially correct packets in wireless networks, this paper investigates the novel concept of error estimating codes (EEC). Without correcting the errors in the packet, EEC enables the receiver of the packet to estimate the packet's bit error rate, which is perhaps the most important meta-information of a partially correct packet. Our EEC algorithm provides provable estimation quality, with rather low redundancy and computational overhead. To demonstrate the utility of EEC, we exploit and implement EEC in two wireless network applications, Wi-Fi rate adaptation and real-time video streaming. Our real-world experiments show that these applications can significantly benefit from EEC.", + "link": "https://www.semanticscholar.org/paper/b9f3594cdb2db2386077c36d50c594106a24977c", + "scraped_abstract": null, + "citation_best": 21 + }, + { + "paper": "2109913881", + "venue": "1140684652", + "year": "2010", + "title": "assessing the scenic route measuring the value of search trails in web logs", + "label": [ + "189430467", + "557471498", + "2776291640", + "87546605", + "136764020", + "197927960" + ], + "author": [ + "2096583854", + "2228850527" + ], + "reference": [ + "8870360", + "1489084715", + "1500415735", + "1508509952", + "1964729842", + "1973006075", + "1973103963", + "2015720094", + "2047221353", + "2054149722", + "2069235736", + "2093285800", + "2099768249", + "2104217798", + "2108566279", + "2111216736", + "2123937625", + "2125771191", + "2131567213", + "2132314908", + "2133156844", + "2144495946", + "2146143440", + "2147187250", + "2149156280", + "2152725628", + "2153828680", + "2158450083", + "2162583366", + "2163852103", + "2165152490", + "2171593626" + ], + "abstract": "search trails mined from browser or toolbar logs comprise queries and the post 
query pages that users visit implicit endorsements from many trails can be useful for search result ranking where the presence of a page on a trail increases its query relevance follow ing a search trail requires user effort yet little is known about the benefit that users obtain from this activity versus say sticking with the clicked search result or jumping directly to the destination page at the end of the trail in this paper we present a log based study estimating the user value of trail following we compare the relevance topic coverage topic diversity novelty and utility of full trails over that provided by sub trails trail origins landing pages and trail destinations pages where trails end our findings demonstrate significant value to users in following trails especially for certain query types the findings have implications for the design of search systems including trail recommendation systems that display trails on search result pages", + "title_raw": "Assessing the scenic route: measuring the value of search trails in web logs", + "abstract_raw": "Search trails mined from browser or toolbar logs comprise queries and the post-query pages that users visit. Implicit endorsements from many trails can be useful for search result ranking, where the presence of a page on a trail increases its query relevance. Follow-ing a search trail requires user effort, yet little is known about the benefit that users obtain from this activity versus, say, sticking with the clicked search result or jumping directly to the destination page at the end of the trail. In this paper, we present a log-based study estimating the user value of trail following. We compare the relevance, topic coverage, topic diversity, novelty, and utility of full trails over that provided by sub-trails, trail origins (landing pages), and trail destinations (pages where trails end). Our findings demonstrate significant value to users in following trails, especially for certain query types. 
The findings have implications for the design of search systems, including trail recommendation systems that display trails on search result pages.", + "link": "https://www.semanticscholar.org/paper/a97f142557e3645c572a6d144b9db90d97546806", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2152475116", + "venue": "1131589359", + "year": "2010", + "title": "load balancing via random local search in closed and open systems", + "label": [ + "12203424", + "31258907", + "139330139", + "555944384", + "93996380", + "120314980" + ], + "author": [ + "2346785178", + "2063533916", + "2151347674", + "2084777258", + "2147126605" + ], + "reference": [ + "76060997", + "182704713", + "614186738", + "1526250527", + "1966557675", + "1973403707", + "2017552692", + "2021321016", + "2065284761", + "2072951587", + "2109055312", + "2113692632", + "2117702591", + "2121141468", + "2128186778", + "2136388527", + "2139770509", + "2153541270", + "2169088379", + "2199385070", + "2216069195", + "3103388966" + ], + "abstract": "in this paper we analyze the performance of random load resampling and migration strategies in parallel server systems clients initially attach to an arbitrary server but may switch servers independently at random instants of time in an attempt to improve their service rate this approach to load balancing contrasts with traditional approaches where clients make smart server selections upon arrival e g join the shortest queue policy and variants thereof load resampling is particularly relevant in scenarios where clients cannot predict the load of a server before being actually attached to it an important example is in wireless spectrum sharing where clients try to share a set of frequency bands in a distributed manner we first analyze the natural random local search rls strategy under this strategy after sampling a new server randomly clients only switch to it if their service rate is improved in closed systems where the client population is fixed we 
derive tight estimates of the time it takes under rls strategy to balance the load across servers we then study open systems where clients arrive according to a random process and leave the system upon service completion in this scenario we analyze how client migrations within the system interact with the system dynamics induced by client arrivals and departures we compare the load aware rls strategy to a load oblivious strategy in which clients just randomly switch server without accounting for the server loads surprisingly we show that both load oblivious and load aware strategies stabilize the system whenever this is at all possible we further demonstrate using large system asymptotics that the average client sojourn time under the load oblivious strategy is not considerably reduced when clients apply smarter load aware strategies", + "title_raw": "Load balancing via random local search in closed and open systems", + "abstract_raw": "In this paper, we analyze the performance of random load resampling and migration strategies in parallel server systems. Clients initially attach to an arbitrary server, but may switch servers independently at random instants of time in an attempt to improve their service rate. This approach to load balancing contrasts with traditional approaches where clients make smart server selections upon arrival (e.g., Join-the-Shortest-Queue policy and variants thereof). Load resampling is particularly relevant in scenarios where clients cannot predict the load of a server before being actually attached to it. An important example is in wireless spectrum sharing where clients try to share a set of frequency bands in a distributed manner. We first analyze the natural Random Local Search (RLS) strategy. Under this strategy, after sampling a new server randomly, clients only switch to it if their service rate is improved. 
In closed systems, where the client population is fixed, we derive tight estimates of the time it takes under RLS strategy to balance the load across servers. We then study open systems where clients arrive according to a random process and leave the system upon service completion. In this scenario, we analyze how client migrations within the system interact with the system dynamics induced by client arrivals and departures. We compare the load-aware RLS strategy to a load-oblivious strategy in which clients just randomly switch server without accounting for the server loads. Surprisingly, we show that both load-oblivious and load-aware strategies stabilize the system whenever this is at all possible. We further demonstrate, using large-system asymptotics, that the average client sojourn time under the load-oblivious strategy is not considerably reduced when clients apply smarter load-aware strategies.", + "link": "https://www.semanticscholar.org/paper/1331d03fbf000e0e62f4d09e5264ad56564eab38", + "scraped_abstract": null, + "citation_best": 10 + }, + { + "paper": "2151224499", + "venue": "1175089206", + "year": "2010", + "title": "fast fast architecture sensitive tree search on modern cpus and gpus", + "label": [ + "101056560", + "61483411", + "163736687", + "150552126", + "173608175", + "140745168", + "188045654", + "197855036", + "189783530" + ], + "author": [ + "2107543706", + "73121025", + "2079263147", + "202245023", + "2143135117", + "1964002351", + "2166437845", + "2581968944", + "2163667412" + ], + "reference": [ + "99156441", + "1598618182", + "1600355623", + "1994085050", + "2008627910", + "2014515453", + "2024794876", + "2054046497", + "2059417738", + "2073216502", + "2088116875", + "2091829363", + "2096496252", + "2097125969", + "2097904740", + "2102967864", + "2105220449", + "2114474644", + "2123686039", + "2125529470", + "2137077706", + "2140453381", + "2147076738", + "2156000104", + "2160404300", + "2161694911", + "2166955231", + "2167911783", + 
"2169150396", + "2174889101", + "2296760900" + ], + "abstract": "in memory tree structured index search is a fundamental database operation modern processors provide tremendous computing power by integrating multiple cores each with wide vector units there has been much work to exploit modern processor architectures for database primitives like scan sort join and aggregation however unlike other primitives tree search presents significant challenges due to irregular and unpredictable data accesses in tree traversal in this paper we present fast an extremely fast architecture sensitive layout of the index tree fast is a binary tree logically organized to optimize for architecture features like page size cache line size and simd width of the underlying hardware fast eliminates impact of memory latency and exploits thread level and datalevel parallelism on both cpus and gpus to achieve 50 million cpu and 85 million gpu queries per second 5x cpu and 1 7x gpu faster than the best previously reported performance on the same architectures fast supports efficient bulk updates by rebuilding index trees in less than 0 1 seconds for datasets as large as 64mkeys and naturally integrates compression techniques overcoming the memory bandwidth bottleneck and achieving a 6x performance improvement over uncompressed index search for large keys on cpus", + "title_raw": "FAST: fast architecture sensitive tree search on modern CPUs and GPUs", + "abstract_raw": "In-memory tree structured index search is a fundamental database operation. Modern processors provide tremendous computing power by integrating multiple cores, each with wide vector units. There has been much work to exploit modern processor architectures for database primitives like scan, sort, join and aggregation. However, unlike other primitives, tree search presents significant challenges due to irregular and unpredictable data accesses in tree traversal. 
In this paper, we present FAST, an extremely fast architecture sensitive layout of the index tree. FAST is a binary tree logically organized to optimize for architecture features like page size, cache line size, and SIMD width of the underlying hardware. FAST eliminates impact of memory latency, and exploits thread-level and datalevel parallelism on both CPUs and GPUs to achieve 50 million (CPU) and 85 million (GPU) queries per second, 5X (CPU) and 1.7X (GPU) faster than the best previously reported performance on the same architectures. FAST supports efficient bulk updates by rebuilding index trees in less than 0.1 seconds for datasets as large as 64Mkeys and naturally integrates compression techniques, overcoming the memory bandwidth bottleneck and achieving a 6X performance improvement over uncompressed index search for large keys on CPUs.", + "link": "https://www.semanticscholar.org/paper/fc8aaceff4d907f025ec9b2d8f3a6980cb663090", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2912359042", + "venue": "1190910084", + "year": "2010", + "title": "qip pspace", + "label": [ + "39637292", + "197685441" + ], + "author": [ + "2560011701", + "2107119617", + "2296313008", + "1944891737" + ], + "reference": [ + "45689951", + "1498051073", + "1518796684", + "1527197079", + "1567193096", + "1631356911", + "1965003470", + "1970606468", + "1987280464", + "1989051052", + "1993661095", + "1994584977", + "1995547833", + "1999502904", + "2010699490", + "2011112377", + "2029091075", + "2032626654", + "2056352038", + "2072883586", + "2080578129", + "2082647621", + "2085025327", + "2088141885", + "2090133546", + "2096390054", + "2100827027", + "2106565812", + "2113097540", + "2119901068", + "2120041647", + "2126746199", + "2148957455", + "2151450177", + "2161327939", + "2161829873", + "2174563490", + "2195967161", + "2293988196", + "2588354374", + "2741390383", + "2798707604" + ], + "abstract": "we prove that the complexity class qip which consists of all 
problems having quantum interactive proof systems is contained in pspace this containment is proved by applying a parallelized form of the matrix multiplicative weights update method to a class of semidefinite programs that captures the computational power of quantum interactive proofs as the containment of pspace in qip follows immediately from the well known equality ip pspace the equality qip pspace follows", + "title_raw": "QIP = PSPACE", + "abstract_raw": "We prove that the complexity class QIP, which consists of all problems having quantum interactive proof systems, is contained in PSPACE. This containment is proved by applying a parallelized form of the matrix multiplicative weights update method to a class of semidefinite programs that captures the computational power of quantum interactive proofs. As the containment of PSPACE in QIP follows immediately from the well-known equality IP = PSPACE, the equality QIP = PSPACE follows.", + "link": "https://www.semanticscholar.org/paper/4963e2a51ebd531470472933013741f1e91818f8", + "scraped_abstract": null, + "citation_best": 57 + }, + { + "paper": "2090048052", + "venue": "1166315290", + "year": "2010", + "title": "vizwiz nearly real time answers to visual questions", + "label": [ + "34127721", + "107457646", + "158495155", + "105339364" + ], + "author": [ + "2112106364", + "1237292554", + "2103437488", + "2156768364", + "2118081206", + "2104582966", + "2485489804", + "2232102094", + "2339819898", + "2232493242", + "2104162850" + ], + "reference": [ + "25499600", + "1968311814", + "2000789222", + "2049684864", + "2062139096", + "2090254904", + "2116705992", + "2118552645", + "2119605622", + "2141282920", + "2148279470", + "2148479118", + "2149489787", + "2151401338", + "2157025439", + "2161538177", + "2163071480", + "2163881971", + "2164533107", + "2168842329", + "3149042249" + ], + "abstract": "the lack of access to visual information like text labels icons and colors can cause frustration and decrease 
independence for blind people current access technology uses automatic approaches to address some problems in this space but the technology is error prone limited in scope and quite expensive in this paper we introduce vizwiz a talking application for mobile phones that offers a new alternative to answering visual questions in nearly real time asking multiple people on the web to support answering questions quickly we introduce a general approach for intelligently recruiting human workers in advance called quikturkit so that workers are available when new questions arrive a field deployment with 11 blind participants illustrates that blind people can effectively use vizwiz to cheaply answer questions in their everyday lives highlighting issues that automatic approaches will need to address to be useful finally we illustrate the potential of using vizwiz as part of the participatory design of advanced tools by using it to build and evaluate vizwiz locateit an interactive mobile tool that helps blind people solve general visual search problems", + "title_raw": "VizWiz: nearly real-time answers to visual questions", + "abstract_raw": "The lack of access to visual information like text labels, icons, and colors can cause frustration and decrease independence for blind people. Current access technology uses automatic approaches to address some problems in this space, but the technology is error-prone, limited in scope, and quite expensive. In this paper, we introduce VizWiz, a talking application for mobile phones that offers a new alternative to answering visual questions in nearly real-time - asking multiple people on the web. To support answering questions quickly, we introduce a general approach for intelligently recruiting human workers in advance called quikTurkit so that workers are available when new questions arrive. 
A field deployment with 11 blind participants illustrates that blind people can effectively use VizWiz to cheaply answer questions in their everyday lives, highlighting issues that automatic approaches will need to address to be useful. Finally, we illustrate the potential of using VizWiz as part of the participatory design of advanced tools by using it to build and evaluate VizWiz::LocateIt, an interactive mobile tool that helps blind people solve general visual search problems.", + "link": "https://www.semanticscholar.org/paper/e0c668d1da866617ccfeee910d13eb14fa340bea", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2161163216", + "venue": "1133523790", + "year": "2010", + "title": "towards certain fixes with editing rules and master data", + "label": [ + "22414024", + "61871575", + "33762810", + "48044578", + "177264268", + "124101348", + "136197465" + ], + "author": [ + "2132396280", + "2130201582", + "2119375424", + "2607702498", + "2101311407" + ], + "reference": [ + "4175336", + "987035227", + "1509880128", + "1527197079", + "1539265392", + "1547556487", + "1555797260", + "1558832481", + "1567570895", + "1591780338", + "1700279323", + "1868111879", + "1964786778", + "1967167578", + "1977452859", + "2003159902", + "2007940874", + "2011039300", + "2026147624", + "2044022210", + "2045054164", + "2047745978", + "2059009730", + "2063103859", + "2068376489", + "2073251771", + "2077518845", + "2105423800", + "2106895292", + "2108991785", + "2114473442", + "2115826669", + "2122039644", + "2123561513", + "2131060875", + "2133409729", + "2137775416", + "2145346822", + "2147805208", + "2163600218", + "2167333415", + "2169940602", + "2171332293", + "2178708778", + "2241988237", + "2611804663", + "2891212941", + "2915048391" + ], + "abstract": "a variety of integrity constraints have been studied for data cleaning while these constraints can detect the presence of errors they fall short of guiding us to correct the errors indeed data repairing 
based on these constraints may not find certain fixes that are absolutely correct and worse may introduce new errors when repairing the data we propose a method for finding certain fixes based on master data a notion of certain regions and a class of editing rules a certain region is a set of attributes that are assured correct by the users given a certain region and master data editing rules tell us what attributes to fix and how to update them we show how the method can be used in data monitoring and enrichment we develop techniques for reasoning about editing rules to decide whether they lead to a unique fix and whether they are able to fix all the attributes in a tuple relative to master data and a certain region we also provide an algorithm to identify minimal certain regions such that a certain fix is warranted by editing rules and master data as long as one of the regions is correct we experimentally verify the effectiveness and scalability of the algorithm", + "title_raw": "Towards certain fixes with editing rules and master data", + "abstract_raw": "A variety of integrity constraints have been studied for data cleaning. While these constraints can detect the presence of errors, they fall short of guiding us to correct the errors. Indeed, data repairing based on these constraints may not find certain fixes that are absolutely correct, and worse, may introduce new errors when repairing the data. We propose a method for finding certain fixes, based on master data, a notion of certain regions, and a class of editing rules. A certain region is a set of attributes that are assured correct by the users. Given a certain region and master data, editing rules tell us what attributes to fix and how to update them. We show how the method can be used in data monitoring and enrichment. 
We develop techniques for reasoning about editing rules, to decide whether they lead to a unique fix and whether they are able to fix all the attributes in a tuple, relative to master data and a certain region. We also provide an algorithm to identify minimal certain regions, such that a certain fix is warranted by editing rules and master data as long as one of the regions is correct. We experimentally verify the effectiveness and scalability of the algorithm.", + "link": "https://www.semanticscholar.org/paper/8f471aa2b96ab1e1f32b5d3609a4af5d1905c8fe", + "scraped_abstract": null, + "citation_best": 117 + }, + { + "paper": "2171279286", + "venue": "1135342153", + "year": "2010", + "title": "factorizing personalized markov chains for next basket recommendation", + "label": [ + "119857082", + "76408418", + "80444323", + "49555168", + "98763669", + "557471498" + ], + "author": [ + "1585981875", + "2043953584", + "78243962" + ], + "reference": [ + "1994389483", + "2023603028", + "2080320419", + "2089349245", + "2101409192", + "2110325612", + "2117111450", + "2138108551", + "2140310134", + "2157973827" + ], + "abstract": "recommender systems are an important component of many websites two of the most popular approaches are based on matrix factorization mf and markov chains mc mf methods learn the general taste of a user by factorizing the matrix over observed user item preferences on the other hand mc methods model sequential behavior by learning a transition graph over items that is used to predict the next action based on the recent actions of a user in this paper we present a method bringing both approaches together our method is based on personalized transition graphs over underlying markov chains that means for each user an own transition matrix is learned thus in total the method uses a transition cube as the observations for estimating the transitions are usually very limited our method factorizes the transition cube with a pairwise interaction model which is a 
special case of the tucker decomposition we show that our factorized personalized mc fpmc model subsumes both a common markov chain and the normal matrix factorization model for learning the model parameters we introduce an adaption of the bayesian personalized ranking bpr framework for sequential basket data empirically we show that our fpmc model outperforms both the common matrix factorization and the unpersonalized mc model both learned with and without factorization", + "title_raw": "Factorizing personalized Markov chains for next-basket recommendation", + "abstract_raw": "Recommender systems are an important component of many websites. Two of the most popular approaches are based on matrix factorization (MF) and Markov chains (MC). MF methods learn the general taste of a user by factorizing the matrix over observed user-item preferences. On the other hand, MC methods model sequential behavior by learning a transition graph over items that is used to predict the next action based on the recent actions of a user. In this paper, we present a method bringing both approaches together. Our method is based on personalized transition graphs over underlying Markov chains. That means for each user an own transition matrix is learned - thus in total the method uses a transition cube. As the observations for estimating the transitions are usually very limited, our method factorizes the transition cube with a pairwise interaction model which is a special case of the Tucker Decomposition. We show that our factorized personalized MC (FPMC) model subsumes both a common Markov chain and the normal matrix factorization model. For learning the model parameters, we introduce an adaption of the Bayesian Personalized Ranking (BPR) framework for sequential basket data. 
Empirically, we show that our FPMC model outperforms both the common matrix factorization and the unpersonalized MC model both learned with and without factorization.", + "link": "https://www.semanticscholar.org/paper/50d85cb114f7c5e779a6772f2931e77dddd54a5e", + "scraped_abstract": null, + "citation_best": 2199 + }, + { + "paper": "2124142520", + "venue": "1163450153", + "year": "2009", + "title": "predicting tie strength with social media", + "label": [ + "2522767166", + "2776351115", + "518677369", + "86256295", + "136764020" + ], + "author": [ + "2141697838", + "2307725321" + ], + "reference": [ + "1641403162", + "1967830590", + "1967848124", + "1984744191", + "1985009457", + "1987235421", + "1989324514", + "1997733761", + "1997841190", + "2002779084", + "2010695531", + "2050635293", + "2059641569", + "2059837179", + "2065730093", + "2075081173", + "2075894438", + "2088793533", + "2091877193", + "2092230135", + "2102451790", + "2109469951", + "2122131676", + "2124637492", + "2128211014", + "2130061303", + "2135509167", + "2136852793", + "2138386320", + "2148386842", + "2154454189", + "2162209053", + "2164802537", + "2319561871", + "2420733993", + "3102050987", + "3121584743" + ], + "abstract": "social media treats all users the same trusted friend or total stranger with little or nothing in between in reality relationships fall everywhere along this spectrum a topic social science has investigated for decades under the theme of tie strength our work bridges this gap between theory and practice in this paper we present a predictive model that maps social media data to tie strength the model builds on a dataset of over 2 000 social media ties and performs quite well distinguishing between strong and weak ties with over 85 accuracy we complement these quantitative findings with interviews that unpack the relationships we could not predict the paper concludes by illustrating how modeling tie strength can improve social media design elements including privacy 
controls message routing friend introductions and information prioritization", + "title_raw": "Predicting tie strength with social media", + "abstract_raw": "Social media treats all users the same: trusted friend or total stranger, with little or nothing in between. In reality, relationships fall everywhere along this spectrum, a topic social science has investigated for decades under the theme of tie strength. Our work bridges this gap between theory and practice. In this paper, we present a predictive model that maps social media data to tie strength. The model builds on a dataset of over 2,000 social media ties and performs quite well, distinguishing between strong and weak ties with over 85% accuracy. We complement these quantitative findings with interviews that unpack the relationships we could not predict. The paper concludes by illustrating how modeling tie strength can improve social media design elements, including privacy controls, message routing, friend introductions and information prioritization.", + "link": "https://www.semanticscholar.org/paper/fd343a76fea748a99c46e0413b23a94b6a009fb6", + "scraped_abstract": null, + "citation_best": 1367 + }, + { + "paper": "2116041277", + "venue": "1163450153", + "year": "2009", + "title": "undo and erase events as indicators of usability problems", + "label": [ + "170130773", + "107457646", + "2780154230", + "2777904410" + ], + "author": [ + "2144584793", + "2227933846", + "2120519272", + "2091637845" + ], + "reference": [ + "48202622", + "77266977", + "168772177", + "1487240240", + "1605067591", + "1980880315", + "1981610984", + "1992007286", + "2008522363", + "2009823019", + "2034503479", + "2049671505", + "2080743924", + "2083939386", + "2086606137", + "2098903272", + "2099473180", + "2102418365", + "2124696116", + "2128922568", + "2135849570", + "2142325887", + "2152309982", + "2159306398", + "2164558494", + "2412160738" + ], + "abstract": "one approach to reducing the costs of usability testing is to 
facilitate the automatic detection of critical incidents serious breakdowns in interaction that stand out during software use this research evaluates the use of undo and erase events as indicators of critical incidents in google sketchup a 3d modeling application measuring an indicator s usefulness by the numbers and types of usability problems discovered we compared problems identified using undo and erase events to problems identified using the user reported critical incident technique hartson and castillo 1998 in a within subjects experiment with 35 participants undo and erase episodes together revealed over 90 of the problems rated as severe several of which would not have been discovered by self report alone moreover problems found by all three methods were rated as significantly more severe than those identified by only a subset of methods these results suggest that undo and erase events will serve as useful complements to user reported critical incidents for low cost usability evaluation of creation oriented applications like sketchup", + "title_raw": "Undo and erase events as indicators of usability problems", + "abstract_raw": "One approach to reducing the costs of usability testing is to facilitate the automatic detection of critical incidents: serious breakdowns in interaction that stand out during software use. This research evaluates the use of undo and erase events as indicators of critical incidents in Google SketchUp (a 3D-modeling application), measuring an indicator's usefulness by the numbers and types of usability problems discovered. We compared problems identified using undo and erase events to problems identified using the user-reported critical incident technique [Hartson and Castillo 1998]. In a within-subjects experiment with 35 participants, undo and erase episodes together revealed over 90% of the problems rated as severe, several of which would not have been discovered by self-report alone. 
Moreover, problems found by all three methods were rated as significantly more severe than those identified by only a subset of methods. These results suggest that undo and erase events will serve as useful complements to user-reported critical incidents for low cost usability evaluation of creation-oriented applications like SketchUp.", + "link": "https://www.semanticscholar.org/paper/18523e261819a8584edd5250620dcafaf39f152e", + "scraped_abstract": null, + "citation_best": 39 + }, + { + "paper": "2162065809", + "venue": "1163450153", + "year": "2009", + "title": "from interaction to trajectories designing coherent journeys through user experiences", + "label": [ + "201025465", + "107457646", + "144430266" + ], + "author": [ + "2088024608", + "70074427", + "2061056646", + "1980670276" + ], + "reference": [ + "1593163947", + "1593846218", + "1595921760", + "1964421638", + "1971670344", + "1993489252", + "2005425904", + "2006890083", + "2027074722", + "2029111825", + "2056133752", + "2075937691", + "2086034674", + "2099001635", + "2110378855", + "2112915525", + "2123106301", + "2125887849", + "2128026023", + "2137940381", + "2141052863", + "2143157063", + "2145199549", + "2159514065", + "2164551073", + "2612435456", + "2979253453" + ], + "abstract": "the idea of interactional trajectories through interfaces has emerged as a sensitizing concept from recent studies of tangible interfaces and interaction in museums and galleries we put this concept to work as a lens to reflect on published studies of complex user experiences that extend over space and time and involve multiple roles and interfaces we develop a conceptual framework in which trajectories explain these user experiences as journeys through hybrid structures punctuated by transitions and in which interactivity and collaboration are orchestrated our framework is intended to sensitize future studies help distill craft knowledge into design guidelines and patterns identify technology requirements and provide a 
boundary object to connect hci with performance studies", + "title_raw": "From interaction to trajectories: designing coherent journeys through user experiences", + "abstract_raw": "The idea of interactional trajectories through interfaces has emerged as a sensitizing concept from recent studies of tangible interfaces and interaction in museums and galleries. We put this concept to work as a lens to reflect on published studies of complex user experiences that extend over space and time and involve multiple roles and interfaces. We develop a conceptual framework in which trajectories explain these user experiences as journeys through hybrid structures, punctuated by transitions, and in which interactivity and collaboration are orchestrated. Our framework is intended to sensitize future studies, help distill craft knowledge into design guidelines and patterns, identify technology requirements, and provide a boundary object to connect HCI with Performance Studies.", + "link": "https://www.semanticscholar.org/paper/ff3acd4fe133ca93a84cf3d58769e8a4b92304cc", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2144024567", + "venue": "1163450153", + "year": "2009", + "title": "sizing the horizon the effects of chart size and layering on the graphical perception of time series visualizations", + "label": [ + "151406439", + "124101348" + ], + "author": [ + "2112690490", + "2164178758", + "718039462" + ], + "reference": [ + "1655654231", + "1967994486", + "1971781829", + "1974184852", + "1987381915", + "1996136679", + "1999031840", + "2001084593", + "2040641857", + "2059636449", + "2088172714", + "2097924927", + "2106441507", + "2106588364", + "2108230739", + "2117554888", + "2118302132", + "2125215841", + "2126249051", + "2126415191", + "2131737623", + "2132881639", + "2143717026", + "2147997165", + "2152922709", + "2155843307", + "2165467901", + "2610685016", + "2612304202" + ], + "abstract": "we investigate techniques for visualizing time series data 
and evaluate their effect in value comparison tasks we compare line charts with horizon graphs a space efficient time series visualization technique across a range of chart sizes measuring the speed and accuracy of subjects estimates of value differences between charts we identify transition points at which reducing the chart height results in significantly differing drops in estimation accuracy across the compared chart types and we find optimal positions in the speed accuracy tradeoff curve at which viewers performed quickly without attendant drops in accuracy based on these results we propose approaches for increasing data density that optimize graphical perception", + "title_raw": "Sizing the horizon: the effects of chart size and layering on the graphical perception of time series visualizations", + "abstract_raw": "We investigate techniques for visualizing time series data and evaluate their effect in value comparison tasks. We compare line charts with horizon graphs - a space-efficient time series visualization technique - across a range of chart sizes, measuring the speed and accuracy of subjects' estimates of value differences between charts. We identify transition points at which reducing the chart height results in significantly differing drops in estimation accuracy across the compared chart types, and we find optimal positions in the speed-accuracy tradeoff curve at which viewers performed quickly without attendant drops in accuracy. 
Based on these results, we propose approaches for increasing data density that optimize graphical perception.", + "link": "http://doi.acm.org/10.1145/1518701.1518897", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2061766138", + "venue": "1163450153", + "year": "2009", + "title": "social immersive media pursuing best practices for multi user interactive camera projector exhibits", + "label": [ + "153715457", + "502989409", + "107457646", + "40458791", + "74216064", + "49774154", + "35173682" + ], + "author": [ + "1983653882", + "679845229" + ], + "reference": [ + "67598112", + "171204329", + "1485705709", + "1507397303", + "1518553298", + "1530681353", + "1555746850", + "1574666341", + "1594772103", + "1966931560", + "1968211101", + "1971675335", + "2000411598", + "2021605849", + "2029709911", + "2042850474", + "2045403074", + "2073823857", + "2077889626", + "2084703563", + "2086451657", + "2090293796", + "2103865647", + "2103991771", + "2107034885", + "2110151826", + "2115647291", + "2116343313", + "2128156516", + "2129353167", + "2140190783", + "2144258930", + "2145019926", + "2145327874", + "2151622932", + "2166227724", + "2167694665", + "2168126980", + "2168442304", + "2295947439", + "2480506430", + "2798977449", + "2895331222", + "2945424237", + "3114284325" + ], + "abstract": "based on ten years experience developing interactive camera projector systems for public science and culture exhibits we define a distinct form of augmented reality focused on social interaction social immersive media our work abandons gui metaphors and builds on the language of cinema casting users as actors within simulated narrative models we articulate philosophical goals design principles and interaction techniques that create strong emotional responses and social engagement through visceral interaction we describe approaches to clearly communicate cultural and scientific ideas through the medium and we demonstrate how practitioners can design interactions 
that promote specific social behaviors in users", + "title_raw": "Social immersive media: pursuing best practices for multi-user interactive camera/projector exhibits", + "abstract_raw": "Based on ten years' experience developing interactive camera/projector systems for public science and culture exhibits, we define a distinct form of augmented reality focused on social interaction: social immersive media. Our work abandons GUI metaphors and builds on the language of cinema, casting users as actors within simulated narrative models. We articulate philosophical goals, design principles, and interaction techniques that create strong emotional responses and social engagement through visceral interaction. We describe approaches to clearly communicate cultural and scientific ideas through the medium. And we demonstrate how practitioners can design interactions that promote specific social behaviors in users.", + "link": "https://www.semanticscholar.org/paper/c9e557c2340ae2f077af9b819be754dfd177cf11", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2097298348", + "venue": "1163450153", + "year": "2009", + "title": "ephemeral adaptation the use of gradual onset to improve menu selection performance", + "label": [ + "107457646", + "158495155", + "49774154", + "76947770", + "89187990", + "81917197" + ], + "author": [ + "2165969493", + "2046655725", + "2314594501", + "2115637412" + ], + "reference": [ + "1998938084", + "2001590643", + "2003050223", + "2027619182", + "2029469562", + "2034511689", + "2036433269", + "2037246082", + "2043048977", + "2057393879", + "2061059521", + "2076442580", + "2088953643", + "2101847267", + "2108384112", + "2110934090", + "2113725822", + "2128762404", + "2130391181", + "2136920231", + "2165103795" + ], + "abstract": "we introduce ephemeral adaptation a new adaptive gui technique that improves performance by reducing visual search time while maintaining spatial consistency ephemeral adaptive interfaces employ gradual 
onset to draw the user s attention to predicted items adaptively predicted items appear abruptly when the menu is opened but non predicted items fade in gradually to demonstrate the benefit of ephemeral adaptation we conducted two experiments with a total of 48 users to show 1 that ephemeral adaptive menus are faster than static menus when accuracy is high and are not significantly slower when it is low and 2 that ephemeral adaptive menus are also faster than adaptive highlighting while we focused on user adaptive guis ephemeral adaptation should be applicable to a broad range of visually complex tasks", + "title_raw": "Ephemeral adaptation: the use of gradual onset to improve menu selection performance", + "abstract_raw": "We introduce ephemeral adaptation, a new adaptive GUI technique that improves performance by reducing visual search time while maintaining spatial consistency. Ephemeral adaptive interfaces employ gradual onset to draw the user's attention to predicted items: adaptively predicted items appear abruptly when the menu is opened, but non-predicted items fade in gradually. To demonstrate the benefit of ephemeral adaptation we conducted two experiments with a total of 48 users to show: (1) that ephemeral adaptive menus are faster than static menus when accuracy is high, and are not significantly slower when it is low and (2) that ephemeral adaptive menus are also faster than adaptive highlighting. 
While we focused on user-adaptive GUIs, ephemeral adaptation should be applicable to a broad range of visually complex tasks.", + "link": "https://www.semanticscholar.org/paper/201c3040d18a224a4d767a39d4934df6ce7bd63e", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2073961002", + "venue": "1199533187", + "year": "2009", + "title": "asserting and checking determinism for multithreaded programs", + "label": [ + "548217200", + "113200698", + "40422974", + "138101251", + "199360897", + "173608175", + "28034677", + "2777904410" + ], + "author": [ + "2048404743", + "2157836386" + ], + "reference": [ + "1482207865", + "1568192366", + "1602164775", + "1605226475", + "1931688030", + "1965462925", + "1965662337", + "1971903460", + "1972461140", + "1972544179", + "1978280181", + "1988888548", + "1996138408", + "1999075586", + "2002179840", + "2016167891", + "2016314068", + "2052363833", + "2061239425", + "2069300761", + "2072794470", + "2082766667", + "2101431901", + "2102640269", + "2103957752", + "2104861744", + "2108066920", + "2109033563", + "2109875364", + "2114338612", + "2119494620", + "2120027538", + "2124545556", + "2127989655", + "2133662847", + "2135395375", + "2138856138", + "2144118030", + "2147506153", + "2149984854", + "2158124716", + "2158449625", + "2161600851", + "2171480813", + "2172255864", + "3146075203" + ], + "abstract": "the trend towards processors with more and more parallel cores is increasing the need for software that can take advantage of parallelism the most widespread method for writing parallel software is to use explicit threads writing correct multithreaded programs however has proven to be quite challenging in practice the key difficulty is non determinism the threads of a parallel application may be interleaved non deterministically during execution in a buggy program non deterministic scheduling will lead to non deterministic results some interleavings will produce the correct result while others will not we 
propose an assertion framework for specifying that regions of a parallel program behave deterministically despite non deterministic thread interleaving our framework allows programmers to write assertions involving pairs of program states arising from different parallel schedules we describe an implementation of our deterministic assertions as a library for java and evaluate the utility of our specifications on a number of parallel java benchmarks we found specifying deterministic behavior to be quite simple using our assertions further in experiments with our assertions we were able to identify two races as true parallelism errors that lead to incorrect non deterministic behavior these races were distinguished from a number of benign races in the benchmarks", + "title_raw": "Asserting and checking determinism for multithreaded programs", + "abstract_raw": "The trend towards processors with more and more parallel cores is increasing the need for software that can take advantage of parallelism. The most widespread method for writing parallel software is to use explicit threads. Writing correct multithreaded programs, however, has proven to be quite challenging in practice. The key difficulty is non-determinism. The threads of a parallel application may be interleaved non-deterministically during execution. In a buggy program, non-deterministic scheduling will lead to non-deterministic results - some interleavings will produce the correct result while others will not. We propose an assertion framework for specifying that regions of a parallel program behave deterministically despite non-deterministic thread interleaving. Our framework allows programmers to write assertions involving pairs of program states arising from different parallel schedules. We describe an implementation of our deterministic assertions as a library for Java, and evaluate the utility of our specifications on a number of parallel Java benchmarks. 
We found specifying deterministic behavior to be quite simple using our assertions. Further, in experiments with our assertions, we were able to identify two races as true parallelism errors that lead to incorrect non-deterministic behavior. These races were distinguished from a number of benign races in the benchmarks.", + "link": "https://www.semanticscholar.org/paper/354aa1c8397f6b1c484f4034847d1bfc1521cd5a", + "scraped_abstract": null, + "citation_best": 69 + }, + { + "paper": "2136880809", + "venue": "1199533187", + "year": "2009", + "title": "darwin an approach for debugging evolving programs", + "label": [ + "206118921", + "168065819", + "2779639559", + "98045186", + "199360897", + "11392498", + "160191386", + "202105479", + "115903868", + "136388014" + ], + "author": [ + "2224116908", + "2153766775", + "2112237930", + "1823475869" + ], + "reference": [ + "33043110", + "130558454", + "1480909796", + "1507573541", + "1570533264", + "1579437898", + "1971137495", + "1973828066", + "1984248430", + "1986453394", + "2009489720", + "2016716561", + "2020538887", + "2024442685", + "2036196659", + "2038899190", + "2074888021", + "2080264505", + "2080534028", + "2084136254", + "2091968063", + "2096449544", + "2110066339", + "2113371678", + "2119251836", + "2121081915", + "2129487583", + "2136305343", + "2137008041", + "2137433502", + "2139643778", + "2148329403", + "2150684130", + "2152543417", + "2156515608", + "2158870716", + "2162376048", + "2166007208", + "2168625647", + "2170198454", + "2170224888", + "2295857493", + "3151756653", + "3162561360" + ], + "abstract": "debugging refers to the laborious process of finding causes of program failures often such failures are introduced when a program undergoes changes and evolves from a stable version to a new modified version in this paper we propose an automated approach for debugging evolving programs given two programs a reference stable program and a new modified program and an input that fails on the modified 
program our approach uses concrete as well as symbolic execution to synthesize new inputs that differ marginally from the failing input in their control flow behavior a comparison of the execution traces of the failing input and the new inputs provides critical clues to the root cause of the failure a notable feature of our approach is that it handles hard to explain bugs like code missing errors by pointing to the relevant code in the reference program we have implemented our approach in a tool called darwin we have conducted experiments with several real life case studies including real world web servers and the libpng library for manipulating png images our experience from these experiments points to the efficacy of darwin in pinpointing bugs moreover while localizing a given observable error the new inputs synthesized by darwin can reveal other undiscovered errors", + "title_raw": "Darwin: an approach for debugging evolving programs", + "abstract_raw": "Debugging refers to the laborious process of finding causes of program failures. Often, such failures are introduced when a program undergoes changes and evolves from a stable version to a new, modified version. In this paper, we propose an automated approach for debugging evolving programs. Given two programs (a reference, stable program and a new, modified program) and an input that fails on the modified program, our approach uses concrete as well as symbolic execution to synthesize new inputs that differ marginally from the failing input in their control flow behavior. A comparison of the execution traces of the failing input and the new inputs provides critical clues to the root-cause of the failure. A notable feature of our approach is that it handles hard-to-explain bugs like code missing errors by pointing to the relevant code in the reference program. We have implemented our approach in a tool called DARWIN. 
We have conducted experiments with several real-life case studies, including real-world web servers and the libPNG library for manipulating PNG images. Our experience from these experiments points to the efficacy of DARWIN in pinpointing bugs. Moreover, while localizing a given observable error, the new inputs synthesized by DARWIN can reveal other undiscovered errors.", + "link": "https://www.semanticscholar.org/paper/d4563e217619c35f68cf8b91b60286c0ba174f8d", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2059215200", + "venue": "1199533187", + "year": "2009", + "title": "graph based mining of multiple object usage patterns", + "label": [ + "739882", + "133237599", + "124101348", + "136134403", + "80444323" + ], + "author": [ + "2159953522", + "2099719009", + "2147874342", + "1239314518", + "2151252387" + ], + "reference": [ + "1511982475", + "1519220202", + "1767990287", + "1968027261", + "1986453394", + "2025677988", + "2029853454", + "2043811931", + "2054520963", + "2056139220", + "2071088932", + "2073067110", + "2093938715", + "2096491586", + "2098629748", + "2099471229", + "2104301886", + "2106259924", + "2107376583", + "2119467398", + "2121818394", + "2124666592", + "2126775986", + "2131954495", + "2153943889", + "2156417145", + "2156618683", + "2156841542", + "2166019458", + "2171471938", + "2294152467", + "2295399529" + ], + "abstract": "the interplay of multiple objects in object oriented programming often follows specific protocols for example certain orders of method calls and or control structure constraints among them that are parts of the intended object usages unfortunately the information is not always documented that creates long learning curve and importantly leads to subtle problems due to the misuse of objects in this paper we propose grouminer a novel graph based approach for mining the usage patterns of one or multiple objects grouminer approach includes a graph based representation for multiple object usages a pattern 
mining algorithm and an anomaly detection technique that are efficient accurate and resilient to software changes our experiments on several real world programs show that our prototype is able to find useful usage patterns with multiple objects and control structures and to translate them into user friendly code skeletons to assist developers in programming it could also detect the usage anomalies that caused yet undiscovered defects and code smells in those programs", + "title_raw": "Graph-based mining of multiple object usage patterns", + "abstract_raw": "The interplay of multiple objects in object-oriented programming often follows specific protocols, for example certain orders of method calls and/or control structure constraints among them that are parts of the intended object usages. Unfortunately, the information is not always documented. That creates long learning curve, and importantly, leads to subtle problems due to the misuse of objects. In this paper, we propose GrouMiner, a novel graph-based approach for mining the usage patterns of one or multiple objects. GrouMiner approach includes a graph-based representation for multiple object usages, a pattern mining algorithm, and an anomaly detection technique that are efficient, accurate, and resilient to software changes. Our experiments on several real-world programs show that our prototype is able to find useful usage patterns with multiple objects and control structures, and to translate them into user-friendly code skeletons to assist developers in programming. 
It could also detect the usage anomalies that caused yet undiscovered defects and code smells in those programs.", + "link": "https://www.semanticscholar.org/paper/53c96fead0dc9307809c57e428d60665483ada9a", + "scraped_abstract": null, + "citation_best": 297 + }, + { + "paper": "2142037471", + "venue": "1164975091", + "year": "2009", + "title": "discriminative models for multi class object layout", + "label": [ + "153083717", + "75294576", + "154945302", + "22367795", + "97931131", + "64876066", + "127705205" + ], + "author": [ + "1993315506", + "1603383808", + "263994822" + ], + "reference": [ + "836197011", + "1525954826", + "1528789833", + "1560380655", + "1680189815", + "1982522767", + "2005688170", + "2026000422", + "2031248101", + "2092423930", + "2095844239", + "2098355199", + "2107640784", + "2108619558", + "2115703234", + "2120419212", + "2122006243", + "2128962821", + "2137117160", + "2141364309", + "2143524933", + "2145850538", + "2146352414", + "2149521538", + "2160254296", + "2161969291", + "2162820221", + "2164877691", + "2164918853", + "2186094539", + "2217896605", + "2429914308", + "3097096317" + ], + "abstract": "many state of the art approaches for object recognition reduce the problem to a 0 1 classification task such reductions allow one to leverage sophisticated classifiers for learning these models are typically trained independently for each class using positive and negative examples cropped from images at test time various post processing heuristics such as non maxima suppression nms are required to reconcile multiple detections within and between different classes for each image though crucial to good performance on benchmarks this post processing is usually defined heuristically", + "title_raw": "Discriminative models for multi-class object layout", + "abstract_raw": "Many state-of-the-art approaches for object recognition reduce the problem to a 0-1 classification task. 
Such reductions allow one to leverage sophisticated classifiers for learning. These models are typically trained independently for each class using positive and negative examples cropped from images. At test-time, various post-processing heuristics such as non-maxima suppression (NMS) are required to reconcile multiple detections within and between different classes for each image. Though crucial to good performance on benchmarks, this post-processing is usually defined heuristically.", + "link": "https://www.semanticscholar.org/paper/c90a0b7011bece4d7a5af1241410023273aa033e", + "scraped_abstract": null, + "citation_best": 282 + }, + { + "paper": "2132800423", + "venue": "1174403976", + "year": "2009", + "title": "effective static deadlock detection", + "label": [ + "84511453", + "113429609", + "548217200", + "173608175", + "201410400", + "159023740", + "106516650", + "97686452", + "69390755" + ], + "author": [ + "2005763208", + "2116810754", + "2157836386" + ], + "reference": [ + "31195832", + "105397881", + "172519447", + "1541313361", + "1544441750", + "1561114929", + "1587427581", + "1862182621", + "1971327145", + "2040060046", + "2059224852", + "2072950666", + "2080696000", + "2096475201", + "2101946114", + "2133662847", + "2149984854", + "2156428492", + "2159031014", + "2162544703" + ], + "abstract": "we present an effective static deadlock detection algorithm for java our algorithm uses a novel combination of static analyses each of which approximates a different necessary condition for a deadlock we have implemented the algorithm and report upon our experience applying it to a suite of multi threaded java programs while neither sound nor complete our approach is effective in practice finding all known deadlocks as well as discovering previously unknown ones in our benchmarks with few false alarms", + "title_raw": "Effective static deadlock detection", + "abstract_raw": "We present an effective static deadlock detection algorithm for Java. 
Our algorithm uses a novel combination of static analyses each of which approximates a different necessary condition for a deadlock. We have implemented the algorithm and report upon our experience applying it to a suite of multi-threaded Java programs. While neither sound nor complete, our approach is effective in practice, finding all known deadlocks as well as discovering previously unknown ones in our benchmarks with few false alarms.", + "link": "https://www.semanticscholar.org/paper/068e9f8dd77ecfcc2019fdf3123d163b159fe4eb", + "scraped_abstract": null, + "citation_best": 178 + }, + { + "paper": "2098664130", + "venue": "1174403976", + "year": "2009", + "title": "how we refactor and how we know it", + "label": [ + "152752567", + "529173508", + "548217200", + "101317890", + "2777904410", + "115903868" + ], + "author": [ + "2342860276", + "347693595", + "2153681653" + ], + "reference": [ + "91229993", + "1565935466", + "1973306813", + "1978801314", + "1998265754", + "2013619645", + "2025674334", + "2063955337", + "2102096185", + "2102212651", + "2110851276", + "2125111617", + "2142338268", + "2153887189", + "2157543803", + "2161661398", + "2162424272", + "2165991570", + "3121075510" + ], + "abstract": "much of what we know about how programmers refactor in the wild is based on studies that examine just a few software projects researchers have rarely taken the time to replicate these studies in other contexts or to examine the assumptions on which they are based to help put refactoring research on a sound scientific basis we draw conclusions using four data sets spanning more than 13 000 developers 240 000 tool assisted refactorings 2500 developer hours and 3400 version control commits using these data we cast doubt on several previously stated assumptions about how programmers refactor while validating others for example we find that programmers frequently do not indicate refactoring activity in commit logs which contradicts assumptions made by several previous 
researchers in contrast we were able to confirm the assumption that programmers do frequently intersperse refactoring with other program changes by confirming assumptions and replicating studies made by other researchers we can have greater confidence that those researchers conclusions are generalizable", + "title_raw": "How we refactor, and how we know it", + "abstract_raw": "Much of what we know about how programmers refactor in the wild is based on studies that examine just a few software projects. Researchers have rarely taken the time to replicate these studies in other contexts or to examine the assumptions on which they are based. To help put refactoring research on a sound scientific basis, we draw conclusions using four data sets spanning more than 13 000 developers, 240 000 tool-assisted refactorings, 2500 developer hours, and 3400 version control commits. Using these data, we cast doubt on several previously stated assumptions about how programmers refactor, while validating others. For example, we find that programmers frequently do not indicate refactoring activity in commit logs, which contradicts assumptions made by several previous researchers. In contrast, we were able to confirm the assumption that programmers do frequently intersperse refactoring with other program changes. 
By confirming assumptions and replicating studies made by other researchers, we can have greater confidence that those researchers' conclusions are generalizable.", + "link": "https://www.semanticscholar.org/paper/49acfc8071e322c0001b31ea42c51f8c12433fc9", + "scraped_abstract": null, + "citation_best": 192 + }, + { + "paper": "2122947685", + "venue": "1174403976", + "year": "2009", + "title": "automatically finding patches using genetic programming", + "label": [ + "1009929", + "168065819", + "105446022", + "110332635", + "199360897", + "101317890", + "2777904410", + "116253237", + "115903868" + ], + "author": [ + "1977991679", + "2328430926", + "2569774467", + "2159603389" + ], + "reference": [ + "130558454", + "323988595", + "1499459826", + "1512383952", + "1525451871", + "1531717019", + "1543985204", + "1576818901", + "1714109750", + "1978232027", + "1989545482", + "2002934700", + "2006700268", + "2037343293", + "2043811931", + "2079317829", + "2105306314", + "2105899810", + "2107089133", + "2110068396", + "2114334695", + "2121898351", + "2122061262", + "2122689321", + "2125999725", + "2126752493", + "2128705399", + "2130877788", + "2133068784", + "2146136779", + "2148329403", + "2155479707", + "2157215835", + "2158870716", + "2165575313", + "2169565104", + "2295349525", + "3004040842", + "3005462739", + "3162561360" + ], + "abstract": "automatic program repair has been a longstanding goal in software engineering yet debugging remains a largely manual process we introduce a fully automated method for locating and repairing bugs in software the approach works on off the shelf legacy applications and does not require formal specifications program annotations or special coding practices once a program fault is discovered an extended form of genetic programming is used to evolve program variants until one is found that both retains required functionality and also avoids the defect in question standard test cases are used to exercise the fault and to encode program 
requirements after a successful repair has been discovered it is minimized using structural differencing algorithms and delta debugging we describe the proposed method and report experimental results demonstrating that it can successfully repair ten different c programs totaling 63 000 lines in under 200 seconds on average", + "title_raw": "Automatically finding patches using genetic programming", + "abstract_raw": "Automatic program repair has been a longstanding goal in software engineering, yet debugging remains a largely manual process. We introduce a fully automated method for locating and repairing bugs in software. The approach works on off-the-shelf legacy applications and does not require formal specifications, program annotations or special coding practices. Once a program fault is discovered, an extended form of genetic programming is used to evolve program variants until one is found that both retains required functionality and also avoids the defect in question. Standard test cases are used to exercise the fault and to encode program requirements. After a successful repair has been discovered, it is minimized using structural differencing algorithms and delta debugging. 
We describe the proposed method and report experimental results demonstrating that it can successfully repair ten different C programs totaling 63,000 lines in under 200 seconds, on average.", + "link": "https://www.semanticscholar.org/paper/820c566bbdb6ddaa7d801b92480cfd2b7e472c7b", + "scraped_abstract": null, + "citation_best": 751 + }, + { + "paper": "2156883549", + "venue": "1174403976", + "year": "2009", + "title": "invariant based automatic testing of ajax user interfaces", + "label": [ + "137922610", + "504723692", + "27458966", + "548217200", + "118643609", + "199360897", + "13743948", + "89505385", + "93996380", + "110875604", + "55166926", + "120314980" + ], + "author": [ + "2022381637", + "2155442793" + ], + "reference": [ + "77717277", + "141765906", + "1503480575", + "1527311855", + "1596127723", + "1980157085", + "2008620926", + "2011282943", + "2034958218", + "2050853996", + "2066237335", + "2067284301", + "2098509204", + "2106514862", + "2110441383", + "2125000816", + "2125672377", + "2126446220", + "2128779711", + "2131435851", + "2132791332", + "2133537879", + "2140857160", + "2143712427", + "2155269847", + "2168255608", + "2171949406" + ], + "abstract": "ajax based web 2 0 applications rely on stateful asynchronous client server communication and client side runtime manipulation of the dom tree this not only makes them fundamentally different from traditional web applications but also more error prone and harder to test we propose a method for testing ajax applications automatically based on a crawler to infer a flow graph for all client side user interface states we identify ajax specific faults that can occur in such states related to dom validity error messages discoverability back button compatibility etc as well as dom tree invariants that can serve as oracle to detect such faults we implemented our approach in atusa a tool offering generic invariant checking components a plugin mechanism to add application specific state validators and 
generation of a test suite covering the paths obtained during crawling we describe two case studies evaluating the fault revealing capabilities scalability required manual effort and level of automation of our approach", + "title_raw": "Invariant-based automatic testing of AJAX user interfaces", + "abstract_raw": "AJAX-based Web 2.0 applications rely on stateful asynchronous client/server communication, and client-side runtime manipulation of the DOM tree. This not only makes them fundamentally different from traditional web applications, but also more error-prone and harder to test. We propose a method for testing AJAX applications automatically, based on a crawler to infer a flow graph for all (client-side) user interface states. We identify AJAX-specific faults that can occur in such states (related to DOM validity, error messages, discoverability, back-button compatibility, etc.) as well as DOM-tree invariants that can serve as oracle to detect such faults. We implemented our approach in ATUSA, a tool offering generic invariant checking components, a plugin-mechanism to add application-specific state validators, and generation of a test suite covering the paths obtained during crawling. 
We describe two case studies evaluating the fault revealing capabilities, scalability, required manual effort and level of automation of our approach.", + "link": "https://www.semanticscholar.org/paper/3c71a8ba1f5818af71173ce861e05027e2b3eee3", + "scraped_abstract": null, + "citation_best": 201 + }, + { + "paper": "1528986923", + "venue": "1203999783", + "year": "2009", + "title": "consequence driven reasoning for horn shiq ontologies", + "label": [ + "195344581", + "166724064", + "154945302", + "2778029271", + "2776235265", + "102993220", + "9616225", + "204321447" + ], + "author": [ + "2134860852" + ], + "reference": [ + "62327733", + "1542417898", + "2107910552", + "2109003533", + "2165433828", + "2572885380" + ], + "abstract": "we present a novel reasoning procedure for horn shiq ontologies shiq ontologies that can be translated to the horn fragment of first order logic in contrast to traditional reasoning procedures for ontologies our procedure does not build models or model representations but works by deriving new consequent axioms the procedure is closely related to the so called completion based procedure for el ontologies and can be regarded as an extension thereof in fact our procedure is theoretically optimal for horn shiq ontologies as well as for the common fragment of el and shiq a preliminary empirical evaluation of our procedure on large medical ontologies demonstrates a dramatic improvement over existing ontology reasoners specifically our implementation allows the classification of the largest available owl version of galen to the best of our knowledge no other reasoner is able to classify this ontology", + "title_raw": "Consequence-driven reasoning for horn SHIQ ontologies", + "abstract_raw": "We present a novel reasoning procedure for Horn SHIQ ontologies--SHIQ ontologies that can be translated to the Horn fragment of first-order logic. 
In contrast to traditional reasoning procedures for ontologies, our procedure does not build models or model representations, but works by deriving new consequent axioms. The procedure is closely related to the so-called completion-based procedure for EL++ ontologies, and can be regarded as an extension thereof. In fact, our procedure is theoretically optimal for Horn SHIQ ontologies as well as for the common fragment of EL++ and SHIQ.\r\n\r\nA preliminary empirical evaluation of our procedure on large medical ontologies demonstrates a dramatic improvement over existing ontology reasoners. Specifically, our implementation allows the classification of the largest available OWL version of Galen. To the best of our knowledge no other reasoner is able to classify this ontology.", + "link": "https://www.semanticscholar.org/paper/85517744a454392e070d59b4e7463400a8e114e7", + "scraped_abstract": null, + "citation_best": 183 + }, + { + "paper": "2080320419", + "venue": "1130985203", + "year": "2009", + "title": "collaborative filtering with temporal dynamics", + "label": [ + "119857082", + "21569690", + "557471498", + "60777511" + ], + "author": [ + "2781213378" + ], + "reference": [ + "139044672", + "188803003", + "1990079212", + "1992270714", + "1993936601", + "1994389483", + "2009727399", + "2022775778", + "2042281163", + "2056760161", + "2057991616", + "2061460268", + "2070786785", + "2095976990", + "2099866409", + "2110222014", + "2153110279", + "2159094788", + "2172249709", + "2341535507" + ], + "abstract": "customer preferences for products are drifting over time product perception and popularity are constantly changing as new selection emerges similarly customer inclinations are evolving leading them to ever redefine their taste thus modeling temporal dynamics should be a key when designing recommender systems or general customer preference models however this raises unique challenges within the eco system intersecting multiple products and customers many different 
characteristics are shifting simultaneously while many of them influence each other and often those shifts are delicate and associated with a few data instances this distinguishes the problem from concept drift explorations where mostly a single concept is tracked classical time window or instance decay approaches cannot work as they lose too much signal when discarding data instances a more sensitive approach is required which can make better distinctions between transient effects and long term patterns the paradigm we offer is creating a model tracking the time changing behavior throughout the life span of the data this allows us to exploit the relevant components of all data instances while discarding only what is modeled as being irrelevant accordingly we revamp two leading collaborative filtering recommendation approaches evaluation is made on a large movie rating dataset by netflix results are encouraging and better than those previously reported on this dataset", + "title_raw": "Collaborative filtering with temporal dynamics", + "abstract_raw": "Customer preferences for products are drifting over time. Product perception and popularity are constantly changing as new selection emerges. Similarly, customer inclinations are evolving, leading them to ever redefine their taste. Thus, modeling temporal dynamics should be a key when designing recommender systems or general customer preference models. However, this raises unique challenges. Within the eco-system intersecting multiple products and customers, many different characteristics are shifting simultaneously, while many of them influence each other and often those shifts are delicate and associated with a few data instances. This distinguishes the problem from concept drift explorations, where mostly a single concept is tracked. Classical time-window or instance-decay approaches cannot work, as they lose too much signal when discarding data instances. 
A more sensitive approach is required, which can make better distinctions between transient effects and long term patterns. The paradigm we offer is creating a model tracking the time changing behavior throughout the life span of the data. This allows us to exploit the relevant components of all data instances, while discarding only what is modeled as being irrelevant. Accordingly, we revamp two leading collaborative filtering recommendation approaches. Evaluation is made on a large movie rating dataset by Netflix. Results are encouraging and better than those previously reported on this dataset.", + "link": "https://www.semanticscholar.org/paper/c1aa28159e768b75d0f4637a71d20da02efe1ef2", + "scraped_abstract": null, + "citation_best": 1132 + }, + { + "paper": "2135573205", + "venue": "1123349196", + "year": "2009", + "title": "centaur realizing the full potential of centralized wlans through a hybrid data path", + "label": [ + "31258907", + "113200698", + "177774035", + "555944384", + "105339364", + "120314980" + ], + "author": [ + "2132364384", + "2169898286", + "2337461407", + "3014881324", + "2283071955", + "2941968830", + "2987381829" + ], + "reference": [ + "138866336", + "150992880", + "1515340913", + "1517466511", + "1571396549", + "2025247901", + "2026343420", + "2033996817", + "2099057525", + "2100671335", + "2103331800", + "2110981381", + "2112750122", + "2123123552", + "2123536775", + "2142076918", + "2143026136", + "2143195409", + "2145368408", + "2148099880", + "2152496949", + "2155416924", + "2156061903", + "2161822490" + ], + "abstract": "enterprise wlans have made a dramatic shift towards centralized architectures in the recent past the reasons for such a change have been ease of management and better design of various control and security functions the data path of wlans however continues to use the distributed random access model as defined by the popular dcf mechanism of the 802 11 standard while theoretical results indicate that a centrally 
scheduled data path can achieve higher efficiency than its distributed counterpart the likely complexity of such a solution has inhibited practical consideration in this paper we take a fresh implementation and deployment oriented view in understanding data path choices in enterprise wlans we perform extensive measurements to characterize the impact of various design choices like scheduling granularity on the performance of a centralized scheduler and identify regions where such a centralized scheduler can provide the best gains our detailed evaluation with scheduling prototypes deployed on two different wireless testbeds indicates that dcf is quite robust in many scenarios but centralization can play a unique role in 1 mitigating hidden terminals scenarios which may occur infrequently but become pain points when they do and 2 exploiting exposed terminals scenarios which occur more frequently and limit the potential of successful concurrent transmissions motivated by these results we design and implement centaur a hybrid data path for enterprise wlans that combines the simplicity and ease of dcf with a limited amount of centralized scheduling from a unique vantage point our mechanisms do not require client cooperation and can support legacy 802 11 clients", + "title_raw": "CENTAUR: realizing the full potential of centralized wlans through a hybrid data path", + "abstract_raw": "Enterprise WLANs have made a dramatic shift towards centralized architectures in the recent past. The reasons for such a change have been ease of management and better design of various control and security functions. The data path of WLANs, however, continues to use the distributed, random-access model, as defined by the popular DCF mechanism of the 802.11 standard. While theoretical results indicate that a centrally scheduled data path can achieve higher efficiency than its distributed counterpart, the likely complexity of such a solution has inhibited practical consideration. 
In this paper, we take a fresh, implementation and deployment oriented, view in understanding data path choices in enterprise WLANs. We perform extensive measurements to characterize the impact of various design choices, like scheduling granularity on the performance of a centralized scheduler, and identify regions where such a centralized scheduler can provide the best gains. Our detailed evaluation with scheduling prototypes deployed on two different wireless testbeds indicates that DCF is quite robust in many scenarios, but centralization can play a unique role in 1) mitigating hidden terminals - scenarios which may occur infrequently, but become pain points when they do and 2) exploiting exposed terminals - scenarios which occur more frequently, and limit the potential of successful concurrent transmissions. Motivated by these results, we design and implement CENTAUR - a hybrid data path for enterprise WLANs, that combines the simplicity and ease of DCF with a limited amount of centralized scheduling from a unique vantage point. 
Our mechanisms do not require client cooperation and can support legacy 802.11 clients.", + "link": "https://www.semanticscholar.org/paper/6e8afbd88030ca925f24fe383a884cfbe00132fb", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "1519889149", + "venue": "1158363782", + "year": "2009", + "title": "trinc small trusted hardware for large distributed systems", + "label": [ + "100850083", + "49585438", + "31258907", + "520566109", + "169796023", + "105339364", + "200632571", + "2776799293", + "120314980" + ], + "author": [ + "2134681577", + "135218249", + "2150137935", + "139673579" + ], + "reference": [ + "13526124", + "119362848", + "239964209", + "575504419", + "1513557409", + "1562847663", + "1572593068", + "1596387381", + "1775886520", + "1892798954", + "2005903204", + "2020183851", + "2071958655", + "2082222018", + "2098475694", + "2104210894", + "2114349788", + "2114579022", + "2116021422", + "2119164809", + "2119245106", + "2121133177", + "2121510533", + "2122426592", + "2122686225", + "2123839898", + "2129467152", + "2130020754", + "2132779253", + "2139359217", + "2142123158", + "2143541350", + "2147524598", + "2158049821", + "2162733677", + "2165657606", + "2165847233", + "2611515161", + "3137092842" + ], + "abstract": "a simple yet remarkably powerful tool of selfish and malicious participants in a distributed system is equivocation making conflicting statements to others we present trinc a small trusted component that combats equivocation in large distributed systems consisting fundamentally of only a non decreasing counter and a key trinc provides a new primitive unique once in a lifetime attestations we show that trinc is practical versatile and easily applicable to a wide range of distributed systems its deployment is viable because it is simple and because its fundamental components a trusted counter and a key are already deployed in many new personal computers today we demonstrate trinc s versatility with three detailed case 
studies attested append only memory a2m peerreview and bittorrent we have implemented trinc and our three case studies using real currently available trusted hardware our evaluation shows that trinc eliminates most of the trusted storage needed to implement a2m significantly reduces communication overhead in peerreview and solves an open incentives issue in bittorrent microbenchmarks of our trinc implementation indicate directions for the design of future trusted hardware", + "title_raw": "TrInc: small trusted hardware for large distributed systems", + "abstract_raw": "A simple yet remarkably powerful tool of selfish and malicious participants in a distributed system is \"equivocation\": making conflicting statements to others. We present TrInc, a small, trusted component that combats equivocation in large, distributed systems. Consisting fundamentally of only a non-decreasing counter and a key, TrInc provides a new primitive: unique, once-in-a-lifetime attestations.\r\n\r\nWe show that TrInc is practical, versatile, and easily applicable to a wide range of distributed systems. Its deployment is viable because it is simple and because its fundamental components--a trusted counter and a key--are already deployed in many new personal computers today. We demonstrate TrInc's versatility with three detailed case studies: attested append-only memory (A2M), PeerReview, and BitTorrent.\r\n\r\nWe have implemented TrInc and our three case studies using real, currently available trusted hardware. Our evaluation shows that TrInc eliminates most of the trusted storage needed to implement A2M, significantly reduces communication overhead in PeerReview, and solves an open incentives issue in BitTorrent. 
Microbenchmarks of our TrInc implementation indicate directions for the design of future trusted hardware.", + "link": "https://www.semanticscholar.org/paper/48453fb3f73a8e5f5fbd3b59cdca06ef190fc214", + "scraped_abstract": null, + "citation_best": 147 + }, + { + "paper": "1693562991", + "venue": "1158363782", + "year": "2009", + "title": "sora high performance software radio using general purpose multi core processors", + "label": [ + "149635348", + "171115542", + "557945733", + "554876149", + "52531181", + "2777904410", + "78766204", + "195917429", + "19247436" + ], + "author": [ + "3002302137", + "2136590344", + "2102843907", + "2247643662", + "2276995589", + "2646669744", + "2236138046", + "2105028595", + "3080431737", + "241629895" + ], + "reference": [ + "190062532", + "1515793664", + "1991130923", + "2110500201", + "2113622688", + "2117876628", + "2123624675", + "2149539594", + "2152496949", + "2157936671", + "2411153393", + "2610335499", + "2911546266", + "2912369344" + ], + "abstract": "this paper presents sora a fully programmable software radio platform on commodity pc architectures sora combines the performance and fidelity of hardware sdr platforms with the programmability and flexibility of general purpose processor gpp sdr platforms sora uses both hardware and software techniques to address the challenges of using pc architectures for high speed sdr the sora hardware components consist of a radio front end for reception and transmission and a radio control board for high throughput low latency data transfer between radio and host memories sora makes extensive use of features of contemporary processor architectures to accelerate wireless protocol processing and satisfy protocol timing requirements including using dedicated cpu cores large low latency caches to store lookup tables and simd processor extensions for highly efficient physical layer processing on gpps using the sora platform we have developed a demonstration radio system called softwifi 
softwifi seamlessly interoperates with commercial 802 11a b g nics and achieves equivalent performance as commercial nics at each modulation", + "title_raw": "Sora: high performance software radio using general purpose multi-core processors", + "abstract_raw": "This paper presents Sora, a fully programmable software radio platform on commodity PC architectures. Sora combines the performance and fidelity of hardware SDR platforms with the programmability and flexibility of general-purpose processor (GPP) SDR platforms. Sora uses both hardware and software techniques to address the challenges of using PC architectures for high-speed SDR. The Sora hardware components consist of a radio front-end for reception and transmission, and a radio control board for high-throughput, low-latency data transfer between radio and host memories. Sora makes extensive use of features of contemporary processor architectures to accelerate wireless protocol processing and satisfy protocol timing requirements, including using dedicated CPU cores, large low-latency caches to store lookup tables, and SIMD processor extensions for highly efficient physical layer processing on GPPs. Using the Sora platform, we have developed a demonstration radio system called SoftWiFi. 
SoftWiFi seamlessly interoperates with commercial 802.11a/b/g NICs, and achieves equivalent performance as commercial NICs at each modulation.", + "link": "https://www.semanticscholar.org/paper/089ae21d8104ba919e2524ea0a1e28c506b55dd9", + "scraped_abstract": null, + "citation_best": 265 + }, + { + "paper": "2148707178", + "venue": "1127352206", + "year": "2009", + "title": "binary analysis for measurement and attribution of program performance", + "label": [ + "97686452", + "113775141", + "168065819", + "169590947", + "88482812", + "115168132", + "43126263", + "79403827" + ], + "author": [ + "1591516426", + "2249219846", + "3001419840" + ], + "reference": [ + "1544432721", + "1599277697", + "1983412169", + "2005795572", + "2009741116", + "2020009542", + "2051533028", + "2075402035", + "2082318969", + "2088909053", + "2095455436", + "2125295642", + "2134633067", + "2136434791", + "2144433126", + "2153530968", + "2160468841", + "2161563510", + "2162612712", + "2598939422" + ], + "abstract": "modern programs frequently employ sophisticated modular designs as a result performance problems cannot be identified from costs attributed to routines in isolation understanding code performance requires information about a routine s calling context existing performance tools fall short in this respect prior strategies for attributing context sensitive performance at the source level either compromise measurement accuracy remain too close to the binary or require custom compilers to understand the performance of fully optimized modular code we developed two novel binary analysis techniques 1 on the fly analysis of optimized machine code to enable minimally intrusive and accurate attribution of costs to dynamic calling contexts and 2 post mortem analysis of optimized machine code and its debugging sections to recover its program structure and reconstruct a mapping back to its source code by combining the recovered static program structure with dynamic calling context information 
we can accurately attribute performance metrics to calling contexts procedures loops and inlined instances of procedures we demonstrate that the fusion of this information provides unique insight into the performance of complex modular codes this work is implemented in the hpctoolkit performance tools http hpctoolkit org", + "title_raw": "Binary analysis for measurement and attribution of program performance", + "abstract_raw": "Modern programs frequently employ sophisticated modular designs. As a result, performance problems cannot be identified from costs attributed to routines in isolation; understanding code performance requires information about a routine's calling context. Existing performance tools fall short in this respect. Prior strategies for attributing context-sensitive performance at the source level either compromise measurement accuracy, remain too close to the binary, or require custom compilers. To understand the performance of fully optimized modular code, we developed two novel binary analysis techniques: 1) on-the-fly analysis of optimized machine code to enable minimally intrusive and accurate attribution of costs to dynamic calling contexts; and 2) post-mortem analysis of optimized machine code and its debugging sections to recover its program structure and reconstruct a mapping back to its source code. By combining the recovered static program structure with dynamic calling context information, we can accurately attribute performance metrics to calling contexts, procedures, loops, and inlined instances of procedures. We demonstrate that the fusion of this information provides unique insight into the performance of complex modular codes. 
This work is implemented in the HPCToolkit performance tools (http://hpctoolkit.org).", + "link": "https://www.semanticscholar.org/paper/ebe34d86e99516936c6fa961b73d3173d1219be6", + "scraped_abstract": null, + "citation_best": 57 + }, + { + "paper": "2062340141", + "venue": "1163618098", + "year": "2009", + "title": "native client a sandbox for portable untrusted x86 native code", + "label": [ + "134317101", + "202491316", + "111919701", + "63000827", + "548217200", + "169590947", + "118643609", + "138101251", + "199360897", + "123326733", + "115168132", + "41724716" + ], + "author": [ + "2662383020", + "684857623", + "320737890", + "2489443019", + "2143709853", + "214549440", + "2045150998", + "2111780276", + "326198645" + ], + "reference": [ + "15883", + "85177704", + "90663244", + "148956775", + "149441384", + "206628711", + "1491067132", + "1514258760", + "1516211918", + "1524958882", + "1563300346", + "1564165242", + "1644882639", + "1863670505", + "1864473657", + "1999063374", + "2006816934", + "2015657128", + "2033065121", + "2034711041", + "2059346393", + "2060031515", + "2079029390", + "2086277109", + "2095687239", + "2109047032", + "2116283852", + "2121542813", + "2123553986", + "2131726714", + "2142208662", + "2148686658", + "2151480972", + "2156908875", + "2159059513", + "2167800525", + "2171691057", + "2225798081", + "2270183489", + "2402789968", + "3027968135" + ], + "abstract": "this paper describes the design implementation and evaluation of native client a sandbox for untrusted x86 native code native client aims to give browser based applications the computational performance of native applications without compromising safety native client uses software fault isolation and a secure runtime to direct system interaction and side effects through interfaces managed by native client native client provides operating system portability for binary code while supporting performance oriented features generally absent from web application programming 
environments such as thread support instruction set extensions such as sse and use of compiler intrinsics and hand coded assembler we combine these properties in an open architecture that encourages community review and 3rd party tools", + "title_raw": "Native Client: A Sandbox for Portable, Untrusted x86 Native Code", + "abstract_raw": "This paper describes the design, implementation and evaluation of Native Client, a sandbox for untrusted x86 native code. Native Client aims to give browser-based applications the computational performance of native applications without compromising safety. Native Client uses software fault isolation and a secure runtime to direct system interaction and side effects through interfaces managed by Native Client. Native Client provides operating system portability for binary code while supporting performance-oriented features generally absent from web application programming environments, such as thread support, instruction set extensions such as SSE, and use of compiler intrinsics and hand-coded assembler. 
We combine these properties in an open architecture that encourages community review and 3rd-party tools.", + "link": "https://www.semanticscholar.org/paper/2a74da762f75eeb53f41d7c93ac1f08c62b15af9", + "scraped_abstract": null, + "citation_best": 540 + }, + { + "paper": "2150957281", + "venue": "1152462849", + "year": "2009", + "title": "white space networking with wi fi like connectivity", + "label": [ + "96122199", + "149946192", + "96391052", + "108037233", + "79403827" + ], + "author": [ + "2092550336", + "2128927081", + "139673579", + "2121229962", + "2149657484" + ], + "reference": [ + "78792064", + "1515340913", + "1552314771", + "1693562991", + "1971415662", + "1982575274", + "2006313952", + "2052642786", + "2053540085", + "2114570682", + "2114671906", + "2121669578", + "2125562505" + ], + "abstract": "networking over uhf white spaces is fundamentally different from conventional wi fi along three axes spatial variation temporal variation and fragmentation of the uhf spectrum each of these differences gives rise to new challenges for implementing a wireless network in this band we present the design and implementation of net7 the first wi fi like system constructed on top of uhf white spaces net7 incorporates a new adaptive spectrum assignment algorithm to handle spectrum variation and fragmentation and proposes a low overhead protocol to handle temporal variation builds on a simple technique called sift that reduces the time to detect transmissions in variable channel width systems by analyzing raw signals in the time domain we provide an extensive evaluation of the system in terms of a prototype implementation and detailed experimental and simulation results", + "title_raw": "White space networking with wi-fi like connectivity", + "abstract_raw": "Networking over UHF white spaces is fundamentally different from conventional Wi-Fi along three axes: spatial variation, temporal variation, and fragmentation of the UHF spectrum. 
Each of these differences gives rise to new challenges for implementing a wireless network in this band. We present the design and implementation of Net7, the first Wi-Fi like system constructed on top of UHF white spaces. Net7 incorporates a new adaptive spectrum assignment algorithm to handle spectrum variation and fragmentation, and proposes a low overhead protocol to handle temporal variation. builds on a simple technique, called SIFT, that reduces the time to detect transmissions in variable channel width systems by analyzing raw signals in the time domain. We provide an extensive evaluation of the system in terms of a prototype implementation and detailed experimental and simulation results.", + "link": "https://www.semanticscholar.org/paper/9435dfaad3cbb022e427d93c42f53adde15b7419", + "scraped_abstract": null, + "citation_best": 396 + }, + { + "paper": "2134842174", + "venue": "1140684652", + "year": "2009", + "title": "sources of evidence for vertical selection", + "label": [ + "164120249", + "124246873", + "118689300", + "97854310", + "2778563054", + "146658014", + "23123220" + ], + "author": [ + "2128535409", + "2159093489", + "2148123616", + "2614540960" + ], + "reference": [ + "232533489", + "1964653195", + "1986828474", + "1990388042", + "1995262888", + "1998548536", + "2002388301", + "2002682102", + "2016892599", + "2034927834", + "2042186555", + "2079168273", + "2086253379", + "2113641473", + "2155540986", + "2157485377", + "2160306106", + "2163375626" + ], + "abstract": "web search providers often include search services for domain specific subcollections called verticals such as news images videos job postings company summaries and artist profiles we address the problem of vertical selection predicting relevant verticals if any for queries issued to the search engine s main web search page in contrast to prior query classification and resource selection tasks vertical selection is associated with unique resources that can inform the classification 
decision we focus on three sources of evidence 1 the query string from which features are derived independent of external resources 2 logs of queries previously issued directly to the vertical and 3 corpora representative of vertical content we focus on 18 different verticals which differ in terms of semantics media type size and level of query traffic we compare our method to prior work in federated search and retrieval effectiveness prediction an in depth error analysis reveals unique challenges across different verticals and provides insight into vertical selection for future work", + "title_raw": "Sources of evidence for vertical selection", + "abstract_raw": "Web search providers often include search services for domain-specific subcollections, called verticals, such as news, images, videos, job postings, company summaries, and artist profiles. We address the problem of vertical selection, predicting relevant verticals (if any) for queries issued to the search engine's main web search page. In contrast to prior query classification and resource selection tasks, vertical selection is associated with unique resources that can inform the classification decision. We focus on three sources of evidence: (1) the query string, from which features are derived independent of external resources, (2) logs of queries previously issued directly to the vertical, and (3) corpora representative of vertical content. We focus on 18 different verticals, which differ in terms of semantics, media type, size, and level of query traffic. We compare our method to prior work in federated search and retrieval effectiveness prediction. 
An in-depth error analysis reveals unique challenges across different verticals and provides insight into vertical selection for future work.", + "link": "https://www.semanticscholar.org/paper/47cdb987ec20ed2c1cdfca75ce2edf982ecfc5c4", + "scraped_abstract": null, + "citation_best": 203 + }, + { + "paper": "2169045095", + "venue": "1131589359", + "year": "2009", + "title": "the age of gossip spatial mean field regime", + "label": [ + "101780184", + "31258907", + "2780717508", + "98045186", + "120314980" + ], + "author": [ + "329545075", + "323205836", + "2232258516" + ], + "reference": [ + "1572481965", + "1914027636", + "2010309395", + "2035232548", + "2038562061", + "2058570444", + "2065284761", + "2072492565", + "2082168227", + "2109528718", + "2128466234", + "2142522527", + "2145117335", + "2165469059" + ], + "abstract": "disseminating a piece of information or updates for a piece of information has been shown to benefit greatly from simple randomized procedures sometimes referred to as gossiping or epidemic algorithms similarly in a network where mobile nodes occasionally receive updated content from a base station gossiping using opportunistic contacts allows for recent updates to be efficiently maintained for a large number of nodes in this case however gossiping depends on node mobility for this reason we introduce a new gossip model with mobile nodes moving between different classes that can represent locations or states which determine gossiping behavior of the nodes here we prove that when the number of mobile nodes becomes large the age of the latest updates received by mobile nodes approaches a deterministic mean field regime more precisely we show that the occupancy measure of the process constructed with the ages defined above converges to a deterministic limit that can be entirely characterized by differential equations this major simplification allows us to characterize how mobility source inputs and gossiping influence the age distribution for low 
and high ages it also leads to a scalable numerical evaluation of the performance of mobile update systems which we validate using a trace of 500 taxicabs and use to propose infrastructure deployment", + "title_raw": "The age of gossip: spatial mean field regime", + "abstract_raw": "Disseminating a piece of information, or updates for a piece of information, has been shown to benefit greatly from simple randomized procedures, sometimes referred to as gossiping, or epidemic algorithms. Similarly, in a network where mobile nodes occasionally receive updated content from a base station, gossiping using opportunistic contacts allows for recent updates to be efficiently maintained, for a large number of nodes. In this case, however, gossiping depends on node mobility. For this reason, we introduce a new gossip model, with mobile nodes moving between different classes that can represent locations or states, which determine gossiping behavior of the nodes. Here we prove that, when the number of mobile nodes becomes large, the age of the latest updates received by mobile nodes approaches a deterministic mean-field regime. More precisely, we show that the occupancy measure of the process constructed, with the ages defined above, converges to a deterministic limit that can be entirely characterized by differential equations. This major simplification allows us to characterize how mobility, source inputs and gossiping influence the age distribution for low and high ages. 
It also leads to a scalable numerical evaluation of the performance of mobile update systems, which we validate (using a trace of 500 taxicabs) and use to propose infrastructure deployment.", + "link": "https://www.semanticscholar.org/paper/a7b5506b5652b5ddf944f24cb923d63d737aaf26", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2126354234", + "venue": "1175089206", + "year": "2009", + "title": "generating example data for dataflow programs", + "label": [ + "96324660", + "80444323", + "40140605", + "184337299", + "516187249", + "2779442710", + "199360897", + "2781226572" + ], + "author": [ + "2023450202", + "2700056501", + "2274204266" + ], + "reference": [ + "1532667386", + "1574590769", + "1964857063", + "2022431239", + "2040924621", + "2057128392", + "2098935637", + "2100830825", + "2101629181", + "2103202014", + "2122465391", + "2130622824", + "2149576945", + "2157054705", + "2164636932", + "2171588311" + ], + "abstract": "while developing data centric programs users often run portions of their programs over real data to see how they behave and what the output looks like doing so makes it easier to formulate understand and compose programs correctly compared with examination of program logic alone for large input data sets these experimental runs can be time consuming and inefficient unfortunately sampling the input data does not always work well because selective operations such as filter and join can lead to empty results over sampled inputs and unless certain indexes are present there is no way to generate biased samples efficiently consequently new methods are needed for generating example input data for data centric programs we focus on an important category of data centric programs dataflow programs which are best illustrated by displaying the series of intermediate data tables that occur between each pair of operations we introduce and study the problem of generating example intermediate data for dataflow programs in a manner that 
illustrates the semantics of the operators while keeping the example data small we identify two major obstacles that impede naive approaches namely 1 highly selective operators and 2 noninvertible operators and offer techniques for dealing with these obstacles our techniques perform well on real dataflow programs used at yahoo for web analytics", + "title_raw": "Generating example data for dataflow programs", + "abstract_raw": "While developing data-centric programs, users often run (portions of) their programs over real data, to see how they behave and what the output looks like. Doing so makes it easier to formulate, understand and compose programs correctly, compared with examination of program logic alone. For large input data sets, these experimental runs can be time-consuming and inefficient. Unfortunately, sampling the input data does not always work well, because selective operations such as filter and join can lead to empty results over sampled inputs, and unless certain indexes are present there is no way to generate biased samples efficiently. Consequently new methods are needed for generating example input data for data-centric programs. We focus on an important category of data-centric programs, dataflow programs, which are best illustrated by displaying the series of intermediate data tables that occur between each pair of operations. We introduce and study the problem of generating example intermediate data for dataflow programs, in a manner that illustrates the semantics of the operators while keeping the example data small. We identify two major obstacles that impede naive approaches, namely (1) highly selective operators and (2) noninvertible operators, and offer techniques for dealing with these obstacles. Our techniques perform well on real dataflow programs used at Yahoo! 
for web analytics.", + "link": "https://www.semanticscholar.org/paper/a3d069cba4e95b307070ec642e013347acefa891", + "scraped_abstract": null, + "citation_best": 58 + }, + { + "paper": "2133394135", + "venue": "1171178643", + "year": "2009", + "title": "fawn a fast array of wimpy nodes", + "label": [ + "173608175", + "121163568", + "56856351", + "29140674", + "200632571", + "194739806", + "190475519", + "70440993" + ], + "author": [ + "2130397481", + "2101901438", + "2151237659", + "154216248", + "2125613926", + "2089062156" + ], + "reference": [ + "530071", + "151871119", + "652459597", + "1486947867", + "1497150730", + "1524103123", + "1526843360", + "1561336685", + "1571144861", + "1576397915", + "1815612875", + "1967138129", + "1981029608", + "1999377222", + "2006803879", + "2041315700", + "2050619600", + "2058178853", + "2061388644", + "2064359039", + "2075345089", + "2075854425", + "2089138728", + "2103565249", + "2103877554", + "2106264466", + "2112479717", + "2113689712", + "2118950929", + "2119026482", + "2119245106", + "2119565742", + "2119764208", + "2121288607", + "2121527069", + "2122465391", + "2125499854", + "2126210439", + "2130531694", + "2131726714", + "2133718097", + "2134807578", + "2143065961", + "2147504831", + "2150708794", + "2151745115", + "2153204735", + "2153704625", + "2154596969", + "2157614013", + "2158049821", + "2167898414", + "2171299219", + "2205436351", + "2229005743", + "3150290710" + ], + "abstract": "this paper presents a new cluster architecture for low power data intensive computing fawn couples low power embedded cpus to small amounts of local flash storage and balances computation and i o capabilities to enable efficient massively parallel access to data the key contributions of this paper are the principles of the fawn architecture and the design and implementation of fawn kv a consistent replicated highly available and high performance key value storage system built on a fawn prototype our design centers around purely log 
structured datastores that provide the basis for high performance on flash storage as well as for replication and consistency obtained using chain replication on a consistent hashing ring our evaluation demonstrates that fawn clusters can handle roughly 350 key value queries per joule of energy two orders of magnitude more than a disk based system", + "title_raw": "FAWN: a fast array of wimpy nodes", + "abstract_raw": "This paper presents a new cluster architecture for low-power data-intensive computing. FAWN couples low-power embedded CPUs to small amounts of local flash storage, and balances computation and I/O capabilities to enable efficient, massively parallel access to data. The key contributions of this paper are the principles of the FAWN architecture and the design and implementation of FAWN-KV--a consistent, replicated, highly available, and high-performance key-value storage system built on a FAWN prototype. Our design centers around purely log-structured datastores that provide the basis for high performance on flash storage, as well as for replication and consistency obtained using chain replication on a consistent hashing ring. 
Our evaluation demonstrates that FAWN clusters can handle roughly 350 key-value queries per Joule of energy--two orders of magnitude more than a disk-based system.", + "link": "https://www.semanticscholar.org/paper/42c39871e3032bca82d40ebc3b0dd26edc5f7c53", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2151062909", + "venue": "1171178643", + "year": "2009", + "title": "routebricks exploiting parallelism to scale software routers", + "label": [ + "118524514", + "152174988", + "111919701", + "165696696", + "93996380", + "2781172179", + "2775896111", + "86726114", + "2777904410", + "78766204" + ], + "author": [ + "1893457898", + "1968177324", + "305900328", + "2117666619", + "2086421335", + "2496012467", + "2079730298", + "1994395177", + "190228929" + ], + "reference": [ + "28413134", + "1501077214", + "1587207268", + "1988294273", + "1997734481", + "2010365467", + "2026505985", + "2033000172", + "2062362478", + "2069749081", + "2096415552", + "2098334439", + "2101893654", + "2102110274", + "2103701891", + "2104428270", + "2106083581", + "2116736776", + "2117358494", + "2118769026", + "2121021091", + "2125825623", + "2130531694", + "2139120850", + "2147118406", + "2168903090", + "2294246362", + "2463612733", + "2496945377", + "2890957263" + ], + "abstract": "we revisit the problem of scaling software routers motivated by recent advances in server technology that enable high speed parallel processing a feature router workloads appear ideally suited to exploit we propose a software router architecture that parallelizes router functionality both across multiple servers and across multiple cores within a single server by carefully exploiting parallelism at every opportunity we demonstrate a 35gbps parallel router prototype this router capacity can be linearly scaled through the use of additional servers our prototype router is fully programmable using the familiar click linux environment and is built entirely from off the shelf general purpose 
server hardware", + "title_raw": "RouteBricks: exploiting parallelism to scale software routers", + "abstract_raw": "We revisit the problem of scaling software routers, motivated by recent advances in server technology that enable high-speed parallel processing--a feature router workloads appear ideally suited to exploit. We propose a software router architecture that parallelizes router functionality both across multiple servers and across multiple cores within a single server. By carefully exploiting parallelism at every opportunity, we demonstrate a 35Gbps parallel router prototype; this router capacity can be linearly scaled through the use of additional servers. Our prototype router is fully programmable using the familiar Click/Linux environment and is built entirely from off-the-shelf, general-purpose server hardware.", + "link": "https://www.semanticscholar.org/paper/0a59166593f0a2fa260f16fd853299c9f0863fdf", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2136310957", + "venue": "1171178643", + "year": "2009", + "title": "sel4 formal verification of an os kernel", + "label": [ + "2777127024", + "55439883", + "169590947", + "111498074", + "75606506", + "199360897", + "120763227", + "116253237", + "80444323" + ], + "author": [ + "2121963599", + "735375398", + "2229929104", + "121569211", + "2805972962", + "1240396020", + "2893346842", + "2122232256", + "241320150", + "2103706242", + "1976257308", + "2089781502", + "266978572" + ], + "reference": [ + "19830081", + "30213274", + "201784039", + "1476411550", + "1494673397", + "1523275077", + "1525928249", + "1533238174", + "1568755417", + "1596552075", + "1607932714", + "1888392380", + "1937179622", + "1957415375", + "1980491540", + "2014942166", + "2019404692", + "2021994557", + "2027168655", + "2029224396", + "2039804807", + "2048500751", + "2053262709", + "2071542068", + "2083469471", + "2087832144", + "2089661946", + "2093852121", + "2095954493", + "2098592421", + "2104634303", + 
"2106115112", + "2106192381", + "2106211802", + "2106412703", + "2115696550", + "2116860113", + "2117181435", + "2118341398", + "2129695855", + "2130970533", + "2137186143", + "2138662592", + "2140508184", + "2142286787", + "2146530476", + "2147448476", + "2150210903", + "2160022481", + "2162553649", + "2163117779", + "2166004296", + "2167800525", + "2167911131", + "2171069290", + "2491926874", + "2611598995" + ], + "abstract": "complete formal verification is the only known way to guarantee that a system is free of programming errors we present our experience in performing the formal machine checked verification of the sel4 microkernel from an abstract specification down to its c implementation we assume correctness of compiler assembly code and hardware and we used a unique design approach that fuses formal and operating systems techniques to our knowledge this is the first formal proof of functional correctness of a complete general purpose operating system kernel functional correctness means here that the implementation always strictly follows our high level abstract specification of kernel behaviour this encompasses traditional design and implementation safety properties such as the kernel will never crash and it will never perform an unsafe operation it also proves much more we can predict precisely how the kernel will behave in every possible situation sel4 a third generation microkernel of l4 provenance comprises 8 700 lines of c code and 600 lines of assembler its performance is comparable to other high performance l4 kernels", + "title_raw": "seL4: formal verification of an OS kernel", + "abstract_raw": "Complete formal verification is the only known way to guarantee that a system is free of programming errors. We present our experience in performing the formal, machine-checked verification of the seL4 microkernel from an abstract specification down to its C implementation. 
We assume correctness of compiler, assembly code, and hardware, and we used a unique design approach that fuses formal and operating systems techniques. To our knowledge, this is the first formal proof of functional correctness of a complete, general-purpose operating-system kernel. Functional correctness means here that the implementation always strictly follows our high-level abstract specification of kernel behaviour. This encompasses traditional design and implementation safety properties such as the kernel will never crash, and it will never perform an unsafe operation. It also proves much more: we can predict precisely how the kernel will behave in every possible situation. seL4, a third-generation microkernel of L4 provenance, comprises 8,700 lines of C code and 600 lines of assembler. Its performance is comparable to other high-performance L4 kernels.", + "link": "https://www.semanticscholar.org/paper/089895ef5f96bdb7eed9dd54f482c22350c2f30d", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2139459444", + "venue": "1166315290", + "year": "2009", + "title": "mouse 2 0 multi touch meets the mouse", + "label": [ + "26713055", + "107457646", + "207347870", + "121449826", + "206539335", + "194995250", + "2780753109" + ], + "author": [ + "2165003359", + "2098553916", + "2617234186", + "1886754024", + "2065024650", + "431806573", + "2148014207", + "1794776656", + "2168200088", + "2151535930", + "2124876992" + ], + "reference": [ + "1600841829", + "1902352447", + "1970884959", + "1970957984", + "2005198142", + "2008150314", + "2049123559", + "2063589647", + "2063812706", + "2077269583", + "2096147482", + "2096986574", + "2097248932", + "2102005779", + "2115918195", + "2125872152", + "2131588614", + "2133258886", + "2140982079", + "2143272542", + "2148109062", + "2158707444", + "2161121228", + "2161401111", + "2200839525", + "2517148449", + "3146114470" + ], + "abstract": "in this paper we present novel input devices that combine the standard 
capabilities of a computer mouse with multi touch sensing our goal is to enrich traditional pointer based desktop interactions with touch and gestures to chart the design space we present five different multi touch mouse implementations each explores a different touch sensing strategy which leads to differing form factors and hence interactive possibilities in addition to the detailed description of hardware and software implementations of our prototypes we discuss the relative strengths limitations and affordances of these novel input devices as informed by the results of a preliminary user study", + "title_raw": "Mouse 2.0: multi-touch meets the mouse", + "abstract_raw": "In this paper we present novel input devices that combine the standard capabilities of a computer mouse with multi-touch sensing. Our goal is to enrich traditional pointer-based desktop interactions with touch and gestures. To chart the design space, we present five different multi-touch mouse implementations. Each explores a different touch sensing strategy, which leads to differing form-factors and hence interactive possibilities. 
In addition to the detailed description of hardware and software implementations of our prototypes, we discuss the relative strengths, limitations and affordances of these novel input devices as informed by the results of a preliminary user study.", + "link": "https://www.semanticscholar.org/paper/fe9d0954465aa5e9c9a56fbab1fca784a224463e", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2120342618", + "venue": "1133523790", + "year": "2009", + "title": "a unified approach to ranking in probabilistic databases", + "label": [ + "119857082", + "189430467", + "49937458", + "124975894", + "77088390", + "155846161", + "2778865114", + "98763669", + "177264268", + "124101348", + "197927960", + "86037889", + "137836250" + ], + "author": [ + "3192006532", + "2110419712", + "2002742946" + ], + "reference": [ + "37148511", + "179852215", + "1486776102", + "1491547607", + "1575805590", + "1587990862", + "1603822192", + "1963853643", + "1973435495", + "1989392187", + "1991249762", + "1992609556", + "1996544809", + "1997141048", + "2009688537", + "2013333366", + "2020298878", + "2022501110", + "2024400846", + "2027752285", + "2029948740", + "2041358936", + "2041763948", + "2044494469", + "2047221353", + "2051834357", + "2069870183", + "2078132413", + "2078686663", + "2092819480", + "2104795328", + "2109067452", + "2112070477", + "2114258210", + "2120825705", + "2122483056", + "2125402103", + "2128230033", + "2129035130", + "2133246278", + "2138271690", + "2140237757", + "2143331230", + "2149166361", + "2153508518", + "2154368191", + "2160321559", + "2164688391", + "2165211504", + "2166994031", + "2166998862", + "2167863042", + "2169600045", + "2170896764", + "2171560571", + "2171776999", + "2181455273", + "2293299776", + "2952121399", + "2962942694" + ], + "abstract": "the dramatic growth in the number of application domains that naturally generate probabilistic uncertain data has resulted in a need for efficiently supporting complex querying and decision 
making over such data in this paper we present a unified approach to ranking and top k query processing in probabilistic databases by viewing it as a multi criteria optimization problem and by deriving a set of features that capture the key properties of a probabilistic dataset that dictate the ranked result we contend that a single specific ranking function may not suffice for probabilistic databases and we instead propose two parameterized ranking functions called prf and prfe that generalize or can approximate many of the previously proposed ranking functions we present novel generating functions based algorithms for efficiently ranking large datasets according to these ranking functions even if the datasets exhibit complex correlations modeled using probabilistic and xor trees or markov networks we further propose that the parameters of the ranking function be learned from user preferences and we develop an approach to learn those parameters finally we present a comprehensive experimental study that illustrates the effectiveness of our parameterized ranking functions especially prfe at approximating other ranking functions and the scalability of our proposed algorithms for exact or approximate ranking", + "title_raw": "A unified approach to ranking in probabilistic databases", + "abstract_raw": "The dramatic growth in the number of application domains that naturally generate probabilistic, uncertain data has resulted in a need for efficiently supporting complex querying and decision-making over such data. In this paper, we present a unified approach to ranking and top-k query processing in probabilistic databases by viewing it as a multi-criteria optimization problem, and by deriving a set of features that capture the key properties of a probabilistic dataset that dictate the ranked result. 
We contend that a single, specific ranking function may not suffice for probabilistic databases, and we instead propose two parameterized ranking functions, called PRF\u03c9 and PRFe, that generalize or can approximate many of the previously proposed ranking functions. We present novel generating functions-based algorithms for efficiently ranking large datasets according to these ranking functions, even if the datasets exhibit complex correlations modeled using probabilistic and/xor trees or Markov networks. We further propose that the parameters of the ranking function be learned from user preferences, and we develop an approach to learn those parameters. Finally, we present a comprehensive experimental study that illustrates the effectiveness of our parameterized ranking functions, especially PRFe, at approximating other ranking functions and the scalability of our proposed algorithms for exact or approximate ranking.", + "link": "https://www.semanticscholar.org/paper/80b1e35ca6888a1f4441c9dce61ef650df356ba5", + "scraped_abstract": null, + "citation_best": 151 + }, + { + "paper": "192051504", + "venue": "1184914352", + "year": "2008", + "title": "optimal false name proof voting rules with costly voting", + "label": [ + "142696051", + "124101348" + ], + "author": [ + "2161876237", + "24490792" + ], + "reference": [ + "133284343", + "1548462700", + "1600189620", + "1603565383", + "1664015598", + "1826494558", + "2014607560", + "2015007620", + "2029050771", + "2043715088", + "2058936354", + "2071876873", + "2072410439", + "2078040677", + "2106498092", + "2120100612", + "2129678216", + "2131402800", + "2135954328", + "2288510092", + "2333807990", + "3124207685" + ], + "abstract": "one way for agents to reach a joint decision is to vote over the alternatives in open anonymous settings such as the internet an agent can vote more than once without being detected a voting rule is false name proof if no agent ever benefits from casting additional votes previous work has 
shown that all false name proof voting rules are unresponsive to agents preferences however that work implicitly assumes that casting additional votes is costless in this paper we consider what happens if there is a cost to casting additional votes we characterize the optimal most responsive false name proofwith costs voting rule for 2 alternatives in sharp contrast to the costless setting we prove that as the voting population grows larger the probability that this rule selects the majority winner converges to 1 we also characterize the optimal group false name proof rule for 2 alternatives which is robust to coalitions of agents sharing the costs of additional votes unfortunately the probability that this rule chooses the majority winner as the voting population grows larger is relatively low we derive an analogous rule in a setting with 3 alternatives and provide bounding results and computational approaches for settings with 4 or more alternatives", + "title_raw": "Optimal false-name-proof voting rules with costly voting", + "abstract_raw": "One way for agents to reach a joint decision is to vote over the alternatives. In open, anonymous settings such as the Internet, an agent can vote more than once without being detected. A voting rule is false-name-proof if no agent ever benefits from casting additional votes. Previous work has shown that all false-name-proof voting rules are unresponsive to agents' preferences. However, that work implicitly assumes that casting additional votes is costless. In this paper, we consider what happens if there is a cost to casting additional votes. We characterize the optimal (most responsive) false-name-proofwith-costs voting rule for 2 alternatives. In sharp contrast to the costless setting, we prove that as the voting population grows larger, the probability that this rule selects the majority winner converges to 1. 
We also characterize the optimal group false-name-proof rule for 2 alternatives, which is robust to coalitions of agents sharing the costs of additional votes. Unfortunately, the probability that this rule chooses the majority winner as the voting population grows larger is relatively low. We derive an analogous rule in a setting with 3 alternatives, and provide bounding results and computational approaches for settings with 4 or more alternatives.", + "link": "https://www.semanticscholar.org/paper/098da9f6ad978959c906be83fd5d5b24bacb214b", + "scraped_abstract": null, + "citation_best": 41 + }, + { + "paper": "1816489643", + "venue": "1184914352", + "year": "2008", + "title": "how good is almost perfect", + "label": [ + "311688", + "19889080", + "139979381", + "46011968", + "200246849", + "194036150", + "173801870", + "114290370", + "125583679", + "85543514", + "127705205" + ], + "author": [ + "451163712", + "2141104126" + ], + "reference": [ + "92677169", + "181299697", + "1493900559", + "1534467528", + "1545688112", + "1571310641", + "1603934459", + "1608409935", + "1646038686", + "1773969059", + "1969483458", + "1971439026", + "2032492581", + "2045734422", + "2061146398", + "2070878381", + "2100527215", + "2138481840", + "2150470619", + "2164322797", + "2171053395", + "2389622903", + "3203954530" + ], + "abstract": "heuristic search using algorithms such as a and ida is the prevalent method for obtaining optimal sequential solutions for classical planning tasks theoretical analyses of these classical search algorithms such as the well known results of pohl gaschnig and pearl suggest that such heuristic search algorithms can obtain better than exponential scaling behaviour provided that the heuristics are accurate enough here we show that for a number of common planning benchmark domains including ones that admit optimal solution in polynomial time general search algorithms such as a must necessarily explore an exponential number of search nodes even under the 
optimistic assumption of almost perfect heuristic estimators whose heuristic error is bounded by a small additive constant our results shed some light on the comparatively bad performance of optimal heuristic search approaches in simple planning domains such as gripper they suggest that in many applications further improvements in run time require changes to other parts of the search algorithm than the heuristic estimator", + "title_raw": "How good is almost perfect", + "abstract_raw": "Heuristic search using algorithms such as A* and IDA* is the prevalent method for obtaining optimal sequential solutions for classical planning tasks. Theoretical analyses of these classical search algorithms, such as the well-known results of Pohl, Gaschnig and Pearl, suggest that such heuristic search algorithms can obtain better than exponential scaling behaviour, provided that the heuristics are accurate enough.\r\n\r\nHere, we show that for a number of common planning benchmark domains, including ones that admit optimal solution in polynomial time, general search algorithms such as A* must necessarily explore an exponential number of search nodes even under the optimistic assumption of almost perfect heuristic estimators, whose heuristic error is bounded by a small additive constant.\r\n\r\nOur results shed some light on the comparatively bad performance of optimal heuristic search approaches in \"simple\" planning domains such as GRIPPER. 
They suggest that in many applications, further improvements in run-time require changes to other parts of the search algorithm than the heuristic estimator.", + "link": "https://www.semanticscholar.org/paper/4ed56a5f8138d158a1089bf7b472642a64647b3e", + "scraped_abstract": null, + "citation_best": 107 + }, + { + "paper": "2166905217", + "venue": "1188739475", + "year": "2008", + "title": "a new string to dependency machine translation algorithm with a target dependency language model", + "label": [ + "622187", + "137293760", + "111219384", + "40969351", + "148526163", + "24687705", + "135784402", + "53893814", + "130597682", + "11413529", + "39608478", + "204321447", + "51802942", + "203005215" + ], + "author": [ + "2161198055", + "2139528224", + "2949606947" + ], + "reference": [ + "76590478", + "1479669738", + "1551202288", + "2037894654", + "2096466920", + "2096894677", + "2101105183", + "2105891181", + "2116410915", + "2116957398", + "2129574239", + "2144279206", + "2149327368", + "2152263452", + "2153439141", + "2153800732", + "2156985047", + "2158847908", + "2160382364", + "2166781037", + "2167980204", + "2437005631" + ], + "abstract": "in this paper we propose a novel string todependency algorithm for statistical machine translation with this new framework we employ a target dependency language model during decoding to exploit long distance word relations which are unavailable with a traditional n gram language model our experiments show that the string to dependency decoder achieves 1 48 point improvement in bleu and 2 53 point improvement in ter compared to a standard hierarchical string tostring system on the nist 04 chinese english evaluation set", + "title_raw": "A New String-to-Dependency Machine Translation Algorithm with a Target Dependency Language Model", + "abstract_raw": "In this paper, we propose a novel string-todependency algorithm for statistical machine translation. 
With this new framework, we employ a target dependency language model during decoding to exploit long distance word relations, which are unavailable with a traditional n-gram language model. Our experiments show that the string-to-dependency decoder achieves 1.48 point improvement in BLEU and 2.53 point improvement in TER compared to a standard hierarchical string-tostring system on the NIST 04 Chinese-English evaluation set.", + "link": "https://www.semanticscholar.org/paper/248d32911670e551db4835a5a5279d2d9673ee37", + "scraped_abstract": null, + "citation_best": 260 + }, + { + "paper": "2134729743", + "venue": "1188739475", + "year": "2008", + "title": "forest reranking discriminative parsing with non local features", + "label": [ + "119857082", + "206134035", + "97931131", + "186644900", + "204321447", + "2776214188" + ], + "author": [ + "2111840783" + ], + "reference": [ + "108437174", + "147273232", + "179314280", + "1529490620", + "1535015163", + "1632114991", + "2008652694", + "2037894654", + "2098050104", + "2098823886", + "2116410915", + "2125712079", + "2152302696", + "2161227214", + "2163568299", + "2168194229", + "2169362686", + "2437005631", + "3021452258" + ], + "abstract": "conventional n best reranking techniques often suffer from the limited scope of the nbest list which rules out many potentially good alternatives we instead propose forest reranking a method that reranks a packed forest of exponentially many parses since exact inference is intractable with non local features we present an approximate algorithm inspired by forest rescoring that makes discriminative training practical over the whole treebank our final result an f score of 91 7 outperforms both 50 best and 100 best reranking baselines and is better than any previously reported systems trained on the treebank", + "title_raw": "Forest Reranking: Discriminative Parsing with Non-Local Features", + "abstract_raw": "Conventional n-best reranking techniques often suffer from the limited 
scope of the nbest list, which rules out many potentially good alternatives. We instead propose forest reranking, a method that reranks a packed forest of exponentially many parses. Since exact inference is intractable with non-local features, we present an approximate algorithm inspired by forest rescoring that makes discriminative training practical over the whole Treebank. Our final result, an F-score of 91.7, outperforms both 50-best and 100-best reranking baselines, and is better than any previously reported systems trained on the Treebank.", + "link": "https://www.semanticscholar.org/paper/e9c7169aba6c8ff772aba06a19a8570b1e01071f", + "scraped_abstract": null, + "citation_best": 264 + }, + { + "paper": "2105512500", + "venue": "1163450153", + "year": "2008", + "title": "an error model for pointing based on fitts law", + "label": [ + "40969351", + "44154836", + "152877465" + ], + "author": [ + "318516288", + "849908952", + "2264327102", + "2206355151" + ], + "reference": [ + "84091793", + "1857789879", + "1969640700", + "1971475583", + "1973437133", + "1979251983", + "1981828094", + "1991691398", + "1996884089", + "2003987904", + "2010085418", + "2010491516", + "2017385726", + "2022416142", + "2029047147", + "2041337335", + "2050726337", + "2051198855", + "2061737487", + "2072816013", + "2076863986", + "2089669290", + "2094163178", + "2112103637", + "2122544819", + "2134512786", + "2135101375", + "2139419943", + "2146600920", + "2147160183", + "2148334620", + "2153657884", + "2156665908", + "2168443748", + "2179427518", + "2397728720", + "3080898489", + "3188418648" + ], + "abstract": "for decades fitts law 1954 has been used to model pointing time in user interfaces as with any rapid motor act faster pointing movements result in increased errors but although prior work has examined accuracy as the spread of hits no work has formulated a predictive model for error rates 0 100 based on fitts law parameters we show that fitts law mathematically implies a 
predictive error rate model which we derive we then describe an experiment in which target size target distance and movement time are manipulated our results show a strong model fit a regression analysis of observed vs predicted error rates yields a correlation of r2 959 for n 90 points furthermore we show that the effect on error rate of target size w is greater than that of target distance a indicating a departure from fitts law which maintains that w and a contribute proportionally to index of difficulty id our error model can be used with fitts law to estimate and predict error rates along with speeds providing a framework for unifying this dichotomy", + "title_raw": "An error model for pointing based on Fitts' law", + "abstract_raw": "For decades, Fitts' law (1954) has been used to model pointing time in user interfaces. As with any rapid motor act, faster pointing movements result in increased errors. But although prior work has examined accuracy as the \"spread of hits,\" no work has formulated a predictive model for error rates (0-100%) based on Fitts' law parameters. We show that Fitts' law mathematically implies a predictive error rate model, which we derive. We then describe an experiment in which target size, target distance, and movement time are manipulated. Our results show a strong model fit: a regression analysis of observed vs. predicted error rates yields a correlation of R2=.959 for N=90 points. Furthermore, we show that the effect on error rate of target size (W) is greater than that of target distance (A), indicating a departure from Fitts' law, which maintains that W and A contribute proportionally to index of difficulty (ID). 
Our error model can be used with Fitts' law to estimate and predict error rates along with speeds, providing a framework for unifying this dichotomy.", + "link": "https://www.semanticscholar.org/paper/2c60770a0535a8c9b03c02d39b48dbc7fe1396d5", + "scraped_abstract": null, + "citation_best": 129 + }, + { + "paper": "2152414382", + "venue": "1163450153", + "year": "2008", + "title": "improving the performance of motor impaired users with automatically generated ability based interfaces", + "label": [ + "107457646", + "49774154", + "89505385", + "2777904410", + "109364899" + ], + "author": [ + "728065937", + "318516288", + "560881892" + ], + "reference": [ + "1512719169", + "1848079635", + "1857789879", + "1965021810", + "1969023376", + "1969780871", + "1993593321", + "2022728909", + "2024281943", + "2041584768", + "2085015768", + "2085705328", + "2094420036", + "2094958958", + "2102151007", + "2106652774", + "2115210820", + "2121432589", + "2130391181", + "2132221661", + "2138539798", + "2149920447", + "2158533558", + "2163790854", + "2166252986" + ], + "abstract": "we evaluate two systems for automatically generating personalized interfaces adapted to the individual motor capabilities of users with motor impairments the first system supple adapts to users capabilities indirectly by first using the arnauld preference elicitation engine to model a user s preferences regarding how he or she likes the interfaces to be created the second system supple models a user s motor abilities directly from a set of one time motor performance tests in a study comparing these approaches to baseline interfaces participants with motor impairments were 26 4 faster using ability based user interfaces generated by supple they also made 73 fewer errors strongly preferred those interfaces to the manufacturers defaults and found them more efficient easier to use and much less physically tiring these findings indicate that rather than requiring some users with motor impairments to adapt 
themselves to software using separate assistive technologies software can now adapt itself to the capabilities of its users", + "title_raw": "Improving the performance of motor-impaired users with automatically-generated, ability-based interfaces", + "abstract_raw": "We evaluate two systems for automatically generating personalized interfaces adapted to the individual motor capabilities of users with motor impairments. The first system, SUPPLE, adapts to users' capabilities indirectly by first using the ARNAULD preference elicitation engine to model a user's preferences regarding how he or she likes the interfaces to be created. The second system, SUPPLE++, models a user's motor abilities directly from a set of one-time motor performance tests. In a study comparing these approaches to baseline interfaces, participants with motor impairments were 26.4% faster using ability-based user interfaces generated by SUPPLE++. They also made 73% fewer errors, strongly preferred those interfaces to the manufacturers' defaults, and found them more efficient, easier to use, and much less physically tiring. 
These findings indicate that rather than requiring some users with motor impairments to adapt themselves to software using separate assistive technologies, software can now adapt itself to the capabilities of its users.", + "link": "https://www.semanticscholar.org/paper/de4b2e736615bf25f37efb5305bf7586aa5cbe0e", + "scraped_abstract": null, + "citation_best": 178 + }, + { + "paper": "2140188249", + "venue": "1163450153", + "year": "2008", + "title": "large scale analysis of web revisitation patterns", + "label": [ + "521306242", + "534406577", + "164120249", + "197046077", + "2776324614", + "21959979", + "97854310", + "516187249", + "136699151", + "521815418", + "136764020", + "61096286", + "23123220" + ], + "author": [ + "2305277957", + "1982462162", + "676500258" + ], + "reference": [ + "1535195418", + "1585809172", + "1597878835", + "1924960892", + "1964990064", + "1982896842", + "1991286487", + "2000969683", + "2010975745", + "2038808651", + "2058756162", + "2074721107", + "2086698677", + "2098954597", + "2099768249", + "2103399427", + "2118020653", + "2118168041", + "2128868253", + "2158450083", + "2161240748", + "2165149861", + "2169576055" + ], + "abstract": "our work examines web revisitation patterns everybody revisits web pages but their reasons for doing so can differ depending on the particular web page their topic of interest and their intent to characterize how people revisit web content we analyzed five weeks of web interaction logs of over 612 000 users we supplemented these findings by a survey intended to identify the intent behind the observed revisitation our analysis reveals four primary revisitation patterns each with unique behavioral content and structural characteristics through our analysis we illustrate how understanding revisitation patterns can enable web sites to provide improved navigation web browsers to predict users destinations and search engines to better support fast fresh and effective finding and re finding", + "title_raw": 
"Large scale analysis of web revisitation patterns", + "abstract_raw": "Our work examines Web revisitation patterns. Everybody revisits Web pages, but their reasons for doing so can differ depending on the particular Web page, their topic of interest, and their intent. To characterize how people revisit Web content, we analyzed five weeks of Web interaction logs of over 612,000 users. We supplemented these findings by a survey intended to identify the intent behind the observed revisitation. Our analysis reveals four primary revisitation patterns, each with unique behavioral, content, and structural characteristics. Through our analysis we illustrate how understanding revisitation patterns can enable Web sites to provide improved navigation, Web browsers to predict users' destinations, and search engines to better support fast, fresh, and effective finding and re-finding.", + "link": "https://www.semanticscholar.org/paper/16f5a2e6d26482f6d925855ff4937723e03270a0", + "scraped_abstract": null, + "citation_best": 204 + }, + { + "paper": "2062709156", + "venue": "1163450153", + "year": "2008", + "title": "multimodal collaborative handwriting training for visually impaired people", + "label": [ + "2779386606", + "107457646", + "49774154", + "152086174" + ], + "author": [ + "2167986527", + "1991177683", + "2157782254", + "2146650051" + ], + "reference": [ + "42193561", + "1273631150", + "1987177365", + "1994893618", + "2020813568", + "2041571037", + "2101027976", + "2103476886", + "2116781200", + "2152870685", + "2162026890", + "2169983797" + ], + "abstract": "mcsig is a multimodal teaching and learning environ ment for visually impaired students to learn character shapes handwriting and signatures collaboratively with their teachers it combines haptic and audio output to realize the teacher s pen input in parallel non visual modalities mcsig is intended for teaching visually impaired children how to handwrite characters and from that signatures something that is very 
difficult without visual feedback we conducted an evaluation with eight visually impaired children with a pretest to assess their current skills with a set of character shapes a training phase using mcsig and then a post test of the same character shapes to see if there were any improvements the children could all use mcsig and we saw significant improvements in the character shapes drawn particularly by the completely blind children many of whom could draw almost none of the characters before the test in particular the blind participants all expressed enjoyment and excitement about the system and using a computer to learn to handwrite", + "title_raw": "Multimodal collaborative handwriting training for visually-impaired people", + "abstract_raw": "\"McSig\" is a multimodal teaching and learning environ-ment for visually-impaired students to learn character shapes, handwriting and signatures collaboratively with their teachers. It combines haptic and audio output to realize the teacher's pen input in parallel non-visual modalities. McSig is intended for teaching visually-impaired children how to handwrite characters (and from that signatures), something that is very difficult without visual feedback. We conducted an evaluation with eight visually-impaired children with a pretest to assess their current skills with a set of character shapes, a training phase using McSig and then a post-test of the same character shapes to see if there were any improvements. The children could all use McSig and we saw significant improvements in the character shapes drawn, particularly by the completely blind children (many of whom could draw almost none of the characters before the test). 
In particular, the blind participants all expressed enjoyment and excitement about the system and using a computer to learn to handwrite.", + "link": "https://www.semanticscholar.org/paper/ea1ec0b528649ee60fba30eac8d9edeee0c6812b", + "scraped_abstract": null, + "citation_best": 76 + }, + { + "paper": "2113201641", + "venue": "1158167855", + "year": "2008", + "title": "beyond sliding windows object localization by efficient subwindow search", + "label": [ + "102392041", + "75294576", + "2780142956", + "69952321", + "53533937", + "2776151529", + "68339613", + "94176051", + "1667742", + "64876066", + "178980831", + "12267149", + "113238511" + ], + "author": [ + "1977819825", + "209766775", + "2240984066" + ], + "reference": [ + "1490012878", + "1677409904", + "1995444699", + "2012330712", + "2098895734", + "2103658758", + "2105464770", + "2107034620", + "2112020727", + "2115880351", + "2118743891", + "2120369594", + "2125713050", + "2126326837", + "2131846894", + "2141582975", + "2151103935", + "2154422044", + "2154683974", + "2161567010", + "2161969291", + "2162915993", + "3023786531" + ], + "abstract": "most successful object recognition systems rely on binary classification deciding only if an object is present or not but not providing information on the actual object location to perform localization one can take a sliding window approach but this strongly increases the computational cost because the classifier function has to be evaluated over a large set of candidate subwindows in this paper we propose a simple yet powerful branch and bound scheme that allows efficient maximization of a large class of classifier functions over all possible subimages it converges to a globally optimal solution typically in sublinear time we show how our method is applicable to different object detection and retrieval scenarios the achieved speedup allows the use of classifiers for localization that formerly were considered too slow for this task such as svms with a spatial pyramid 
kernel or nearest neighbor classifiers based on the chi2 distance we demonstrate state of the art performance of the resulting systems on the uiuc cars dataset the pascal voc 2006 dataset and in the pascal voc 2007 competition", + "title_raw": "Beyond sliding windows: Object localization by efficient subwindow search", + "abstract_raw": "Most successful object recognition systems rely on binary classification, deciding only if an object is present or not, but not providing information on the actual object location. To perform localization, one can take a sliding window approach, but this strongly increases the computational cost, because the classifier function has to be evaluated over a large set of candidate subwindows. In this paper, we propose a simple yet powerful branch-and-bound scheme that allows efficient maximization of a large class of classifier functions over all possible subimages. It converges to a globally optimal solution typically in sublinear time. We show how our method is applicable to different object detection and retrieval scenarios. The achieved speedup allows the use of classifiers for localization that formerly were considered too slow for this task, such as SVMs with a spatial pyramid kernel or nearest neighbor classifiers based on the chi2-distance. 
We demonstrate state-of-the-art performance of the resulting systems on the UIUC Cars dataset, the PASCAL VOC 2006 dataset and in the PASCAL VOC 2007 competition.", + "link": "https://www.semanticscholar.org/paper/54b224478a63e33441c651175c522f3702062fc4", + "scraped_abstract": null, + "citation_best": 690 + }, + { + "paper": "2122633358", + "venue": "1199533187", + "year": "2008", + "title": "efficient online monitoring of web service slas", + "label": [ + "2778160497", + "35578498", + "30788636", + "120314980" + ], + "author": [ + "2152352937", + "1963512109", + "2298180682" + ], + "reference": [ + "1500983681", + "1740522204", + "1787074469", + "1840628226", + "1852523103", + "1981808971", + "1984936841", + "1988394140", + "2009379862", + "2012959900", + "2033357226", + "2040671495", + "2054620608", + "2065908889", + "2097739043", + "2101508170", + "2116960910", + "2146861402", + "2151755698", + "2153842185", + "2156893561", + "2157855401", + "2159958244", + "2162255975", + "2164023282" + ], + "abstract": "if an organization depends on the service quality provided by another organization it often enters into a bilateral service level agreement sla which mitigates outsourcing risks by associating penalty payments with poor service quality once these agreements are entered into it becomes necessary to monitor their conditions which will commonly relate to timeliness reliability and request throughput at run time we show how these conditions can be translated into timed automata acceptance of a timed word by a timed automaton can be decided in quadratic time and because the timed automata can operate while messages are exchanged at run time there is effectively only a linear run time overhead we present an implementation to derive on line monitors for web services automatically from slas using an eclipse plugin we evaluate the efficiency and scalability of this approach using a large scale case study in a service oriented computational grid", + "title_raw": 
"Efficient online monitoring of web-service SLAs", + "abstract_raw": "If an organization depends on the service quality provided by another organization it often enters into a bilateral service level agreement (SLA), which mitigates outsourcing risks by associating penalty payments with poor service quality. Once these agreements are entered into, it becomes necessary to monitor their conditions, which will commonly relate to timeliness, reliability and request throughput, at run-time. We show how these conditions can be translated into timed automata. Acceptance of a timed word by a timed automaton can be decided in quadratic time and because the timed automata can operate while messages are exchanged at run-time there is effectively only a linear run-time overhead. We present an implementation to derive on-line monitors for web services automatically from SLAs using an Eclipse plugin. We evaluate the efficiency and scalability of this approach using a large-scale case study in a service-oriented computational grid.", + "link": "https://www.semanticscholar.org/paper/38fc567f15d45bb3c79d4fdc95f013a1dae6b24c", + "scraped_abstract": null, + "citation_best": 142 + }, + { + "paper": "2103188316", + "venue": "1174403976", + "year": "2008", + "title": "recommending adaptive changes for framework evolution", + "label": [ + "152752567", + "548217200", + "101317890", + "2777904410", + "202105479", + "93996380", + "115903868" + ], + "author": [ + "2047072155", + "2136878537" + ], + "reference": [ + "338991206", + "1565935466", + "1606787793", + "1737238207", + "1812582761", + "2014256464", + "2025674334", + "2043791485", + "2081757705", + "2097100252", + "2099056153", + "2103640219", + "2112847033", + "2118581948", + "2119493660", + "2127811329", + "2130344546", + "2140681814", + "2141558501", + "2153150125", + "2157836986", + "2158735796", + "2159933174", + "2542246377" + ], + "abstract": "in the course of a framework s evolution changes ranging from a simple refactoring to 
a complete rearchitecture can break client programs finding suitable replacements for framework elements that were accessed by a client program and deleted as part of the framework s evolution can be a challenging task we present a recommendation system semdiff that suggests adaptations to client programs by analyzing how a framework adapts to its own changes in a study of the evolution of the eclipse jdt framework and three client programs our approach recommended relevant adaptive changes with a high level of precision and detected non trivial changes typically undiscovered by current refactoring detection techniques", + "title_raw": "Recommending adaptive changes for framework evolution", + "abstract_raw": "In the course of a framework's evolution, changes ranging from a simple refactoring to a complete rearchitecture can break client programs. Finding suitable replacements for framework elements that were accessed by a client program and deleted as part of the framework's evolution can be a challenging task. We present a recommendation system, SemDiff, that suggests adaptations to client programs by analyzing how a framework adapts to its own changes. 
In a study of the evolution of the Eclipse JDT framework and three client programs, our approach recommended relevant adaptive changes with a high level of precision, and detected non-trivial changes typically undiscovered by current refactoring detection techniques.", + "link": "https://www.semanticscholar.org/paper/304925289ccd95fc7171778c79936d30dd4b8c6a", + "scraped_abstract": null, + "citation_best": 97 + }, + { + "paper": "2154843497", + "venue": "1174403976", + "year": "2008", + "title": "precise memory leak detection for java software using container profiling", + "label": [ + "1009929", + "367424", + "548217200", + "156731835", + "187191949", + "162319229", + "2777904410", + "176649486", + "79403827" + ], + "author": [ + "2311272699", + "2328602998" + ], + "reference": [ + "54833955", + "1510358061", + "1536265389", + "1537003042", + "1554164734", + "1574879889", + "1576911972", + "1598245396", + "1991546210", + "2000100269", + "2001799599", + "2012886500", + "2014530617", + "2069568053", + "2072754671", + "2082827821", + "2098809490", + "2098883184", + "2101857770", + "2102674270", + "2105204977", + "2121094954", + "2124770494", + "2128835824", + "2131734025", + "2133802223", + "2135275954", + "2138369269", + "2157432684", + "2166140339" + ], + "abstract": "a memory leak in a java program occurs when object references that are no longer needed are unnecessarily maintained such leaks are difficult to understand because static analyses typically cannot precisely identify these redundant references and existing dynamic analyses for leak detection track and report fine grained information about individual objects producing results that are usually hard to interpret and lack precision we introduce a novel container based heap tracking technique based on the observation that many memory leaks in java programs occur due to containers that keep references to unused data entries the novelty of the described work is two fold 1 instead of tracking arbitrary objects 
and finding leaks by analyzing references to unused objects the technique tracks only containers and directly identifies the source of the leak and 2 the approach computes a confidence value for each container based on a combination of its memory consumption and its elements staleness time since last retrieval while previous approaches do not consider such combined metrics our experimental results show that the reports generated by the proposed technique can be very precise for two bugs reported by sun and for a known bug in specjbb the top containers in the reports include the containers that leak memory", + "title_raw": "Precise memory leak detection for java software using container profiling", + "abstract_raw": "A memory leak in a Java program occurs when object references that are no longer needed are unnecessarily maintained. Such leaks are difficult to understand because static analyses typically cannot precisely identify these redundant references, and existing dynamic analyses for leak detection track and report fine-grained information about individual objects, producing results that are usually hard to interpret and lack precision. We introduce a novel container-based heap-tracking technique, based on the observation that many memory leaks in Java programs occur due to containers that keep references to unused data entries. The novelty of the described work is two-fold: (1) instead of tracking arbitrary objects and finding leaks by analyzing references to unused objects, the technique tracks only containers and directly identifies the source of the leak, and (2) the approach computes a confidence value for each container based on a combination of its memory consumption and its elements' staleness (time since last retrieval), while previous approaches do not consider such combined metrics. 
Our experimental results show that the reports generated by the proposed technique can be very precise: for two bugs reported by Sun and for a known bug in SPECjbb, the top containers in the reports include the containers that leak memory.", + "link": "https://www.semanticscholar.org/paper/bfa6bb83bac509a1d2f7ff4f9589d35da77d13c2", + "scraped_abstract": null, + "citation_best": 142 + }, + { + "paper": "2164372721", + "venue": "1174403976", + "year": "2008", + "title": "debugging reinvented asking and answering why and why not questions about program behavior", + "label": [ + "91071405", + "529173508", + "168065819", + "98045186", + "199360897", + "2777904410", + "115903868", + "136388014" + ], + "author": [ + "2980248491", + "2117127927" + ], + "reference": [ + "1493700809", + "1673079227", + "2007121005", + "2036196659", + "2038687965", + "2038903004", + "2100406974", + "2100894766", + "2113476536", + "2121840944", + "2127225305", + "2139643778", + "2150566739", + "2150662287", + "2151996389", + "2157922094", + "2162376048", + "2165811181", + "2295857493" + ], + "abstract": "when software developers want to understand the reason for a program s behavior they must translate their questions about the behavior into a series of questions about code speculating about the causes in the process the whyline is a new kind of debugging tool that avoids such speculation by instead enabling developers to select a question about program output from a set of why did and why didn t questions derived from the program s code and execution the tool then finds one or more possible explanations for the output in question using a combination of static and dynamic slicing precise call graphs and new algorithms for determining potential sources of values and explanations for why a line of code was not reached evaluations of the tool on one task showed that novice programmers with the whyline were twice as fast as expert programmers without it the tool has the potential to simplify 
debugging in many software development contexts", + "title_raw": "Debugging reinvented: asking and answering why and why not questions about program behavior", + "abstract_raw": "When software developers want to understand the reason for a program's behavior, they must translate their questions about the behavior into a series of questions about code, speculating about the causes in the process. The Whyline is a new kind of debugging tool that avoids such speculation by instead enabling developers to select a question about program output from a set of why did and why didn't questions derived from the program's code and execution. The tool then finds one or more possible explanations for the output in question, using a combination of static and dynamic slicing, precise call graphs, and new algorithms for determining potential sources of values and explanations for why a line of code was not reached. Evaluations of the tool on one task showed that novice programmers with the Whyline were twice as fast as expert programmers without it. 
The tool has the potential to simplify debugging in many software development contexts.", + "link": "https://scholar.google.com/scholar?as_q=&num=10&btnG=Search+Scholar&as_epq=Debugging+Reinvented:+Asking+and+Answering+Why+and+Why+Not+Questions+about+Program+Behavior&as_oq=&as_eq=&as_occt=any&as_sauthors=Ko", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2130243914", + "venue": "1174403976", + "year": "2008", + "title": "predicting accurate and actionable static analysis warnings an experimental approach", + "label": [ + "119857082", + "137287247", + "529173508", + "111498074", + "124101348", + "2777904410", + "97686452", + "117447612" + ], + "author": [ + "2038759156", + "2157683378", + "2065997819", + "262280937", + "1949835151" + ], + "reference": [ + "122260558", + "1842503650", + "1912598576", + "1973948212", + "1986453394", + "1986683951", + "1990994334", + "2003885074", + "2010295655", + "2027483357", + "2043811931", + "2053465247", + "2105300539", + "2111949697", + "2114114632", + "2122450421", + "2125343911", + "2149598089", + "2152094063", + "2168411647" + ], + "abstract": "static analysis tools report software defects that may or may not be detected by other verification methods two challenges complicating the adoption of these tools are spurious false positive warnings and legitimate warnings that are not acted on this paper reports automated support to help address these challenges using logistic regression models that predict the foregoing types of warnings from signals in the warnings and implicated code because examining many potential signaling factors in large software development settings can be expensive we use a screening methodology to quickly discard factors with low predictive power and cost effectively build predictive models our empirical evaluation indicates that these models can achieve high accuracy in predicting accurate and actionable static analysis warnings and suggests that the models are competitive with 
alternative models built without screening", + "title_raw": "Predicting accurate and actionable static analysis warnings: an experimental approach", + "abstract_raw": "Static analysis tools report software defects that may or may not be detected by other verification methods. Two challenges complicating the adoption of these tools are spurious false positive warnings and legitimate warnings that are not acted on. This paper reports automated support to help address these challenges using logistic regression models that predict the foregoing types of warnings from signals in the warnings and implicated code. Because examining many potential signaling factors in large software development settings can be expensive, we use a screening methodology to quickly discard factors with low predictive power and cost-effectively build predictive models. Our empirical evaluation indicates that these models can achieve high accuracy in predicting accurate and actionable static analysis warnings, and suggests that the models are competitive with alternative models built without screening.", + "link": "https://www.semanticscholar.org/paper/230a38c1739d7c9d07614c9c958d983c00ed65ce", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2171183905", + "venue": "1123349196", + "year": "2008", + "title": "assessment of urban scale wireless networks with a small number of measurements", + "label": [ + "31548570", + "62793504", + "555944384", + "108037233", + "124101348", + "152124472", + "123691950" + ], + "author": [ + "2117600084", + "1998346553", + "2300379501" + ], + "reference": [ + "1844891905", + "1882812128", + "2010713022", + "2030796490", + "2053913299", + "2094734495", + "2096369023", + "2104760042", + "2110591978", + "2121261335", + "2132997891", + "2133829090", + "2143738441", + "2145417574", + "2148868685", + "2155477820", + "2157875131", + "2159256278", + "2505093922", + "2886902701" + ], + "abstract": "in order to evaluate improve or expand a deployed 
city wide wireless mesh network it is necessary to assess the network s spatial performance in this paper we present a general framework to accurately predict a network s well served area termed the metric region via a small number of measurements assessment of deployed networks must address two key issues non uniform physical layer propagation and high spatial variance in performance addressing non uniformity our framework estimates a mesh node s metric region via a data driven sectorization of the region we find each sector s boundary radius with a two stage process of estimation and then measurement driven push pull refinement of the estimated boundary to address high spatial variation our coverage estimation couples signal strength measurements with terrain information from publicly available digital maps to estimate propagation characteristics between a wireless node and the client s location to limit measurements and yield connected metric regions we consider performance metrics such as signal strength to be monotonic with distance from the wireless node within each sector we show that despite measured violations in coverage monotonicity we obtain high accuracy with this assumption we validate our estimation and refinement framework with measurements from 30 000 client locations obtained in each of two currently operational mesh networks googlewifi and tfa we study three illustrative metrics coverage modulation rate and redundancy and find that to achieve a given accuracy our framework requires two to five times fewer measurements than grid sampling strategies finally we use the framework to evaluate the two deployments and study the average size and location of their coverage holes as well as the impact of client association policies on load balancing", + "title_raw": "Assessment of urban-scale wireless networks with a small number of measurements", + "abstract_raw": "In order to evaluate, improve, or expand a deployed, city-wide wireless mesh network, it is 
necessary to assess the network's spatial performance. In this paper, we present a general framework to accurately predict a network's well-served area, termed the metric region, via a small number of measurements. Assessment of deployed networks must address two key issues: non-uniform physical-layer propagation and high spatial variance in performance. Addressing non-uniformity, our framework estimates a mesh node's metric region via a data-driven sectorization of the region. We find each sector's boundary (radius) with a two-stage process of estimation and then measurement-driven \"push-pull\" refinement of the estimated boundary. To address high spatial variation, our coverage estimation couples signal strength measurements with terrain information from publicly available digital maps to estimate propagation characteristics between a wireless node and the client's location. To limit measurements and yield connected metric regions, we consider performance metrics (such as signal strength) to be monotonic with distance from the wireless node within each sector. We show that despite measured violations in coverage monotonicity, we obtain high accuracy with this assumption. We validate our estimation and refinement framework with measurements from 30,000 client locations obtained in each of two currently operational mesh networks, GoogleWiFi and TFA. We study three illustrative metrics: coverage, modulation rate, and redundancy, and find that to achieve a given accuracy, our framework requires two to five times fewer measurements than grid sampling strategies. 
Finally, we use the framework to evaluate the two deployments and study the average size and location of their coverage holes as well as the impact of client association policies on load-balancing.", + "link": "https://www.semanticscholar.org/paper/f1751d145657ad93bbc5e6f4aae59b77b1ceffda", + "scraped_abstract": null, + "citation_best": 79 + }, + { + "paper": "1572904055", + "venue": "1158363782", + "year": "2008", + "title": "remus high availability via asynchronous virtual machine replication", + "label": [ + "63540848", + "25344961", + "65813073", + "2780945871", + "149635348", + "111919701", + "77019957", + "180591934", + "70440993", + "195917429" + ], + "author": [ + "2101202752", + "2134815476", + "2110980001", + "2032285811", + "1979509022", + "2106730373" + ], + "reference": [ + "109094004", + "120340316", + "155628284", + "165384900", + "1494987791", + "1500546894", + "1543144479", + "1744795482", + "1997269120", + "2005373714", + "2012426216", + "2101468026", + "2101729333", + "2108806129", + "2117271294", + "2118509347", + "2121178808", + "2130768881", + "2131726714", + "2142892618", + "2146532238", + "2150348590", + "2151388636", + "2157801087", + "2171453084", + "2620706897" + ], + "abstract": "allowing applications to survive hardware failure is an expensive undertaking which generally involves reengineering software to include complicated recovery logic as well as deploying special purpose hardware this represents a severe barrier to improving the dependability of large or legacy applications we describe the construction of a general and transparent high availability service that allows existing unmodified software to be protected from the failure of the physical machine on which it runs remus provides an extremely high degree of fault tolerance to the point that a running system can transparently continue execution on an alternate physical host in the face of failure with only seconds of downtime while completely preserving host state such as active 
network connections our approach encapsulates protected software in a virtual machine asynchronously propagates changed state to a backup host at frequencies as high as forty times a second and uses speculative execution to concurrently run the active vm slightly ahead of the replicated system state", + "title_raw": "Remus: high availability via asynchronous virtual machine replication", + "abstract_raw": "Allowing applications to survive hardware failure is an expensive undertaking, which generally involves reengineering software to include complicated recovery logic as well as deploying special-purpose hardware; this represents a severe barrier to improving the dependability of large or legacy applications. We describe the construction of a general and transparent high availability service that allows existing, unmodified software to be protected from the failure of the physical machine on which it runs. Remus provides an extremely high degree of fault tolerance, to the point that a running system can transparently continue execution on an alternate physical host in the face of failure with only seconds of downtime, while completely preserving host state such as active network connections. 
Our approach encapsulates protected software in a virtual machine, asynchronously propagates changed state to a backup host at frequencies as high as forty times a second, and uses speculative execution to concurrently run the active VM slightly ahead of the replicated system state.", + "link": "https://www.semanticscholar.org/paper/881061ab139a89fe629f5663c6da2db43c186493", + "scraped_abstract": null, + "citation_best": 567 + }, + { + "paper": "1598241463", + "venue": "1158363782", + "year": "2008", + "title": "consensus routing the internet as a distributed system", + "label": [ + "184896649", + "31258907", + "87044965", + "104954878", + "94600068", + "115443555", + "196423136", + "204948658", + "101396714", + "120314980" + ], + "author": [ + "2100903914", + "7409492", + "2088689873", + "2148869393", + "2093515963" + ], + "reference": [ + "1550619482", + "1598584073", + "1760148955", + "2003590000", + "2010053579", + "2011440271", + "2019863223", + "2026392294", + "2037142757", + "2075854425", + "2076590364", + "2095234341", + "2100415265", + "2103957314", + "2104826735", + "2110318628", + "2113872681", + "2115076903", + "2118341703", + "2121635209", + "2123491545", + "2126087831", + "2130882486", + "2131053137", + "2131754091", + "2131929623", + "2136377206", + "2138370688", + "2141487810", + "2142065670", + "2147575919", + "2149888524", + "2153266595", + "2160409488", + "2162260997", + "2164284397", + "2182688186", + "2535598766" + ], + "abstract": "internet routing protocols bgp ospf rip have traditionally favored responsiveness over consistency a router applies a received update immediately to its forwarding table before propagating the update to other routers including those that potentially depend upon the outcome of the update responsiveness comes at the cost of routing loops and blackholes a router a thinks its route to a destination is via b but b disagrees by favoring responsiveness a liveness property over consistency a safety property internet routing 
has lost both our position is that consistent state in a distributed system makes its behavior more predictable and securable to this end we present consensus routing a consistency first approach that cleanly separates safety and liveness using two logically distinct modes of packet delivery a stable mode where a route is adopted only after all dependent routers have agreed upon it and a transient mode that heuristically forwards the small fraction of packets that encounter failed links somewhat surprisingly we find that consensus routing improves overall availability when used in conjunction with existing transient mode heuristics such as backup paths deflections or detouring experiments on the internet s as level topology show that consensus routing eliminates nearly all transient disconnectivity in bgp", + "title_raw": "Consensus routing: the internet as a distributed system", + "abstract_raw": "Internet routing protocols (BGP, OSPF, RIP) have traditionally favored responsiveness over consistency. A router applies a received update immediately to its forwarding table before propagating the update to other routers, including those that potentially depend upon the outcome of the update. Responsiveness comes at the cost of routing loops and blackholes--a router A thinks its route to a destination is via B but B disagrees. By favoring responsiveness (a liveness property) over consistency (a safety property), Internet routing has lost both.\r\n\r\nOur position is that consistent state in a distributed system makes its behavior more predictable and securable. To this end, we present consensus routing, a consistency-first approach that cleanly separates safety and liveness using two logically distinct modes of packet delivery: a stable mode where a route is adopted only after all dependent routers have agreed upon it, and a transient mode that heuristically forwards the small fraction of packets that encounter failed links. 
Somewhat surprisingly, we find that consensus routing improves overall availability when used in conjunction with existing transient mode heuristics such as backup paths, deflections, or detouring. Experiments on the Internet's AS-level topology show that consensus routing eliminates nearly all transient disconnectivity in BGP.", + "link": "https://www.semanticscholar.org/paper/68b376ea67d95a6cd30431f4c6fb47de2124c35d", + "scraped_abstract": null, + "citation_best": 87 + }, + { + "paper": "1710734607", + "venue": "1185109434", + "year": "2008", + "title": "klee unassisted and automatic generation of high coverage tests for complex systems programs", + "label": [ + "55439883", + "111919701", + "2779639559", + "112968700", + "199360897", + "11219265", + "199519371", + "111065885" + ], + "author": [ + "1243827984", + "2110981794", + "2163716051" + ], + "reference": [ + "33043110", + "157156687", + "1497028280", + "1503159836", + "1720848645", + "1995626000", + "1997960810", + "2009489720", + "2050853996", + "2065675749", + "2086234010", + "2089139117", + "2096449544", + "2096503829", + "2100583963", + "2104993088", + "2113531724", + "2115309705", + "2116818527", + "2117058582", + "2129487583", + "2129538349", + "2132897303", + "2133612077", + "2135274583", + "2145385214", + "2153185479", + "2165100126", + "2171469152", + "2171480813", + "2171683519", + "2171999426" + ], + "abstract": "we present a new symbolic execution tool klee capable of automatically generating tests that achieve high coverage on a diverse set of complex and environmentally intensive programs we used klee to thoroughly check all 89 stand alone programs in the gnu coreutils utility suite which form the core user level environment installed on millions of unix systems and arguably are the single most heavily tested set of open source programs in existence klee generated tests achieve high line coverage on average over 90 per tool median over 94 and significantly beat the coverage of the developers 
own hand written test suite when we did the same for 75 equivalent tools in the busybox embedded system suite results were even better including 100 coverage on 31 of them we also used klee as a bug finding tool applying it to 452 applications over 430k total lines of code where it found 56 serious bugs including three in coreutils that had been missed for over 15 years finally we used klee to crosscheck purportedly identical busybox and coreutils utilities finding functional correctness errors and a myriad of inconsistencies", + "title_raw": "KLEE: unassisted and automatic generation of high-coverage tests for complex systems programs", + "abstract_raw": "We present a new symbolic execution tool, KLEE, capable of automatically generating tests that achieve high coverage on a diverse set of complex and environmentally-intensive programs. We used KLEE to thoroughly check all 89 stand-alone programs in the GNU COREUTILS utility suite, which form the core user-level environment installed on millions of Unix systems, and arguably are the single most heavily tested set of open-source programs in existence. KLEE-generated tests achieve high line coverage -- on average over 90% per tool (median: over 94%) -- and significantly beat the coverage of the developers' own hand-written test suite. When we did the same for 75 equivalent tools in the BUSYBOX embedded system suite, results were even better, including 100% coverage on 31 of them.\r\n\r\nWe also used KLEE as a bug finding tool, applying it to 452 applications (over 430K total lines of code), where it found 56 serious bugs, including three in COREUTILS that had been missed for over 15 years. 
Finally, we used KLEE to crosscheck purportedly identical BUSYBOX and COREUTILS utilities, finding functional correctness errors and a myriad of inconsistencies.", + "link": "https://www.semanticscholar.org/paper/b187a252f54f4cc266cdbe5d91aa25b994176073", + "scraped_abstract": null, + "citation_best": 2578 + }, + { + "paper": "1493893823", + "venue": "1185109434", + "year": "2008", + "title": "dryadlinq a system for general purpose distributed data parallel computing using a high level language", + "label": [ + "34165917", + "169590947", + "177264268", + "199360897", + "173608175", + "9476365", + "90392147", + "179531526", + "510870499" + ], + "author": [ + "2170786782", + "2004792601", + "1992570377", + "2324400913", + "691809822", + "2068293816", + "2045911459" + ], + "reference": [ + "33573630", + "1642148853", + "1975278491", + "1975731516", + "1987872267", + "1989152133", + "1997020216", + "1999321073", + "2008750849", + "2063281504", + "2080427212", + "2098935637", + "2100830825", + "2104861744", + "2112101330", + "2119400430", + "2119714163", + "2121054697", + "2126583885", + "2136952952", + "2138237427", + "2139086035", + "2146917903", + "2154894831", + "2160329451", + "2171922437", + "2172143128", + "2173213060", + "2187405563", + "2436525433", + "2493670965", + "2624304035", + "2766000922", + "2788759194", + "2950660196", + "3138135046" + ], + "abstract": "dryadlinq is a system and a set of language extensions that enable a new programming model for large scale distributed computing it generalizes previous execution environments such as sql mapreduce and dryad in two ways by adopting an expressive data model of strongly typed net objects and by supporting general purpose imperative and declarative operations on datasets within a traditional high level programming language a dryadlinq program is a sequential program composed of linq expressions performing arbitrary side effect free transformations on datasets and can be written and debugged using standard 
net development tools the dryadlinq system automatically and transparently translates the data parallel portions of the program into a distributed execution plan which is passed to the dryad execution platform dryad which has been in continuous operation for several years on production clusters made up of thousands of computers ensures efficient reliable execution of this plan we describe the implementation of the dryadlinq compiler and runtime we evaluate dryadlinq on a varied set of programs drawn from domains such as web graph analysis large scale log mining and machine learning we show that excellent absolute performance can be attained a general purpose sort of 1012 bytes of data executes in 319 seconds on a 240 computer 960 disk cluster as well as demonstrating near linear scaling of execution time on representative applications as we vary the number of computers used for a job", + "title_raw": "DryadLINQ: a system for general-purpose distributed data-parallel computing using a high-level language", + "abstract_raw": "DryadLINQ is a system and a set of language extensions that enable a new programming model for large scale distributed computing. It generalizes previous execution environments such as SQL, MapReduce, and Dryad in two ways: by adopting an expressive data model of strongly typed .NET objects; and by supporting general-purpose imperative and declarative operations on datasets within a traditional high-level programming language.\r\n\r\nA DryadLINQ program is a sequential program composed of LINQ expressions performing arbitrary side-effect-free transformations on datasets, and can be written and debugged using standard .NET development tools. The DryadLINQ system automatically and transparently translates the data-parallel portions of the program into a distributed execution plan which is passed to the Dryad execution platform. 
Dryad, which has been in continuous operation for several years on production clusters made up of thousands of computers, ensures efficient, reliable execution of this plan.\r\n\r\nWe describe the implementation of the DryadLINQ compiler and runtime. We evaluate DryadLINQ on a varied set of programs drawn from domains such as web-graph analysis, large-scale log mining, and machine learning. We show that excellent absolute performance can be attained--a general-purpose sort of 1012 Bytes of data executes in 319 seconds on a 240-computer, 960- disk cluster--as well as demonstrating near-linear scaling of execution time on representative applications as we vary the number of computers used for a job.", + "link": "https://www.semanticscholar.org/paper/aa8e8283f11913b08326a0f760f2a737e66388c7", + "scraped_abstract": null, + "citation_best": 708 + }, + { + "paper": "2216311525", + "venue": "1185109434", + "year": "2008", + "title": "difference engine harnessing memory redundancy in virtual machines", + "label": [ + "74912251", + "112904061", + "25344961", + "100850083", + "149635348", + "173512123", + "19275194", + "111919701", + "177064316", + "2780513914" + ], + "author": [ + "2142294834", + "2228928683", + "1815696602", + "2171030022", + "2112598219", + "2040462121", + "241629895", + "295064773" + ], + "reference": [ + "89823361", + "187057365", + "1487249727", + "1494202216", + "1522250664", + "1529205966", + "1916709771", + "2006816934", + "2100673955", + "2105321788", + "2124297537", + "2131726714", + "2132627996", + "2133011691", + "2148885851", + "2154766204" + ], + "abstract": "virtual machine monitors vmms are a popular platform for internet hosting centers and cloud based compute services by multiplexing hardware resources among virtual machines vms running commodity operating systems vmms decrease both the capital outlay and management overhead of hosting centers appropriate placement and migration policies can take advantage of statistical multiplexing to 
effectively utilize available processors however main memory is not amenable to such multiplexing and is often the primary bottleneck in achieving higher degrees of consolidation previous efforts have shown that content based page sharing provides modest decreases in the memory footprint of vms running similar operating systems and applications our studies show that significant additional gains can be had by leveraging both sub page level sharing through page patching and in core memory compression we build difference engine an extension to the xen virtual machine monitor to support each of these in addition to standard copy on write full page sharing and demonstrate substantial savings not only between vms running similar applications and operating systems up to 90 but even across vms running disparate workloads up to 65 in head to head memory savings comparisons difference engine outperforms vmware esx server by a factor of 1 5 for homogeneous workloads and by a factor of 1 6 2 5 for heterogeneous workloads in all cases the performance overhead of difference engine is less than 7", + "title_raw": "Difference engine: harnessing memory redundancy in virtual machines", + "abstract_raw": "Virtual machine monitors (VMMs) are a popular platform for Internet hosting centers and cloud-based compute services. By multiplexing hardware resources among virtual machines (VMs) running commodity operating systems, VMMs decrease both the capital outlay and management overhead of hosting centers. Appropriate placement and migration policies can take advantage of statistical multiplexing to effectively utilize available processors. However, main memory is not amenable to such multiplexing and is often the primary bottleneck in achieving higher degrees of consolidation.\r\n\r\nPrevious efforts have shown that content-based page sharing provides modest decreases in the memory footprint of VMs running similar operating systems and applications. 
Our studies show that significant additional gains can be had by leveraging both sub-page level sharing (through page patching) and in-core memory compression. We build Difference Engine, an extension to the Xen virtual machine monitor, to support each of these--in addition to standard copy-on-write full page sharing--and demonstrate substantial savings not only between VMs running similar applications and operating systems (up to 90%), but even across VMs running disparate workloads (up to 65%). In head-to-head memory-savings comparisons, Difference Engine outperforms VMware ESX server by a factor of 1.5 for homogeneous workloads and by a factor of 1.6-2.5 for heterogeneous workloads. In all cases, the performance overhead of Difference Engine is less than 7%.", + "link": "https://www.semanticscholar.org/paper/31bf708535a36eef6de57472ab15a49bf1e0c3b2", + "scraped_abstract": null, + "citation_best": 185 + }, + { + "paper": "2143472559", + "venue": "1163618098", + "year": "2008", + "title": "pacemakers and implantable cardiac defibrillators software radio attacks and zero power defenses", + "label": [ + "171115542", + "38652104", + "12269588", + "172195944", + "2777904410" + ], + "author": [ + "2010099130", + "2000007990", + "1804252225", + "2098447750", + "2303143379", + "2662926155", + "1964944407", + "2148790671", + "273677211" + ], + "reference": [ + "103398704", + "125708017", + "1493795471", + "1500356409", + "1533077275", + "1832495378", + "1964570885", + "1976901995", + "1983898909", + "1992016296", + "2024296478", + "2033751220", + "2098727156", + "2099042427", + "2100505193", + "2105374252", + "2114495437", + "2116897550", + "2122742966", + "2126433409", + "2130614965", + "2132303913", + "2136340790", + "2140372921", + "2141136786", + "2141561183", + "2245142629" + ], + "abstract": "our study analyzes the security and privacy properties of an implantable cardioverter defibrillator icd introduced to the u s market in 2003 this model of icd includes 
pacemaker technology and is designed to communicate wirelessly with a nearby external programmer in the 175 khz frequency range after partially reverse engineering the icd s communications protocol with an oscilloscope and a software radio we implemented several software radio based attacks that could compromise patient safety and patient privacy motivated by our desire to improve patient safety and mindful of conventional trade offs between security and power consumption for resource constrained devices we introduce three new zero power defenses based on rf power harvesting two of these defenses are human centric bringing patients into the loop with respect to the security and privacy of their implantable medical devices imds our contributions provide a scientific baseline for understanding the potential security and privacy risks of current and future imds and introduce human perceptible and zero power mitigation techniques that address those risks to the best of our knowledge this paper is the first in our community to use general purpose software radios to analyze and attack previously unknown radio communications protocols", + "title_raw": "Pacemakers and Implantable Cardiac Defibrillators: Software Radio Attacks and Zero-Power Defenses", + "abstract_raw": "Our study analyzes the security and privacy properties of an implantable cardioverter defibrillator (ICD). Introduced to the U.S. market in 2003, this model of ICD includes pacemaker technology and is designed to communicate wirelessly with a nearby external programmer in the 175 kHz frequency range. After partially reverse-engineering the ICD's communications protocol with an oscilloscope and a software radio, we implemented several software radio-based attacks that could compromise patient safety and patient privacy. 
Motivated by our desire to improve patient safety, and mindful of conventional trade-offs between security and power consumption for resource-constrained devices, we introduce three new zero-power defenses based on RF power harvesting. Two of these defenses are human-centric, bringing patients into the loop with respect to the security and privacy of their implantable medical devices (IMDs). Our contributions provide a scientific baseline for understanding the potential security and privacy risks of current and future IMDs, and introduce human-perceptible and zero-power mitigation techniques that address those risks. To the best of our knowledge, this paper is the first in our community to use general-purpose software radios to analyze and attack previously unknown radio communications protocols.", + "link": "https://www.semanticscholar.org/paper/9e77651cad732f9960f947b7430b0bcb4a645fc8", + "scraped_abstract": null, + "citation_best": 725 + }, + { + "paper": "2103331800", + "venue": "1152462849", + "year": "2008", + "title": "zigzag decoding combating hidden terminals in wireless networks", + "label": [ + "100850083", + "31395832", + "31258907", + "158379750", + "83204339", + "555944384", + "108037233", + "157764524", + "57273362" + ], + "author": [ + "2039996270", + "194212017" + ], + "reference": [ + "95236662", + "1479887491", + "1487361149", + "1556313079", + "1574326737", + "1594343302", + "2025002714", + "2026111692", + "2095247492", + "2099057525", + "2101793980", + "2105651997", + "2107401636", + "2109858996", + "2110499477", + "2123536775", + "2134698708", + "2136979582", + "2138214500", + "2139914328", + "2141316949", + "2143354557", + "2147227087", + "2148099880", + "2152496949", + "2155416924", + "2160389555", + "2172262965", + "2295555613", + "2409433878", + "2912369344" + ], + "abstract": "this paper presents zigzag an 802 11 receiver design that combats hidden terminals zigzag s core contribution is a new form of interference cancellation that 
exploits asynchrony across successive collisions specifically 802 11 retransmissions in the case of hidden terminals cause successive collisions these collisions have different interference free stretches at their start which zigzag exploits to bootstrap its decoding zigzag makes no changes to the 802 11 mac and introduces no overhead when there are no collisions but when senders collide zigzag attains the same throughput as if the colliding packets were a priori scheduled in separate time slots we build a prototype of zigzag in gnu radio in a testbed of 14 usrp nodes zigzag reduces the average packet loss rate at hidden terminals from 72 6 to about 0 7", + "title_raw": "Zigzag decoding: combating hidden terminals in wireless networks", + "abstract_raw": "This paper presents ZigZag, an 802.11 receiver design that combats hidden terminals. ZigZag's core contribution is a new form of interference cancellation that exploits asynchrony across successive collisions. Specifically, 802.11 retransmissions, in the case of hidden terminals, cause successive collisions. These collisions have different interference-free stretches at their start, which ZigZag exploits to bootstrap its decoding. ZigZag makes no changes to the 802.11 MAC and introduces no overhead when there are no collisions. But, when senders collide, ZigZag attains the same throughput as if the colliding packets were a priori scheduled in separate time slots. We build a prototype of ZigZag in GNU Radio. 
In a testbed of 14 USRP nodes, ZigZag reduces the average packet loss rate at hidden terminals from 72.6% to about 0.7%.", + "link": "https://www.semanticscholar.org/paper/5202acf193a3d4c7dd8629c22d64ab350b3309c1", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2005689470", + "venue": "1140684652", + "year": "2008", + "title": "algorithmic mediation for collaborative exploratory search", + "label": [ + "182861755", + "90288658", + "97854310", + "2775942080", + "89505385", + "78999398", + "21025794", + "2777866876", + "23123220" + ], + "author": [ + "2168651388", + "28426143", + "2122808819", + "1976553559", + "2558338154" + ], + "reference": [ + "1491947711", + "1601302335", + "1601995684", + "1964242433", + "1976454383", + "1988686126", + "1994271853", + "2001278497", + "2043777533", + "2077670823", + "2078396654", + "2084457609", + "2085052712", + "2102958620" + ], + "abstract": "we describe a new approach to information retrieval algorithmic mediation for intentional synchronous collaborative exploratory search using our system two or more users with a common information need search together simultaneously the collaborative system provides tools user interfaces and most importantly algorithmically mediated retrieval to focus enhance and augment the team s search and communication activities collaborative search outperformed post hoc merging of similarly instrumented single user runs algorithmic mediation improved both collaborative search allowing a team of searchers to find relevant information more efficiently and effectively and exploratory search allowing the searchers to find relevant information that cannot be found while working individually", + "title_raw": "Algorithmic mediation for collaborative exploratory search", + "abstract_raw": "We describe a new approach to information retrieval: algorithmic mediation for intentional, synchronous collaborative exploratory search. 
Using our system, two or more users with a common information need search together, simultaneously. The collaborative system provides tools, user interfaces and, most importantly, algorithmically-mediated retrieval to focus, enhance and augment the team's search and communication activities. Collaborative search outperformed post hoc merging of similarly instrumented single user runs. Algorithmic mediation improved both collaborative search (allowing a team of searchers to find relevant information more efficiently and effectively), and exploratory search (allowing the searchers to find relevant information that cannot be found while working individually).", + "link": "https://www.semanticscholar.org/paper/3bb44af2251b89c83a922778be5ba50cc205cc8c", + "scraped_abstract": null, + "citation_best": 181 + }, + { + "paper": "2059739152", + "venue": "1131589359", + "year": "2008", + "title": "counter braids a novel counter architecture for per flow measurement", + "label": [ + "158379750", + "97824396", + "99138194", + "11413529" + ], + "author": [ + "2171887591", + "3176425963", + "2108719851", + "2073170117", + "2040576934" + ], + "reference": [ + "129041085", + "1536930200", + "1977141583", + "1979194036", + "2010973140", + "2029712200", + "2077439570", + "2080234606", + "2099111195", + "2108909367", + "2113935902", + "2123845384", + "2128765501", + "2129638195", + "2137813581", + "2138543759", + "2161342511", + "2296616510", + "2962747496", + "3139815204" + ], + "abstract": "fine grained network measurement requires routers and switches to update large arrays of counters at very high link speed e g 40 gbps a naive algorithm needs an infeasible amount of sram to store both the counters and a flow to counter association rule so that arriving packets can update corresponding counters at link speed this has made accurate per flow measurement complex and expensive and motivated approximate methods that detect and measure only the large flows this paper revisits the problem 
of accurate per flow measurement we present a counter architecture called counter braids inspired by sparse random graph codes in a nutshell counter braids compresses while counting it solves the central problems counter space and flow to counter association of per flow measurement by braiding a hierarchy of counters with random graphs braiding results in drastic space reduction by sharing counters among flows and using random graphs generated on the fly with hash functions avoids the storage of flow to counter association the counter braids architecture is optimal albeit with a complex decoder as it achieves the maximum compression rate asymptotically for implementation we present a low complexity message passing decoding algorithm which can recover flow sizes with essentially zero error evaluation on internet traces demonstrates that almost all flow sizes are recovered exactly with only a few bits of counter space per flow", + "title_raw": "Counter braids: a novel counter architecture for per-flow measurement", + "abstract_raw": "Fine-grained network measurement requires routers and switches to update large arrays of counters at very high link speed (e.g. 40 Gbps). A naive algorithm needs an infeasible amount of SRAM to store both the counters and a flow-to-counter association rule, so that arriving packets can update corresponding counters at link speed. This has made accurate per-flow measurement complex and expensive, and motivated approximate methods that detect and measure only the large flows. This paper revisits the problem of accurate per-flow measurement. We present a counter architecture, called Counter Braids, inspired by sparse random graph codes. In a nutshell, Counter Braids \"compresses while counting\". It solves the central problems (counter space and flow-to-counter association) of per-flow measurement by \"braiding\" a hierarchy of counters with random graphs. 
Braiding results in drastic space reduction by sharing counters among flows; and using random graphs generated on-the-fly with hash functions avoids the storage of flow-to-counter association. The Counter Braids architecture is optimal (albeit with a complex decoder) as it achieves the maximum compression rate asymptotically. For implementation, we present a low-complexity message passing decoding algorithm, which can recover flow sizes with essentially zero error. Evaluation on Internet traces demonstrates that almost all flow sizes are recovered exactly with only a few bits of counter space per flow.", + "link": "https://www.semanticscholar.org/paper/476213b2d8914eca261459e7e2fc63a96f967bd9", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "1987358714", + "venue": "1175089206", + "year": "2008", + "title": "serializable isolation for snapshot databases", + "label": [ + "108744092", + "84511453", + "77088390", + "55282118", + "93361087", + "19513336", + "191558696", + "52723943", + "72108876", + "193702766", + "120314980" + ], + "author": [ + "2560104409", + "2140136442", + "2154123352" + ], + "reference": [ + "25162621", + "49219822", + "1480006450", + "1493875369", + "1515932031", + "1549414577", + "1550183038", + "1569583874", + "1861475200", + "1916593361", + "1958609913", + "1966308107", + "1968658954", + "1970748906", + "1989306170", + "1991199257", + "1993505169", + "1996952974", + "2000966828", + "2004735493", + "2018464987", + "2035069351", + "2047908203", + "2054644843", + "2101027550", + "2102333161", + "2105636195", + "2112163169", + "2117917070", + "2122696933", + "2131751093", + "2133386065", + "2138536231", + "2139086035", + "2146791244", + "2168227861", + "2169472560", + "2435606122", + "2913877777", + "2968535529" + ], + "abstract": "many popular database management systems offer snapshot isolation rather than full serializability there are well known anomalies permitted by snapshot isolation that can lead to violations of 
data consistency by interleaving transactions that individually maintain consistency until now the only way to prevent these anomalies was to modify the applications by introducing artificial locking or update conflicts following careful analysis of conflicts between all pairs of transactions this paper describes a modification to the concurrency control algorithm of a database management system that automatically detects and prevents snapshot isolation anomalies at runtime for arbitrary applications thus providing serializable isolation the new algorithm preserves the properties that make snapshot isolation attractive including that readers do not block writers and vice versa an implementation and performance study of the algorithm are described showing that the throughput approaches that of snapshot isolation in most cases", + "title_raw": "Serializable isolation for snapshot databases", + "abstract_raw": "Many popular database management systems offer snapshot isolation rather than full serializability. There are well-known anomalies permitted by snapshot isolation that can lead to violations of data consistency by interleaving transactions that individually maintain consistency. Until now, the only way to prevent these anomalies was to modify the applications by introducing artificial locking or update conflicts, following careful analysis of conflicts between all pairs of transactions. This paper describes a modification to the concurrency control algorithm of a database management system that automatically detects and prevents snapshot isolation anomalies at runtime for arbitrary applications, thus providing serializable isolation. The new algorithm preserves the properties that make snapshot isolation attractive, including that readers do not block writers and vice versa. 
An implementation and performance study of the algorithm are described, showing that the throughput approaches that of snapshot isolation in most cases.", + "link": "https://www.semanticscholar.org/paper/e48e77dc76485228f7ea2229048f83aabdc3a582", + "scraped_abstract": null, + "citation_best": 173 + }, + { + "paper": "2171901130", + "venue": "1175089206", + "year": "2008", + "title": "scalable network distance browsing in spatial databases", + "label": [ + "11413529", + "80444323" + ], + "author": [ + "89934235", + "1997756302", + "1209905035" + ], + "reference": [ + "26550955", + "197260046", + "1497953515", + "1543003006", + "1561240874", + "1600592911", + "1931818007", + "1973444249", + "1980293260", + "1990111898", + "2000288754", + "2007695015", + "2014889099", + "2030670215", + "2044012461", + "2046144220", + "2050734958", + "2065759448", + "2083016274", + "2088171019", + "2088630449", + "2094078373", + "2100586428", + "2134874858", + "2154356512", + "2161008043", + "2168872671", + "2169336598", + "2169528473", + "2169893912", + "2171330332" + ], + "abstract": "an algorithm is presented for finding the k nearest neighbors in a spatial network in a best first manner using network distance the algorithm is based on precomputing the shortest paths between all possible vertices in the network and then making use of an encoding that takes advantage of the fact that the shortest paths from vertex u to all of the remaining vertices can be decomposed into subsets based on the first edges on the shortest paths to them from u thus in the worst case the amount of work depends on the number of objects that are examined and the number of links on the shortest paths to them from q rather than depending on the number of vertices in the network the amount of storage required to keep track of the subsets is reduced by taking advantage of their spatial coherence which is captured by the aid of a shortest path quadtree in particular experiments on a number of large road networks 
as well as a theoretical analysis have shown that the storage has been reduced from o n3 to o n1 5 i e by an order of magnitude equal to the square root the precomputation of the shortest paths along the network essentially decouples the process of computing shortest paths along the network from that of finding the neighbors and thereby also decouples the domain s of the query objects and that of the objects from which the neighbors are drawn from the domain v of the vertices of the spatial network this means that as long as the spatial network is unchanged the algorithm and underlying representation of the shortest paths in the spatial network can be used with different sets of objects", + "title_raw": "Scalable network distance browsing in spatial databases", + "abstract_raw": "An algorithm is presented for finding the k nearest neighbors in a spatial network in a best-first manner using network distance. The algorithm is based on precomputing the shortest paths between all possible vertices in the network and then making use of an encoding that takes advantage of the fact that the shortest paths from vertex u to all of the remaining vertices can be decomposed into subsets based on the first edges on the shortest paths to them from u. Thus, in the worst case, the amount of work depends on the number of objects that are examined and the number of links on the shortest paths to them from q, rather than depending on the number of vertices in the network. The amount of storage required to keep track of the subsets is reduced by taking advantage of their spatial coherence which is captured by the aid of a shortest path quadtree. In particular, experiments on a number of large road networks as well as a theoretical analysis have shown that the storage has been reduced from O(N3) to O(N1.5) (i.e., by an order of magnitude equal to the square root). 
The precomputation of the shortest paths along the network essentially decouples the process of computing shortest paths along the network from that of finding the neighbors, and thereby also decouples the domain S of the query objects and that of the objects from which the neighbors are drawn from the domain V of the vertices of the spatial network. This means that as long as the spatial network is unchanged, the algorithm and underlying representation of the shortest paths in the spatial network can be used with different sets of objects.", + "link": "https://www.semanticscholar.org/paper/8510872ba5fb52f2341178e691f26a69bd7487f4", + "scraped_abstract": null, + "citation_best": 307 + }, + { + "paper": "2140190783", + "venue": "1166315290", + "year": "2008", + "title": "bringing physics to the surface", + "label": [ + "190390380", + "107457646", + "159437735", + "136197465" + ], + "author": [ + "2105571773", + "2098553916", + "2075214526", + "2132704291", + "2617703163" + ], + "reference": [ + "94894406", + "635058499", + "1482124653", + "1697827404", + "1751327189", + "1965447681", + "2041733557", + "2068113013", + "2069324208", + "2073524999", + "2098930842", + "2101577246", + "2109580619", + "2137092817", + "2140982079", + "2142796063", + "2144432444", + "2146203184", + "2148819007", + "2149980117", + "2158707444", + "2162456078", + "2162799284", + "2168442304", + "2234650683", + "3149441033" + ], + "abstract": "this paper explores the intersection of emerging surface technologies capable of sensing multiple contacts and of ten shape information and advanced games physics engines we define a technique for modeling the data sensed from such surfaces as input within a physics simulation this affords the user the ability to interact with digital objects in ways analogous to manipulation of real objects our technique is capable of modeling both multiple contact points and more sophisticated shape information such as the entire hand or other physical objects and of 
mapping this user input to contact forces due to friction and collisions within the physics simulation this enables a variety of fine grained and casual interactions supporting finger based whole hand and tangible input we demonstrate how our technique can be used to add real world dynamics to interactive surfaces such as a vision based tabletop creating a fluid and natural experience our approach hides from application developers many of the complexities inherent in using physics engines allowing the creation of applications without preprogrammed interaction behavior or gesture recognition", + "title_raw": "Bringing physics to the surface", + "abstract_raw": "This paper explores the intersection of emerging surface technologies, capable of sensing multiple contacts and of-ten shape information, and advanced games physics engines. We define a technique for modeling the data sensed from such surfaces as input within a physics simulation. This affords the user the ability to interact with digital objects in ways analogous to manipulation of real objects. Our technique is capable of modeling both multiple contact points and more sophisticated shape information, such as the entire hand or other physical objects, and of mapping this user input to contact forces due to friction and collisions within the physics simulation. This enables a variety of fine-grained and casual interactions, supporting finger-based, whole-hand, and tangible input. We demonstrate how our technique can be used to add real-world dynamics to interactive surfaces such as a vision-based tabletop, creating a fluid and natural experience. 
Our approach hides from application developers many of the complexities inherent in using physics engines, allowing the creation of applications without preprogrammed interaction behavior or gesture recognition.", + "link": "https://www.semanticscholar.org/paper/0564c581517d6ca954635451d8d87150ba889242", + "scraped_abstract": null, + "citation_best": 194 + }, + { + "paper": "2018989507", + "venue": "1133523790", + "year": "2008", + "title": "finding frequent items in data streams", + "label": [ + "89198739", + "124101348", + "109364899", + "2780527393", + "23123220" + ], + "author": [ + "2190072679", + "50097054" + ], + "reference": [ + "2540670", + "31696346", + "1489091404", + "1493892051", + "1525132895", + "1553409264", + "1601184934", + "1634257591", + "1675727887", + "1766932551", + "1977951613", + "2004110412", + "2004154913", + "2006355640", + "2026784425", + "2039852170", + "2049282244", + "2050290319", + "2058858139", + "2064379477", + "2069980026", + "2075567379", + "2080234606", + "2112452856", + "2113139394", + "2119714163", + "2122529145", + "2126922390", + "2130297445", + "2150569458", + "2152637787", + "2154096150", + "2161118867", + "2166290564", + "2170963089", + "2621193479" + ], + "abstract": "the frequent items problem is to process a stream of items and find all items occurring more than a given fraction of the time it is one of the most heavily studied problems in data stream mining dating back to the 1980s many applications rely directly or indirectly on finding the frequent items and implementations are in use in large scale industrial systems however there has not been much comparison of the different methods under uniform experimental conditions it is common to find papers touching on this topic in which important related work is mischaracterized overlooked or reinvented in this paper we aim to present the most important algorithms for this problem in a common framework we have created baseline implementations of the algorithms and used 
these to perform a thorough experimental study of their properties we give empirical evidence that there is considerable variation in the performance of frequent items algorithms the best methods can be implemented to find frequent items with high accuracy using only tens of kilobytes of memory at rates of millions of items per second on cheap modern hardware", + "title_raw": "Finding frequent items in data streams", + "abstract_raw": "The frequent items problem is to process a stream of items and find all items occurring more than a given fraction of the time. It is one of the most heavily studied problems in data stream mining, dating back to the 1980s. Many applications rely directly or indirectly on finding the frequent items, and implementations are in use in large scale industrial systems. However, there has not been much comparison of the different methods under uniform experimental conditions. It is common to find papers touching on this topic in which important related work is mischaracterized, overlooked, or reinvented.\r\n\r\nIn this paper, we aim to present the most important algorithms for this problem in a common framework. We have created baseline implementations of the algorithms, and used these to perform a thorough experimental study of their properties. We give empirical evidence that there is considerable variation in the performance of frequent items algorithms. 
The best methods can be implemented to find frequent items with high accuracy using only tens of kilobytes of memory, at rates of millions of items per second on cheap modern hardware.", + "link": "https://www.semanticscholar.org/paper/2a83b62e24f3aa372a2dbf71961c32f6473456ff", + "scraped_abstract": null, + "citation_best": 559 + }, + { + "paper": "2141463661", + "venue": "1133523790", + "year": "2008", + "title": "constrained physical design tuning", + "label": [ + "22414024", + "188817802", + "120314980", + "79403827" + ], + "author": [ + "2133606375", + "2163909284" + ], + "reference": [ + "61201023", + "983313976", + "1026154630", + "1144264627", + "1501465455", + "1521236715", + "1545359798", + "1551461997", + "1553825122", + "1560862172", + "1576035775", + "1583332480", + "1593715994", + "1612556207", + "1851390469", + "1864271624", + "1885387902", + "1890845846", + "1931710365", + "1958317016", + "1997375126", + "2027259333", + "2033368934", + "2093267241", + "2093490165", + "2096674153", + "2099478569", + "2099918651", + "2103447673", + "2106942224", + "2108490710", + "2108883953", + "2116616082", + "2119946100", + "2122816893", + "2125605956", + "2148291485", + "2152686654", + "2168362821", + "2168503413", + "2169540812", + "2170188482", + "2197146345", + "2204891826", + "2210762885", + "2216607184", + "2217180016", + "2217556904", + "2227634844", + "2253899864", + "2261054240", + "2282700874", + "2296408038", + "2300545664", + "2407187628", + "2411895888", + "3021961968" + ], + "abstract": "existing solutions to the automated physical design problem in database systems attempt to minimize execution costs of input workloads for a given storage constraint in this work we argue that this model is not flexible enough to address several real world situations to overcome this limitation we introduce a constraint language that is simple yet powerful enough to express many important scenarios we build upon a previously proposed transformation based framework to 
incorporate constraints into the search space we then show experimentally that we are able to handle a rich class of constraints and that our proposed technique scales gracefully our approach generalizes previous work that assumes simpler optimization models where configuration size is the only fixed constraint as a consequence the process of tuning a workload not only becomes more flexible but also more complex and getting the best design in the first attempt becomes difficult we propose a paradigm shift for physical design tuning in which sessions are highly interactive allowing dbas to quickly try different options identify problems and obtain physical designs in an agile manner", + "title_raw": "Constrained physical design tuning", + "abstract_raw": "Existing solutions to the automated physical design problem in database systems attempt to minimize execution costs of input workloads for a given storage constraint. In this work, we argue that this model is not flexible enough to address several real-world situations. To overcome this limitation, we introduce a constraint language that is simple yet powerful enough to express many important scenarios. We build upon a previously proposed transformation-based framework to incorporate constraints into the search space. We then show experimentally that we are able to handle a rich class of constraints and that our proposed technique scales gracefully. Our approach generalizes previous work that assumes simpler optimization models where configuration size is the only fixed constraint. As a consequence, the process of tuning a workload not only becomes more flexible, but also more complex, and getting the best design in the first attempt becomes difficult. 
We propose a paradigm shift for physical design tuning, in which sessions are highly interactive, allowing DBAs to quickly try different options, identify problems, and obtain physical designs in an agile manner.", + "link": "https://www.semanticscholar.org/paper/2bf18cc17d6db60e74aa84106b0cd048087363f1", + "scraped_abstract": null, + "citation_best": 36 + }, + { + "paper": "2145990704", + "venue": "1135342153", + "year": "2008", + "title": "irlbot scaling to 6 billion pages and beyond", + "label": [ + "2780154274", + "73340581", + "77088390", + "13743948", + "136134403", + "61423126", + "136764020" + ], + "author": [ + "2232519522", + "2133612086", + "2126247714", + "314230453" + ], + "reference": [ + "75458008", + "110443600", + "173713115", + "1561637629", + "1587314265", + "1602486336", + "1613836731", + "1674850363", + "1797803111", + "1966912174", + "1968155810", + "1979126145", + "1997438973", + "2000273502", + "2000333294", + "2007687650", + "2012833704", + "2013531639", + "2018928332", + "2029500199", + "2046441184", + "2051804774", + "2066636486", + "2085922539", + "2098660810", + "2108671511", + "2130242957", + "2130610812", + "2139532006", + "2140279085", + "2145349611", + "2151170908", + "2152565070", + "2158601853", + "2161118554", + "2164542999", + "2295141584", + "2986790267" + ], + "abstract": "this paper shares our experience in designing a web crawler that can download billions of pages using a single server implementation and models its performance we show that with the quadratically increasing complexity of verifying url uniqueness bfs crawl order and fixed per host rate limiting current crawling algorithms cannot effectively cope with the sheer volume of urls generated in large crawls highly branching spam legitimate multi million page blog sites and infinite loops created by server side scripts we offer a set of techniques for dealing with these issues and test their performance in an implementation we call irlbot in our recent experiment 
that lasted 41 days irlbot running on a single server successfully crawled 6 3 billion valid html pages 7 6 billion connection requests and sustained an average download rate of 319 mb s 1 789 pages s unlike our prior experiments with algorithms proposed in related work this version of irlbot did not experience any bottlenecks and successfully handled content from over 117 million hosts parsed out 394 billion links and discovered a subset of the web graph with 41 billion unique nodes", + "title_raw": "IRLbot: scaling to 6 billion pages and beyond", + "abstract_raw": "This paper shares our experience in designing a web crawler that can download billions of pages using a single-server implementation and models its performance. We show that with the quadratically increasing complexity of verifying URL uniqueness, BFS crawl order, and fixed per-host rate-limiting, current crawling algorithms cannot effectively cope with the sheer volume of URLs generated in large crawls, highly-branching spam, legitimate multi-million-page blog sites, and infinite loops created by server-side scripts. We offer a set of techniques for dealing with these issues and test their performance in an implementation we call IRLbot. In our recent experiment that lasted 41 days, IRLbot running on a single server successfully crawled 6.3 billion valid HTML pages ($7.6$ billion connection requests) and sustained an average download rate of 319 mb/s (1,789 pages/s). 
Unlike our prior experiments with algorithms proposed in related work, this version of IRLbot did not experience any bottlenecks and successfully handled content from over 117 million hosts, parsed out 394 billion links, and discovered a subset of the web graph with 41 billion unique nodes.", + "link": "https://www.semanticscholar.org/paper/f3af292e4c0f4afe93f22505dabdffdd89793b76", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2146447174", + "venue": "1184914352", + "year": "2007", + "title": "plow a collaborative task learning agent", + "label": [ + "160145156", + "138020889", + "154945302", + "107457646", + "2779439875", + "2781366954", + "161301231" + ], + "author": [ + "2235393145", + "2179135478", + "2128752096", + "7122111", + "2112107296", + "2101086093", + "143530444" + ], + "reference": [ + "140382357", + "1553306009", + "1977546238", + "1993680470", + "2069362004", + "2097360199", + "2098217544", + "2108600775", + "2140909204", + "2787576363" + ], + "abstract": "to be effective an agent that collaborates with humans needs to be able to learn new tasks from humans they work with this paper describes a system that learns executable task models from a single collaborative learning session consisting of demonstration explanation and dialogue to accomplish this the system integrates a range of ai technologies deep natural language understanding knowledge representation and reasoning dialogue systems planning agent based systems and machine learning a formal evaluation shows the approach has great promise", + "title_raw": "PLOW: a collaborative task learning agent", + "abstract_raw": "To be effective, an agent that collaborates with humans needs to be able to learn new tasks from humans they work with. This paper describes a system that learns executable task models from a single collaborative learning session consisting of demonstration, explanation and dialogue. 
To accomplish this, the system integrates a range of AI technologies: deep natural language understanding, knowledge representation and reasoning, dialogue systems, planning/agent-based systems and machine learning. A formal evaluation shows the approach has great promise.", + "link": "https://www.semanticscholar.org/paper/431e61648a59abcd05411503ead56de8aa97906b", + "scraped_abstract": null, + "citation_best": 151 + }, + { + "paper": "1902527071", + "venue": "1184914352", + "year": "2007", + "title": "thresholded rewards acting optimally in timed zero sum games", + "label": [ + "19768560", + "9515004", + "173801870" + ], + "author": [ + "2132958556", + "2108671403" + ], + "reference": [ + "155805226", + "1512919909", + "1515851193", + "1530444831", + "2006303459", + "2101915445", + "2107726111", + "2119567691", + "2170400507", + "2397240726", + "2911283634", + "2914656440", + "2914941842" + ], + "abstract": "in timed zero sum games the goal is to maximize the probability of winning which is not necessarily the same as maximizing our expected reward we consider cumulative intermediate reward to be the difference between our score and our opponent s score the true reward of a win loss or tie is determined at the end of a game by applying a threshold function to the cumulative intermediate reward we introduce thresholded rewards problems to capture this dependency of the final reward outcome on the cumulative intermediate reward thresholded rewards problems reflect different real world stochastic planning domains especially zero sum games in which time and score need to be considered we investigate the application of thresholded rewards to finite horizon markov decision processes mdps in general the optimal policy for a thresholded rewards mdp will be non stationary depending on the number of time steps remaining and the cumulative intermediate reward we introduce an efficient value iteration algorithm that solves thresholded rewards mdps exactly but with running 
time quadratic on the number of states in the mdp and the length of the time horizon we investigate a number of heuristic based techniques that efficiently find approximate solutions for mdps with large state spaces or long time horizons", + "title_raw": "Thresholded rewards: acting optimally in timed, zero-sum games", + "abstract_raw": "In timed, zero-sum games, the goal is to maximize the probability of winning, which is not necessarily the same as maximizing our expected reward. We consider cumulative intermediate reward to be the difference between our score and our opponent's score; the \"true\" reward of a win, loss, or tie is determined at the end of a game by applying a threshold function to the cumulative intermediate reward. We introduce thresholded-rewards problems to capture this dependency of the final reward outcome on the cumulative intermediate reward. Thresholded-rewards problems reflect different real-world stochastic planning domains, especially zero-sum games, in which time and score need to be considered. We investigate the application of thresholded rewards to finite-horizon Markov Decision Processes (MDPs). In general, the optimal policy for a thresholded-rewards MDP will be non-stationary, depending on the number of time steps remaining and the cumulative intermediate reward. We introduce an efficient value iteration algorithm that solves thresholded-rewards MDPs exactly, but with running time quadratic on the number of states in the MDP and the length of the time horizon. 
We investigate a number of heuristic-based techniques that efficiently find approximate solutions for MDPs with large state spaces or long time horizons.", + "link": "https://www.semanticscholar.org/paper/e1f99232103402e134eef9bded15f5d7ec142672", + "scraped_abstract": null, + "citation_best": 13 + }, + { + "paper": "2121465811", + "venue": "1188739475", + "year": "2007", + "title": "learning synchronous grammars for semantic parsing with lambda calculus", + "label": [ + "53893814", + "118364021", + "199360897", + "67621940", + "135790938", + "146810361", + "204321447", + "51802942", + "203005215", + "186644900" + ], + "author": [ + "2172933544", + "2167433806" + ], + "reference": [ + "125699335", + "1496189301", + "1518357715", + "1768003599", + "1986423506", + "2097828466", + "2102258316", + "2114964701", + "2116316001", + "2121127625", + "2148833958", + "2152263452", + "2156985047", + "2158388102", + "2163274265", + "2165666205", + "2264742718", + "2785731355" + ], + "abstract": "this paper presents the first empirical results to our knowledge on learning synchronous grammars that generate logical forms using statistical machine translation techniques a semantic parser based on a synchronous context free grammar augmented with operators is learned given a set of training sentences and their correct logical forms the resulting parser is shown to be the bestperforming system so far in a database query domain", + "title_raw": "Learning Synchronous Grammars for Semantic Parsing with Lambda Calculus", + "abstract_raw": "This paper presents the first empirical results to our knowledge on learning synchronous grammars that generate logical forms. Using statistical machine translation techniques, a semantic parser based on a synchronous context-free grammar augmented with \ufffdoperators is learned given a set of training sentences and their correct logical forms. 
The resulting parser is shown to be the bestperforming system so far in a database query domain.", + "link": "https://www.semanticscholar.org/paper/c2ecc66c0e5f976b0e0d95c64ed2d1e283a2625d", + "scraped_abstract": null, + "citation_best": 331 + }, + { + "paper": "2117601224", + "venue": "1163450153", + "year": "2007", + "title": "authoring sensor based interactions by demonstration with direct manipulation and pattern recognition", + "label": [ + "107457646", + "153180895", + "13854087", + "178980831" + ], + "author": [ + "2192055696", + "2223812137", + "2237362364", + "261822931" + ], + "reference": [ + "156058692", + "647259934", + "1492324553", + "1522006088", + "1533944381", + "1582682500", + "1972051027", + "1994547327", + "2016738952", + "2028878459", + "2036024905", + "2045593418", + "2062658884", + "2064189581", + "2083150192", + "2115647291", + "2120080783", + "2120093000", + "2121429649", + "2125124735", + "2128160875", + "2134063352", + "2134332471", + "2141385588", + "2149802704", + "2160288944", + "2161969471", + "2163314677", + "2189300278", + "2491703286", + "2610881169", + "2912082750" + ], + "abstract": "sensors are becoming increasingly important in interaction design authoring a sensor based interaction comprises three steps choosing and connecting the appropriate hardware creating application logic and specifying the relationship between sensor values and application logic recent research has successfully addressed the first two issues however linking sensor input data to application logic remains an exercise in patience and trial and error testing for most designers this paper introduces techniques for authoring sensor based interactions by demonstration a combination of direct manipulation and pattern recognition techniques enables designers to control how demonstrated examples are generalized to interaction rules this approach emphasizes design exploration by enabling very rapid iterative demonstrate edit review cycles this paper describes the 
manifestation of these techniques in a design tool exemplar and presents evaluations through a first use lab study and a theoretical analysis using the cognitive dimensions of notation framework", + "title_raw": "Authoring sensor-based interactions by demonstration with direct manipulation and pattern recognition", + "abstract_raw": "Sensors are becoming increasingly important in interaction design. Authoring a sensor-based interaction comprises three steps: choosing and connecting the appropriate hardware, creating application logic, and specifying the relationship between sensor values and application logic. Recent research has successfully addressed the first two issues. However, linking sensor input data to application logic remains an exercise in patience and trial-and-error testing for most designers. This paper introduces techniques for authoring sensor-based interactions by demonstration. A combination of direct manipulation and pattern recognition techniques enables designers to control how demonstrated examples are generalized to interaction rules. This approach emphasizes design exploration by enabling very rapid iterative demonstrate-edit-review cycles. 
This paper describes the manifestation of these techniques in a design tool, Exemplar, and presents evaluations through a first-use lab study and a theoretical analysis using the Cognitive Dimensions of Notation framework.", + "link": "https://www.semanticscholar.org/paper/9e9e587d9cc4445270a98b7fa7ae901995845ec4", + "scraped_abstract": null, + "citation_best": 149 + }, + { + "paper": "2006550435", + "venue": "1163450153", + "year": "2007", + "title": "consuming video on mobile devices", + "label": [ + "540522513", + "40161942", + "60952562", + "49774154", + "30658544", + "186967261", + "516764902" + ], + "author": [ + "2044174557", + "2135778182", + "2001916983" + ], + "reference": [ + "9305429", + "23054452", + "24829341", + "116898022", + "118547362", + "154669029", + "609508847", + "1484003379", + "1489515310", + "1508239974", + "1508564994", + "1551486248", + "1607822313", + "1808982900", + "1981646721", + "2037953526", + "2049328142", + "2051090142", + "2086423525", + "2091887445", + "2097774197", + "2099025317", + "2106479048", + "2107163126", + "2111356933", + "2126305976", + "2153516373", + "2154107987", + "2155678180", + "2489370993", + "2954065904" + ], + "abstract": "mobile video is now an everyday possibility with a wide array of commercially available devices services and content these technologies promise to transform the way that people can consume video media in their lives beyond the familiar behaviours associated with fixed tv and video technologies building upon earlier studies of mobile video this paper reports on a study using diary techniques and ethnographic interviews to better understand how people are using commercially available mobile video technologies in their everyday lives drawing on reported episodes of mobile video behaviour the study identifies the social motivations and values underpinning these behaviours that help characterise mobile video consumption beyond the simplistic notion of viewing tv to kill time wherever you may be 
implications for adoption and design of mobile video technologies and services are discussed", + "title_raw": "Consuming video on mobile devices", + "abstract_raw": "Mobile video is now an everyday possibility with a wide array of commercially available devices, services and content. These technologies promise to transform the way that people can consume video media in their lives beyond the familiar behaviours associated with fixed TV and video technologies. Building upon earlier studies of mobile video, this paper reports on a study using diary techniques and ethnographic interviews to better understand how people are using commercially available mobile video technologies in their everyday lives. Drawing on reported episodes of mobile video behaviour, the study identifies the social motivations and values underpinning these behaviours that help characterise mobile video consumption beyond the simplistic notion of viewing TV to kill time wherever you may be. Implications for adoption and design of mobile video technologies and services are discussed.", + "link": "https://www.semanticscholar.org/paper/fc6845c0924ba1c5f595f89a86e84034728c65f7", + "scraped_abstract": null, + "citation_best": 194 + }, + { + "paper": "2131801294", + "venue": "1163450153", + "year": "2007", + "title": "multiview improving trust in group video conferencing through spatial faithfulness", + "label": [ + "2779916870", + "49774154", + "22561748" + ], + "author": [ + "2273996846", + "2101610026" + ], + "reference": [ + "602681897", + "1495397386", + "1573665760", + "1963615444", + "1984926644", + "1985496673", + "2003361908", + "2040649730", + "2062663664", + "2080729871", + "2104697233", + "2107489595", + "2153027474", + "2155959475", + "2156834844", + "2167421295", + "2911311425", + "2970081029", + "3047671354" + ], + "abstract": "video conferencing is still considered a poor alternative to face to face meetings in the business setting where these systems are most prevalent the misuse of 
video conferencing systems can have detrimental results especially in high stakes communications prior work suggests that spatial distortions of nonverbal cues particularly gaze and deixis negatively impact many aspects of effective communication in dyadic communications however video conferencing systems are often used for group to group meetings where spatial distortions are exacerbated meanwhile its effects on the group dynamic are not well understood in this study we examine the effects that spatial distortions of nonverbal cues have on inter group trust formation we conducted a large 169 participant study of group conferencing under various conditions we found that the use of systems that introduce spatial distortions negatively affect trust formation patterns on the other hand these effects are essentially eliminated by using a spatially faithful video conferencing system", + "title_raw": "Multiview: improving trust in group video conferencing through spatial faithfulness", + "abstract_raw": "Video conferencing is still considered a poor alternative to face-to-face meetings. In the business setting, where these systems are most prevalent, the misuse of video conferencing systems can have detrimental results, especially in high-stakes communications. Prior work suggests that spatial distortions of nonverbal cues, particularly gaze and deixis, negatively impact many aspects of effective communication in dyadic communications. However, video conferencing systems are often used for group-to-group meetings where spatial distortions are exacerbated. Meanwhile, its effects on the group dynamic are not well understood. In this study, we examine the effects that spatial distortions of nonverbal cues have on inter-group trust formation. We conducted a large (169 participant) study of group conferencing under various conditions. We found that the use of systems that introduce spatial distortions negatively affect trust formation patterns. 
On the other hand, these effects are essentially eliminated by using a spatially faithful video conferencing system.", + "link": "https://www.semanticscholar.org/paper/2591de716c1a4853d42511fc04b33190b6e47f6b", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2108518773", + "venue": "1163450153", + "year": "2007", + "title": "shift a technique for operating pen based interfaces using touch", + "label": [ + "164086593", + "31972630", + "186967261" + ], + "author": [ + "2154794983", + "2009751849" + ], + "reference": [ + "1594426410", + "1801329930", + "2017881547", + "2030026498", + "2030620731", + "2049655377", + "2051782817", + "2076656908", + "2077885899", + "2091812768", + "2096986574", + "2101096043", + "2111644757", + "2112103637", + "2112824399", + "2124982312", + "2128629242", + "2156665908", + "2163125245", + "2165500877", + "2185992031", + "2296178917" + ], + "abstract": "retrieving the stylus of a pen based device takes time and requires a second hand especially for short intermittent interactions many users therefore choose to use their bare fingers although convenient this increases targeting times and error rates we argue that the main reasons are the occlusion of the target by the user s finger and ambiguity about which part of the finger defines the selection point we propose a pointing technique we call shift that is designed to address these issues when the user touches the screen shift creates a callout showing a copy of the occluded screen area and places it in a non occluded location the callout also shows a pointer representing the selection point of the finger using this visual feedback users guide the pointer into the target by moving their finger on the screen surface and commit the target acquisition by lifting the finger unlike existing techniques shift is only invoked when necessary over large targets no callout is created and users enjoy the full performance of an unaltered touch screen we report the results of a 
user study showing that with shift participants can select small targets with much lower error rates than an unaided touch screen and that shift is faster than offset cursor for larger targets", + "title_raw": "Shift: a technique for operating pen-based interfaces using touch", + "abstract_raw": "Retrieving the stylus of a pen-based device takes time and requires a second hand. Especially for short intermittent interactions many users therefore choose to use their bare fingers. Although convenient, this increases targeting times and error rates. We argue that the main reasons are the occlusion of the target by the user's finger and ambiguity about which part of the finger defines the selection point. We propose a pointing technique we call Shift that is designed to address these issues. When the user touches the screen, Shift creates a callout showing a copy of the occluded screen area and places it in a non-occluded location. The callout also shows a pointer representing the selection point of the finger. Using this visual feedback, users guide the pointer into the target by moving their finger on the screen surface and commit the target acquisition by lifting the finger. Unlike existing techniques, Shift is only invoked when necessary--over large targets no callout is created and users enjoy the full performance of an unaltered touch screen. 
We report the results of a user study showing that with Shift participants can select small targets with much lower error rates than an unaided touch screen and that Shift is faster than Offset Cursor for larger targets.", + "link": "https://www.semanticscholar.org/paper/b54ffad792d6d1f22f15cd36cc9201138599c245", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2057359302", + "venue": "1163450153", + "year": "2007", + "title": "software or wetware discovering when and why people use digital prosthetic memory", + "label": [ + "49774154", + "98986596", + "2777904410", + "2777778603" + ], + "author": [ + "113802524", + "2119343948" + ], + "reference": [ + "7067234", + "68640934", + "1501981432", + "1977089507", + "1981822424", + "1994398551", + "1995644500", + "2015720094", + "2049348296", + "2088703870", + "2112175905", + "2112897804", + "2114939427", + "2126145938", + "2126474993", + "2127923372", + "2137891816", + "2151062347", + "3088885737" + ], + "abstract": "our lives are full of memorable and important moments as well as important items of information the last few years have seen the proliferation of digital devices intended to support prosthetic memory pm to help users recall experiences conversations and retrieve personal information we nevertheless have little systematic understanding of when and why people might use such devices in preference to their own organic memory om although om is fallible it may be more efficient than accessing information from a complex pm device we report a controlled lab study which investigates when and why people use pm and om we found that pm use depended on users evaluation of the quality of their om as well as pm device properties in particular we found that users trade off accuracy and efficiency preferring rapid access to potentially inaccurate information over laborious access to accurate information we discuss the implications of these results for future pm design and theory rather than replacing om 
future pm designs need to focus on allowing om and pm to work in synergy", + "title_raw": "Software or wetware?: discovering when and why people use digital prosthetic memory", + "abstract_raw": "Our lives are full of memorable and important moments, as well as important items of information. The last few years have seen the proliferation of digital devices intended to support prosthetic memory (PM), to help users recall experiences, conversations and retrieve personal information. We nevertheless have little systematic understanding of when and why people might use such devices, in preference to their own organic memory (OM). Although OM is fallible, it may be more efficient than accessing information from a complex PM device. We report a controlled lab study which investigates when and why people use PM and OM. We found that PM use depended on users' evaluation of the quality of their OM, as well as PM device properties. In particular, we found that users trade-off Accuracy and Efficiency, preferring rapid access to potentially inaccurate information over laborious access to accurate information. We discuss the implications of these results for future PM design and theory. 
Rather than replacing OM, future PM designs need to focus on allowing OM and PM to work in synergy.", + "link": "https://www.semanticscholar.org/paper/7f7396f5489c559afe896053ce9823a9f3381770", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2168233682", + "venue": "1163450153", + "year": "2007", + "title": "sustainable interaction design invention disposal renewal reuse", + "label": [ + "9496109", + "13854087", + "542192845" + ], + "author": [ + "1273389305" + ], + "reference": [ + "15726484", + "31045409", + "45313759", + "87429994", + "95184617", + "169910887", + "429319409", + "594957933", + "1482164693", + "1512246197", + "1512483161", + "1528027857", + "1531652896", + "1535357018", + "1539259921", + "1580568969", + "1595324694", + "1603360341", + "1605872579", + "1880749507", + "1895273801", + "1964323399", + "1967954098", + "1976129032", + "2025506858", + "2031384998", + "2033759350", + "2038265284", + "2045240155", + "2053680135", + "2065314976", + "2077334683", + "2091579301", + "2091909921", + "2100163285", + "2105188853", + "2113531150", + "2113754844", + "2115028019", + "2126520272", + "2154172389", + "2166050198", + "2175432988", + "2183938216", + "2259327114", + "2326645863", + "2327021642", + "2332074261", + "2798839087", + "2911384745", + "2954919454", + "3145637508" + ], + "abstract": "this paper presents the perspective that sustainability can and should be a central focus of interaction design a perspective that is termed sustainable interaction design sid as a starting point for a perspective of sustainability design is defined as an act of choosing among or informing choices of future ways of being this perspective of sustainability is presented in terms of design values methods and reasoning the paper proposes i a rubric for understanding the material effects of particular interaction design cases in terms of forms of use reuse and disposal and ii several principles to guide sid the paper illustrates with particular 
examples of design critique for interactive products and appeals to secondary research how two of these principles may be applied to move the effects of designs from less preferred forms of use to more preferred ones finally a vision for incorporating sustainability into the research and practice of interaction design is described", + "title_raw": "Sustainable interaction design: invention & disposal, renewal & reuse", + "abstract_raw": "This paper presents the perspective that sustainability can and should be a central focus of interaction design-a perspective that is termed Sustainable Interaction Design (SID). As a starting point for a perspective of sustainability, design is defined as an act of choosing among or informing choices of future ways of being. This perspective of sustainability is presented in terms of design values, methods, and reasoning. The paper proposes (i) a rubric for understanding the material effects of particular interaction design cases in terms of forms of use, reuse, and disposal, and (ii) several principles to guide SID. The paper illustrates--with particular examples of design critique for interactive products and appeals to secondary research--how two of these principles may be applied to move the effects of designs from less preferred forms of use to more preferred ones. 
Finally, a vision for incorporating sustainability into the research and practice of interaction design is described.", + "link": "http://doi.acm.org/10.1145/1240624.1240705", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2144409879", + "venue": "1158167855", + "year": "2007", + "title": "dynamic 3d scene analysis from a moving vehicle", + "label": [ + "64729616", + "2776151529", + "146159030", + "10161872", + "31972630", + "64876066", + "126042441" + ], + "author": [ + "2141385208", + "2846644265", + "2104586280", + "2107867962" + ], + "reference": [ + "1512655934", + "1914102109", + "1992825118", + "2033819227", + "2040720916", + "2076699421", + "2080082398", + "2089181482", + "2096349671", + "2096846396", + "2107733398", + "2125907508", + "2141473805", + "2143233860", + "2143597234", + "2146352414", + "2147953023", + "2151103935", + "2153565331", + "2161969291", + "2170865122", + "2177274842", + "2295262250", + "3151111735" + ], + "abstract": "in this paper we present a system that integrates fully automatic scene geometry estimation 2d object detection 3d localization trajectory estimation and tracking for dynamic scene interpretation from a moving vehicle our sole input are two video streams from a calibrated stereo rig on top of a car from these streams we estimate structure from motion sfm and scene geometry in real time in parallel we perform multi view multi category object recognition to detect cars and pedestrians in both camera images using the sfm self localization 2d object detections are converted to 3d observations which are accumulated in a world coordinate frame a subsequent tracking module analyzes the resulting 3d observations to find physically plausible spacetime trajectories finally a global optimization criterion takes object object interactions into account to arrive at accurate 3d localization and trajectory estimates for both cars and pedestrians we demonstrate the performance of our integrated system on 
challenging real world data showing car passages through crowded city areas", + "title_raw": "Dynamic 3D Scene Analysis from a Moving Vehicle", + "abstract_raw": "In this paper, we present a system that integrates fully automatic scene geometry estimation, 2D object detection, 3D localization, trajectory estimation, and tracking for dynamic scene interpretation from a moving vehicle. Our sole input are two video streams from a calibrated stereo rig on top of a car. From these streams, we estimate structure-from-motion (SfM) and scene geometry in real-time. In parallel, we perform multi-view/multi-category object recognition to detect cars and pedestrians in both camera images. Using the SfM self-localization, 2D object detections are converted to 3D observations, which are accumulated in a world coordinate frame. A subsequent tracking module analyzes the resulting 3D observations to find physically plausible spacetime trajectories. Finally, a global optimization criterion takes object-object interactions into account to arrive at accurate 3D localization and trajectory estimates for both cars and pedestrians. 
We demonstrate the performance of our integrated system on challenging real-world data showing car passages through crowded city areas.", + "link": "https://www.semanticscholar.org/paper/82fa8d73891ce6476d58f11d6e3b563af21d0a3a", + "scraped_abstract": null, + "citation_best": 302 + }, + { + "paper": "3127778049", + "venue": "1150208541", + "year": "2007", + "title": "space efficient identity based encryption without pairings", + "label": [ + "205203396", + "93974786", + "148730421" + ], + "author": [ + "201828038", + "2339798781", + "2184725411" + ], + "reference": [ + "8287904", + "1504741231", + "1510116442", + "1546774120", + "1569083856", + "1569885018", + "1592092778", + "1596713208", + "1601924177", + "1660562555", + "1797206029", + "1801447676", + "1847491301", + "1905774212", + "1967821609", + "1973801321", + "1975287686", + "1982264769", + "1983378843", + "1991407886", + "2004322147", + "2007585617", + "2017271493", + "2046699624", + "2047360699", + "2052844078", + "2082353536", + "2083896132", + "2085733527", + "2086042811", + "2095179081", + "2097638666", + "2106264302", + "2108072891", + "2110740499", + "2117685508", + "2120976781", + "2136757778", + "2140915062", + "2141040012", + "2141462088", + "2145255308", + "2151471396", + "2154448764", + "2160277501", + "2161214567", + "2165111290", + "2167061810", + "2167816765", + "2176446742", + "2611529857", + "2612848572", + "2952402571", + "3022078572", + "3028652962" + ], + "abstract": "identity based encryption ibe systems are often constructed using bilinear maps a k a pairings on elliptic curves one exception is an elegant system due to cocks which builds an ibe based on the quadratic residuosity problem modulo an rsa composite n the cocks system however produces long ciphertexts since the introduction of the cocks system in 2001 it has been an open problem to construct a space efficient ibe system without pairings in this paper we present an ibe system in which ciphertext size is short an encryption of 
an bit message consists of a single element in z nz plus 1 additional bits security as in the cocks system relies on the quadratic residuosity problem the system is based on the theory of ternary quadratic forms and as a result encryption and decryption are slower than in the cocks system", + "title_raw": "Space-Efficient Identity Based Encryption Without Pairings", + "abstract_raw": "Identity Based Encryption (IBE) systems are often constructed using bilinear maps (a.k.a. pairings) on elliptic curves. One exception is an elegant system due to Cocks which builds an IBE based on the quadratic residuosity problem modulo an RSA composite N. The Cocks system, however, produces long ciphertexts. Since the introduction of the Cocks system in 2001 it has been an open problem to construct a space efficient IBE system without pairings. In this paper we present an IBE system in which ciphertext size is short: an encryption of an \u2018-bit message consists of a single element in Z/NZ plus \u2018+1 additional bits. Security, as in the Cocks system, relies on the quadratic residuosity problem. 
The system is based on the theory of ternary quadratic forms and as a result, encryption and decryption are slower than in the Cocks system.", + "link": "https://www.semanticscholar.org/paper/14d4bc372d955e5499b6855b3367c2e6d3d81083", + "scraped_abstract": null, + "citation_best": 3 + }, + { + "paper": "2167671111", + "venue": "1199533187", + "year": "2007", + "title": "mining specifications of malicious behavior", + "label": [ + "541664917", + "38652104", + "177264268", + "124101348", + "98045186", + "84525096" + ], + "author": [ + "350660853", + "2193269139", + "2001160045" + ], + "reference": [ + "123548525", + "126505109", + "161183723", + "1570533264", + "1593203335", + "1664285496", + "1832277845", + "1966150547", + "1973828066", + "2023288969", + "2054520963", + "2096522207", + "2106649514", + "2113371678", + "2114067856", + "2117030266", + "2118528519", + "2131523719", + "2132504937", + "2138756793", + "2154933195", + "2156841542", + "2158167094", + "2182951193", + "2295399529", + "3151756653" + ], + "abstract": "malware detectors require a specification of malicious behavior typically these specifications are manually constructed by investigating known malware we present an automatic technique to overcome this laborious manual process our technique derives such a specification by comparing the execution behavior of a known malware against the execution behaviors of a set of benign programs in other words we mine the malicious behavior present in a known malware that is not present in a set of benign programs the output of our algorithm can be used by malware detectors to detect malware variants since our algorithm provides a succinct description of malicious behavior present in a malware it can also be used by security analysts for understanding the malware we have implemented a prototype based on our algorithm and tested it on several malware programs experimental results obtained from our prototype indicate that our algorithm is effective in extracting 
malicious behaviors that can be used to detect malware variants", + "title_raw": "Mining specifications of malicious behavior", + "abstract_raw": "Malware detectors require a specification of malicious behavior. Typically, these specifications are manually constructed by investigating known malware. We present an automatic technique to overcome this laborious manual process. Our technique derives such a specification by comparing the execution behavior of a known malware against the execution behaviors of a set of benign programs. In other words, we mine the malicious behavior present in a known malware that is not present in a set of benign programs. The output of our algorithm can be used by malware detectors to detect malware variants. Since our algorithm provides a succinct description of malicious behavior present in a malware, it can also be used by security analysts for understanding the malware. We have implemented a prototype based on our algorithm and tested it on several malware programs. 
Experimental results obtained from our prototype indicate that our algorithm is effective in extracting malicious behaviors that can be used to detect malware variants.", + "link": "https://www.semanticscholar.org/paper/13b7e5564e56c0298c1d2f52a8b6c028ed92a1c9", + "scraped_abstract": null, + "citation_best": 280 + }, + { + "paper": "2112380328", + "venue": "1199533187", + "year": "2007", + "title": "automatic consistency assessment for query results in dynamic environments", + "label": [ + "96956885", + "172722865", + "192939062", + "164120249", + "118689300", + "192028432", + "99016210", + "157692150", + "80444323" + ], + "author": [ + "2223979042", + "2105115566", + "2152841916" + ], + "reference": [ + "1498342204", + "1967765703", + "1985165347", + "1986510087", + "2051694281", + "2055847995", + "2099460914", + "2108346589", + "2115120343", + "2123613326", + "2124074197", + "2124666967", + "2125310458", + "2126874927", + "2132932625", + "2142592086", + "2149921886" + ], + "abstract": "queries are convenient abstractions for the discovery of information and services as they offer content based information access in distributed settings query semantics are well defined e g they often satisfy acid transactional properties in a dynamic network setting however achieving transactional semantics becomes complex due to the openness and unpredictability in this paper we propose a query processing model for mobile ad hoc and sensor networks suitable for expressing a wide range of query semantics the semantics differ in the degree of consistency with which results reflect the state of the environment during execution we introduce several distinct notions of consistency and formalize them a practical contribution of this paper is a protocol for query processing that automatically assesses and adaptively provides an achievable degree of consistency given the state of the operational environment throughout its execution the protocol attaches an assessment of the achieved 
guarantee to returned query results allowing precise reasoning about a query with a range of possible semantics", + "title_raw": "Automatic consistency assessment for query results in dynamic environments", + "abstract_raw": "Queries are convenient abstractions for the discovery of information and services, as they offer content-based information access. In distributed settings, query semantics are well-defined, e.g., they often satisfy ACID transactional properties. In a dynamic network setting, however, achieving transactional semantics becomes complex due to the openness and unpredictability. In this paper, we propose a query processing model for mobile ad hoc and sensor networks suitable for expressing a wide range of query semantics; the semantics differ in the degree of consistency with which results reflect the state of the environment during execution. We introduce several distinct notions of consistency and formalize them. A practical contribution of this paper is a protocol for query processing that automatically assesses and adaptively provides an achievable degree of consistency given the state of the operational environment throughout its execution. 
The protocol attaches an assessment of the achieved guarantee to returned query results, allowing precise reasoning about a query with a range of possible semantics.", + "link": "https://www.semanticscholar.org/paper/96ad43d815415e1a666ae669e8b7009ac13ee148", + "scraped_abstract": null, + "citation_best": 14 + }, + { + "paper": "2137956345", + "venue": "1199533187", + "year": "2007", + "title": "object and reference immutability using java generics", + "label": [ + "22414024", + "29808475", + "64729616", + "548217200", + "169590947", + "199360897", + "172482141", + "2776126399", + "2780492165" + ], + "author": [ + "3207013322", + "1973305793", + "2191342754", + "1517157260", + "2224213317", + "2235702021" + ], + "reference": [ + "105397881", + "141642871", + "150412037", + "1511982475", + "1557561422", + "1644882639", + "1675700398", + "1884646655", + "1964830323", + "1978303100", + "1993140586", + "2010540686", + "2033348393", + "2052087935", + "2053718235", + "2074001172", + "2076062002", + "2094958132", + "2098123460", + "2110409011", + "2111544394", + "2122987706", + "2125044336", + "2137417304", + "2138855072", + "2145708265", + "2150922760", + "2152759860", + "2153887189", + "2164122643", + "2198668404", + "3106729728" + ], + "abstract": "a compiler checked immutability guarantee provides useful documentation facilitates reasoning and enables optimizations this paper presents immutability generic java igj a novel language extension that expresses immutability without changing java s syntax by building upon java s generics and annotation mechanisms in igj each class has one additional type parameter that is immutable mutable or readonly igj guarantees both reference immutability only mutable references can mutate an object and object immutability an immutable reference points to an immutable object igj is the first proposal for enforcing object immutability within java s syntax and type system and its reference immutability is more expressive than previous 
work igj also permits covariant changes of type parameters in a type safe manner e g a readonly list of integers is a subtype of a readonly list of numbers igj extends java s type system with a few simple rules we formalize this type system and prove it sound our igj compiler works by type erasure and generates byte code that can be executed on any jvm without runtime penalty", + "title_raw": "Object and reference immutability using Java generics", + "abstract_raw": "A compiler-checked immutability guarantee provides useful documentation, facilitates reasoning, and enables optimizations. This paper presents Immutability Generic Java (IGJ), a novel language extension that expresses immutability without changing Java's syntax by building upon Java's generics and annotation mechanisms. In IGJ, each class has one additional type parameter that is Immutable, Mutable, or ReadOnly. IGJ guarantees both reference immutability (only mutable references can mutate an object) and object immutability (an immutable reference points to an immutable object). IGJ is the first proposal for enforcing object immutability within Java's syntax and type system, and its reference immutability is more expressive than previous work. IGJ also permits covariant changes of type parameters in a type-safe manner, e.g., a readonly list of integers is a subtype of a readonly list of numbers. IGJ extends Java's type system with a few simple rules. We formalize this type system and prove it sound. 
Our IGJ compiler works by type-erasure and generates byte-code that can be executed on any JVM without runtime penalty.", + "link": "https://www.semanticscholar.org/paper/b953e299e5c6a73f8ef7b327976628d9d3fec88e", + "scraped_abstract": null, + "citation_best": 92 + }, + { + "paper": "2176528885", + "venue": "1127325140", + "year": "2006", + "title": "information theoretic metric learning", + "label": [ + "80444323" + ], + "author": [ + "2121989902", + "2175266326", + "2033403132" + ], + "reference": [], + "abstract": "", + "title_raw": "Information-theoretic Metric Learning", + "abstract_raw": "", + "link": "https://www.semanticscholar.org/paper/b81381d17baf6750c09bd58e96f4660d25be9225", + "scraped_abstract": null, + "citation_best": 45 + }, + { + "paper": "2145544369", + "venue": "1174403976", + "year": "2007", + "title": "tracking code clones in evolving software", + "label": [ + "121050878", + "152752567", + "529173508", + "124101348", + "149091818", + "101317890", + "2777904410", + "43126263", + "79403827" + ], + "author": [ + "1999879993", + "2136878537" + ], + "reference": [ + "106960782", + "1577422779", + "1607163165", + "2000473405", + "2004309083", + "2013071456", + "2018906677", + "2036629691", + "2056617761", + "2057568625", + "2066792529", + "2074529754", + "2112533109", + "2121932026", + "2136694367", + "2138756793", + "2143151143", + "2151580048", + "2153546999", + "2153887189", + "2157663777", + "2519190008", + "3003280364" + ], + "abstract": "code clones are generally considered harmful in software development and the predominant approach is to try to eliminate them through refactoring however recent research has provided evidence that it may not always be practical feasible or cost effective to eliminate certain clone groups we propose a technique for tracking clones in evolving software our technique relies on the concept of abstract clone region descriptors crd which describe clone regions within methods in a robust way that is independent from 
the exact text of the clone region or its location in a file we present our definition of crds and describe a complete clone tracking system capable of producing crds from the output of a clone detection tool notify developers of modifications to clone regions and support the simultaneous editing of clone regions we report on two experiments and a case study conducted to assess the performance and usefulness of our approach", + "title_raw": "Tracking Code Clones in Evolving Software", + "abstract_raw": "Code clones are generally considered harmful in software development, and the predominant approach is to try to eliminate them through refactoring. However, recent research has provided evidence that it may not always be practical, feasible, or cost-effective to eliminate certain clone groups. We propose a technique for tracking clones in evolving software. Our technique relies on the concept of abstract clone region descriptors (CRD), which describe clone regions within methods in a robust way that is independent from the exact text of the clone region or its location in a file. We present our definition of CRDs, and describe a complete clone tracking system capable of producing CRDs from the output of a clone detection tool, notify developers of modifications to clone regions, and support the simultaneous editing of clone regions. 
We report on two experiments and a case study conducted to assess the performance and usefulness of our approach.", + "link": "https://www.semanticscholar.org/paper/f62298e272f52b160c29685cb4783620196c75c8", + "scraped_abstract": null, + "citation_best": 6 + }, + { + "paper": "2151553346", + "venue": "1174403976", + "year": "2007", + "title": "predicting faults from cached history", + "label": [ + "31258907", + "77088390", + "149091818", + "50712370", + "115537543", + "11219265", + "43126263", + "117447612" + ], + "author": [ + "2164738181", + "2014339847", + "2316675293", + "2106620385" + ], + "reference": [ + "147581537", + "168229877", + "1533432827", + "1564131673", + "1993139624", + "1995945562", + "2007705030", + "2025674334", + "2049828091", + "2100849134", + "2100945416", + "2103640219", + "2105300539", + "2108875907", + "2110385988", + "2126166995", + "2127811329", + "2141558501", + "2145103646", + "2147105902", + "2150786161", + "2152703345", + "2157353183", + "2159610968", + "2164519300", + "2171733741", + "2542246377", + "2751318774", + "3145932680" + ], + "abstract": "we analyze the version history of 7 software systems to predict the most fault prone entities and files the basic assumption is that faults do not occur in isolation but rather in bursts of several related faults therefore we cache locations that are likely to have faults starting from the location of a known fixed fault we cache the location itself any locations changed together with the fault recently added locations and recently changed locations by consulting the cache at the moment a fault is fixed a developer can detect likely fault prone locations this is useful for prioritizing verification and validation resources on the most fault prone files or entities in our evaluation of seven open source projects with more than 200 000 revisions the cache selects 10 of the source code files these files account for 73 95 of faults a significant advance beyond the state of the art", + 
"title_raw": "Predicting Faults from Cached History", + "abstract_raw": "We analyze the version history of 7 software systems to predict the most fault prone entities and files. The basic assumption is that faults do not occur in isolation, but rather in bursts of several related faults. Therefore, we cache locations that are likely to have faults: starting from the location of a known (fixed) fault, we cache the location itself, any locations changed together with the fault, recently added locations, and recently changed locations. By consulting the cache at the moment a fault is fixed, a developer can detect likely fault-prone locations. This is useful for prioritizing verification and validation resources on the most fault prone files or entities. In our evaluation of seven open source projects with more than 200,000 revisions, the cache selects 10% of the source code files; these files account for 73%-95% of faults-- a significant advance beyond the state of the art.", + "link": "https://www.semanticscholar.org/paper/60be98075fc2930997f24643f4f5ed894c0ff7f6", + "scraped_abstract": null, + "citation_best": 99 + }, + { + "paper": "2152846401", + "venue": "1174403976", + "year": "2007", + "title": "matching and merging of statecharts specifications", + "label": [ + "124246873", + "76449508", + "116253237", + "80444323" + ], + "author": [ + "2024882999", + "1134822698", + "2032435099", + "329574911", + "248253975" + ], + "reference": [ + "49766897", + "101493532", + "1501640954", + "1503973138", + "1505090407", + "1518828449", + "1543050278", + "1561929444", + "1574901103", + "1591513229", + "2004817736", + "2008896880", + "2011705879", + "2015640848", + "2040933668", + "2049142911", + "2061475652", + "2102071894", + "2107079484", + "2125822162", + "2131878172", + "2133724574", + "2134908243", + "2142120379", + "2148159886", + "2150047172", + "2153258502", + "2156817008", + "2161726635", + "2164300322", + "2168690391", + "2490061619", + "2610406808" + ], + 
"abstract": "model management addresses the problem of managing an evolving collection of models by capturing the relationships between models and providing well defined operators to manipulate them in this paper we describe two such operators for manipulating hierarchical statecharts match for finding correspondences between models and merge for combining models with respect to known correspondences between them our match operator is heuristic making use of both static and behavioural properties of the models to improve the accuracy of matching our merge operator preserves the hierarchical structure of the input models and handles differences in behaviour through parameterization in this way we automatically construct merges that preserve the semantics of statecharts models we illustrate and evaluate our work by applying our operators to at t telecommunication features", + "title_raw": "Matching and Merging of Statecharts Specifications", + "abstract_raw": "Model Management addresses the problem of managing an evolving collection of models, by capturing the relationships between models and providing well-defined operators to manipulate them. In this paper, we describe two such operators for manipulating hierarchical Statecharts: Match, for finding correspondences between models, and Merge, for combining models with respect to known correspondences between them. Our Match operator is heuristic, making use of both static and behavioural properties of the models to improve the accuracy of matching. Our Merge operator preserves the hierarchical structure of the input models, and handles differences in behaviour through parameterization. In this way, we automatically construct merges that preserve the semantics of Statecharts models. 
We illustrate and evaluate our work by applying our operators to AT&T telecommunication features.", + "link": "https://www.semanticscholar.org/paper/4fac8d99be11ab2051417bf4e97d75bf0d941131", + "scraped_abstract": null, + "citation_best": 254 + }, + { + "paper": "2115694885", + "venue": "1174403976", + "year": "2007", + "title": "refactoring for parameterizing java classes", + "label": [ + "152752567", + "70973432", + "203763787", + "548217200", + "44779574", + "199360897", + "11219265" + ], + "author": [ + "1249941522", + "2235702021", + "2164566402", + "696077761" + ], + "reference": [ + "36134198", + "194677851", + "1532892735", + "1545985476", + "1546727036", + "1608862494", + "1644882639", + "1964973627", + "2030793354", + "2032049634", + "2033405134", + "2063169006", + "2081365411", + "2096012235", + "2108690834", + "2109783046", + "2113619131", + "2137417304", + "2140647971", + "2157549449", + "2161825580" + ], + "abstract": "type safety and expressiveness of many existing java libraries and their client applications would improve if the libraries were upgraded to define generic classes ef ficient and accurate tools exist to assist client applications to use generic libraries but so far the libraries themselves must be parameterized manually which is a tedious time consuming and error prone task we present a type constraint based algorithm for converting non generic libraries to add type parameters the algorithm handles the full java language and preserves backward compatibility thus making it safe for existing clients among other features it is capable of inferring wildcard types and introducing type parameters for mutually dependent classes we have implemented the algorithm as a fully automatic refactoring in eclipse we evaluated our work in two ways first our tool parameterized code that was lacking type parameters we contacted the developers of several of these applications and in all cases they confirmed that the resulting parameterizations were correct 
and useful second to better quantify its effectiveness our tool parameterized classes from already generic libraries and we compared the results to those that were created by the libraries authors our tool performed the refactoring accurately in 87 of cases the results were as good as those created manually by a human expert in 9 of cases the tool results were better and in 4 of cases the tool results were worse", + "title_raw": "Refactoring for Parameterizing Java Classes", + "abstract_raw": "Type safety and expressiveness of many existing Java libraries and their client applications would improve, if the libraries were upgraded to define generic classes. Ef- ficient and accurate tools exist to assist client applications to use generic libraries, but so far the libraries themselves must be parameterized manually, which is a tedious, time-consuming, and error-prone task. We present a type-constraint- based algorithm for converting non-generic libraries to add type parameters. The algorithm handles the full Java language and preserves backward compatibility, thus making it safe for existing clients. Among other features, it is capable of inferring wildcard types and introducing type parameters for mutually-dependent classes. We have implemented the algorithm as a fully automatic refactoring in Eclipse. We evaluated our work in two ways. First, our tool parameterized code that was lacking type parameters. We contacted the developers of several of these applications, and in all cases they confirmed that the resulting parameterizations were correct and useful. Second, to better quantify its effectiveness, our tool parameterized classes from already-generic libraries, and we compared the results to those that were created by the libraries' authors. 
Our tool performed the refactoring accurately.in 87% of cases the results were as good as those created manually by a human expert, in 9% of cases the tool results were better, and in 4% of cases the tool results were worse.", + "link": "https://www.semanticscholar.org/paper/2778da57c3a64455811daa9799b30624051c0afb", + "scraped_abstract": null, + "citation_best": 61 + }, + { + "paper": "110738662", + "venue": "1203999783", + "year": "2007", + "title": "automated heart wall motion abnormality detection from ultrasound images using bayesian networks", + "label": [ + "153083717", + "104114177", + "31972630", + "106131492", + "33724603" + ], + "author": [ + "2034050740", + "2105735912", + "2228919358", + "2117656073", + "1960521979", + "2122268670", + "2918442955", + "2500651715" + ], + "reference": [ + "391985582", + "1513861746", + "1524326598", + "1585743408", + "1597910678", + "1678889691", + "2050978313", + "2106281167", + "2107406777", + "2110297148", + "2124488388", + "2132401137", + "2155704307", + "2167825066", + "2171507802", + "2491061016", + "2979006918", + "3026417138" + ], + "abstract": "coronary heart disease can be diagnosed by measuring and scoring regional motion of the heart wall in ultrasound images of the left ventricle lv of the heart we describe a completely automated and robust technique that detects diseased hearts based on detection and automatic tracking of the endocardium and epicardium of the lv the local wall regions and the entire heart are then classified as normal or abnormal based on the regional and global lv wall motion in order to leverage structural information about the heart we applied bayesian networks to this problem and learned the relations among the wall regions off of the data using a structure learning algorithm we checked the validity of the obtained structure using anatomical knowledge of the heart and medical rules as described by doctors the resultant bayesian network classifier depends only on a small subset of 
numerical features extracted from dual contours tracked through time and selected using a filter based approach our numerical results confirm that our system is robust and accurate on echocardiograms collected in routine clinical practice at one hospital our system is built to be used in real time", + "title_raw": "Automated heart wall motion abnormality detection from ultrasound images using Bayesian networks", + "abstract_raw": "Coronary Heart Disease can be diagnosed by measuring and scoring regional motion of the heart wall in ultrasound images of the left ventricle (LV) of the heart. We describe a completely automated and robust technique that detects diseased hearts based on detection and automatic tracking of the endocardium and epicardium of the LV. The local wall regions and the entire heart are then classified as normal or abnormal based on the regional and global LV wall motion. In order to leverage structural information about the heart we applied Bayesian Networks to this problem, and learned the relations among the wall regions off of the data using a structure learning algorithm. We checked the validity of the obtained structure using anatomical knowledge of the heart and medical rules as described by doctors. The resultant Bayesian Network classifier depends only on a small subset of numerical features extracted from dual-contours tracked through time and selected using a filter-based approach. 
Our numerical results confirm that our system is robust and accurate on echocardiograms collected in routine clinical practice at one hospital; our system is built to be used in real-time.", + "link": "https://www.semanticscholar.org/paper/88f26b718d318aa78eb80119c3e2b766636fcb92", + "scraped_abstract": null, + "citation_best": 43 + }, + { + "paper": "57770583", + "venue": "1203999783", + "year": "2007", + "title": "performance analysis of online anticipatory algorithms for large multistage stochastic integer programs", + "label": [ + "97137487", + "11413529", + "136197465" + ], + "author": [ + "2153841975", + "301883243" + ], + "reference": [ + "110292346", + "114495458", + "140607747", + "1501757556", + "1512919909", + "1547500142", + "1552828154", + "1569990960", + "1581952794", + "1586087770", + "1997549165", + "2009533501", + "2064227123", + "2079001714" + ], + "abstract": "despite significant algorithmic advances in recent years finding optimal policies for large scale multistage stochastic combinatorial optimization problems remains far beyond the reach of existing methods this paper studies a complementary approach online anticipatory algorithms that make decisions at each step by solving the anticipatory relaxation for a polynomial number of scenarios online anticipatory algorithms have exhibited surprisingly good results on a variety of applications and this paper aims at understanding their success in particular the paper derives sufficient conditions under which online anticipatory algorithms achieve good expected utility and studies the various types of errors arising in the algorithms including the anticipativity and sampling errors the sampling error is shown to be negligible with a logarithmic number of scenarios the anticipativity error is harder to bound and is shown to be low both theoretically and experimentally for the existing applications", + "title_raw": "Performance analysis of online anticipatory algorithms for large multistage stochastic 
integer programs", + "abstract_raw": "Despite significant algorithmic advances in recent years, finding optimal policies for large-scale, multistage stochastic combinatorial optimization problems remains far beyond the reach of existing methods. This paper studies a complementary approach, online anticipatory algorithms, that make decisions at each step by solving the anticipatory relaxation for a polynomial number of scenarios. Online anticipatory algorithms have exhibited surprisingly good results on a variety of applications and this paper aims at understanding their success. In particular, the paper derives sufficient conditions under which online anticipatory algorithms achieve good expected utility and studies the various types of errors arising in the algorithms including the anticipativity and sampling errors. The sampling error is shown to be negligible with a logarithmic number of scenarios. The anticipativity error is harder to bound and is shown to be low, both theoretically and experimentally, for the existing applications.", + "link": "https://www.semanticscholar.org/paper/0557a87dbfe64d4f93b8352c89b1336a78d4b895", + "scraped_abstract": null, + "citation_best": 35 + }, + { + "paper": "1552944591", + "venue": "1203999783", + "year": "2007", + "title": "building structure into local search for sat", + "label": [ + "27574286", + "19889080", + "139979381", + "124145224", + "135320971", + "90189156", + "127705205" + ], + "author": [ + "2038326495", + "2122301133", + "2173091977" + ], + "reference": [ + "30950413", + "119819022", + "1496083386", + "1519526968", + "1546338010", + "1571393741", + "1598839320", + "1600919542", + "1667614912", + "1950282396", + "1978526866", + "2011055096", + "2096307462", + "2098508444", + "2610670194" + ], + "abstract": "local search procedures for solving satisfiability problems have attracted considerable attention since the development of gsat in 1992 however recentwork indicates that for many real world problems complete 
search methods have the advantage because modern heuristics are able to effectively exploit problem structure indeed to develop a local search technique that can effectively deal with variable dependencies has been an open challenge since 1997 in this paper we show that local search techniques can effectively exploit information about problem structure producing significant improvements in performance on structured problem instances building on the earlier work of ostrowski et al we describe how information about variable dependencies can be built into a local search so that only independent variables are considered for flipping the cost effect of a flip is then dynamically calculated using a dependency lattice that models dependent variables using gates specifically and or and equivalence gates the experimental study on hard structured benchmark problems demonstrates that our new approach significantly outperforms the previously reported best local search techniques", + "title_raw": "Building structure into local search for SAT", + "abstract_raw": "Local search procedures for solving satisfiability problems have attracted considerable attention since the development of GSAT in 1992. However, recentwork indicates that for many real-world problems, complete search methods have the advantage, because modern heuristics are able to effectively exploit problem structure. Indeed, to develop a local search technique that can effectively deal with variable dependencies has been an open challenge since 1997.\r\n\r\nIn this paper we show that local search techniques can effectively exploit information about problem structure producing significant improvements in performance on structured problem instances. Building on the earlier work of Ostrowski et al. we describe how information about variable dependencies can be built into a local search, so that only independent variables are considered for flipping. 
The cost effect of a flip is then dynamically calculated using a dependency lattice that models dependent variables using gates (specifically and, or and equivalence gates). The experimental study on hard structured benchmark problems demonstrates that our new approach significantly outperforms the previously reported best local search techniques.", + "link": "https://www.semanticscholar.org/paper/59e37114b2dc188d1bdab91e1cb25c09f65706a5", + "scraped_abstract": null, + "citation_best": 39 + }, + { + "paper": "1576549127", + "venue": "1158363782", + "year": "2007", + "title": "life death and the critical transition finding liveness bugs in systems code", + "label": [ + "26713055", + "114466953", + "15569618", + "94240970", + "199360897", + "2777904410", + "194147245", + "120314980", + "136085584", + "127705205" + ], + "author": [ + "2122227550", + "2143108988", + "2068611990", + "295064773" + ], + "reference": [ + "24619462", + "74242707", + "207759855", + "658910371", + "1497571013", + "1511405608", + "1516068469", + "1539432158", + "1541969962", + "1568471959", + "1577904438", + "1590315663", + "1593428110", + "1596365597", + "1966982815", + "2017560506", + "2040060046", + "2054198444", + "2065675749", + "2082000355", + "2105310795", + "2115309705", + "2116989825", + "2117009500", + "2118428193", + "2119736157", + "2124877509", + "2124909257", + "2129538349", + "2134890934", + "2135274583", + "2145326461", + "2158049821", + "2158870716", + "2167898414", + "2171581420", + "2295903414", + "2913459036" + ], + "abstract": "modern software model checkers find safety violations breaches where the system enters some bad state however we argue that checking liveness properties offers both a richer and more natural way to search for errors particularly in complex concurrent and distributed systems liveness properties specify desirable system behaviors which must be satisfied eventually but are not always satisfied perhaps as a result of failure or during system 
initialization existing software model checkers cannot verify liveness because doing so requires finding an infinite execution that does not satisfy a liveness property we present heuristics to find a large class of liveness violations and the critical transition of the execution the critical transition is the step in an execution that moves the system from a state that does not currently satisfy some liveness property but where recovery is possible in the future to a dead state that can never achieve the liveness property our software model checker macemc isolates complex liveness errors in our implementations of pastry chord a reliable transport protocol and an overlay tree", + "title_raw": "Life, death, and the critical transition: finding liveness bugs in systems code", + "abstract_raw": "Modern software model checkers find safety violations: breaches where the system enters some bad state. However, we argue that checking liveness properties offers both a richer and more natural way to search for errors, particularly in complex concurrent and distributed systems. Liveness properties specify desirable system behaviors which must be satisfied eventually, but are not always satisfied, perhaps as a result of failure or during system initialization.\r\n\r\nExisting software model checkers cannot verify liveness because doing so requires finding an infinite execution that does not satisfy a liveness property. We present heuristics to find a large class of liveness violations and the critical transition of the execution. The critical transition is the step in an execution that moves the system from a state that does not currently satisfy some liveness property--but where recovery is possible in the future--to a dead state that can never achieve the liveness property. 
Our software model checker, MACEMC, isolates complex liveness errors in our implementations of PASTRY, CHORD, a reliable transport protocol, and an overlay tree.", + "link": "https://www.semanticscholar.org/paper/7956a6fbc5c8030256ef5e6a8a5dde952f06f0bd", + "scraped_abstract": null, + "citation_best": 184 + }, + { + "paper": "2071286129", + "venue": "1127352206", + "year": "2007", + "title": "fault tolerant typed assembly language", + "label": [ + "63540848", + "149635348", + "113775141", + "75606506", + "50712370", + "2775836774", + "152124472", + "116253237", + "156325763", + "2777904410" + ], + "author": [ + "2126079967", + "2134505947", + "2151904487", + "1998594138", + "2185216079", + "2161850330" + ], + "reference": [ + "69024957", + "1968456054", + "1981514768", + "2021548867", + "2034593585", + "2053578304", + "2069107692", + "2102480715", + "2102863623", + "2116015411", + "2116059696", + "2116991991", + "2118582701", + "2125169487", + "2125369517", + "2130189691", + "2131529479", + "2144014903", + "2145064068", + "2153554709", + "2153837329", + "2155527402", + "2160590289", + "2161549238", + "2169213530", + "2169596872" + ], + "abstract": "a transient hardware fault occurs when an energetic particle strikes a transistor causing it to change state although transient faults do not permanently damage the hardware they may corrupt computations by altering stored values and signal transfers in this paper we propose a new scheme for provably safe and reliable computing in the presence of transient hardware faults in our scheme software computations are replicated to provide redundancy while special instructions compare the independently computed results to detect errors before writing critical data in stark contrast to any previous efforts in this area we have analyzed our fault tolerance scheme from a formal theoretical perspective to be specific first we provide an operational semantics for our assembly language which includes a precise formal definition of 
our fault model second we develop an assembly level type system designed to detect reliability problems in compiled code third we provide a formal specification for program fault tolerance under the given fault model and prove that all well typed programs are indeed fault tolerant in addition to the formal analysis we evaluate our detection scheme and show that it only takes 34 longer to execute than the unreliable version", + "title_raw": "Fault-tolerant typed assembly language", + "abstract_raw": "A transient hardware fault occurs when an energetic particle strikes a transistor, causing it to change state. Although transient faults do not permanently damage the hardware, they may corrupt computations by altering stored values and signal transfers. In this paper, we propose a new scheme for provably safe and reliable computing in the presence of transient hardware faults. In our scheme, software computations are replicated to provide redundancy while special instructions compare the independently computed results to detect errors before writing critical data. In stark contrast to any previous efforts in this area, we have analyzed our fault tolerance scheme from a formal, theoretical perspective. To be specific, first, we provide an operational semantics for our assembly language, which includes a precise formal definition of our fault model. Second, we develop an assembly-level type system designed to detect reliability problems in compiled code. Third, we provide a formal specification for program fault tolerance under the given fault model and prove that all well-typed programs are indeed fault tolerant. 
In addition to the formal analysis, we evaluate our detection scheme and show that it only takes 34% longer to execute than the unreliable version.", + "link": "https://www.semanticscholar.org/paper/8bead0e8bfa7142f6eaf346b68e53b5362685e20", + "scraped_abstract": null, + "citation_best": 36 + }, + { + "paper": "2153578567", + "venue": "1127352206", + "year": "2007", + "title": "the ant and the grasshopper fast and accurate pointer analysis for millions of lines of code", + "label": [ + "98183937", + "161969638", + "169590947", + "162319229", + "7263679", + "11413529", + "199519371" + ], + "author": [ + "673169841", + "2188667897" + ], + "reference": [ + "1507552563", + "1553894716", + "1891117775", + "1980468060", + "1996146601", + "2002771912", + "2029674270", + "2046699259", + "2050320220", + "2065088439", + "2069476565", + "2075309900", + "2078739669", + "2080267935", + "2087612811", + "2095115578", + "2096587139", + "2107089133", + "2118382442", + "2125725994", + "2131135493", + "2145913834", + "2149237601", + "2150138144", + "2151360539", + "2156729262", + "2158600037", + "2166341446" + ], + "abstract": "pointer information is a prerequisite for most program analyses and the quality of this information can greatly affect their precision and performance inclusion based i e andersen style pointer analysis is an important point in the space of pointer analyses offering a potential sweet spot in the trade off between precision and performance however current techniques for inclusion based pointer analysis can have difficulties delivering on this potential we introduce and evaluate two novel techniques for inclusion based pointer analysis one lazy one eager1 that significantly improve upon the current state of the art without impacting precision these techniques focus on the problem of online cycle detection a critical optimization for scaling such analyses using a suite of six open source c programs which range in size from 169k to 2 17m loc we compare our 
techniques against the three best inclusion based analyses described by heintze and tardieu 11 by pearce et al 21 and by berndl et al 4 the combination of our two techniques results in an algorithm which is on average 3 2 xfaster than heintze and tardieu s algorithm 6 4 xfaster than pearce et al s algorithm and 20 6 faster than berndl et al s algorithm we also investigate the use of different data structures to represent points to sets examining the impact on both performance and memory consumption we compare a sparse bitmap implementation used in the gcc compiler with a bdd based implementation and we find that the bdd implementation is on average 2x slower than using sparse bitmaps but uses 5 5x less memory", + "title_raw": "The ant and the grasshopper: fast and accurate pointer analysis for millions of lines of code", + "abstract_raw": "Pointer information is a prerequisite for most program analyses, and the quality of this information can greatly affect their precision and performance. Inclusion-based (i.e. Andersen-style) pointer analysis is an important point in the space of pointer analyses, offering a potential sweet-spot in the trade-off between precision and performance. However, current techniques for inclusion-based pointer analysis can have difficulties delivering on this potential. We introduce and evaluate two novel techniques for inclusion-based pointer analysis---one lazy, one eager1---that significantly improve upon the current state-of-the-art without impacting precision. These techniques focus on the problem of online cycle detection, a critical optimization for scaling such analyses. Using a suite of six open-source C programs, which range in size from 169K to 2.17M LOC, we compare our techniques against the three best inclusion-based analyses--described by Heintze and Tardieu [11], by Pearce et al. [21], and by Berndl et al. [4]. 
The combination of our two techniques results in an algorithm which is on average 3.2 xfaster than Heintze and Tardieu's algorithm, 6.4 xfaster than Pearce et al.'s algorithm, and 20.6 faster than Berndl et al.'s algorithm. We also investigate the use of different data structures to represent points-to sets, examining the impact on both performance and memory consumption. We compare a sparse-bitmap implementation used in the GCC compiler with a BDD-based implementation, and we find that the BDD implementation is on average 2x slower than using sparse bitmaps but uses 5.5x less memory.", + "link": "https://www.semanticscholar.org/paper/7870ed35b5402c3828db81ac3840709bc52dab02", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2149156280", + "venue": "1140684652", + "year": "2007", + "title": "studying the use of popular destinations to enhance web search interaction", + "label": [ + "166423231", + "14838553", + "164120249", + "118689300", + "97854310", + "78999398", + "136764020", + "23123220" + ], + "author": [ + "2096583854", + "1963524793", + "686107167" + ], + "reference": [ + "47069862", + "1508509952", + "1973006075", + "1978394996", + "1981131819", + "1982451429", + "1982889956", + "1985844568", + "2037140704", + "2041126289", + "2042949695", + "2104115112", + "2112769706", + "2125771191", + "2131567213", + "2134131174", + "2136661411", + "2143257555", + "2148467943", + "2158450083", + "2170741935" + ], + "abstract": "we present a novel web search interaction feature which for a given query provides links to websites frequently visited by other users with similar information needs these popular destinations complement traditional search results allowing direct navigation to authoritative resources for the query topic destinations are identified using the history of search and browsing behavior of many users over an extended time period whose collective behavior provides a basis for computing source authority we describe a user study which 
compared the suggestion of destinations with the previously proposed suggestion of related queries as well as with traditional unaided web search results show that search enhanced by destination suggestions outperforms other systems for exploratory tasks with best performance obtained from mining past user behavior at query level granularity", + "title_raw": "Studying the use of popular destinations to enhance web search interaction", + "abstract_raw": "We present a novel Web search interaction feature which, for a given query, provides links to websites frequently visited by other users with similar information needs. These popular destinations complement traditional search results, allowing direct navigation to authoritative resources for the query topic. Destinations are identified using the history of search and browsing behavior of many users over an extended time period, whose collective behavior provides a basis for computing source authority. We describe a user study which compared the suggestion of destinations with the previously proposed suggestion of related queries, as well as with traditional, unaided Web search. 
Results show that search enhanced by destination suggestions outperforms other systems for exploratory tasks, with best performance obtained from mining past user behavior at query-level granularity.", + "link": "https://www.semanticscholar.org/paper/23d732e8d81457fa1b2149ebef32a62fd8c7b36d", + "scraped_abstract": null, + "citation_best": 201 + }, + { + "paper": "2066262685", + "venue": "1131589359", + "year": "2007", + "title": "modeling the relative fitness of storage", + "label": [ + "2778476105", + "60509570", + "44154836", + "127571174", + "94966114" + ], + "author": [ + "2762381614", + "2150833760", + "688737622", + "2775942825", + "2791808" + ], + "reference": [ + "106162958", + "137206813", + "146745255", + "148271516", + "1480376833", + "1541482680", + "1550594210", + "1564519640", + "1569184546", + "1573904891", + "1577629062", + "1582414373", + "1594031697", + "1672988363", + "1899676114", + "1903201094", + "1915537773", + "1967141605", + "1975611596", + "1978264569", + "1980913989", + "1990911977", + "1998927861", + "2002099427", + "2005292817", + "2005373714", + "2008775846", + "2010488817", + "2011039300", + "2012706015", + "2023666014", + "2026102701", + "2035606364", + "2054838073", + "2055631879", + "2062225012", + "2062617656", + "2064254674", + "2065077642", + "2068689036", + "2070177045", + "2072278885", + "2072636268", + "2081642319", + "2088590034", + "2093477600", + "2095724615", + "2097311696", + "2100785383", + "2101105988", + "2101343419", + "2106292145", + "2109188700", + "2110631345", + "2124288146", + "2124349089", + "2127425314", + "2128458688", + "2128946566", + "2129764333", + "2134390259", + "2137562435", + "2139882667", + "2145056667", + "2146603609", + "2148702461", + "2150848585", + "2159081928", + "2164009255", + "2166709280", + "2167819041", + "2168395296", + "2168581225", + "2477400917", + "2493403980", + "2917663763", + "3085162807", + "3137496607", + "3152813856", + "3162989687" + ], + "abstract": "relative fitness is a new 
black box approach to modeling the performance of storage devices in contrast with an absolute model that predicts the performance of a workload on a given storage device a relative fitness model predicts performance differences between a pair of devices there are two primary advantages to this approach first because are lative fitness model is constructed for a device pair the application device feedback of a closed workload can be captured e g how the i o arrival rate changes as the workload moves from device a to device b second a relative fitness model allows performance and resource utilization to be used in place of workload characteristics this is beneficial when workload characteristics are difficult to obtain or concisely express e g rather than describe the spatio temporal characteristics of a workload one could use the observed cache behavior of device a to help predict the performance of b this paper describes the steps necessary to build a relative fitness model with an approach that is general enough to be used with any black box modeling technique we compare relative fitness models and absolute models across a variety of workloads and storage devices on average relative fitness models predict bandwidth and throughput within 10 20 and can reduce prediction error by as much as a factor of two when compared to absolute models", + "title_raw": "Modeling the relative fitness of storage", + "abstract_raw": "Relative fitness is a new black-box approach to modeling the performance of storage devices. In contrast with an absolute model that predicts the performance of a workload on a given storage device, a relative fitness model predicts performance differences between a pair of devices. There are two primary advantages to this approach. 
First, because are lative fitness model is constructed for a device pair, the application-device feedback of a closed workload can be captured (e.g., how the I/O arrival rate changes as the workload moves from device A to device B). Second, a relative fitness model allows performance and resource utilization to be used in place of workload characteristics. This is beneficial when workload characteristics are difficult to obtain or concisely express (e.g., rather than describe the spatio-temporal characteristics of a workload, one could use the observed cache behavior of device A to help predict the performance of B. This paper describes the steps necessary to build a relative fitness model, with an approach that is general enough to be used with any black-box modeling technique. We compare relative fitness models and absolute models across a variety of workloads and storage devices. On average, relative fitness models predict bandwidth and throughput within 10-20% and can reduce prediction error by as much as a factor of two when compared to absolute models.", + "link": "https://www.semanticscholar.org/paper/d1e02642a5baa5a10fca8892dc5e6f39f1e131d1", + "scraped_abstract": null, + "citation_best": 15 + }, + { + "paper": "2077449561", + "venue": "1175089206", + "year": "2007", + "title": "compiling mappings to bridge applications and databases", + "label": [ + "54239708", + "5968703", + "77088390", + "1668388", + "47487241", + "148840519", + "56310702", + "5655090", + "30775581" + ], + "author": [ + "2805592821", + "2022497718", + "2304774990" + ], + "reference": [ + "30253544", + "1489179195", + "1500132926", + "1500615737", + "1505216615", + "1508902783", + "1556076816", + "1583928555", + "1802428003", + "1943411325", + "1985223393", + "2000616678", + "2005318605", + "2020228538", + "2024796520", + "2032339126", + "2045141511", + "2046444514", + "2074669964", + "2080267935", + "2088675571", + "2090958287", + "2101498152", + "2113582770", + "2116476518", + 
"2120127771", + "2121447816", + "2125030020", + "2125713144", + "2140037779", + "2144192824", + "2144878221", + "2146462464", + "2152191782", + "2153670461", + "2155419928", + "2157812994", + "2158595459", + "2160983447", + "2167764264", + "2169207323", + "2997050146" + ], + "abstract": "translating data and data access operations between applications and databases is a longstanding data management problem we present a novel approach to this problem in which the relationship between the application data and the persistent storage is specified using a declarative mapping which is compiled into bidirectional views that drive the data transformation engine expressing the application model as a view on the database is used to answer queries while viewing the database in terms of the application model allows us to leverage view maintenance algorithms for update translation this approach has been implemented in a commercial product it enables developers to interact with a relational database via a conceptual schema and an object oriented programming surface we outline the implemented system and focus on the challenges of mapping compilation which include rewriting queries under constraints and supporting non relational constructs", + "title_raw": "Compiling mappings to bridge applications and databases", + "abstract_raw": "Translating data and data access operations between applications and databases is a longstanding data management problem. We present a novel approach to this problem, in which the relationship between the application data and the persistent storage is specified using a declarative mapping, which is compiled into bidirectional views that drive the data transformation engine. Expressing the application model as a view on the database is used to answer queries, while viewing the database in terms of the application model allows us to leverage view maintenance algorithms for update translation. This approach has been implemented in a commercial product. 
It enables developers to interact with a relational database via a conceptual schema and an object oriented programming surface. We outline the implemented system and focus on the challenges of mapping compilation, which include rewriting queries under constraints and supporting non-relational constructs.", + "link": "https://www.semanticscholar.org/paper/90ef06729fe0c5ade5b10e5ee63f982f94edbda2", + "scraped_abstract": null, + "citation_best": 50 + }, + { + "paper": "2035801804", + "venue": "1175089206", + "year": "2007", + "title": "scalable approximate query processing with the dbo engine", + "label": [ + "140779682", + "48044578", + "124101348", + "2780632077", + "157692150", + "5655090", + "120894424", + "24028149" + ], + "author": [ + "281056960", + "2052427730", + "2142279450", + "2074162063" + ], + "reference": [ + "1513100972", + "1679918387", + "1858414509", + "1964857063", + "2020147322", + "2020584928", + "2073479529", + "2095891890", + "2107422095", + "2132808937", + "2153834102", + "2154165046", + "2162569193", + "2167811976", + "2169486917", + "2296677182" + ], + "abstract": "this paper describes query processing in the dbo database system like other database systems designed for ad hoc analytic processing dbo is able to compute the exact answer to queries over a large relational database in a scalable fashion unlike any other system designed for analytic processing dbo can constantly maintain a guess as to the final answer to an aggregate query throughout execution along with statistically meaningful bounds for the guess s accuracy as dbo gathers more and more information the guess gets more and more accurate until it is 100 accurate as the query is completed this allows users to stop the execution at any time that they are happy with the query accuracy and encourages exploratory data analysis", + "title_raw": "Scalable approximate query processing with the DBO engine", + "abstract_raw": "This paper describes query processing in the DBO database 
system. Like other database systems designed for ad-hoc, analytic processing, DBO is able to compute the exact answer to queries over a large relational database in a scalable fashion. Unlike any other system designed for analytic processing, DBO can constantly maintain a guess as to the final answer to an aggregate query throughout execution, along with statistically meaningful bounds for the guess's accuracy. As DBO gathers more and more information, the guess gets more and more accurate, until it is 100% accurate as the query is completed. This allows users to stop the execution at any time that they are happy with the query accuracy, and encourages exploratory data analysis.", + "link": "https://www.semanticscholar.org/paper/b331ea79e42aafcc3217699320b54409e2d3babd", + "scraped_abstract": null, + "citation_best": 80 + }, + { + "paper": "2108026089", + "venue": "1171178643", + "year": "2007", + "title": "sinfonia a new paradigm for building scalable distributed systems", + "label": [ + "133875982", + "854659", + "111009948", + "48044578", + "144240696", + "47487241", + "162319229", + "2780940931", + "120314980" + ], + "author": [ + "2396404779", + "2159367053", + "2140057800", + "2134703114", + "741433898" + ], + "reference": [ + "41850619", + "51427095", + "299321315", + "1497150730", + "1524414985", + "1581438674", + "1581766414", + "1600328410", + "1929853380", + "1978254772", + "1991494135", + "1992479210", + "2041315700", + "2075854425", + "2092911542", + "2096247846", + "2097589646", + "2103527534", + "2104210894", + "2105055683", + "2108204150", + "2110020044", + "2113751407", + "2114407468", + "2116142433", + "2118828464", + "2119565742", + "2122465391", + "2130264930", + "2131929623", + "2133373086", + "2133943294", + "2138180780", + "2151791866", + "2155323584", + "2165900899", + "2169187571", + "2170892031", + "2624304035" + ], + "abstract": "we propose a new paradigm for building scalable distributed systems our approach does not require dealing with 
message passing protocols a major complication in existing distributed systems instead developers just design and manipulate data structures within our service called sinfonia sinfonia keeps data for applications on a set of memory nodes each exporting a linear address space at the core of sinfonia is a novel minitransaction primitive that enables efficient and consistent access to data while hiding the complexities that arise from concurrency and failures using sinfonia we implemented two very different and complex applications in a few months a cluster file system and a group communication service our implementations perform well and scale to hundreds of machines", + "title_raw": "Sinfonia: a new paradigm for building scalable distributed systems", + "abstract_raw": "We propose a new paradigm for building scalable distributed systems. Our approach does not require dealing with message-passing protocols -- a major complication in existing distributed systems. Instead, developers just design and manipulate data structures within our service called Sinfonia. Sinfonia keeps data for applications on a set of memory nodes, each exporting a linear address space. At the core of Sinfonia is a novel minitransaction primitive that enables efficient and consistent access to data, while hiding the complexities that arise from concurrency and failures. Using Sinfonia, we implemented two very different and complex applications in a few months: a cluster file system and a group communication service. 
Our implementations perform well and scale to hundreds of machines.", + "link": "https://www.semanticscholar.org/paper/fcc5e3ea26ead374905ce9c88cea48f0a7c9b600", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2139359217", + "venue": "1171178643", + "year": "2007", + "title": "zyzzyva speculative byzantine fault tolerance", + "label": [ + "63540848", + "91062100", + "141331961", + "168021876", + "17532199", + "153180980", + "49265948", + "70440993", + "120314980" + ], + "author": [ + "1997971539", + "2292936287", + "2051666199", + "2121586883", + "2096509916" + ], + "reference": [ + "201372435", + "1495653136", + "1540879848", + "1540902136", + "1565495482", + "1971991620", + "1995626000", + "2003214215", + "2035362408", + "2047418149", + "2053903896", + "2058322902", + "2061819031", + "2075854425", + "2077409330", + "2080288192", + "2088221489", + "2097829252", + "2099323800", + "2101939036", + "2112161048", + "2114579022", + "2117289367", + "2117441593", + "2118033476", + "2119565742", + "2121510533", + "2122142939", + "2124877509", + "2126087831", + "2126789306", + "2126924915", + "2129467152", + "2138509363", + "2147081775", + "2147524598", + "2152465173", + "2157762234", + "2167338470", + "2168393938", + "2169213530", + "2295705535", + "2620706897", + "2734571963", + "3137092842", + "3137220996", + "3151869053" + ], + "abstract": "we present zyzzyva a protocol that uses speculation to reduce the cost and simplify the design of byzantine fault tolerant state machine replication in zyzzyva replicas respond to a client s request without first running an expensive three phase commit protocol to reach agreement on the order in which the request must be processed instead they optimistically adopt the order proposed by the primary and respond immediately to the client replicas can thus become temporarily inconsistent with one another but clients detect inconsistencies help correct replicas converge on a single total ordering of requests and only 
rely on responses that are consistent with this total order this approach allows zyzzyva to reduce replication overheads to near their theoretical minimal", + "title_raw": "Zyzzyva: speculative byzantine fault tolerance", + "abstract_raw": "We present Zyzzyva, a protocol that uses speculation to reduce the cost and simplify the design of Byzantine fault tolerant state machine replication. In Zyzzyva, replicas respond to a client's request without first running an expensive three-phase commit protocol to reach agreement on the order in which the request must be processed. Instead, they optimistically adopt the order proposed by the primary and respond immediately to the client. Replicas can thus become temporarily inconsistent with one another, but clients detect inconsistencies, help correct replicas converge on a single total ordering of requests, and only rely on responses that are consistent with this total order. This approach allows Zyzzyva to reduce replication overheads to near their theoretical minimal.", + "link": "https://www.semanticscholar.org/paper/07e239123b3c50910e35607479d999d220985a6c", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2166510103", + "venue": "1171178643", + "year": "2007", + "title": "secure web applications via automatic partitioning", + "label": [ + "111919701", + "133162039", + "2144512", + "77088390", + "146870623", + "154526789", + "180232117", + "151578736", + "43126263", + "50951305" + ], + "author": [ + "2105167831", + "2141013874", + "2141746463", + "2224423105", + "2175309971", + "2158099478", + "2421114606" + ], + "reference": [ + "27720104", + "109951691", + "1480225633", + "1488890761", + "1511560695", + "1518533182", + "1582331515", + "1812582761", + "1983142587", + "2001693166", + "2043416466", + "2045144252", + "2060857434", + "2066583243", + "2075139806", + "2085925880", + "2102765175", + "2121496786", + "2126126443", + "2129278597", + "2136025860", + "2136327713", + "2148144728", + 
"2154564703", + "2158126684", + "2161433768", + "2294311096", + "2339690115", + "2611804663", + "2752885492" + ], + "abstract": "swift is a new principled approach to building web applications that are secure by construction in modern web applications some application functionality is usually implemented as client side code written in javascript moving code and data to the client can create security vulnerabilities but currently there are no good methods for deciding when it is secure to do so swift automatically partitions application code while providing assurance that the resulting placement is secure and efficient application code is written as java like code annotated with information flow policies that specify the confidentiality and integrity of web application information the compiler uses these policies to automatically partition the program into javascript code running in the browser and java code running on the server to improve interactive performance code and data are placed on the client side however security critical code and data are always placed on the server code and data can also be replicated across the client and server to obtain both security and performance a max flow algorithm is used to place code and data in a way that minimizes client server communication", + "title_raw": "Secure web applications via automatic partitioning", + "abstract_raw": "Swift is a new, principled approach to building web applications that are secure by construction. In modern web applications, some application functionality is usually implemented as client-side code written in JavaScript. Moving code and data to the client can create security vulnerabilities, but currently there are no good methods for deciding when it is secure to do so. Swift automatically partitions application code while providing assurance that the resulting placement is secure and efficient. 
Application code is written as Java-like code annotated with information flow policies that specify the confidentiality and integrity of web application information. The compiler uses these policies to automatically partition the program into JavaScript code running in the browser, and Java code running on the server. To improve interactive performance, code and data are placed on the client side. However, security-critical code and data are always placed on the server. Code and data can also be replicated across the client and server, to obtain both security and performance. A max-flow algorithm is used to place code and data in a way that minimizes client-server communication.", + "link": "https://www.semanticscholar.org/paper/ebd7e7e440d953948895acce35b388d599d47bb2", + "scraped_abstract": null, + "citation_best": 245 + }, + { + "paper": "2140280838", + "venue": "1166315290", + "year": "2007", + "title": "thinsight versatile multi touch sensing for thin form factor displays", + "label": [ + "2780152918", + "2776865275", + "128019096", + "31972630", + "133489148", + "2780753109" + ], + "author": [ + "2148014207", + "2098553916", + "2168200088", + "1255462760", + "2893200430" + ], + "reference": [ + "1965447681", + "1967451823", + "1968211101", + "1993853297", + "1995133527", + "2005198142", + "2008150314", + "2011688076", + "2029201173", + "2044109592", + "2049655377", + "2057088091", + "2090293796", + "2140982079", + "2148819007", + "2158707444" + ], + "abstract": "thinsight is a novel optical sensing system fully integrated into a thin form factor display capable of detecting multi ple fingers placed on or near the display surface we describe this new hardware in detail and demonstrate how it can be embedded behind a regular lcd allowing sensing without degradation of display capability with our approach fingertips and hands are clearly identifiable through the display the approach of optical sensing also opens up the exciting possibility for detecting other 
physical objects and visual markers through the display and some initial experiments are described we also discuss other novel capabilities of our system interaction at a distance using ir pointing devices and ir based communication with other electronic devices through the display a major advantage of thinsight over existing camera and projector based optical systems is its compact thin form factor making such systems even more deployable we therefore envisage using thinsight to capture rich sensor data through the display which can be processed using computer vision techniques to enable both multi touch and tangible interaction", + "title_raw": "ThinSight: versatile multi-touch sensing for thin form-factor displays", + "abstract_raw": "ThinSight is a novel optical sensing system, fully integrated into a thin form factor display, capable of detecting multi-ple fingers placed on or near the display surface. We describe this new hardware in detail, and demonstrate how it can be embedded behind a regular LCD, allowing sensing without degradation of display capability. With our approach, fingertips and hands are clearly identifiable through the display. The approach of optical sensing also opens up the exciting possibility for detecting other physical objects and visual markers through the display, and some initial experiments are described. We also discuss other novel capabilities of our system: interaction at a distance using IR pointing devices, and IR-based communication with other electronic devices through the display. A major advantage of ThinSight over existing camera and projector based optical systems is its compact, thin form-factor making such systems even more deployable. 
We therefore envisage using ThinSight to capture rich sensor data through the display which can be processed using computer vision techniques to enable both multi-touch and tangible interaction.", + "link": "https://www.semanticscholar.org/paper/038580ef558ae5f51071bad1cecb67e6f0e197ef", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2140613126", + "venue": "1133523790", + "year": "2007", + "title": "scalable semantic web data management using vertical partitioning", + "label": [ + "78923513", + "534406577", + "69075417", + "157595922", + "147497476", + "77088390", + "2129575", + "1668388", + "182321512", + "148792806", + "162005631", + "15657843", + "41009113", + "167379230" + ], + "author": [ + "2170206103", + "2157570195", + "2139913828", + "2876354731" + ], + "reference": [ + "29172951", + "97077349", + "125925666", + "163708704", + "1499327381", + "1504439773", + "1539367018", + "1547206386", + "1550571039", + "1570477871", + "1812636409", + "2021284998", + "2063601856", + "2068644217", + "2099427938", + "2112132204", + "2123686039", + "2124851765", + "2134547754", + "2134964460", + "2138489156", + "2144839430", + "2145055832", + "2157979971", + "2165286227", + "3138367763" + ], + "abstract": "efficient management of rdf data is an important factor in realizing the semantic web vision performance and scalability issues are becoming increasingly pressing as semantic web technology is applied to real world applications in this paper we examine the reasons why current data management solutions for rdf data scale poorly and explore the fundamental scalability limitations of these approaches we review the state of the art for improving performance for rdf databases and consider a recent suggestion property tables we then discuss practically and empirically why this solution has undesirable features as an improvement we propose an alternative solution vertically partitioning the rdf data we compare the performance of vertical partitioning with 
prior art on queries generated by a web based rdf browser over a large scale more than 50 million triples catalog of library data our results show that a vertical partitioned schema achieves similar performance to the property table technique while being much simpler to design further if a column oriented dbms a database architected specially for the vertically partitioned case is used instead of a row oriented dbms another order of magnitude performance improvement is observed with query times dropping from minutes to several seconds", + "title_raw": "Scalable semantic web data management using vertical partitioning", + "abstract_raw": "Efficient management of RDF data is an important factor in realizing the Semantic Web vision. Performance and scalability issues are becoming increasingly pressing as Semantic Web technology is applied to real-world applications. In this paper, we examine the reasons why current data management solutions for RDF data scale poorly, and explore the fundamental scalability limitations of these approaches. We review the state of the art for improving performance for RDF databases and consider a recent suggestion, \"property tables.\" We then discuss practically and empirically why this solution has undesirable features. As an improvement, we propose an alternative solution: vertically partitioning the RDF data. We compare the performance of vertical partitioning with prior art on queries generated by a Web-based RDF browser over a large-scale (more than 50 million triples) catalog of library data. Our results show that a vertical partitioned schema achieves similar performance to the property table technique while being much simpler to design. 
Further, if a column-oriented DBMS (a database architected specially for the vertically partitioned case) is used instead of a row-oriented DBMS, another order of magnitude performance improvement is observed, with query times dropping from minutes to several seconds.", + "link": "https://www.semanticscholar.org/paper/f9dfb80bf4078e3707e6e2bc311bb694f382f890", + "scraped_abstract": null, + "citation_best": 657 + }, + { + "paper": "2163263459", + "venue": "1135342153", + "year": "2007", + "title": "wherefore art thou r3579x anonymized social networks hidden patterns and structural steganography", + "label": [ + "41065033", + "108801101", + "86256295", + "136764020" + ], + "author": [ + "2096207090", + "208343995", + "2261367123" + ], + "reference": [ + "1616788770", + "1641403162", + "1873763122", + "1975937116", + "1999602050", + "2010523825", + "2013003500", + "2030724586", + "2033314190", + "2045472305", + "2049607688", + "2064906988", + "2068871408", + "2110868467", + "2115049345", + "2118425947", + "2120806354", + "2122710250", + "2128906841", + "2130099852", + "2147952642", + "2159024459", + "2162450625", + "2432978112", + "2905110430", + "2911978475" + ], + "abstract": "in a social network nodes correspond topeople or other social entities and edges correspond to social links between them in an effort to preserve privacy the practice of anonymization replaces names with meaningless unique identifiers we describe a family of attacks such that even from a single anonymized copy of a social network it is possible for an adversary to learn whether edges exist or not between specific targeted pairs of nodes", + "title_raw": "Wherefore art thou r3579x?: anonymized social networks, hidden patterns, and structural steganography", + "abstract_raw": "In a social network, nodes correspond topeople or other social entities, and edges correspond to social links between them. 
In an effort to preserve privacy, the practice of anonymization replaces names with meaningless unique identifiers. We describe a family of attacks such that even from a single anonymized copy of a social network, it is possible for an adversary to learn whether edges exist or not between specific targeted pairs of nodes.", + "link": "https://www.semanticscholar.org/paper/7300121833221b169042d1702b57e4221b69c205", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "1607840088", + "venue": "1184914352", + "year": "2006", + "title": "model counting a new strategy for obtaining good bounds", + "label": [ + "111350023", + "168773769", + "74640108", + "11413529" + ], + "author": [ + "2108126638", + "2232198255", + "1966117383" + ], + "reference": [ + "179198942", + "1495732641", + "1513363711", + "1686746446", + "1964821516", + "2057512592", + "2151536080", + "2295428206", + "2479567345", + "2501499209", + "2610411743" + ], + "abstract": "model counting is the classical problem of computing the number of solutions of a given propositional formula it vastly generalizes the np complete problem of propositional satisfiability and hence is both highly useful and extremely expensive to solve in practice we present a new approach to model counting that is based on adding a carefully chosen number of so called streamlining constraints to the input formula in order to cut down the size of its solution space in a controlled manner each of the additional constraints is a randomly chosen xor or parity constraint on the problem variables represented either directly or in the standard cnf form inspired by a related yet quite different theoretical study of the properties of xor constraints we provide a formal proof that with high probability the number of xor constraints added in order to bring the formula to the boundary of being unsatisfiable determines with high precision its model count experimentally we demonstrate that this approach can be used to obtain good 
bounds on the model counts for formulas that are far beyond the reach of exact counting methods in fact we obtain the first non trivial solution counts for very hard highly structured combinatorial problem instances note that unlike other counting techniques such as markov chain monte carlo methods we are able to provide high confidence guarantees on the quality of the counts obtained", + "title_raw": "Model counting: a new strategy for obtaining good bounds", + "abstract_raw": "Model counting is the classical problem of computing the number of solutions of a given propositional formula. It vastly generalizes the NP-complete problem of propositional satisfiability, and hence is both highly useful and extremely expensive to solve in practice. We present a new approach to model counting that is based on adding a carefully chosen number of so-called streamlining constraints to the input formula in order to cut down the size of its solution space in a controlled manner. Each of the additional constraints is a randomly chosen XOR or parity constraint on the problem variables, represented either directly or in the standard CNF form. Inspired by a related yet quite different theoretical study of the properties of XOR constraints, we provide a formal proof that with high probability, the number of XOR constraints added in order to bring the formula to the boundary of being unsatisfiable determines with high precision its model count. Experimentally, we demonstrate that this approach can be used to obtain good bounds on the model counts for formulas that are far beyond the reach of exact counting methods. In fact, we obtain the first non-trivial solution counts for very hard, highly structured combinatorial problem instances. 
Note that unlike other counting techniques, such as Markov Chain Monte Carlo methods, we are able to provide high-confidence guarantees on the quality of the counts obtained.", + "link": "https://www.semanticscholar.org/paper/d1ed22234e6b143b3abfd2a30dbbc6b16eafb67c", + "scraped_abstract": null, + "citation_best": 143 + }, + { + "paper": "52985456", + "venue": "1184914352", + "year": "2006", + "title": "towards an axiom system for default logic", + "label": [ + "110039528", + "101874905", + "8245965", + "33203268", + "203659156", + "19689857", + "11413529", + "150889026", + "159032336", + "195344581", + "160236029", + "2039508", + "102993220", + "137488015", + "97364631", + "169896238", + "3845977" + ], + "author": [ + "10380012", + "2676601693" + ], + "reference": [ + "48715203", + "1493498708", + "1501530641", + "1506700997", + "1573063610", + "1596067335", + "1683212044", + "1966552757", + "1991972474", + "1998575874", + "2000264802", + "2004226183", + "2030773975", + "2034442203", + "2037518358", + "2067326626", + "2155322595", + "2406656780" + ], + "abstract": "recently lakemeyer and levesque proposed a logic of only knowing which precisely captures three forms of nonmonotonic reasoning moore s autoepistemic logic konolige s variant based on moderately grounded expansions and reiter s default logic defaults have a uniform representation under all three interpretations in the new logic moreover the logic itself is monotonic that is nonmonotonic reasoning is cast in terms of validity in the classical sense while lakemeyer and levesque gave a model theoretic account of their logic a proof theoretic characterization remained open this paper fills that gap for the propositional subset a sound and complete axiom system in the new logic for all three varieties of default reasoning we also present formal derivations for some examples of default reasoning finally we present evidence that it is unlikely that a complete axiom system exists in the first order case even 
when restricted to the simplest forms of default reasoning", + "title_raw": "Towards an axiom system for default logic", + "abstract_raw": "Recently, Lakemeyer and Levesque proposed a logic of only-knowing which precisely captures three forms of nonmonotonic reasoning: Moore's Autoepistemic Logic, Konolige's variant based on moderately grounded expansions, and Reiter's default logic. Defaults have a uniform representation under all three interpretations in the new logic. Moreover, the logic itself is monotonic, that is, nonmonotonic reasoning is cast in terms of validity in the classical sense. While Lakemeyer and Levesque gave a model-theoretic account of their logic, a proof-theoretic characterization remained open. This paper fills that gap for the propositional subset: a sound and complete axiom system in the new logic for all three varieties of default reasoning. We also present formal derivations for some examples of default reasoning. Finally we present evidence that it is unlikely that a complete axiom system exists in the first-order case, even when restricted to the simplest forms of default reasoning.", + "link": "https://www.semanticscholar.org/paper/4ee79c2d1868921b30ffda8b8eb2203dba7315fd", + "scraped_abstract": null, + "citation_best": 13 + }, + { + "paper": "2144108169", + "venue": "1188739475", + "year": "2006", + "title": "semantic taxonomy induction from heterogenous evidence", + "label": [ + "119857082", + "121934690", + "94413719", + "204321447", + "157659113" + ], + "author": [ + "2144630512", + "2089131864", + "2104401652" + ], + "reference": [ + "73274768", + "77146693", + "291570921", + "1554237613", + "1607723633", + "1637538661", + "1930023685", + "2020082880", + "2026185168", + "2038721957", + "2068737686", + "2100377551", + "2102515914", + "2107658650", + "2122056984", + "2123084125", + "2142086811", + "2148540243", + "2151846280", + "2161669948", + "2167061159" + ], + "abstract": "we propose a novel algorithm for inducing semantic 
taxonomies previous algorithms for taxonomy induction have typically focused on independent classifiers for discovering new single relationships based on hand constructed or automatically discovered textual patterns by contrast our algorithm flexibly incorporates evidence from multiple classifiers over heterogenous relationships to optimize the entire structure of the taxonomy using knowledge of a word s coordinate terms to help in determining its hypernyms and vice versa we apply our algorithm on the problem of sense disambiguated noun hyponym acquisition where we combine the predictions of hypernym and coordinate term classifiers with the knowledge in a preexisting semantic taxonomy wordnet 2 1 we add 10 000 novel synsets to wordnet 2 1 at 84 precision a relative error reduction of 70 over a non joint algorithm using the same component classifiers finally we show that a taxonomy built using our algorithm shows a 23 relative f score improvement over wordnet 2 1 on an independent testset of hypernym pairs", + "title_raw": "Semantic Taxonomy Induction from Heterogenous Evidence", + "abstract_raw": "We propose a novel algorithm for inducing semantic taxonomies. Previous algorithms for taxonomy induction have typically focused on independent classifiers for discovering new single relationships based on hand-constructed or automatically discovered textual patterns. By contrast, our algorithm flexibly incorporates evidence from multiple classifiers over heterogenous relationships to optimize the entire structure of the taxonomy, using knowledge of a word's coordinate terms to help in determining its hypernyms, and vice versa. We apply our algorithm on the problem of sense-disambiguated noun hyponym acquisition, where we combine the predictions of hypernym and coordinate term classifiers with the knowledge in a preexisting semantic taxonomy (WordNet 2.1). 
We add 10,000 novel synsets to WordNet 2.1 at 84% precision, a relative error reduction of 70% over a non-joint algorithm using the same component classifiers. Finally, we show that a taxonomy built using our algorithm shows a 23% relative F-score improvement over WordNet 2.1 on an independent testset of hypernym pairs.", + "link": "https://www.semanticscholar.org/paper/93bb6228776eafa606965e21f229d548de1998eb", + "scraped_abstract": null, + "citation_best": 466 + }, + { + "paper": "2003257628", + "venue": "1163450153", + "year": "2006", + "title": "trackball text entry for people with motor impairments", + "label": [ + "28490314", + "176544364", + "207347870" + ], + "author": [ + "318516288", + "2117127927" + ], + "reference": [ + "192831106", + "1481799709", + "1521455188", + "1582119411", + "2017150428", + "2020725915", + "2022093295", + "2022422334", + "2022689240", + "2025647423", + "2033288247", + "2035675819", + "2050037476", + "2051676735", + "2055688961", + "2058242929", + "2081623546", + "2099287431", + "2103751734", + "2125904902", + "2138879556", + "2147160183", + "2158391167", + "2170810427", + "2179427518", + "2752491485", + "3188418648" + ], + "abstract": "we present a new gestural text entry method for trackballs the method uses the mouse cursor and relies on crossing instead of pointing a user writes in fluid roman like unistrokes by pulsing the trackball in desired letter patterns we examine this method both theoretically using the steering law and empirically in two studies our studies show that able bodied users who were unfamiliar with trackballs could write at about 10 wpm with 4 total errors after 45 minutes in eight sessions a motor impaired trackball user peaked at 7 11 wpm with 0 uncorrected errors compared to 5 95 wpm with 0 uncorrected errors with an on screen keyboard over sessions his speeds were significantly faster with our gestural method than with an on screen keyboard a former 15 year veteran of on screen keyboards he now uses our 
gestural method instead", + "title_raw": "Trackball text entry for people with motor impairments", + "abstract_raw": "We present a new gestural text entry method for trackballs. The method uses the mouse cursor and relies on crossing instead of pointing. A user writes in fluid Roman-like unistrokes by \"\"pulsing\"\" the trackball in desired letter patterns. We examine this method both theoretically using the Steering Law and empirically in two studies. Our studies show that able-bodied users who were unfamiliar with trackballs could write at about 10 wpm with <4% total errors after 45 minutes. In eight sessions, a motor-impaired trackball user peaked at 7.11 wpm with 0% uncorrected errors, compared to 5.95 wpm with 0% uncorrected errors with an on-screen keyboard. Over sessions, his speeds were significantly faster with our gestural method than with an on-screen keyboard. A former 15-year veteran of on-screen keyboards, he now uses our gestural method instead.", + "link": "https://www.semanticscholar.org/paper/ba7c47fb86b02582da7bca4ab5c0ecb1f88444b2", + "scraped_abstract": null, + "citation_best": 77 + }, + { + "paper": "2151182223", + "venue": "1163450153", + "year": "2006", + "title": "embedded phenomena supporting science learning with classroom sized distributed simulations", + "label": [ + "107457646", + "48103436" + ], + "author": [ + "2179221810" + ], + "reference": [ + "77070038", + "87657467", + "96209597", + "572682039", + "1516369758", + "1551717579", + "1573622764", + "1580590571", + "1602232627", + "1608452410", + "1815090327", + "1978623692", + "1982170419", + "2007331479", + "2013951592", + "2023015865", + "2029933231", + "2039955996", + "2042285607", + "2043829411", + "2050896993", + "2051484905", + "2052876062", + "2078569098", + "2080292343", + "2090299671", + "2094982166", + "2107353262", + "2109261726", + "2111124607", + "2116199508", + "2118366990", + "2125669135", + "2126527906", + "2126713863", + "2130000957", + "2133137770", + 
"2138613430", + "2146120708", + "2154380009", + "2158508294", + "2163408656", + "2164717551", + "2167670020", + "2168733570", + "2784146925", + "2945424237", + "2980298611" + ], + "abstract": "embedded phenomena is a learning technology framework in which simulated scientific phenomena are mapped onto the physical space of classrooms students monitor and control the local state of the simulation through distributed media positioned around the room gathering and aggregating evidence to solve problems or answer questions related to those phenomena embedded phenomena are persistent running continuously over weeks and months creating information channels that are temporally and physically interleaved with but asynchronous with respect to the regular flow of instruction in this paper we describe the motivations for the framework describe classroom experiences with three embedded phenomena in the domains of seismology insect ecology and astronomy and situate embedded phenomena within the context of human computer interaction research in co located group interfaces and learning technologies", + "title_raw": "Embedded phenomena: supporting science learning with classroom-sized distributed simulations", + "abstract_raw": "'Embedded phenomena' is a learning technology framework in which simulated scientific phenomena are mapped onto the physical space of classrooms. Students monitor and control the local state of the simulation through distributed media positioned around the room, gathering and aggregating evidence to solve problems or answer questions related to those phenomena. Embedded phenomena are persistent, running continuously over weeks and months, creating information channels that are temporally and physically interleaved with, but asynchronous with respect to, the regular flow of instruction. 
In this paper, we describe the motivations for the framework, describe classroom experiences with three embedded phenomena in the domains of seismology, insect ecology, and astronomy, and situate embedded phenomena within the context of human-computer interaction research in co-located group interfaces and learning technologies.", + "link": "https://www.semanticscholar.org/paper/f34743fff11eb880b62c2645f81e383a106c9808", + "scraped_abstract": null, + "citation_best": 10 + }, + { + "paper": "2118755902", + "venue": "1163450153", + "year": "2006", + "title": "a role for haptics in mobile interaction initial design using a handheld tactile display prototype", + "label": [ + "115121344", + "44154836", + "152086174", + "186967261" + ], + "author": [ + "2974105335", + "137181722", + "2974090240", + "2105254560", + "1990108045", + "2000808029" + ], + "reference": [ + "42617256", + "134549639", + "1983376192", + "1996680658", + "2014788716", + "2024628547", + "2044088477", + "2059913822", + "2096894832", + "2120900691", + "2131800446", + "2143710843", + "2158019650", + "2158390706", + "2164011411", + "2164160147", + "2177079828", + "2259691514" + ], + "abstract": "mobile interaction can potentially be enhanced with well designed haptic control and display however advances have been limited by a vicious cycle whereby inadequate haptic technology obstructs inception of vitalizing applications we present the first stages of a systematic design effort to break that cycle beginning with specific usage scenarios and a new handheld display platform based on lateral skin stretch results of a perceptual device characterization inform mappings between device capabilities and specific roles in mobile interaction and the next step of hardware re engineering", + "title_raw": "A role for haptics in mobile interaction: initial design using a handheld tactile display prototype", + "abstract_raw": "Mobile interaction can potentially be enhanced with well-designed haptic control and 
display. However, advances have been limited by a vicious cycle whereby inadequate haptic technology obstructs inception of vitalizing applications. We present the first stages of a systematic design effort to break that cycle, beginning with specific usage scenarios and a new handheld display platform based on lateral skin stretch. Results of a perceptual device characterization inform mappings between device capabilities and specific roles in mobile interaction, and the next step of hardware re-engineering.", + "link": "https://www.semanticscholar.org/paper/300e3522b3b8d08f4d76ed6cf7bc55e3f5caa84c", + "scraped_abstract": null, + "citation_best": 15 + }, + { + "paper": "2146352414", + "venue": "1158167855", + "year": "2006", + "title": "putting objects in perspective", + "label": [ + "49937458", + "110898773", + "109950114", + "64729616", + "73752529", + "2776151529", + "12713177", + "9417928", + "31972630", + "64876066" + ], + "author": [ + "1917258602", + "2088536091", + "2101365550" + ], + "reference": [ + "39428922", + "1532257412", + "1546961578", + "1590283725", + "1618905105", + "1924378756", + "1986377419", + "2010363308", + "2056860348", + "2095844239", + "2110764733", + "2111151479", + "2114987560", + "2119664956", + "2121871678", + "2124189704", + "2128962821", + "2132397101", + "2134789565", + "2134921974", + "2144157654", + "2145086237", + "2155871590", + "2159080219", + "2161969291", + "2162820221", + "2280242340", + "2911355490", + "2970930881", + "2979006918", + "3097096317" + ], + "abstract": "image understanding requires not only individually estimating elements of the visual world but also capturing the interplay among them in this paper we provide a framework for placing local object detection in the context of the overall 3d scene by modeling the interdependence of objects surface orientations and camera viewpoint most object detection methods consider all scales and locations in the image as equally likely we show that with probabilistic 
estimates of 3d geometry both in terms of surfaces and world coordinates we can put objects into perspective and model the scale and location variance in the image our approach reflects the cyclical nature of the problem by allowing probabilistic object hypotheses to refine geometry and vice versa our framework allows painless substitution of almost any object detector and is easily extended to include other aspects of image understanding our results confirm the benefits of our integrated approach", + "title_raw": "Putting Objects in Perspective", + "abstract_raw": "Image understanding requires not only individually estimating elements of the visual world but also capturing the interplay among them. In this paper, we provide a framework for placing local object detection in the context of the overall 3D scene by modeling the interdependence of objects, surface orientations, and camera viewpoint. Most object detection methods consider all scales and locations in the image as equally likely. We show that with probabilistic estimates of 3D geometry, both in terms of surfaces and world coordinates, we can put objects into perspective and model the scale and location variance in the image. Our approach reflects the cyclical nature of the problem by allowing probabilistic object hypotheses to refine geometry and vice-versa. Our framework allows painless substitution of almost any object detector and is easily extended to include other aspects of image understanding. 
Our results confirm the benefits of our integrated approach.", + "link": "https://www.semanticscholar.org/paper/baddac96864c86538d3bd8bf495f00f818475a9e", + "scraped_abstract": null, + "citation_best": 235 + }, + { + "paper": "2082731247", + "venue": "1199533187", + "year": "2006", + "title": "controlling factors in evaluating path sensitive error detection techniques", + "label": [ + "24119478", + "26713055", + "137287247", + "110251889", + "22414024", + "103088060" + ], + "author": [ + "2156583880", + "2237556206", + "262280937" + ], + "reference": [ + "128817107", + "207759855", + "1497571013", + "1550487903", + "1964711453", + "1980522179", + "2016167891", + "2023392692", + "2036802481", + "2087252615", + "2106043799", + "2107978495", + "2115309705", + "2116989825", + "2117009500", + "2124877509", + "2126088541", + "2142647473", + "2142812175", + "2145727812", + "2146540946", + "2153265459", + "2158654729", + "2159765571", + "2171480813" + ], + "abstract": "recent advances in static program analysis have made it possible to detect errors in applications that have been thoroughly tested and are in wide spread use the ability to find errors that have eluded traditional validation methods is due to the development and combination of sophisticated algorithmic techniques that are embedded in the implementations of analysis tools evaluating new analysis techniques is typically performed by running an analysis tool on a collection of subject programs perhaps enabling and disabling a given technique in different runs while seemingly sensible this approach runs the risk of attributing improvements in the cost effectiveness of the analysis to the technique under consideration when those improvements may actually be due to details of analysis tool implementations that are uncontrolled during evaluation in this paper we focus on the specific class of path sensitive error detection techniques and identify several factors that can significantly influence the cost of analysis 
we show through careful empirical studies that the influence of these factors is sufficiently large that if left uncontrolled they may lead researchers to improperly attribute improvements in analysis cost and effectiveness we make several recommendations as to how the influence of these factors can be mitigated when evaluating techniques", + "title_raw": "Controlling factors in evaluating path-sensitive error detection techniques", + "abstract_raw": "Recent advances in static program analysis have made it possible to detect errors in applications that have been thoroughly tested and are in wide-spread use. The ability to find errors that have eluded traditional validation methods is due to the development and combination of sophisticated algorithmic techniques that are embedded in the implementations of analysis tools. Evaluating new analysis techniques is typically performed by running an analysis tool on a collection of subject programs, perhaps enabling and disabling a given technique in different runs. While seemingly sensible, this approach runs the risk of attributing improvements in the cost-effectiveness of the analysis to the technique under consideration, when those improvements may actually be due to details of analysis tool implementations that are uncontrolled during evaluation.In this paper, we focus on the specific class of path-sensitive error detection techniques and identify several factors that can significantly influence the cost of analysis. We show, through careful empirical studies, that the influence of these factors is sufficiently large that, if left uncontrolled, they may lead researchers to improperly attribute improvements in analysis cost and effectiveness. 
We make several recommendations as to how the influence of these factors can be mitigated when evaluating techniques.", + "link": "https://www.semanticscholar.org/paper/52d4cdd97c8901d8cec811645b841839ed6f15e0", + "scraped_abstract": null, + "citation_best": 11 + }, + { + "paper": "2107794009", + "venue": "1199533187", + "year": "2006", + "title": "synergy a new algorithm for property checking", + "label": [ + "192034797", + "110251889", + "48103436", + "11413529", + "80444323", + "123842658", + "30888246" + ], + "author": [ + "343101756", + "2707196735", + "2168290138", + "2049248193", + "2083764764" + ], + "reference": [ + "120333500", + "200765967", + "335711039", + "1531054827", + "1540180001", + "1581520638", + "1590315663", + "1605593319", + "1968898611", + "1986447758", + "2021473546", + "2043100293", + "2065675749", + "2066859698", + "2068361557", + "2071152819", + "2082000355", + "2084974764", + "2089139117", + "2096449544", + "2098698656", + "2101126217", + "2107089133", + "2130805777", + "2149918819", + "2151463894", + "2154689462", + "2158395308", + "2162844904", + "2295349525", + "2295903414", + "2340735175", + "2913459036" + ], + "abstract": "we consider the problem if a given program satisfies a specified safety property interesting programs have infinite state spaces with inputs ranging over infinite domains and for these programs the property checking problem is undecidable two broad approaches to property checking are testing and verification testing tries to find inputs and executions which demonstrate violations of the property verification tries to construct a formal proof which shows that all executions of the program satisfy the property testing works best when errors are easy to find but it is often difficult to achieve sufficient coverage for correct programs on the other hand verification methods are most successful when proofs are easy to find but they are often inefficient at discovering errors we propose a new algorithm s ynergy which 
combines testing and verification s ynergy unifies several ideas from the literature including counterexample guided model checking directed testing and partition refinement this paper presents a description of the s ynergy algorithm its theoretical properties a comparison with related algorithms and a prototype implementation called y ogi", + "title_raw": "SYNERGY: a new algorithm for property checking", + "abstract_raw": "We consider the problem if a given program satisfies a specified safety property. Interesting programs have infinite state spaces, with inputs ranging over infinite domains, and for these programs the property checking problem is undecidable. Two broad approaches to property checking are testing and verification. Testing tries to find inputs and executions which demonstrate violations of the property. Verification tries to construct a formal proof which shows that all executions of the program satisfy the property. Testing works best when errors are easy to find, but it is often difficult to achieve sufficient coverage for correct programs. On the other hand, verification methods are most successful when proofs are easy to find, but they are often inefficient at discovering errors. We propose a new algorithm, S ynergy , which combines testing and verification. 
S ynergy unifies several ideas from the literature, including counterexample-guided model checking, directed testing, and partition refinement.This paper presents a description of the S ynergy algorithm, its theoretical properties, a comparison with related algorithms, and a prototype implementation called Y ogi .", + "link": "https://www.semanticscholar.org/paper/03705958cb453b90654564c6b735031b2cb60ba6", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2071616717", + "venue": "1174403976", + "year": "2006", + "title": "model based development of dynamically adaptive software", + "label": [ + "558632462", + "144543869", + "195672273", + "174683762", + "89187990", + "116253237", + "2777904410", + "98045186", + "120314980", + "79403827" + ], + "author": [ + "2610681562", + "2116990958" + ], + "reference": [ + "1482121677", + "1560962701", + "1564907586", + "1585020894", + "1590686765", + "1598754349", + "1642278851", + "1822289477", + "1991486966", + "1992212585", + "1998017916", + "2001323710", + "2002887051", + "2023255236", + "2057807303", + "2080987629", + "2090398333", + "2095355321", + "2100345491", + "2100653580", + "2123479795", + "2134196660", + "2134716336", + "2138876988", + "2139300704", + "2143897980", + "2146478966", + "2151451947", + "2162612590", + "2162983444", + "2176300081", + "2983611614", + "3144368627", + "3148733312" + ], + "abstract": "increasingly software should dynamically adapt its behavior at run time in response to changing conditions in the supporting computing and communication infrastructure and in the surrounding physical environment in order for an adaptive program to be trusted it is important to have mechanisms to ensure that the program functions correctly during and after adaptations adaptive programs are generally more difficult to specify verify and validate due to their high complexity particularly when involving multi threaded adaptations the program behavior is the result of the collaborative behavior 
of multiple threads and software components this paper introduces an approach to create formal models for the behavior of adaptive programs our approach separates the adaptation behavior and non adaptive behavior specifications of adaptive programs making the models easier to specify and more amenable to automated analysis and visual inspection we introduce a process to construct adaptation models automatically generate adaptive programs from the models and verify and validate the models we illustrate our approach through the development of an adaptive gsm oriented audio streaming protocol for a mobile computing application", + "title_raw": "Model-based development of dynamically adaptive software", + "abstract_raw": "Increasingly, software should dynamically adapt its behavior at run-time in response to changing conditions in the supporting computing and communication infrastructure, and in the surrounding physical environment. In order for an adaptive program to be trusted, it is important to have mechanisms to ensure that the program functions correctly during and after adaptations. Adaptive programs are generally more difficult to specify, verify, and validate due to their high complexity. Particularly, when involving multi-threaded adaptations, the program behavior is the result of the collaborative behavior of multiple threads and software components. This paper introduces an approach to create formal models for the behavior of adaptive programs. Our approach separates the adaptation behavior and non-adaptive behavior specifications of adaptive programs, making the models easier to specify and more amenable to automated analysis and visual inspection. We introduce a process to construct adaptation models, automatically generate adaptive programs from the models, and verify and validate the models. 
We illustrate our approach through the development of an adaptive GSM-oriented audio streaming protocol for a mobile computing application.", + "link": "https://www.semanticscholar.org/paper/a060ca20d4e3ba2542afa79231a46ead323b8d24", + "scraped_abstract": null, + "citation_best": 413 + }, + { + "paper": "2079317829", + "venue": "1174403976", + "year": "2006", + "title": "who should fix this bug", + "label": [ + "2776184366", + "191727507", + "136764020", + "95623464" + ], + "author": [ + "2190880782", + "2501958846", + "2100357700" + ], + "reference": [ + "124631940", + "180288257", + "1504070272", + "1660390307", + "1869391892", + "1912123407", + "1953689839", + "2041114254", + "2060291526", + "2099332975", + "2118020653", + "2123504579", + "2125055259", + "2125889680", + "2140336868", + "2143561072", + "2149684865", + "2966207845", + "3148527572" + ], + "abstract": "open source development projects typically support an open bug repository to which both developers and users can report bugs the reports that appear in this repository must be triaged to determine if the report is one which requires attention and if it is which developer will be assigned the responsibility of resolving the report large open source developments are burdened by the rate at which new bug reports appear in the bug repository in this paper we present a semi automated approach intended to ease one part of this process the assignment of reports to a developer our approach applies a machine learning algorithm to the open bug repository to learn the kinds of reports each developer resolves when a new report arrives the classifier produced by the machine learning technique suggests a small number of developers suitable to resolve the report with this approach we have reached precision levels of 57 and 64 on the eclipse and firefox development projects respectively we have also applied our approach to the gcc open source development with less positive results we describe the conditions under 
which the approach is applicable and also report on the lessons we learned about applying machine learning to repositories used in open source development", + "title_raw": "Who should fix this bug", + "abstract_raw": "Open source development projects typically support an open bug repository to which both developers and users can report bugs. The reports that appear in this repository must be triaged to determine if the report is one which requires attention and if it is, which developer will be assigned the responsibility of resolving the report. Large open source developments are burdened by the rate at which new bug reports appear in the bug repository. In this paper, we present a semi-automated approach intended to ease one part of this process, the assignment of reports to a developer. Our approach applies a machine learning algorithm to the open bug repository to learn the kinds of reports each developer resolves. When a new report arrives, the classifier produced by the machine learning technique suggests a small number of developers suitable to resolve the report. With this approach, we have reached precision levels of 57% and 64% on the Eclipse and Firefox development projects respectively. We have also applied our approach to the gcc open source development with less positive results. 
We describe the conditions under which the approach is applicable and also report on the lessons we learned about applying machine learning to repositories used in open source development.", + "link": "https://www.semanticscholar.org/paper/b3b3c562a45d7710d6f62ad8f210ebca9a47d23f", + "scraped_abstract": null, + "citation_best": 900 + }, + { + "paper": "1544095305", + "venue": "1158363782", + "year": "2006", + "title": "experience with an object reputation system for peer to peer filesharing", + "label": [ + "27713364", + "534932454", + "64729616", + "2779513410", + "136764020", + "105339364", + "108827166", + "31352089" + ], + "author": [ + "2147169224", + "223610748" + ], + "reference": [ + "1520914943", + "1521374031", + "1580004440", + "1583018548", + "1764421085", + "1854214752", + "1994181344", + "2000432379", + "2056363353", + "2070621149", + "2095202767", + "2103916378", + "2118744608", + "2130234299", + "2143624305", + "2144780381", + "2155106456", + "2156523427", + "2160968921", + "2166764366", + "2171340077", + "2171774422", + "2171957559", + "2290132939", + "2466575204" + ], + "abstract": "in this paper we describe credence a decentralized object reputation and ranking system for large scale peer to peer filesharing networks credence counteracts pollution in these networks by allowing honest peers to assess the authenticity of online content through secure tabulation and management of endorsements from other peers our system enables peers to learn relationships even in the absence of direct observations or interactions through a novel flow based trust computation to discover trustworthy peers we have deployed credence as an overlay on top of the gnutella filesharing network with more than 10 000 downloads of our client software to date we describe the system design our experience with its deployment and results from a long term study of the trust network built by users data from the live deployment shows that credence s flow based trust computation 
enables users to avoid undesirable content honest credence clients can identify three quarters of the decoys encountered when querying the gnutella network", + "title_raw": "Experience with an object reputation system for peer-to-peer filesharing", + "abstract_raw": "In this paper, we describe Credence, a decentralized object reputation and ranking system for large-scale peer-to-peer filesharing networks. Credence counteracts pollution in these networks by allowing honest peers to assess the authenticity of online content through secure tabulation and management of endorsements from other peers. Our system enables peers to learn relationships even in the absence of direct observations or interactions through a novel, flow-based trust computation to discover trustworthy peers. We have deployed Credence as an overlay on top of the Gnutella filesharing network, with more than 10,000 downloads of our client software to date. We describe the system design, our experience with its deployment, and results from a long-term study of the trust network built by users. Data from the live deployment shows that Credence's flow-based trust computation enables users to avoid undesirable content. 
Honest Credence clients can identify three quarters of the decoys encountered when querying the Gnutella network.", + "link": "https://www.semanticscholar.org/paper/75641b5ba3bc6a8c2889c52c22ed14f058ce7f35", + "scraped_abstract": null, + "citation_best": 214 + }, + { + "paper": "1500238431", + "venue": "1158363782", + "year": "2006", + "title": "availability of multi object operations", + "label": [ + "137529215", + "24885549", + "64729616", + "2780300890", + "137955351", + "194739806", + "120314980" + ], + "author": [ + "2138265396", + "2144092273", + "2120595703" + ], + "reference": [ + "1524103123", + "1570696062", + "1743681434", + "1791778938", + "1815769434", + "1969490101", + "1970564778", + "1997179596", + "1997802075", + "2019957811", + "2020765652", + "2053069463", + "2059944263", + "2095897464", + "2096538410", + "2096703709", + "2103664057", + "2104210894", + "2115599946", + "2116777751", + "2119565742", + "2121133177", + "2123820820", + "2124074197", + "2125374545", + "2126310747", + "2127225084", + "2144499729", + "2147504831", + "2150676586", + "2153752068", + "2155846054", + "2158049821", + "2159761515", + "2163059190", + "2165861038", + "2167898414", + "2171957559", + "2405200838", + "2950660196" + ], + "abstract": "highly available distributed storage systems are commonly designed to optimize the availability of individual data objects despite the fact that user level tasks typically request multiple objects in this paper we show that the assignment of object replicas or fragments in the case of erasure coding to machines plays a dramatic role in the availability of such multi object operations without affecting the availability of individual objects for example for the tpc h benchmark under real world failures we observe differences of up to four nines between popular assignments used in existing systems experiments using our wide area storage system prototype moat on the planetlab as well as extensive simulations show which assignments lead to 
the highest availability for a given setting", + "title_raw": "Availability of multi-object operations", + "abstract_raw": "Highly-available distributed storage systems are commonly designed to optimize the availability of individual data objects, despite the fact that user level tasks typically request multiple objects. In this paper, we show that the assignment of object replicas (or fragments, in the case of erasure coding) to machines plays a dramatic role in the availability of such multi-object operations, without affecting the availability of individual objects. For example, for the TPC-H benchmark under real-world failures, we observe differences of up to four nines between popular assignments used in existing systems. Experiments using our wide-area storage system prototype, MOAT, on the PlanetLab, as well as extensive simulations, show which assignments lead to the highest availability for a given setting.", + "link": "https://scholar.google.com/scholar?as_q=&num=10&btnG=Search+Scholar&as_epq=Availability+of+Multi-Object+Operations&as_oq=&as_eq=&as_occt=any&as_sauthors=Yu", + "scraped_abstract": null, + "citation_best": 29 + }, + { + "paper": "2620706897", + "venue": "1185109434", + "year": "2006", + "title": "rethink the sync", + "label": [ + "111919701", + "3913047", + "13674803", + "195015458", + "180500224", + "9390403", + "82820731", + "193769178", + "2780940931", + "166807848" + ], + "author": [ + "1273945011", + "1322315466", + "2101505567", + "1992040104" + ], + "reference": [ + "106162958", + "1514634512", + "1549389445", + "1557990038", + "1966666941", + "2047418149", + "2052915895", + "2058068178", + "2081644593", + "2097589646", + "2099765549", + "2100100130", + "2100970777", + "2105986919", + "2108183412", + "2108795876", + "2110137598", + "2112137708", + "2114167330", + "2115600841", + "2117289367", + "2122502383", + "2125776594", + "2141024949", + "2141318181", + "2151745115", + "2154817671", + "2197661084", + "2751601659", + "2993179107", 
+ "3137220996" + ], + "abstract": "we introduce external synchrony a new model for local file i o that provides the reliability and simplicity of synchronous i o yet also closely approximates the performance of asynchronous i o an external observer cannot distinguish the output of a computer with an externally synchronous file system from the output of a computer with a synchronous file system no application modification is required to use an externally synchronous file system in fact application developers can program to the simpler synchronous i o abstraction and still receive excellent performance we have implemented an externally synchronous file system for linux called xsyncfs xsyncfs provides the same durability and ordering guarantees as those provided by a synchronously mounted ext3 file system yet even for i o intensive benchmarks xsyncfs performance is within 7 of ext3 mounted asynchronously compared to ext3 mounted synchronously xsyncfs is up to two orders of magnitude faster", + "title_raw": "Rethink the sync", + "abstract_raw": "We introduce external synchrony, a new model for local file I/O that provides the reliability and simplicity of synchronous I/O, yet also closely approximates the performance of asynchronous I/O. An external observer cannot distinguish the output of a computer with an externally synchronous file system from the output of a computer with a synchronous file system. No application modification is required to use an externally synchronous file system: in fact, application developers can program to the simpler synchronous I/O abstraction and still receive excellent performance. We have implemented an externally synchronous file system for Linux, called xsyncfs. Xsyncfs provides the same durability and ordering guarantees as those provided by a synchronously mounted ext3 file system. Yet, even for I/O-intensive benchmarks, xsyncfs performance is within 7% of ext3 mounted asynchronously. 
Compared to ext3 mounted synchronously, xsyncfs is up to two orders of magnitude faster.", + "link": "https://www.semanticscholar.org/paper/e31e1153919801bdf936defa1083b99833789312", + "scraped_abstract": null, + "citation_best": 45 + }, + { + "paper": "2624304035", + "venue": "1185109434", + "year": "2006", + "title": "bigtable a distributed storage system for structured data", + "label": [ + "24885549", + "21959979", + "77088390", + "13600138", + "2779893281", + "9476365", + "93996380", + "136764020" + ], + "author": [ + "2129779547", + "2429370538", + "2575315241", + "2136379990", + "2303980089", + "2302774460", + "2150538384", + "2004395710", + "2682017945" + ], + "reference": [ + "11669818", + "1497150730", + "1584635459", + "1650675509", + "1975577269", + "1992479210", + "1993744084", + "1997020216", + "2008750849", + "2010413210", + "2037177386", + "2043934800", + "2045255985", + "2054278783", + "2068739275", + "2075854425", + "2081461624", + "2106887953", + "2113288530", + "2115600841", + "2116420167", + "2119565742", + "2119714163", + "2122315991", + "2122465391", + "2123686039", + "2123845384", + "2128912667", + "2130642985", + "2143149536", + "2152686036", + "2158049821", + "2161694911", + "2163059190", + "2167898414", + "3138367763" + ], + "abstract": "bigtable is a distributed storage system for managing structured data that is designed to scale to a very large size petabytes of data across thousands of commodity servers many projects at google store data in bigtable including web indexing google earth and google finance these applications place very different demands on bigtable both in terms of data size from urls to web pages to satellite imagery and latency requirements from backend bulk processing to real time data serving despite these varied demands bigtable has successfully provided a flexible high performance solution for all of these google products in this paper we describe the simple data model provided by bigtable which gives clients 
dynamic control over data layout and format and we describe the design and implementation of bigtable", + "title_raw": "Bigtable: a distributed storage system for structured data", + "abstract_raw": "Bigtable is a distributed storage system for managing structured data that is designed to scale to a very large size: petabytes of data across thousands of commodity servers. Many projects at Google store data in Bigtable, including web indexing, Google Earth, and Google Finance. These applications place very different demands on Bigtable, both in terms of data size (from URLs to web pages to satellite imagery) and latency requirements (from backend bulk processing to real-time data serving). Despite these varied demands, Bigtable has successfully provided a flexible, high-performance solution for all of these Google products. In this paper we describe the simple data model provided by Bigtable, which gives clients dynamic control over data layout and format, and we describe the design and implementation of Bigtable.", + "link": "https://www.semanticscholar.org/paper/fb64e4a6d7b6dfb4d96a482319da9c0837f303e6", + "scraped_abstract": null, + "citation_best": 1866 + }, + { + "paper": "2124504084", + "venue": "1140684652", + "year": "2006", + "title": "minimal test collections for retrieval evaluation", + "label": [ + "119857082", + "100853971", + "177264268", + "124101348", + "87546605" + ], + "author": [ + "2645247999", + "2097030689", + "261571608" + ], + "reference": [ + "132343450", + "1554394657", + "1979870225", + "1983595289", + "1994960885", + "2015338694", + "2057495142", + "2069353545", + "2073722401", + "2075893676", + "2109244020", + "2120308175", + "2124379907", + "2130145422", + "2165387257", + "2323834113", + "2839980303" + ], + "abstract": "accurate estimation of information retrieval evaluation metrics such as average precision require large sets of relevance judgments building sets large enough for evaluation of real world implementations is at best 
inefficient at worst infeasible in this work we link evaluation with test collection construction to gain an understanding of the minimal judging effort that must be done to have high confidence in the outcome of an evaluation a new way of looking at average precision leads to a natural algorithm for selecting documents to judge and allows us to estimate the degree of confidence by defining a distribution over possible document judgments a study with annotators shows that this method can be used by a small group of researchers to rank a set of systems in under three hours with 95 confidence information retrieval metrics such as average precision require large sets of relevance judgments to be accurately estimated building these sets is infeasible and often inefficient for many real world retrieval implementations we present a new way of looking at average precision that allows us to estimate the confidence in an evaluation based on the size of the test collection we use this to build an algorithm for selecting the best documents to judge to have maximum confidence in an evaluation with a minimal number of relevance judgments a study with annotators shows how the algorithm can be used by a small group of researchers to quickly rank a set of systems with 95 confidence", + "title_raw": "Minimal test collections for retrieval evaluation", + "abstract_raw": "Accurate estimation of information retrieval evaluation metrics such as average precision require large sets of relevance judgments. Building sets large enough for evaluation of real-world implementations is at best inefficient, at worst infeasible. In this work we link evaluation with test collection construction to gain an understanding of the minimal judging effort that must be done to have high confidence in the outcome of an evaluation. 
A new way of looking at average precision leads to a natural algorithm for selecting documents to judge and allows us to estimate the degree of confidence by defining a distribution over possible document judgments. A study with annotators shows that this method can be used by a small group of researchers to rank a set of systems in under three hours with 95% confidence. Information retrieval metrics such as average precision require large sets of relevance judgments to be accurately estimated. Building these sets is infeasible and often inefficient for many real-world retrieval implementations. We present a new way of looking at average precision that allows us to estimate the confidence in an evaluation based on the size of the test collection. We use this to build an algorithm for selecting the best documents to judge to have maximum confidence in an evaluation with a minimal number of relevance judgments. A study with annotators shows how the algorithm can be used by a small group of researchers to quickly rank a set of systems with 95% confidence.", + "link": "https://www.semanticscholar.org/paper/34ddfe4d4fbf4afa586220a21a921478c8dfab35", + "scraped_abstract": null, + "citation_best": 245 + }, + { + "paper": "2152652256", + "venue": "1131589359", + "year": "2006", + "title": "maximizing throughput in wireless networks via gossiping", + "label": [ + "130120984", + "113200698", + "13533509", + "108037233", + "61455927", + "120314980" + ], + "author": [ + "165625497", + "2155470367", + "250761399" + ], + "reference": [ + "1491996068", + "1523785148", + "1568961751", + "1603765807", + "1607044975", + "1901187898", + "1964932074", + "1982490730", + "2059120410", + "2070317877", + "2101517602", + "2105177639", + "2106387257", + "2106522282", + "2118844846", + "2119098504", + "2125690626", + "2134915467", + "2137789333", + "2153086189", + "2153419843", + "2157004711", + "2170178059", + "2435603672" + ], + "abstract": "a major challenge in the design of wireless 
networks is the need for distributed scheduling algorithms that will efficiently share the common spectrum recently a few distributed algorithms for networks in which a node can converse with at most a single neighbor at a time have been presented these algorithms guarantee 50 of the maximum possible throughput we present the first distributed scheduling framework that guarantees maximum throughput it is based on a combination of a distributed matching algorithm and an algorithm that compares and merges successive matching solutions the comparison can be done by a deterministic algorithm or by randomized gossip algorithms in the latter case the comparison may be inaccurate yet we show that if the matching and gossip algorithms satisfy simple conditions related to their performance and to the inaccuracy of the comparison respectively the framework attains the desired throughput it is shown that the complexities of our algorithms that achieve nearly 100 throughput are comparable to those of the algorithms that achieve 50 throughput finally we discuss extensions to general interference models even for such models the framework provides a simple distributed throughput optimal algorithm", + "title_raw": "Maximizing throughput in wireless networks via gossiping", + "abstract_raw": "A major challenge in the design of wireless networks is the need for distributed scheduling algorithms that will efficiently share the common spectrum. Recently, a few distributed algorithms for networks in which a node can converse with at most a single neighbor at a time have been presented. These algorithms guarantee 50% of the maximum possible throughput. We present the first distributed scheduling framework that guarantees maximum throughput. It is based on a combination of a distributed matching algorithm and an algorithm that compares and merges successive matching solutions. The comparison can be done by a deterministic algorithm or by randomized gossip algorithms. 
In the latter case, the comparison may be inaccurate. Yet, we show that if the matching and gossip algorithms satisfy simple conditions related to their performance and to the inaccuracy of the comparison (respectively), the framework attains the desired throughput.It is shown that the complexities of our algorithms, that achieve nearly 100% throughput, are comparable to those of the algorithms that achieve 50% throughput. Finally, we discuss extensions to general interference models. Even for such models, the framework provides a simple distributed throughput optimal algorithm.", + "link": "https://www.semanticscholar.org/paper/389227f2fc1aedf402d541eadf929ea4bb34fb24", + "scraped_abstract": null, + "citation_best": 135 + }, + { + "paper": "2103224511", + "venue": "1175089206", + "year": "2006", + "title": "to search or to crawl towards a query optimizer for text centric tasks", + "label": [ + "127705205", + "97854310", + "157692150", + "195807954", + "23123220" + ], + "author": [ + "94049422", + "2283615530", + "2138413764", + "2251396636" + ], + "reference": [ + "108157922", + "109105922", + "1489949474", + "1489992655", + "1517178556", + "1520232900", + "1586176254", + "1587932711", + "1943309769", + "1986828474", + "2016892599", + "2017726337", + "2026080185", + "2075261997", + "2079656678", + "2079806729", + "2081948558", + "2084812512", + "2086253379", + "2096891167", + "2103931177", + "2105423800", + "2105995744", + "2115461474", + "2116341550", + "2118020653", + "2118229812", + "2124673015", + "2125969310", + "2131006463", + "2137845970", + "2140796896", + "2148603752", + "2151007976", + "2152766222", + "2154148563", + "2158011814", + "2169015768", + "2322524800", + "2914621247", + "3017143921" + ], + "abstract": "text is ubiquitous and not surprisingly many important applications rely on textual data for a variety of tasks as a notable example information extraction applications derive structured relations from unstructured text as another example focused 
crawlers explore the web to locate pages about specific topics execution plans for text centric tasks follow two general paradigms for processing a text database either we can scan or crawl the text database or alternatively we can exploit search engine indexes and retrieve the documents of interest via carefully crafted queries constructed in task specific ways the choice between crawl and query based execution plans can have a substantial impact on both execution time and output completeness e g in terms of recall nevertheless this choice is typically ad hoc and based on heuristics or plain intuition in this paper we present fundamental building blocks to make the choice of execution plans for text centric tasks in an informed cost based way towards this goal we show how to analyze query and crawl based plans in terms of both execution time and output completeness we adapt results from random graph theory and statistics to develop a rigorous cost model for the execution plans our cost model reflects the fact that the performance of the plans depends on fundamental task specific properties of the underlying text databases we identify these properties and present efficient techniques for estimating the associated cost model parameters overall our approach helps predict the most appropriate execution plans for a task resulting in significant efficiency and output completeness benefits we complement our results with a large scale experimental evaluation for three important text centric tasks and over multiple real life data sets", + "title_raw": "To search or to crawl?: towards a query optimizer for text-centric tasks", + "abstract_raw": "Text is ubiquitous and, not surprisingly, many important applications rely on textual data for a variety of tasks. As a notable example, information extraction applications derive structured relations from unstructured text; as another example, focused crawlers explore the web to locate pages about specific topics. 
Execution plans for text-centric tasks follow two general paradigms for processing a text database: either we can scan, or 'crawl,\" the text database or, alternatively, we can exploit search engine indexes and retrieve the documents of interest via carefully crafted queries constructed in task-specific ways. The choice between crawl- and query-based execution plans can have a substantial impact on both execution time and output \"completeness\" (e.g., in terms of recall). Nevertheless, this choice is typically ad-hoc and based on heuristics or plain intuition. In this paper, we present fundamental building blocks to make the choice of execution plans for text-centric tasks in an informed, cost-based way. Towards this goal, we show how to analyze query- and crawl-based plans in terms of both execution time and output completeness. We adapt results from random-graph theory and statistics to develop a rigorous cost model for the execution plans. Our cost model reflects the fact that the performance of the plans depends on fundamental task-specific properties of the underlying text databases. We identify these properties and present efficient techniques for estimating the associated cost-model parameters. Overall, our approach helps predict the most appropriate execution plans for a task, resulting in significant efficiency and output completeness benefits. 
We complement our results with a large-scale experimental evaluation for three important text-centric tasks and over multiple real-life data sets.", + "link": "https://www.semanticscholar.org/paper/2cf5c0b0da99c469da716f1658179d0ee374d085", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2062658884", + "venue": "1166315290", + "year": "2006", + "title": "reflective physical prototyping through integrated design test and analysis", + "label": [ + "142778471", + "107457646", + "41022531", + "9496109", + "108265739", + "115903868", + "32833848" + ], + "author": [ + "2192055696", + "261822931", + "1974803209", + "2223812137", + "2223517655", + "667705006", + "2229290657" + ], + "reference": [ + "68915126", + "175675895", + "334380453", + "1500115195", + "1969152782", + "1972051027", + "1994547327", + "2007363763", + "2021304089", + "2026975390", + "2030390645", + "2043032678", + "2077548134", + "2080907289", + "2099529102", + "2106281550", + "2120973494", + "2125124735", + "2128922568", + "2133646464", + "2134063352", + "2134816385", + "2141385588", + "2161969471", + "2165051314", + "2167516148", + "2169732913", + "2171036849", + "2295685962", + "2568476927" + ], + "abstract": "prototyping is the pivotal activity that structures innovation collaboration and creativity in design prototypes embody design hypotheses and enable designers to test them framin design as a thinking by doing activity foregrounds iteration as a central concern this paper presents d tools a toolkit that embodies an iterative design centered approach to prototyping information appliances this work offers contributions in three areas first d tools introduces a statechart based visual design tool that provides a low threshold for early stage prototyping extensible through code for higher fidelity prototypes second our research introduces three important types of hardware extensibility at the hardware to pc interface the intra hardware communication level and the circuit level 
third d tools integrates design test and analysis of information appliances we have evaluated d tools through three studies a laboratory study with thirteen participants rebuilding prototypes of existing and emerging devices and by observing seven student teams who built prototypes with d tools", + "title_raw": "Reflective physical prototyping through integrated design, test, and analysis", + "abstract_raw": "Prototyping is the pivotal activity that structures innovation, collaboration, and creativity in design. Prototypes embody design hypotheses and enable designers to test them. Framin design as a thinking-by-doing activity foregrounds iteration as a central concern. This paper presents d.tools, a toolkit that embodies an iterative-design-centered approach to prototyping information appliances. This work offers contributions in three areas. First, d.tools introduces a statechart-based visual design tool that provides a low threshold for early-stage prototyping, extensible through code for higher-fidelity prototypes. Second, our research introduces three important types of hardware extensibility - at the hardware-to-PC interface, the intra-hardware communication level, and the circuit level. Third, d.tools integrates design, test, and analysis of information appliances. 
We have evaluated d.tools through three studies: a laboratory study with thirteen participants; rebuilding prototypes of existing and emerging devices; and by observing seven student teams who built prototypes with d.tools.", + "link": "https://www.semanticscholar.org/paper/33a028ba5630d569763e1db8aaaca81f9a76f7b9", + "scraped_abstract": null, + "citation_best": 300 + }, + { + "paper": "2146081216", + "venue": "1133523790", + "year": "2006", + "title": "trustworthy keyword search for regulatory compliant records retention", + "label": [ + "110432227", + "77088390", + "24493144", + "2778773198", + "130590232", + "23123220" + ], + "author": [ + "2151738296", + "2115052180", + "2078136799" + ], + "reference": [ + "100863554", + "155995321", + "1482214997", + "1495007564", + "1530504482", + "1531395322", + "1544843123", + "1569403765", + "1592117810", + "1980347087", + "1982889956", + "2049342105", + "2055385473", + "2079656678", + "2084367148", + "2101423905", + "2122416857", + "2124575832", + "2125040475", + "2138091434", + "2140795521", + "2150145391", + "2160484851", + "2171027248", + "2621280964" + ], + "abstract": "recent litigation and intense regulatory focus on secure retention of electronic records have spurred a rush to introduce write once read many worm storage devices for retaining business records such as electronic mail however simply storing records in worm storage is insuffcient to ensure that the records are trustworthy i e able to provide irrefutable proof and accurate details of past events specifically some form of index is needed for timely access to the records but unless the index is maintained securely the records can in effect be hidden or altered even if stored in worm storage in this paper we systematically analyze the requirements for establishing a trustworthy inverted index to enable keyword based search queries we propose a novel scheme for effcient creation of such an index and demonstrate through extensive simulations and experiments 
with an enterprise keyword search engine that the scheme can achieve online update speeds while maintaining good query performance in addition we present a secure index structure for multi keyword queries that supports insert lookup and range queries in time logarithmic in the number of documents", + "title_raw": "Trustworthy keyword search for regulatory-compliant records retention", + "abstract_raw": "Recent litigation and intense regulatory focus on secure retention of electronic records have spurred a rush to introduce Write-Once-Read-Many (WORM) storage devices for retaining business records such as electronic mail. However, simply storing records in WORM storage is insuffcient to ensure that the records are trustworthy, i.e., able to provide irrefutable proof and accurate details of past events. Specifically, some form of index is needed for timely access to the records, but unless the index is maintained securely, the records can in effect be hidden or altered, even if stored in WORM storage. In this paper, we systematically analyze the requirements for establishing a trustworthy inverted index to enable keyword-based search queries. We propose a novel scheme for effcient creation of such an index and demonstrate, through extensive simulations and experiments with an enterprise keyword search engine, that the scheme can achieve online update speeds while maintaining good query performance. 
In addition, we present a secure index structure for multi-keyword queries that supports insert, lookup and range queries in time logarithmic in the number of documents.", + "link": "https://www.semanticscholar.org/paper/32608c925ea1a4df1690249ddc410cdc5d6432d1", + "scraped_abstract": null, + "citation_best": 29 + }, + { + "paper": "2128941908", + "venue": "1135342153", + "year": "2006", + "title": "random sampling from a search engine s index", + "label": [ + "2778773198", + "97854310", + "124101348", + "187192777", + "197927960" + ], + "author": [ + "2268994745", + "2027890328" + ], + "reference": [ + "185324491", + "1510634602", + "1568495775", + "1598759141", + "1605217017", + "1659541576", + "1964038241", + "1983416950", + "2001351653", + "2019473674", + "2028716813", + "2033057584", + "2033747448", + "2056760934", + "2057767944", + "2069739265", + "2080676333", + "2091082553", + "2117850397", + "2122141283", + "2125125501", + "2134711723", + "2136059419", + "2138309709", + "2139964991", + "2144959234", + "2147164982", + "2154707336", + "2155711776", + "2247055361", + "2798909945", + "3137614770" + ], + "abstract": "we revisit a problem introduced by bharat and broder almost a decade ago how to sample random pages from a search engine s index using only the search engine s public interface such a primitive is particularly useful in creating objective benchmarks for search engines the technique of bharat and broder suffers from two well recorded biases it favors long documents and highly ranked documents in this paper we introduce two novel sampling techniques a lexicon based technique and a random walk technique our methods produce biased sample documents but each sample is accompanied by a corresponding weight which represents the probability of this document to be selected in the sample the samples in conjunction with the weights are then used to simulate near uniform samples to this end we resort to three well known monte carlo simulation methods rejection 
sampling importance sampling and the metropolis hastings algorithm we analyze our methods rigorously and prove that under plausible assumptions our techniques are guaranteed to produce near uniform samples from the search engine s index experiments on a corpus of 2 4 million documents substantiate our analytical findings and show that our algorithms do not have significant bias towards long or highly ranked documents we use our algorithms to collect fresh data about the relative sizes of google msn search and yahoo", + "title_raw": "Random sampling from a search engine's index", + "abstract_raw": "We revisit a problem introduced by Bharat and Broder almost a decade ago: how to sample random pages from a search engine's index using only the search engine's public interface? Such a primitive is particularly useful in creating objective benchmarks for search engines.The technique of Bharat and Broder suffers from two well recorded biases: it favors long documents and highly ranked documents. In this paper we introduce two novel sampling techniques: a lexicon-based technique and a random walk technique. Our methods produce biased sample documents, but each sample is accompanied by a corresponding \"weight\", which represents the probability of this document to be selected in the sample. The samples, in conjunction with the weights, are then used to simulate near-uniform samples. To this end, we resort to three well known Monte Carlo simulation methods: rejection sampling, importance sampling and the Metropolis-Hastings algorithm.We analyze our methods rigorously and prove that under plausible assumptions, our techniques are guaranteed to produce near-uniform samples from the search engine's index. Experiments on a corpus of 2.4 million documents substantiate our analytical findings and show that our algorithms do not have significant bias towards long or highly ranked documents. 
We use our algorithms to collect fresh data about the relative sizes of Google, MSN Search, and Yahoo!.", + "link": "https://www.semanticscholar.org/paper/7e936ca4efc5b4462b56a96078ef60b4027c8cd5", + "scraped_abstract": null, + "citation_best": 126 + }, + { + "paper": "1587673378", + "venue": "1184914352", + "year": "2005", + "title": "the max k armed bandit a new model of exploration applied to search heuristic selection", + "label": [ + "113200698", + "173801870", + "125583679" + ], + "author": [ + "126521001", + "2136337285" + ], + "reference": [ + "1497256448", + "1500657154", + "1515851193", + "1524690415", + "1598982666", + "1866349670", + "1966505186", + "2010189695", + "2014653572", + "2058879737", + "2077902449", + "2107072968", + "2121863487", + "2164483807", + "2168405694" + ], + "abstract": "the multiarmed bandit is often used as an analogy for the tradeoff between exploration and exploitation in search problems the classic problem involves allocating trials to the arms of a multiarmed slot machine to maximize the expected sum of rewards we pose a new variation of the multiarmed bandit the max k armed bandit in which trials must be allocated among the arms to maximize the expected best single sample reward of the series of trials motivation for the max k armed bandit is the allocation of restarts among a set of multistart stochastic search algorithms we present an analysis of this max k armed bandit showing under certain assumptions that the optimal strategy allocates trials to the observed best arm at a rate increasing double exponentially relative to the other arms this motivates an exploration strategy that follows a boltzmann distribution with an exponentially decaying temperature parameter we compare this exploration policy to policies that allocate trials to the observed best arm at rates faster and slower than double exponentially the results confirm for two scheduling domains that the double exponential increase in the rate of allocations to the 
observed best heuristic outperfonns the other approaches", + "title_raw": "The max K-armed bandit: a new model of exploration applied to search heuristic selection", + "abstract_raw": "The multiarmed bandit is often used as an analogy for the tradeoff between exploration and exploitation in search problems. The classic problem involves allocating trials to the arms of a multiarmed slot machine to maximize the expected sum of rewards. We pose a new variation of the multiarmed bandit--the Max K-Armed Bandit--in which trials must be allocated among the arms to maximize the expected best single sample reward of the series of trials. Motivation for the Max K-Armed Bandit is the allocation of restarts among a set of multistart stochastic search algorithms. We present an analysis of this Max K-Armed Bandit showing under certain assumptions that the optimal strategy allocates trials to the observed best arm at a rate increasing double exponentially relative to the other arms. This motivates an exploration strategy that follows a Boltzmann distribution with an exponentially decaying temperature parameter. We compare this exploration policy to policies that allocate trials to the observed best arm at rates faster (and slower) than double exponentially. 
The results confirm, for two scheduling domains, that the double exponential increase in the rate of allocations to the observed best heuristic outperfonns the other approaches.", + "link": "https://www.semanticscholar.org/paper/53b61519e4fa067036213badca926936c095cd1f", + "scraped_abstract": null, + "citation_best": 80 + }, + { + "paper": "2152263452", + "venue": "1188739475", + "year": "2005", + "title": "a hierarchical phrase based model for statistical machine translation", + "label": [ + "622187", + "28490314", + "148526163", + "24687705", + "53893814", + "130597682", + "60048249", + "135784402", + "204321447", + "51802942", + "203005215", + "2776224158" + ], + "author": [ + "2297564775" + ], + "reference": [ + "2430588", + "24102868", + "54534146", + "222053410", + "1498238796", + "1534482508", + "1631260214", + "1973923101", + "1997420744", + "2000566875", + "2006969979", + "2076749875", + "2101105183", + "2103237065", + "2116316001", + "2119168550", + "2146574666", + "2153653739", + "2154124206", + "2158195707", + "2158388102", + "2161792612", + "2405762604" + ], + "abstract": "we present a statistical phrase based translation model that uses hierarchical phrases phrases that contain subphrases the model is formally a synchronous context free grammar but is learned from a bitext without any syntactic information thus it can be seen as a shift to the formal machinery of syntax based translation systems without any linguistic commitment in our experiments using bleu as a metric the hierarchical phrase based model achieves a relative improvement of 7 5 over pharaoh a state of the art phrase based system", + "title_raw": "A Hierarchical Phrase-Based Model for Statistical Machine Translation", + "abstract_raw": "We present a statistical phrase-based translation model that uses hierarchical phrases---phrases that contain subphrases. The model is formally a synchronous context-free grammar but is learned from a bitext without any syntactic information. 
Thus it can be seen as a shift to the formal machinery of syntax-based translation systems without any linguistic commitment. In our experiments using BLEU as a metric, the hierarchical phrase-based model achieves a relative improvement of 7.5% over Pharaoh, a state-of-the-art phrase-based system.", + "link": "https://www.semanticscholar.org/paper/ad3d2f463916784d0c14a19936c1544309a0a440", + "scraped_abstract": null, + "citation_best": 1184 + }, + { + "paper": "2112103637", + "venue": "1163450153", + "year": "2005", + "title": "the bubble cursor enhancing target acquisition by dynamic resizing of the cursor s activation area", + "label": [ + "79403827", + "31972630", + "2779726219" + ], + "author": [ + "2115951828", + "2130130894" + ], + "reference": [ + "1594426410", + "1606635698", + "1828869659", + "2022416142", + "2024421080", + "2025904186", + "2076656908", + "2077057201", + "2101388024", + "2112824399", + "2122297397", + "2143280386", + "2161278853", + "2168443748", + "2179427518", + "2188481870", + "2493051178" + ], + "abstract": "we present the bubble cursor a new target acquisition technique based on area cursors the bubble cursor improves upon area cursors by dynamically resizing its activation area depending on the proximity of surrounding targets such that only one target is selectable at any time we also present two controlled experiments that evaluate bubble cursor performance in 1d and 2d target acquisition tasks in complex situations with multiple targets of varying layout densities results show that the bubble cursor significantly outperforms the point cursor and the object pointing technique 7 and that bubble cursor performance can be accurately modeled and predicted using fitts law", + "title_raw": "The bubble cursor: enhancing target acquisition by dynamic resizing of the cursor's activation area", + "abstract_raw": "We present the bubble cursor - a new target acquisition technique based on area cursors. 
The bubble cursor improves upon area cursors by dynamically resizing its activation area depending on the proximity of surrounding targets, such that only one target is selectable at any time. We also present two controlled experiments that evaluate bubble cursor performance in 1D and 2D target acquisition tasks, in complex situations with multiple targets of varying layout densities. Results show that the bubble cursor significantly outperforms the point cursor and the object pointing technique [7], and that bubble cursor performance can be accurately modeled and predicted using Fitts' law.", + "link": "https://www.semanticscholar.org/paper/b687cd3db23d4367557dbf80f6ecda72abab889b", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2153282521", + "venue": "1163450153", + "year": "2005", + "title": "examining task engagement in sensor based statistical models of human interruptibility", + "label": [ + "114289077", + "5366617", + "107457646", + "2778514511", + "41661131" + ], + "author": [ + "2158378096", + "2980248491", + "2038999308", + "2133454800", + "2109728142", + "2171298838" + ], + "reference": [ + "8887994", + "24931679", + "1523785268", + "1661871015", + "1778770750", + "1906182963", + "1982888377", + "1993139624", + "2016900109", + "2017337590", + "2020701895", + "2027687467", + "2029778954", + "2049633694", + "2055104609", + "2065575115", + "2105410655", + "2117400595", + "2122533268", + "2123504579", + "2127695051", + "2130259926", + "2141217749", + "2148814820", + "2162342643", + "2170642728", + "2752491485", + "2966207845", + "3017143921", + "3129711340" + ], + "abstract": "the computer and communication systems that office workers currently use tend to interrupt at inappropriate times or unduly demand attention because they have no way to determine when an interruption is appropriate sensor based statistical models of human interruptibility offer a potential solution to this problem prior work to examine such models has primarily 
reported results related to social engagement but it seems that task engagement is also important using an approach developed in our prior work on sensor based statistical models of human interruptibility we examine task engagement by studying programmers working on a realistic programming task after examining many potential sensors we implement a system to log low level input events in a development environment we then automatically extract features from these low level event logs and build a statistical model of interruptibility by correctly identifying situations in which programmers are non interruptible and minimizing cases where the model incorrectly estimates that a programmer is non interruptible we can support a reduction in costly interruptions while still allowing systems to convey notifications in a timely manner", + "title_raw": "Examining task engagement in sensor-based statistical models of human interruptibility", + "abstract_raw": "The computer and communication systems that office workers currently use tend to interrupt at inappropriate times or unduly demand attention because they have no way to determine when an interruption is appropriate. Sensor?based statistical models of human interruptibility offer a potential solution to this problem. Prior work to examine such models has primarily reported results related to social engagement, but it seems that task engagement is also important. Using an approach developed in our prior work on sensor?based statistical models of human interruptibility, we examine task engagement by studying programmers working on a realistic programming task. After examining many potential sensors, we implement a system to log low?level input events in a development environment. We then automatically extract features from these low?level event logs and build a statistical model of interruptibility. 
By correctly identifying situations in which programmers are non?interruptible and minimizing cases where the model incorrectly estimates that a programmer is non?interruptible, we can support a reduction in costly interruptions while still allowing systems to convey notifications in a timely manner.", + "link": "https://www.semanticscholar.org/paper/91dea60a7b92e56a0ebf6f4f2f9f3e099c51ee7a", + "scraped_abstract": null, + "citation_best": 108 + }, + { + "paper": "2138134117", + "venue": "1163450153", + "year": "2005", + "title": "making space for stories ambiguity in the design of personal communication systems", + "label": [ + "2780522230", + "170130773", + "2522767166", + "56739046" + ], + "author": [ + "2128165465", + "2141509576" + ], + "reference": [ + "66786231", + "120482494", + "142106382", + "146668256", + "1487721735", + "1607672348", + "1608825224", + "1741471588", + "1800997657", + "1806024143", + "1966069193", + "1982516639", + "1989866397", + "2000145975", + "2005219594", + "2012481411", + "2024642405", + "2039618015", + "2041545805", + "2044300367", + "2046790216", + "2068687198", + "2096894832", + "2099586043", + "2106632722", + "2110537656", + "2115929676", + "2121136903", + "2129835639", + "2142448246", + "2152127494", + "2158704040" + ], + "abstract": "pervasive personal communication technologies offer the potential for important social benefits for individual users but also the potential for significant social difficulties and costs in research on face to face social interaction ambiguity is often identified as an important resource for resolving social difficulties in this paper we discuss two design cases of personal communication systems one based on fieldwork of a commercial system and another based on an unrealized design concept the cases illustrate how user behavior concerning a particular social difficulty unexplained unresponsiveness can be influenced by technological issues that result in interactional ambiguity the cases also 
highlight the need to balance the utility of ambiguity against the utility of usability and communicative clarity", + "title_raw": "Making space for stories: ambiguity in the design of personal communication systems", + "abstract_raw": "Pervasive personal communication technologies offer the potential for important social benefits for individual users, but also the potential for significant social difficulties and costs. In research on face-to-face social interaction, ambiguity is often identified as an important resource for resolving social difficulties. In this paper, we discuss two design cases of personal communication systems, one based on fieldwork of a commercial system and another based on an unrealized design concept. The cases illustrate how user behavior concerning a particular social difficulty, unexplained unresponsiveness, can be influenced by technological issues that result in interactional ambiguity. The cases also highlight the need to balance the utility of ambiguity against the utility of usability and communicative clarity.", + "link": "https://www.semanticscholar.org/paper/6debd596b4fd0652e89f0139cde43c4cdc2d6ff1", + "scraped_abstract": null, + "citation_best": 147 + }, + { + "paper": "2150240046", + "venue": "1199533187", + "year": "2005", + "title": "automatic generation of suggestions for program investigation", + "label": [ + "42011625", + "199360897", + "115903868", + "97686452", + "43126263" + ], + "author": [ + "2136878537" + ], + "reference": [ + "186294409", + "303139982", + "1497690372", + "1562934601", + "1984248430", + "2019912855", + "2026117800", + "2050187629", + "2084707233", + "2096768134", + "2097602997", + "2099571428", + "2100849134", + "2103473833", + "2118349506", + "2118944299", + "2121234902", + "2123187470", + "2125682153", + "2133961160", + "2141558501", + "2148350542", + "2157402204", + "2157791244", + "2162310108", + "2248168004", + "2293624369", + "2496352033" + ], + "abstract": "before performing a modification 
task a developer usually has to investigate the source code of a system to understand how to carry out the task discovering the code relevant to a change task is costly because it is an inherently human activity whose success depends on a large number of unpredictable factors such as intuition and luck although studies have shown that effective developers tend to explore a program by following structural dependencies no methodology is available to guide their navigation through the typically hundreds of dependency paths found in a non trivial program in this paper we propose a technique to automatically propose and rank program elements that are potentially interesting to a developer investigating source code our technique is based on an analysis of the topology of structural dependencies in a program it takes as input a set of program elements of interest to a developer and produces a fuzzy set describing other elements of potential interest empirical evaluation of our technique indicates that it can help developers quickly select program elements worthy of investigation while avoiding less interesting ones", + "title_raw": "Automatic generation of suggestions for program investigation", + "abstract_raw": "Before performing a modification task, a developer usually has to investigate the source code of a system to understand how to carry out the task. Discovering the code relevant to a change task is costly because it is an inherently human activity whose success depends on a large number of unpredictable factors, such as intuition and luck. Although studies have shown that effective developers tend to explore a program by following structural dependencies, no methodology is available to guide their navigation through the typically hundreds of dependency paths found in a non-trivial program. In this paper, we propose a technique to automatically propose and rank program elements that are potentially interesting to a developer investigating source code. 
Our technique is based on an analysis of the topology of structural dependencies in a program. It takes as input a set of program elements of interest to a developer and produces a fuzzy set describing other elements of potential interest. Empirical evaluation of our technique indicates that it can help developers quickly select program elements worthy of investigation while avoiding less interesting ones.", + "link": "https://www.semanticscholar.org/paper/da78db4b0a02c3c7dfb95367eb4927a0e0eda79d", + "scraped_abstract": null, + "citation_best": 132 + }, + { + "paper": "2166140339", + "venue": "1199533187", + "year": "2005", + "title": "context and path sensitive memory leak detection", + "label": [ + "553261973", + "176649486", + "156731835", + "103088060", + "131843455", + "6943359", + "199519371", + "79403827" + ], + "author": [ + "2100556900", + "2067453598" + ], + "reference": [ + "65629371", + "1492018576", + "1579435874", + "1966982815", + "1969610731", + "2007181618", + "2028437460", + "2050320220", + "2076933173", + "2077850509", + "2082827821", + "2084974764", + "2087612811", + "2102674270", + "2138369269", + "2151360539", + "2164392635", + "2171999426", + "2914074464" + ], + "abstract": "we present a context and path sensitive algorithm for detecting memory leaks in programs with explicit memory management our leak detection algorithm is based on an underlying escape analysis any allocated location in a procedure p that is not deallocated in p and does not escape from p is leaked we achieve very precise context and path sensitivity by expressing our analysis using boolean constraints in experiments with six large open source projects our analysis produced 510 warnings of which 455 were unique memory leaks a false positive rate of only 10 8 a parallel implementation improves performance by over an order of magnitude on large projects over five million lines of code in the linux kernel is analyzed in 50 minutes", + "title_raw": "Context- and path-sensitive 
memory leak detection", + "abstract_raw": "We present a context- and path-sensitive algorithm for detecting memory leaks in programs with explicit memory management. Our leak detection algorithm is based on an underlying escape analysis: any allocated location in a procedure P that is not deallocated in P and does not escape from P is leaked. We achieve very precise context- and path-sensitivity by expressing our analysis using boolean constraints. In experiments with six large open source projects our analysis produced 510 warnings of which 455 were unique memory leaks, a false positive rate of only 10.8%. A parallel implementation improves performance by over an order of magnitude on large projects; over five million lines of code in the Linux kernel is analyzed in 50 minutes.", + "link": "https://www.semanticscholar.org/paper/92e0f9bf99f70b9c183677d704488922482de325", + "scraped_abstract": null, + "citation_best": 30 + }, + { + "paper": "2009489720", + "venue": "1199533187", + "year": "2005", + "title": "cute a concolic unit testing engine for c", + "label": [ + "148027188", + "161969638", + "2779639559", + "199360897", + "162319229", + "11219265", + "131301208" + ], + "author": [ + "2157836386", + "1702668839", + "2161553928" + ], + "reference": [ + "1515278398", + "1550112417", + "1557957257", + "1565952674", + "1593280365", + "1720848645", + "1845288303", + "2009007001", + "2096449544", + "2098115125", + "2098353690", + "2107089133", + "2107751709", + "2117058582", + "2118915305", + "2119717320", + "2131290359", + "2138721431", + "2142689455", + "2146641295", + "2161488870", + "2162120832", + "2171683519", + "2171718649", + "2172260321", + "2295903414" + ], + "abstract": "in unit testing a program is decomposed into units which are collections of functions a part of unit can be tested by generating inputs for a single entry function the entry function may contain pointer arguments in which case the inputs to the unit are memory graphs the paper addresses the 
problem of automating unit testing with memory graphs as inputs the approach used builds on previous work combining symbolic and concrete execution and more specifically using such a combination to generate test inputs to explore all feasible execution paths the current work develops a method to represent and track constraints that capture the behavior of a symbolic execution of a unit with memory graphs as inputs moreover an efficient constraint solver is proposed to facilitate incremental generation of such test inputs finally cute a tool implementing the method is described together with the results of applying cute to real world examples of c code", + "title_raw": "CUTE: a concolic unit testing engine for C", + "abstract_raw": "In unit testing, a program is decomposed into units which are collections of functions. A part of unit can be tested by generating inputs for a single entry function. The entry function may contain pointer arguments, in which case the inputs to the unit are memory graphs. The paper addresses the problem of automating unit testing with memory graphs as inputs. The approach used builds on previous work combining symbolic and concrete execution, and more specifically, using such a combination to generate test inputs to explore all feasible execution paths. The current work develops a method to represent and track constraints that capture the behavior of a symbolic execution of a unit with memory graphs as inputs. Moreover, an efficient constraint solver is proposed to facilitate incremental generation of such test inputs. 
Finally, CUTE, a tool implementing the method is described together with the results of applying CUTE to real-world examples of C code.", + "link": "https://www.semanticscholar.org/paper/398e6b26088cc62bf9bd8801146c3d6fa650b08a", + "scraped_abstract": null, + "citation_best": 282 + }, + { + "paper": "2170239024", + "venue": "1174403976", + "year": "2005", + "title": "data structure repair using goal directed reasoning", + "label": [ + "34127721", + "50341643", + "168065819", + "199360897", + "50712370", + "162319229", + "115903868" + ], + "author": [ + "1218819846", + "343541395" + ], + "reference": [ + "44016460", + "137102142", + "1492626646", + "1502752511", + "1507924933", + "1515932031", + "1537929875", + "1594664896", + "1980314090", + "1980627207", + "1983119041", + "2026024665", + "2029115409", + "2032204694", + "2033656974", + "2060440626", + "2069891156", + "2098010463", + "2098725244", + "2102362134", + "2103714221", + "2106718373", + "2106972913", + "2107635250", + "2108153346", + "2110753312", + "2135274583", + "2141071440", + "2143135004", + "2144274003", + "2151359037", + "2158848208", + "2161600851", + "2165413178", + "2170603094", + "2172212694", + "2205436351", + "2252148092", + "3159465050" + ], + "abstract": "data structure repair is a promising technique for enabling programs to execute successfully in the presence of otherwise fatal data structure corruption errors previous research in this field relied on the developer to write a specification to explicitly translate model repairs into concrete data structure repairs raising the possibility of 1 incorrect translations causing the supposedly repaired concrete data structures to be inconsistent and 2 repaired models with no corresponding concrete data structure representation we present a new repair algorithm that uses goal directed reasoning to automatically translate model repairs into concrete data structure repairs this new repair algorithm eliminates the possibility of incorrect 
translations and repaired models with no corresponding representation as concrete data structures", + "title_raw": "Data structure repair using goal-directed reasoning", + "abstract_raw": "Data structure repair is a promising technique for enabling programs to execute successfully in the presence of otherwise fatal data structure corruption errors. Previous research in this field relied on the developer to write a specification to explicitly translate model repairs into concrete data structure repairs, raising the possibility of 1) incorrect translations causing the supposedly repaired concrete data structures to be inconsistent, and 2) repaired models with no corresponding concrete data structure representation.We present a new repair algorithm that uses goal-directed reasoning to automatically translate model repairs into concrete data structure repairs. This new repair algorithm eliminates the possibility of incorrect translations and repaired models with no corresponding representation as concrete data structures.", + "link": "https://www.semanticscholar.org/paper/758c4c1101acec946c7db74b40b707cc2fc7a2d4", + "scraped_abstract": null, + "citation_best": 10 + }, + { + "paper": "2169952536", + "venue": "1174403976", + "year": "2005", + "title": "using structural context to recommend source code examples", + "label": [ + "134317101", + "133162039", + "146870623", + "192028432", + "73752529", + "199360897", + "150292731", + "121957198", + "115903868", + "43126263" + ], + "author": [ + "2260674364", + "2100357700" + ], + "reference": [ + "88959964", + "1585991568", + "1974758125", + "1992154356", + "2008250898", + "2038803348", + "2047280058", + "2063247670", + "2081052864", + "2086970510", + "2096360778", + "2097527179", + "2097997737", + "2115317346", + "2118944299", + "2143151143", + "2154962571", + "2157532207", + "2165871256", + "2166019458", + "2171733741" + ], + "abstract": "when coding to a framework developers often become stuck unsure of which class to 
subclass which objects to instantiate and which methods to call example code that demonstrates the use of the framework can help developers make progress on their task in this paper we describe an approach for locating relevant code in an example repository that is based on heuristically matching the structure of the code under development to the example code our tool improves on existing approaches in two ways first the structural context needed to query the repository is extracted automatically from the code freeing the developer from learning a query language or from writing their code in a particular style second the repository can be generated easily from existing applications we demonstrate the utility of this approach by reporting on a case study involving two subjects completing four programming tasks within the eclipse integrated development environment framework", + "title_raw": "Using structural context to recommend source code examples", + "abstract_raw": "When coding to a framework, developers often become stuck, unsure of which class to subclass, which objects to instantiate and which methods to call. Example code that demonstrates the use of the framework can help developers make progress on their task. In this paper, we describe an approach for locating relevant code in an example repository that is based on heuristically matching the structure of the code under development to the example code. Our tool improves on existing approaches in two ways. First, the structural context needed to query the repository is extracted automatically from the code, freeing the developer from learning a query language or from writing their code in a particular style. Second, the repository can be generated easily from existing applications. 
We demonstrate the utility of this approach by reporting on a case study involving two subjects completing four programming tasks within the Eclipse integrated development environment framework.", + "link": "https://www.semanticscholar.org/paper/93e22371c7ec667e4fcb969b4203a575d8780803", + "scraped_abstract": null, + "citation_best": 70 + }, + { + "paper": "2160938265", + "venue": "1174403976", + "year": "2005", + "title": "eliciting design requirements for maintenance oriented ides a detailed study of corrective and perfective maintenance tasks", + "label": [ + "167955471", + "548217200", + "101317890", + "207850805", + "115903868" + ], + "author": [ + "2980248491", + "2038999308", + "2117127927" + ], + "reference": [ + "122154057", + "1837732256", + "1990229061", + "1992154356", + "1993139624", + "2008149650", + "2024931533", + "2029316761", + "2037712120", + "2091280632", + "2120076005", + "2126922738", + "2127695051", + "2157922094", + "2160510992", + "2809840185", + "2983995785" + ], + "abstract": "recently several innovative tools have found their way into mainstream use in modern development environments however most of these tools have focused on creating and modifying code despite evidence that most of programmers time is spent understanding code as part of maintenance tasks if new tools were designed to directly support these maintenance tasks what types would be most helpful to find out a study of expert java programmers using eclipse was performed the study suggests that maintenance work consists of three activities 1 forming a working set of task relevant code fragments 2 navigating the dependencies within this working set and 3 repairing or creating the necessary code the study identified several trends in these activities as well as many opportunities for new tools that could save programmers up to 35 of the time they currently spend on maintenance tasks", + "title_raw": "Eliciting design requirements for maintenance-oriented IDEs: a detailed study 
of corrective and perfective maintenance tasks", + "abstract_raw": "Recently, several innovative tools have found their way into mainstream use in modern development environments. However, most of these tools have focused on creating and modifying code, despite evidence that most of programmers' time is spent understanding code as part of maintenance tasks. If new tools were designed to directly support these maintenance tasks, what types would be most helpful? To find out, a study of expert Java programmers using Eclipse was performed. The study suggests that maintenance work consists of three activities: (1) forming a working set of task-relevant code fragments; (2) navigating the dependencies within this working set; and (3) repairing or creating the necessary code. The study identified several trends in these activities, as well as many opportunities for new tools that could save programmers up to 35% of the time they currently spend on maintenance tasks.", + "link": "https://scholar.google.com/scholar?as_q=&num=10&btnG=Search+Scholar&as_epq=Eliciting+Design+Requirements+for+Maintenance-Oriented+IDEs:+A+Detailed+Study+of+Corrective+and+Perfective+Maintenance+Tasks&as_oq=&as_eq=&as_occt=any&as_sauthors=Ko", + "scraped_abstract": null, + "citation_best": 73 + }, + { + "paper": "1603364293", + "venue": "1203999783", + "year": "2005", + "title": "learning coordination classifiers", + "label": [ + "119857082", + "106135958", + "139532973", + "173102733", + "136134403", + "178980831", + "46686674", + "52620605", + "95623464" + ], + "author": [ + "2256485367", + "2019637813", + "1817936516" + ], + "reference": [ + "204750116", + "1480376833", + "1519789693", + "1542886316", + "1544324307", + "1560512119", + "1567512734", + "1817561967", + "1988790447", + "2105644991", + "2112076978", + "2114229504", + "2125055259", + "2132820034", + "2139823104", + "2145295623", + "2147880316", + "2148603752", + "2160664735", + "2211621381", + "2548695521", + "2962735828", + 
"3136726919" + ], + "abstract": "we present a new approach to ensemble classification that requires learning only a single base classifier the idea is to learn a classifier that simultaneously predicts pairs of test labels as opposed to learning multiple predictors for single test labels then coordinating the assignment of individual labels by propagating beliefs on a graph over the data we argue that the approach is statistically well motivated even for independent identically distributed iid data in fact we present experimental results that show improvements in classification accuracy over single example classifiers across a range of iid data sets and over a set of base classifiers like boosting the technique increases representational capacity while controlling variance through a principled form of classifier combination", + "title_raw": "Learning coordination classifiers", + "abstract_raw": "We present a new approach to ensemble classification that requires learning only a single base classifier. The idea is to learn a classifier that simultaneously predicts pairs of test labels--as opposed to learning multiple predictors for single test labels-- then coordinating the assignment of individual labels by propagating beliefs on a graph over the data. We argue that the approach is statistically well motivated, even for independent identically distributed (iid) data. In fact, we present experimental results that show improvements in classification accuracy over single-example classifiers, across a range of iid data sets and over a set of base classifiers. 
Like boosting, the technique increases representational capacity while controlling variance through a principled form of classifier combination.", + "link": "https://www.semanticscholar.org/paper/400b8ce909d235f61e96f595a21f6487b91b3715", + "scraped_abstract": null, + "citation_best": 7 + }, + { + "paper": "2912387951", + "venue": "1203999783", + "year": "2005", + "title": "solving checkers", + "label": [ + "2776291640", + "80444323" + ], + "author": [ + "2278512575", + "322517448", + "1975621190", + "2128994182", + "2913769118", + "2154195217", + "2233643341", + "2073314533" + ], + "reference": [ + "1540752020", + "1580378151", + "1633567050", + "1992249923", + "2082217430", + "2107952607", + "2116123331", + "2970412811", + "3094497466" + ], + "abstract": "ai has had notable success in building high performance game playing programs to complete against the best human players however the availability of fast and plentiful machines with large memories and disks creates the possibility of solving a game this has been done before for simple or relatively small games in this paper we present new ideas and algorithms for solving the game of checkers checkers is a popular game of skill with a search space of 1020 possible positions this paper reports on our first result one of the most challenging checkers openings has been solved the white doctor opening is a draw solving roughly 50 more openings will result in the game theoretic value of checkers being determined", + "title_raw": "Solving checkers", + "abstract_raw": "AI has had notable success in building high-performance game-playing programs to complete against the best human players. However, the availability of fast and plentiful machines with large memories and disks creates the possibility of solving a game. This has been done before for simple or relatively small games. In this paper, we present new ideas and algorithms for solving the game of checkers. 
Checkers is a popular game of skill with a search space of 1020 possible positions. This paper reports on our first result. One of the most challenging checkers openings has been solved-the White Doctor opening is a draw. Solving roughly 50 more openings will result in the game-theoretic value of checkers being determined.", + "link": "https://www.semanticscholar.org/paper/ff5e8efe430d079f94b22771d26ae639333ea1ad", + "scraped_abstract": null, + "citation_best": 51 + }, + { + "paper": "157725869", + "venue": "1203999783", + "year": "2005", + "title": "a probabilistic model of redundancy in information extraction", + "label": [ + "149271511", + "119857082", + "7797323", + "83702630", + "114289077", + "55439883", + "124101348", + "151956035", + "12267149", + "195807954" + ], + "author": [ + "2098223845", + "57747768", + "1998656315" + ], + "reference": [ + "50778051", + "197270748", + "1567365482", + "1980925709", + "2063918473", + "2103931177", + "2115461474", + "2148540243", + "2153635508", + "2162586529" + ], + "abstract": "unsupervised information extraction uie is the task of extracting knowledge from text without using hand tagged training examples a fundamental problem for both uie and supervised ie is assessing the probability that extracted information is correct in massive corpora such as the web the same extraction is found repeatedly in different documents how does this redundancy impact the probability of correctness this paper introduces a combinatorial balls andurns model that computes the impact of sample size redundancy and corroboration from multiple distinct extraction rules on the probability that an extraction is correct we describe methods for estimating the model s parameters in practice and demonstrate experimentally that for uie the model s log likelihoods are 15 times better on average than those obtained by pointwise mutual information pmi and the noisy or model used in previous work for supervised ie the model s performance is comparable to 
that of support vector machines and logistic regression", + "title_raw": "A probabilistic model of redundancy in information extraction", + "abstract_raw": "Unsupervised Information Extraction (UIE) is the task of extracting knowledge from text without using hand-tagged training examples. A fundamental problem for both UIE and supervised IE is assessing the probability that extracted information is correct. In massive corpora such as the Web, the same extraction is found repeatedly in different documents. How does this redundancy impact the probability of correctness?\r\n\r\nThis paper introduces a combinatorial \"balls-andurns\" model that computes the impact of sample size, redundancy, and corroboration from multiple distinct extraction rules on the probability that an extraction is correct. We describe methods for estimating the model's parameters in practice and demonstrate experimentally that for UIE the model's log likelihoods are 15 times better, on average, than those obtained by Pointwise Mutual Information (PMI) and the noisy-or model used in previous work. 
For supervised IE, the model's performance is comparable to that of Support Vector Machines, and Logistic Regression.", + "link": "https://www.semanticscholar.org/paper/310cd6a39b0539193561148cd9897b1953fa8b28", + "scraped_abstract": null, + "citation_best": 178 + }, + { + "paper": "1965343327", + "venue": "1158363782", + "year": "2005", + "title": "detecting bgp configuration faults with static analysis", + "label": [ + "97686452", + "31258907", + "104954878", + "178086212", + "74172769", + "2775896111", + "101396714", + "110875604", + "120314980", + "54108766" + ], + "author": [ + "1373210103", + "1998464616" + ], + "reference": [ + "149349835", + "207759855", + "1555030395", + "1836926064", + "1967656199", + "1980507101", + "1983405933", + "2004352814", + "2020769003", + "2024007412", + "2026392294", + "2029303516", + "2033871392", + "2065675749", + "2073939539", + "2095234341", + "2097514095", + "2113230746", + "2118632874", + "2141746193", + "2142065670", + "2143506299", + "2145343762", + "2145721479", + "2147497995", + "2148769607", + "2150336242", + "2151972741", + "2168618039" + ], + "abstract": "the internet is composed of many independent autonomous systems ases that exchange reachability information to destinations using the border gateway protocol bgp network operators in each as configure bgp routers to control the routes that are learned selected and announced to other routers faults in bgp configuration can cause forwarding loops packet loss and unintended paths between hosts each of which constitutes a failure of the internet routing infrastructure this paper describes the design and implementation of rcc the router configuration checker a tool that finds faults in bgp configurations using static analysis rcc detects faults by checking constraints that are based on a high level correctness specification rcc detects two broad classes of faults route validity faults where routers may learn routes that do not correspond to usable paths and path 
visibility faults where routers may fail to learn routes for paths that exist in the network rcc enables network operators to test and debug configurations before deploying them in an operational network improving on the status quo where most faults are detected only during operation rcc has been downloaded by more than sixty five network operators to date some of whom have shared their configurations with us we analyze network wide configurations from 17 different ases to detect a wide variety of faults and use these findings to motivate improvements to the internet routing infrastructure", + "title_raw": "Detecting BGP configuration faults with static analysis", + "abstract_raw": "The Internet is composed of many independent autonomous systems (ASes) that exchange reachability information to destinations using the Border Gateway Protocol (BGP). Network operators in each AS configure BGP routers to control the routes that are learned, selected, and announced to other routers. Faults in BGP configuration can cause forwarding loops, packet loss, and unintended paths between hosts, each of which constitutes a failure of the Internet routing infrastructure. This paper describes the design and implementation of rcc, the router configuration checker, a tool that finds faults in BGP configurations using static analysis. rcc detects faults by checking constraints that are based on a high-level correctness specification. rcc detects two broad classes of faults: route validity faults, where routers may learn routes that do not correspond to usable paths, and path visibility faults, where routers may fail to learn routes for paths that exist in the network. rcc enables network operators to test and debug configurations before deploying them in an operational network, improving on the status quo where most faults are detected only during operation. rcc has been downloaded by more than sixty-five network operators to date, some of whom have shared their configurations with us. 
We analyze network-wide configurations from 17 different ASes to detect a wide variety of faults and use these findings to motivate improvements to the Internet routing infrastructure.", + "link": "https://www.semanticscholar.org/paper/c43f7fc3e889ae125ad538d344a68638a4b50ed1", + "scraped_abstract": null, + "citation_best": 296 + }, + { + "paper": "2147278401", + "venue": "1127352206", + "year": "2005", + "title": "automatic pool allocation improving performance by controlling data structure layout in the heap", + "label": [ + "84388606", + "161969638", + "134757568", + "169590947", + "176950129", + "34339311", + "170553107", + "2778100165", + "173608175", + "115537543", + "7263679", + "33288326", + "68339613", + "162319229", + "176649486", + "75403996" + ], + "author": [ + "1221218874", + "1990061224" + ], + "reference": [ + "114095935", + "1507552563", + "1579435874", + "1583464938", + "1825457006", + "1974716894", + "1975514564", + "1981962444", + "1987691231", + "1998070736", + "2006638707", + "2010459485", + "2014530617", + "2019584651", + "2019854215", + "2028048148", + "2032688435", + "2035974062", + "2038897371", + "2040070287", + "2046699259", + "2063453797", + "2064853889", + "2074954052", + "2079078409", + "2085325584", + "2095115578", + "2095558030", + "2101462267", + "2114496697", + "2115597529", + "2116318340", + "2116730320", + "2117703621", + "2129133667", + "2131135493", + "2139165932", + "2152432695", + "2153185479", + "2153691881", + "2154554979", + "2161719857", + "2163365446", + "2166034429", + "2999857599", + "3031465972" + ], + "abstract": "this paper describes automatic pool allocation a transformation framework that segregates distinct instances of heap based data structures into seperate memory pools and allows heuristics to be used to partially control the internal layout of those data structures the primary goal of this work is performance improvement not automatic memory management and the paper makes several new contributions the key 
contribution is a new compiler algorithm for partitioning heap objects in imperative programs based on a context sensitive pointer analysis including a novel strategy for correct handling of indirect and potentially unsafe function calls the transformation does not require type safe programs and works for the full generality of c and c second the paper describes several optimizations that exploit data structure partitioning to further improve program performance third the paper evaluates how memory hierarchy behavior and overall program performance are impacted by the new transformations using a number of benchmarks and a few applications we find that compilation times are extremely low and overall running times for heap intensive programs speed up by 10 25 in many cases about 2x in two cases and more than 10x in two small benchmarks overall we believe this work provides a new framework for optimizing pointer intensive programs by segregating and controlling the layout of heap based data structures", + "title_raw": "Automatic pool allocation: improving performance by controlling data structure layout in the heap", + "abstract_raw": "This paper describes Automatic Pool Allocation, a transformation framework that segregates distinct instances of heap-based data structures into seperate memory pools and allows heuristics to be used to partially control the internal layout of those data structures. The primary goal of this work is performance improvement, not automatic memory management, and the paper makes several new contributions. The key contribution is a new compiler algorithm for partitioning heap objects in imperative programs based on a context-sensitive pointer analysis, including a novel strategy for correct handling of indirect (and potentially unsafe) function calls. The transformation does not require type safe programs and works for the full generality of C and C++. 
Second, the paper describes several optimizations that exploit data structure partitioning to further improve program performance. Third, the paper evaluates how memory hierarchy behavior and overall program performance are impacted by the new transformations. Using a number of benchmarks and a few applications, we find that compilation times are extremely low, and overall running times for heap intensive programs speed up by 10-25% in many cases, about 2x in two cases, and more than 10x in two small benchmarks. Overall, we believe this work provides a new framework for optimizing pointer intensive programs by segregating and controlling the layout of heap-based data structures.", + "link": "https://www.semanticscholar.org/paper/0261afd40eee66cea4ea682fab322a439a28f37d", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "1997199152", + "venue": "1127352206", + "year": "2005", + "title": "programming by sketching for bit streaming programs", + "label": [ + "26713055", + "135257023", + "489000", + "169590947", + "2778514511", + "199360897", + "180152950", + "35869016" + ], + "author": [ + "2166621873", + "2309975281", + "1115548098", + "171997091" + ], + "reference": [ + "89245623", + "1496267491", + "1561299197", + "1568192366", + "1587207268", + "1590740917", + "1964477602", + "1968628974", + "1988425770", + "2029414465", + "2043924273", + "2049890071", + "2091158003", + "2096070062", + "2103871060", + "2107072426", + "2129019158", + "2135144788", + "2143238865", + "2145739724", + "2148542244", + "2163491234", + "2163769417", + "2911857293" + ], + "abstract": "this paper introduces the concept of programming with sketches an approach for the rapid development of high performance applications this approach allows a programmer to write clean and portable reference code and then obtain a high quality implementation by simply sketching the outlines of the desired implementation subsequently a compiler automatically fills in the missing details while 
also ensuring that a completed sketch is faithful to the input reference code in this paper we develop streambit as a sketching methodology for the important class of bit streaming programs e g coding and cryptography a sketch is a partial specification of the implementation and as such it affords several benefits to programmer in terms of productivity and code robustness first a sketch is easier to write compared to a complete implementation second sketching allows the programmer to focus on exploiting algorithmic properties rather than on orchestrating low level details third a sketch aware compiler rejects buggy sketches thus improving reliability while allowing the programmer to quickly evaluate sophisticated implementation ideas we evaluated the productivity and performance benefits of our programming methodology in a user study where a group of novice streambit programmers competed with a group of experienced c programmers on implementing a cipher we learned that given the same time budget the ciphers developed in streambit ran 2 5x faster than ciphers coded in c we also produced implementations of des and serpent that were competitive with hand optimized implementations available in the public domain", + "title_raw": "Programming by sketching for bit-streaming programs", + "abstract_raw": "This paper introduces the concept of programming with sketches, an approach for the rapid development of high-performance applications. This approach allows a programmer to write clean and portable reference code, and then obtain a high-quality implementation by simply sketching the outlines of the desired implementation. Subsequently, a compiler automatically fills in the missing details while also ensuring that a completed sketch is faithful to the input reference code. 
In this paper, we develop StreamBit as a sketching methodology for the important class of bit-streaming programs (e.g., coding and cryptography).A sketch is a partial specification of the implementation, and as such, it affords several benefits to programmer in terms of productivity and code robustness. First, a sketch is easier to write compared to a complete implementation. Second, sketching allows the programmer to focus on exploiting algorithmic properties rather than on orchestrating low-level details. Third, a sketch-aware compiler rejects \"buggy\" sketches, thus improving reliability while allowing the programmer to quickly evaluate sophisticated implementation ideas.We evaluated the productivity and performance benefits of our programming methodology in a user-study, where a group of novice StreamBit programmers competed with a group of experienced C programmers on implementing a cipher. We learned that, given the same time budget, the ciphers developed in StreamBit ran 2.5x faster than ciphers coded in C. 
We also produced implementations of DES and Serpent that were competitive with hand optimized implementations available in the public domain.", + "link": "https://www.semanticscholar.org/paper/f663cf55bf1bc4e5aaa61d98e73ae64065ecd530", + "scraped_abstract": null, + "citation_best": 213 + }, + { + "paper": "2159668267", + "venue": "1184151122", + "year": "2005", + "title": "xml data exchange consistency and query answering", + "label": [ + "44883583", + "183068750", + "15845906", + "11508877", + "55348073", + "34716815", + "68699486", + "44540991", + "23123220" + ], + "author": [ + "2117861229", + "1971151415" + ], + "reference": [ + "1513019207", + "1579407214", + "1582776257", + "1583112008", + "1591926286", + "1593799327", + "1966294080", + "1969340050", + "1990391007", + "1992673035", + "2011039300", + "2019789808", + "2020228538", + "2050171146", + "2063039776", + "2068157134", + "2078015341", + "2098213459", + "2102729564", + "2103658959", + "2103849501", + "2107907225", + "2114616381", + "2133278242", + "2159686758", + "2159793945", + "2160442532", + "2160983447", + "2162294668", + "2162975743" + ], + "abstract": "data exchange is the problem of finding an instance of a target schema given an instance of a source schema and a specification of the relationship between the source and the target theoretical foundations of data exchange have recently been investigated for relational data in this paper we start looking into the basic properties of xml data exchange that is restructuring of xml documents that conform to a source dtd under a target dtd and answering queries written over the target schema we define xml data exchange settings in which source to target dependencies refer to the hierarchical structure of the data combining dtds and dependencies makes some xml data exchange settings inconsistent we investigate the consistency problem and determine its exact complexity we then move to query answering and prove a dichotomy theorem that classifies data 
exchange settings into those over which query answering is tractable and those over which it is conp complete depending on classes of regular expressions used in dtds furthermore for all tractable cases we give polynomial time algorithms that compute target xml documents over which queries can be answered", + "title_raw": "XML data exchange: consistency and query answering", + "abstract_raw": "Data exchange is the problem of finding an instance of a target schema, given an instance of a source schema and a specification of the relationship between the source and the target. Theoretical foundations of data exchange have recently been investigated for relational data.In this paper, we start looking into the basic properties of XML data exchange, that is, restructuring of XML documents that conform to a source DTD under a target DTD, and answering queries written over the target schema. We define XML data exchange settings in which source-to-target dependencies refer to the hierarchical structure of the data. Combining DTDs and dependencies makes some XML data exchange settings inconsistent. We investigate the consistency problem and determine its exact complexity.We then move to query answering, and prove a dichotomy theorem that classifies data exchange settings into those over which query answering is tractable, and those over which it is coNP-complete, depending on classes of regular expressions used in DTDs. 
Furthermore, for all tractable cases we give polynomial-time algorithms that compute target XML documents over which queries can be answered.", + "link": "https://www.semanticscholar.org/paper/c0a03b08028a87e1fc78edda7345f722d5920c77", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "1963658069", + "venue": "1140684652", + "year": "2005", + "title": "learning to estimate query difficulty including applications to missing content detection and distributed information retrieval", + "label": [ + "189430467", + "182861755", + "164120249", + "192939062", + "118689300", + "192028432", + "90288658", + "97854310", + "124101348", + "99016210", + "157692150", + "23123220" + ], + "author": [ + "205587521", + "2098016057", + "2088014474", + "2089199664" + ], + "reference": [ + "18749483", + "19293941", + "1517703053", + "1560724230", + "1577798322", + "1594031697", + "1605510967", + "1990388042", + "2030603245", + "2047221353", + "2053154970", + "2057577760", + "2079168273", + "2086253379", + "2096623622", + "2103569001", + "2112674530", + "2143584266", + "2150240006", + "2161270705", + "2799061466", + "2911964244", + "3085162807" + ], + "abstract": "in this article we present novel learning methods for estimating the quality of results returned by a search engine in response to a query estimation is based on the agreement between the top results of the full query and the top results of its sub queries we demonstrate the usefulness of quality estimation for several applications among them improvement of retrieval detecting queries for which no relevant content exists in the document collection and distributed information retrieval experiments on trec data demonstrate the robustness and the effectiveness of our learning algorithms", + "title_raw": "Learning to estimate query difficulty: including applications to missing content detection and distributed information retrieval", + "abstract_raw": "In this article we present novel learning methods for 
estimating the quality of results returned by a search engine in response to a query. Estimation is based on the agreement between the top results of the full query and the top results of its sub-queries. We demonstrate the usefulness of quality estimation for several applications, among them improvement of retrieval, detecting queries for which no relevant content exists in the document collection, and distributed information retrieval. Experiments on TREC data demonstrate the robustness and the effectiveness of our learning algorithms.", + "link": "https://www.semanticscholar.org/paper/a74f5c9922c1327b94c70149944e3340263de961", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2010859647", + "venue": "1131589359", + "year": "2005", + "title": "coupon replication systems", + "label": [ + "31258907", + "70440993" + ], + "author": [ + "17252502", + "1992645465" + ], + "reference": [ + "239964209", + "1508957056", + "1512460878", + "1803411289", + "1853723677", + "2010309395", + "2029948740", + "2068871408", + "2085728653", + "2103916378", + "2107176571", + "2112955560", + "2117047663", + "2151893328", + "2166245380", + "2166707941" + ], + "abstract": "motivated by the study of peer to peer file swarming systems a la bittorrent we introduce a probabilistic model of coupon replication systems these systems consist of users aiming to complete a collection of distinct coupons users are characterised by their current collection of coupons and leave the system once they complete their coupon collection the system evolution is then specified by describing how users of distinct types meet and which coupons get replicated upon such encounters for open systems with exogenous user arrivals we derive necessary and sufficient stability conditions in a layered scenario where encounters are between users holding the same number of coupons we also consider a system where encounters are between users chosen uniformly at random from the whole population we show 
that performance captured by sojourn time is asymptotically optimal in both systems as the number of coupon types becomes large we also consider closed systems with no exogenous user arrivals in a special scenario where users have only one missing coupon we evaluate the size of the population ultimately remaining in the system as the initial number of users n goes to infinity we show that this decreases geometrically with the number of coupons k in particular when the ratio k log n is above a critical threshold we prove that this number of left overs is of order log log n these results suggest that performance of file swarming systems does not depend critically on either altruistic user behavior or on load balancing strategies such as rarest first", + "title_raw": "Coupon replication systems", + "abstract_raw": "Motivated by the study of peer-to-peer file swarming systems a la BitTorrent, we introduce a probabilistic model of coupon replication systems. These systems consist of users, aiming to complete a collection of distinct coupons. Users are characterised by their current collection of coupons, and leave the system once they complete their coupon collection. The system evolution is then specified by describing how users of distinct types meet, and which coupons get replicated upon such encounters.For open systems, with exogenous user arrivals, we derive necessary and sufficient stability conditions in a layered scenario, where encounters are between users holding the same number of coupons. We also consider a system where encounters are between users chosen uniformly at random from the whole population. We show that performance, captured by sojourn time, is asymptotically optimal in both systems as the number of coupon types becomes large.We also consider closed systems with no exogenous user arrivals. 
In a special scenario where users have only one missing coupon, we evaluate the size of the population ultimately remaining in the system, as the initial number of users, N, goes to infinity. We show that this decreases geometrically with the number of coupons, K. In particular, when the ratio K/log(N) is above a critical threshold, we prove that this number of left-overs is of order log(log(N)).These results suggest that performance of file swarming systems does not depend critically on either altruistic user behavior, or on load balancing strategies such as rarest first.", + "link": "https://www.semanticscholar.org/paper/c9c63676b9006e47b8c3c844ea9ed2cd7e5f162e", + "scraped_abstract": null, + "citation_best": 48 + }, + { + "paper": "2110137598", + "venue": "1171178643", + "year": "2005", + "title": "rx treating bugs as allergies a safe method to survive software failures", + "label": [ + "1009929", + "149635348", + "111919701", + "93996380", + "193702766", + "2777904410", + "35869016", + "91587340", + "174220543", + "117447612" + ], + "author": [ + "2345606310", + "2595334369", + "1272052450", + "2155186186" + ], + "reference": [ + "11234669", + "46382711", + "1498585374", + "1502476232", + "1525451871", + "1551998755", + "1556616930", + "1589122423", + "1596182111", + "1655226010", + "1786189552", + "1949662805", + "1963836890", + "1967730132", + "1968054699", + "1977773183", + "1979868167", + "2002667367", + "2009394287", + "2010439775", + "2014462389", + "2016167891", + "2020115430", + "2024016608", + "2052915895", + "2053903896", + "2066660519", + "2070761976", + "2077409330", + "2081413727", + "2097829252", + "2097990218", + "2098809490", + "2100418159", + "2100970777", + "2101729333", + "2102323965", + "2104510605", + "2108795876", + "2109192777", + "2109739361", + "2114488210", + "2123532445", + "2126087831", + "2126523385", + "2129360963", + "2133201251", + "2140991542", + "2141071440", + "2142892618", + "2144984544", + "2151153404", + "2152386071", + 
"2156205360", + "2158848208", + "2166293939", + "2167338470", + "2171688871", + "2174598112", + "2296772319", + "3035757797" + ], + "abstract": "many applications demand availability unfortunately software failures greatly reduce system availability prior work on surviving software failures suffers from one or more of the following limitations required application restructuring inability to address deterministic software bugs unsafe speculation on program execution and long recovery time this paper proposes an innovative safe technique called rx which can quickly recover programs from many types of software bugs both deterministic and non deterministic our idea inspired from allergy treatment in real life is to rollback the program to a recent checkpoint upon a software failure and then to re execute the program in a modified environment we base this idea on the observation that many bugs are correlated with the execution environment and therefore can be avoided by removing the allergen from the environment rx requires few to no modifications to applications and provides programmers with additional feedback for bug diagnosis we have implemented rx on linux our experiments with four server applications that contain six bugs of various types show that rx can survive all the six software failures and provide transparent fast recovery within 0 017 0 16 seconds 21 53 times faster than the whole program restart approach for all but one case cvs in contrast the two tested alternatives a whole program restart approach and a simple rollback and re execution without environmental changes cannot successfully recover the three servers squid apache and cvs that contain deterministic bugs and have only a 40 recovery rate for the server mysql that contains a non deterministic concurrency bug additionally rx s checkpointing system is lightweight imposing small time and space overheads", + "title_raw": "Rx: treating bugs as allergies---a safe method to survive software failures", + 
"abstract_raw": "Many applications demand availability. Unfortunately, software failures greatly reduce system availability. Prior work on surviving software failures suffers from one or more of the following limitations: Required application restructuring, inability to address deterministic software bugs, unsafe speculation on program execution, and long recovery time.This paper proposes an innovative safe technique, called Rx, which can quickly recover programs from many types of software bugs, both deterministic and non-deterministic. Our idea, inspired from allergy treatment in real life, is to rollback the program to a recent checkpoint upon a software failure, and then to re-execute the program in a modified environment. We base this idea on the observation that many bugs are correlated with the execution environment, and therefore can be avoided by removing the \"allergen\" from the environment. Rx requires few to no modifications to applications and provides programmers with additional feedback for bug diagnosis.We have implemented RX on Linux. Our experiments with four server applications that contain six bugs of various types show that RX can survive all the six software failures and provide transparent fast recovery within 0.017-0.16 seconds, 21-53 times faster than the whole program restart approach for all but one case (CVS). In contrast, the two tested alternatives, a whole program restart approach and a simple rollback and re-execution without environmental changes, cannot successfully recover the three servers (Squid, Apache, and CVS) that contain deterministic bugs, and have only a 40% recovery rate for the server (MySQL) that contains a non-deterministic concurrency bug. 
Additionally, RX's checkpointing system is lightweight, imposing small time and space overheads.", + "link": "https://www.semanticscholar.org/paper/6c9f6ddaa08ac4cae34719b3500a44b2332b91d9", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2121178808", + "venue": "1171178643", + "year": "2005", + "title": "speculative execution in a distributed file system", + "label": [ + "152043487", + "141331961", + "111919701", + "553261973", + "15296174", + "12269588", + "82820731", + "2780940931" + ], + "author": [ + "1273945011", + "2101505567", + "1992040104" + ], + "reference": [ + "11234669", + "48317398", + "106162958", + "1556088231", + "1868801145", + "1946211190", + "1973371066", + "1973501242", + "1977838359", + "1982918197", + "2005373714", + "2010042648", + "2016403536", + "2053122765", + "2058068178", + "2077409330", + "2097589646", + "2100970777", + "2103499520", + "2104112849", + "2106018697", + "2107460938", + "2110137598", + "2114349788", + "2121133177", + "2124074197", + "2129269323", + "2131867938", + "2134882900", + "2136798749", + "2141024949", + "2161995137", + "2166509141", + "2169202567", + "2293069947", + "2296636214", + "2616698872" + ], + "abstract": "speculator provides linux kernel support for speculative execution it allows multiple processes to share speculative state by tracking causal dependencies propagated through inter process communication it guarantees correct execution by preventing speculative processes from externalizing output e g sending a network message or writing to the screen until the speculations on which that output depends have proven to be correct speculator improves the performance of distributed file systems by masking i o latency and increasing i o throughput rather than block during a remote operation a file system predicts the operation s result then uses speculator to checkpoint the state of the calling process and speculatively continue its execution based on the predicted result if the prediction 
is correct the checkpoint is discarded if it is incorrect the calling process is restored to the checkpoint and the operation is retried we have modified the client server and network protocol of two distributed file systems to use speculator for postmark and andrew style benchmarks speculative execution results in a factor of 2 performance improvement for nfs over local area networks and an order of magnitude improvement over wide area networks for the same benchmarks speculator enables the blue file system to provide the consistency of single copy file semantics and the safety of synchronous i o yet still outperform current distributed file systems with weaker consistency and safety", + "title_raw": "Speculative execution in a distributed file system", + "abstract_raw": "Speculator provides Linux kernel support for speculative execution. It allows multiple processes to share speculative state by tracking causal dependencies propagated through inter-process communication. It guarantees correct execution by preventing speculative processes from externalizing output, e.g., sending a network message or writing to the screen, until the speculations on which that output depends have proven to be correct. Speculator improves the performance of distributed file systems by masking I/O latency and increasing I/O throughput. Rather than block during a remote operation, a file system predicts the operation's result, then uses Speculator to checkpoint the state of the calling process and speculatively continue its execution based on the predicted result. If the prediction is correct, the checkpoint is discarded; if it is incorrect, the calling process is restored to the checkpoint, and the operation is retried. We have modified the client, server, and network protocol of two distributed file systems to use Speculator. 
For PostMark and Andrew-style benchmarks, speculative execution results in a factor of 2 performance improvement for NFS over local-area networks and an order of magnitude improvement over wide-area networks. For the same benchmarks, Speculator enables the Blue File System to provide the consistency of single-copy file semantics and the safety of synchronous I/O, yet still outperform current distributed file systems with weaker consistency and safety.", + "link": "https://www.semanticscholar.org/paper/8b9aaf102a6e28c39197ae6b95f7c29db923553a", + "scraped_abstract": null, + "citation_best": 96 + }, + { + "paper": "2165100126", + "venue": "1171178643", + "year": "2005", + "title": "vigilante end to end containment of internet worms", + "label": [ + "165696696", + "2777019822", + "38652104", + "176359209", + "152896618", + "2778419992", + "195917429", + "110875604", + "74296488" + ], + "author": [ + "3089181325", + "1970640687", + "2096975672", + "2338340454", + "2158515488", + "2147177291", + "2296249340" + ], + "reference": [ + "29275369", + "48317398", + "88694106", + "167667719", + "206265171", + "332253968", + "1490025813", + "1498585374", + "1502476232", + "1516506771", + "1520914943", + "1525451871", + "1529311848", + "1587981097", + "1597305440", + "1641762327", + "1655226010", + "1674877186", + "1774418490", + "1781758015", + "1994340575", + "2031006315", + "2033811087", + "2092043296", + "2097174997", + "2100666033", + "2100970777", + "2101699859", + "2103315222", + "2103919170", + "2109219878", + "2117002131", + "2117115928", + "2134006599", + "2137786570", + "2140611647", + "2142840915", + "2142892618", + "2155659292", + "2155750598", + "2159919478", + "2161728228", + "2163762767", + "2171264329", + "2171957559" + ], + "abstract": "worm containment must be automatic because worms can spread too fast for humans to respond recent work has proposed network level techniques to automate worm containment these techniques have limitations because there is no 
information about the vulnerabilities exploited by worms at the network level we propose vigilante a new end to end approach to contain worms automatically that addresses these limitations vigilante relies on collaborative worm detection at end hosts but does not require hosts to trust each other hosts run instrumented software to detect worms and broadcast self certifying alerts scas upon worm detection scas are proofs of vulnerability that can be inexpensively verified by any vulnerable host when hosts receive an sca they generate filters that block infection by analysing the sca guided execution of the vulnerable software we show that vigilante can automatically contain fast spreading worms that exploit unknown vulnerabilities without blocking innocuous traffic", + "title_raw": "Vigilante: end-to-end containment of internet worms", + "abstract_raw": "Worm containment must be automatic because worms can spread too fast for humans to respond. Recent work has proposed network-level techniques to automate worm containment; these techniques have limitations because there is no information about the vulnerabilities exploited by worms at the network level. We propose Vigilante, a new end-to-end approach to contain worms automatically that addresses these limitations. Vigilante relies on collaborative worm detection at end hosts, but does not require hosts to trust each other. Hosts run instrumented software to detect worms and broadcast self-certifying alerts (SCAs) upon worm detection. SCAs are proofs of vulnerability that can be inexpensively verified by any vulnerable host. When hosts receive an SCA, they generate filters that block infection by analysing the SCA-guided execution of the vulnerable software. 
We show that Vigilante can automatically contain fast-spreading worms that exploit unknown vulnerabilities without blocking innocuous traffic.", + "link": "https://www.semanticscholar.org/paper/cc9ae5b0b797acbdd901d213e6bbabc91547a6d7", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2042616792", + "venue": "1166315290", + "year": "2005", + "title": "automation and customization of rendered web pages", + "label": [ + "195274430", + "521306242", + "21959979", + "100158260", + "49774154", + "173576120", + "136764020", + "127613066", + "130436687", + "61096286" + ], + "author": [ + "1976994243", + "2156849624", + "1973826692", + "2506941997", + "2104582966" + ], + "reference": [ + "46681494", + "109947125", + "1530955034", + "1968940790", + "1996300450", + "2000756694", + "2024867010", + "2079333567", + "2102218101", + "2123718938", + "2157837521", + "2161433768", + "2167808397", + "2523075099" + ], + "abstract": "on the desktop an application can expect to control its user interface down to the last pixel but on the world wide web a content provider has no control over how the client will view the page once delivered to the browser this creates an opportunity for end users who want to automate and customize their web experiences but the growing complexity of web pages and standards prevents most users from realizing this opportunity we describe chickenfoot a programming system embedded in the firefox web browser which enables end users to automate customize and integrate web applications without examining their source code one way chickenfoot addresses this goal is a novel technique for identifying page components by keyword pattern matching we motivate this technique by studying how users name web page components and present a heuristic keyword matching algorithm that identifies the desired component from the user s name", + "title_raw": "Automation and customization of rendered web pages", + "abstract_raw": "On the desktop, an application can 
expect to control its user interface down to the last pixel, but on the World Wide Web, a content provider has no control over how the client will view the page, once delivered to the browser. This creates an opportunity for end-users who want to automate and customize their web experiences, but the growing complexity of web pages and standards prevents most users from realizing this opportunity. We describe Chickenfoot, a programming system embedded in the Firefox web browser, which enables end-users to automate, customize, and integrate web applications without examining their source code. One way Chickenfoot addresses this goal is a novel technique for identifying page components by keyword pattern matching. We motivate this technique by studying how users name web page components, and present a heuristic keyword matching algorithm that identifies the desired component from the user's name.", + "link": "https://www.semanticscholar.org/paper/b97b07e671bf1ddc2157f3860492100f3f9b0a4e", + "scraped_abstract": null, + "citation_best": 220 + }, + { + "paper": "2135552269", + "venue": "1133523790", + "year": "2005", + "title": "cache conscious frequent pattern mining on a modern processor", + "label": [ + "27602214", + "138101251", + "140763907", + "173608175", + "190290938", + "68339613", + "115537543", + "162319229", + "189783530" + ], + "author": [ + "2102101334", + "2224310768", + "2106796124", + "2304952236", + "2973325997", + "2137087333", + "2163667412" + ], + "reference": [ + "159524162", + "1483679765", + "1484413656", + "1506285740", + "1542978828", + "1568945110", + "1597161471", + "1645807896", + "1974716894", + "2016622853", + "2023612196", + "2025122101", + "2030969394", + "2038812321", + "2053727994", + "2064853889", + "2098130516", + "2099958604", + "2105220449", + "2108560469", + "2120230074", + "2126310301", + "2137077706", + "2138660495", + "2141115288", + "2155651153", + "2156026066", + "2158454296", + "2160404300", + "2166559705", + "2210278139", + 
"2295099251", + "2737719137", + "2788176369", + "3022377781" + ], + "abstract": "in this paper we examine the performance of frequent pattern mining algorithms on a modern processor a detailed performance study reveals that even the best frequent pattern mining implementations with highly efficient memory managers still grossly under utilize a modern processor the primary performance bottlenecks are poor data locality and low instruction level parallelism ilp we propose a cache conscious prefix tree to address this problem the resulting tree improves spatial locality and also enhances the benefits from hardware cache line prefetching furthermore the design of this data structure allows the use of a novel tiling strategy to improve temporal locality the result is an overall speedup of up to 3 2 when compared with state of the art implementations we then show how these algorithms can be improved further by realizing a non naive thread based decomposition that targets simultaneously multi threaded processors a key aspect of this decomposition is to ensure cache re use between threads that are co scheduled at a fine granularity this optimization affords an additional speedup of 50 resulting in an overall speedup of up to 4 8 to the best of our knowledge this effort is the first to target cache conscious data mining", + "title_raw": "Cache-conscious frequent pattern mining on a modern processor", + "abstract_raw": "In this paper, we examine the performance of frequent pattern mining algorithms on a modern processor. A detailed performance study reveals that even the best frequent pattern mining implementations, with highly efficient memory managers, still grossly under-utilize a modern processor. The primary performance bottlenecks are poor data locality and low instruction level parallelism (ILP). We propose a cache-conscious prefix tree to address this problem. 
The resulting tree improves spatial locality and also enhances the benefits from hardware cache line prefetching. Furthermore, the design of this data structure allows the use of a novel tiling strategy to improve temporal locality. The result is an overall speedup of up to 3.2 when compared with state-of-the-art implementations. We then show how these algorithms can be improved further by realizing a non-naive thread-based decomposition that targets simultaneously multi-threaded processors. A key aspect of this decomposition is to ensure cache re-use between threads that are co-scheduled at a fine granularity. This optimization affords an additional speedup of 50%, resulting in an overall speedup of up to 4.8. To the best of our knowledge, this effort is the first to target cache-conscious data mining.", + "link": "https://www.semanticscholar.org/paper/be6ae9ee29c7caee619eb5ec2ef525ddaf6934c1", + "scraped_abstract": null, + "citation_best": 69 + }, + { + "paper": "1977841655", + "venue": "1135342153", + "year": "2005", + "title": "three level caching for efficient query processing in large web search engines", + "label": [ + "93540628", + "164120249", + "192939062", + "118689300", + "157154645", + "97854310", + "13743948", + "115537543", + "99016210", + "157692150", + "130590232", + "23123220" + ], + "author": [ + "2127615133", + "702140476" + ], + "reference": [ + "96341830", + "1495124840", + "1502986177", + "1562093331", + "1565494300", + "1660390307", + "1751403574", + "1918432491", + "1970885105", + "1978063867", + "1991360400", + "2002252750", + "2003549201", + "2006608770", + "2006997130", + "2011039300", + "2018177083", + "2026095310", + "2029500199", + "2038807029", + "2046862025", + "2051296986", + "2053550438", + "2059387258", + "2066636486", + "2081534862", + "2086453025", + "2093698835", + "2096041903", + "2098025050", + "2106184308", + "2116504754", + "2119360906", + "2130242957", + "2130417465", + "2132069633", + "2135050452", + "2149328866", + 
"2160484851", + "2164542999", + "2164619507", + "2170344111", + "2621280964", + "2666600683", + "2891212941" + ], + "abstract": "large web search engines have to answer thousands of queries per second with interactive response times due to the sizes of the data sets involved often in the range of multiple terabytes a single query may require the processing of hundreds of megabytes or more of index data to keep up with this immense workload large search engines employ clusters of hundreds or thousands of machines and a number of techniques such as caching index compression and index and query pruning are used to improve scalability in particular two level caching techniques cache results of repeated identical queries at the frontend while index data for frequently used query terms are cached in each node at a lower level we propose and evaluate a three level caching scheme that adds an intermediate level of caching for additional performance gains this intermediate level attempts to exploit frequently occurring pairs of terms by caching intersections or projections of the corresponding inverted lists we propose and study several offline and online algorithms for the resulting weighted caching problem which turns out to be surprisingly rich in structure our experimental evaluation based on a large web crawl and real search engine query log shows significant performance gains for the best schemes both in isolation and in combination with the other caching levels we also observe that a careful selection of cache admission and eviction policies is crucial for best overall performance", + "title_raw": "Three-level caching for efficient query processing in large Web search engines", + "abstract_raw": "Large web search engines have to answer thousands of queries per second with interactive response times. 
Due to the sizes of the data sets involved, often in the range of multiple terabytes, a single query may require the processing of hundreds of megabytes or more of index data. To keep up with this immense workload, large search engines employ clusters of hundreds or thousands of machines, and a number of techniques such as caching, index compression, and index and query pruning are used to improve scalability. In particular, two-level caching techniques cache results of repeated identical queries at the frontend, while index data for frequently used query terms are cached in each node at a lower level.We propose and evaluate a three-level caching scheme that adds an intermediate level of caching for additional performance gains. This intermediate level attempts to exploit frequently occurring pairs of terms by caching intersections or projections of the corresponding inverted lists. We propose and study several offline and online algorithms for the resulting weighted caching problem, which turns out to be surprisingly rich in structure. Our experimental evaluation based on a large web crawl and real search engine query log shows significant performance gains for the best schemes, both in isolation and in combination with the other caching levels. 
We also observe that a careful selection of cache admission and eviction policies is crucial for best overall performance.", + "link": "https://www.semanticscholar.org/paper/c5ec6c9a03e005c8489ef95b919b619ba269c0b8", + "scraped_abstract": null, + "citation_best": 155 + }, + { + "paper": "3020921219", + "venue": "1184914352", + "year": "2004", + "title": "learning and inferring transportation routines", + "label": [ + "119857082", + "48677424", + "60229501", + "52421305", + "163836022", + "2776214188" + ], + "author": [ + "2125565904", + "2231782831", + "1966271946" + ], + "reference": [ + "1521536236", + "1531532259", + "1674411155", + "1735309676", + "1945360914", + "1980363510", + "2009155608", + "2058148593", + "2110575115", + "2126185296", + "2149020252" + ], + "abstract": "this paper introduces a hierarchical markov model that can learn and infer a user s daily movements through the community the model uses multiple levels of abstraction in order to bridge the gap between raw gps sensor measurements and high level information such as a user s mode of transportation or her goal we apply rao blackwellised particle filters for efficient inference both at the low level and at the higher levels of the hierarchy significant locations such as goals or locations where the user frequently changes mode of transportation are learned from gps data logs without requiring any manual labeling we show how to detect abnormal behaviors e g taking a wrong bus by concurrently tracking his activities with a trained and a prior model experiments show that our model is able to accurately predict the goals of a person and to recognize situations in which the user performs unknown activities", + "title_raw": "Learning and inferring transportation routines", + "abstract_raw": "This paper introduces a hierarchical Markov model that can learn and infer a user's daily movements through the community. 
The model uses multiple levels of abstraction in order to bridge the gap between raw GPS sensor measurements and high level information such as a user's mode of transportation or her goal. We apply Rao-Blackwellised particle filters for efficient inference both at the low level and at the higher levels of the hierarchy. Significant locations such as goals or locations where the user frequently changes mode of transportation are learned from GPS data logs without requiring any manual labeling. We show how to detect abnormal behaviors (e.g. taking a wrong bus) by concurrently tracking his activities with a trained and a prior model. Experiments show that our model is able to accurately predict the goals of a person and to recognize situations in which the user performs unknown activities.", + "link": "https://www.semanticscholar.org/paper/51c41c33908ebda05c4863474423a36d20c3b8ae", + "scraped_abstract": null, + "citation_best": 239 + }, + { + "paper": "2121147707", + "venue": "1188739475", + "year": "2004", + "title": "finding predominant word senses in untagged text", + "label": [ + "2778698081", + "2777743986", + "121934690", + "204321447", + "157659113", + "61249035" + ], + "author": [ + "2106968275", + "1993449033", + "668206901", + "2144459651" + ], + "reference": [ + "22702538", + "142000222", + "162532888", + "187228978", + "191422183", + "1479912259", + "1510514866", + "1560633476", + "1561908597", + "1938444495", + "1966907789", + "2026185168", + "2039217078", + "2042160362", + "2050712820", + "2065157922", + "2078546664", + "2094618127", + "2100935296", + "2113307939", + "2120973168", + "2140148956", + "2147332276", + "2166776180" + ], + "abstract": "in word sense disambiguation wsd the heuristic of choosing the most common sense is extremely powerful because the distribution of the senses of a word is often skewed the problem with using the predominant or first sense heuristic aside from the fact that it does not take surrounding context into account is 
that it assumes some quantity of hand tagged data whilst there are a few hand tagged corpora available for some languages one would expect the frequency distribution of the senses of words particularly topical words to depend on the genre and domain of the text under consideration we present work on the use of a thesaurus acquired from raw textual corpora and the wordnet similarity package to find predominant noun senses automatically the acquired predominant senses give a precision of 64 on the nouns of the senseval 2 english all words task this is a very promising result given that our method does not require any hand tagged text such as semcor furthermore we demonstrate that our method discovers appropriate predominant senses for words from two domain specific corpora", + "title_raw": "Finding Predominant Word Senses in Untagged Text", + "abstract_raw": "In word sense disambiguation (WSD), the heuristic of choosing the most common sense is extremely powerful because the distribution of the senses of a word is often skewed. The problem with using the predominant, or first sense heuristic, aside from the fact that it does not take surrounding context into account, is that it assumes some quantity of hand-tagged data. Whilst there are a few hand-tagged corpora available for some languages, one would expect the frequency distribution of the senses of words, particularly topical words, to depend on the genre and domain of the text under consideration. We present work on the use of a thesaurus acquired from raw textual corpora and the WordNet similarity package to find predominant noun senses automatically. The acquired predominant senses give a precision of 64% on the nouns of the SENSEVAL-2 English all-words task. This is a very promising result given that our method does not require any hand-tagged text, such as SemCor. 
Furthermore, we demonstrate that our method discovers appropriate predominant senses for words from two domain-specific corpora.", + "link": "https://www.semanticscholar.org/paper/1aac9a51700b4f548ed4d406d3987c8008876521", + "scraped_abstract": null, + "citation_best": 319 + }, + { + "paper": "2101305476", + "venue": "1158167855", + "year": "2004", + "title": "programmable imaging using a digital micromirror array", + "label": [ + "126422989", + "5339829", + "150627866", + "64434820", + "9417928", + "2776151529", + "2781399445", + "31972630", + "48983235" + ], + "author": [ + "2036349267", + "2955628093", + "729708866" + ], + "reference": [ + "1602153294", + "1658472922", + "1995330649", + "2015722957", + "2036562560", + "2080701608", + "2081251076", + "2089045444", + "2098693229", + "2103802376", + "2123977795", + "2137483959", + "2151149541", + "3098686204", + "3142504410", + "3151712549" + ], + "abstract": "we introduce the notion of a programmable imaging system such an imaging system provides a human user or a vision system significant control over the radiometric and geometric characteristics of the system this flexibility is achieved using a programmable array of micro mirrors the orientations of the mirrors of the array can be controlled with high precision over space and time this enables the system to select and modulate rays from the light field based on the needs of the application at hand we have implemented a programmable imaging system that uses a digital micro mirror device dmd which is used in digital light processing although the mirrors of this device can only be positioned in one of two states we show that our system can be used to implement a wide variety of imaging functions including high dynamic range imaging feature detection and object recognition we conclude with a discussion on how a micro mirror array can be used to efficiently control field of view without the use of moving parts", + "title_raw": "Programmable imaging using a digital 
micromirror array", + "abstract_raw": "We introduce the notion of a programmable imaging system. Such an imaging system provides a human user or a vision system significant control over the radiometric and geometric characteristics of the system. This flexibility is achieved using a programmable array of micro-mirrors. The orientations of the mirrors of the array can be controlled with high precision over space and time. This enables the system to select and modulate rays from the light field based on the needs of the application at hand. We have implemented a programmable imaging system that uses a digital micro-mirror device (DMD), which is used in digital light processing. Although the mirrors of this device can only be positioned in one of two states, we show that our system can be used to implement a wide variety of imaging functions, including, high dynamic range imaging, feature detection, and object recognition. We conclude with a discussion on how a micro-mirror array can be used to efficiently control field of view without the use of moving parts.", + "link": "https://www.semanticscholar.org/paper/5ca3bb0b4286923efc9bc59a84e3de7225b9c8d3", + "scraped_abstract": null, + "citation_best": 101 + }, + { + "paper": "2149612550", + "venue": "1199533187", + "year": "2004", + "title": "a classification system and analysis for aspect oriented programs", + "label": [ + "34127721", + "98183937", + "60051680", + "137955351", + "199360897", + "115903868" + ], + "author": [ + "343541395", + "1583515311", + "2051683724" + ], + "reference": [ + "1494264673", + "1505385311", + "1516285664", + "1584065846", + "1591331624", + "1610570299", + "1856289672", + "1968542268", + "1970681152", + "1971860760", + "1975914482", + "1991604845", + "1997502136", + "1999637392", + "2014530617", + "2023546887", + "2029414465", + "2036719919", + "2043100293", + "2057334711", + "2077850509", + "2082514774", + "2141399917", + "2143238865" + ], + "abstract": "we present a new classification 
system for aspect oriented programs this system characterizes the interactions between aspects and methods and identifies classes of interactions that enable modular reasoning about the crosscut program we argue that this system can help developers structure their understanding of aspect oriented programs and promotes their ability to reason productively about the consequences of crosscutting a program with a given aspect we have designed and implemented a program analysis system that automatically classifies interactions between aspects and methods and have applied this analysis to a set of benchmark programs we found that our analysis is able to 1 identify interactions with desirable properties such as lack of interference 2 identify potentially problematic interactions such as interference caused by the aspect and the method both writing the same field and 3 direct the developer s attention to the causes of such interactions", + "title_raw": "A classification system and analysis for aspect-oriented programs", + "abstract_raw": "We present a new classification system for aspect-oriented programs. This system characterizes the interactions between aspects and methods and identifies classes of interactions that enable modular reasoning about the crosscut program. We argue that this system can help developers structure their understanding of aspect-oriented programs and promotes their ability to reason productively about the consequences of crosscutting a program with a given aspect. We have designed and implemented a program analysis system that automatically classifies interactions between aspects and methods and have applied this analysis to a set of benchmark programs. 
We found that our analysis is able to 1) identify interactions with desirable properties (such as lack of interference), 2) identify potentially problematic interactions (such as interference caused by the aspect and the method both writing the same field), and 3) direct the developer's attention to the causes of such interactions.", + "link": "https://scholar.google.com/scholar?as_q=&num=10&btnG=Search+Scholar&as_epq=A+Classification+System+and+Analysis+for+Aspect-Oriented+Programs&as_oq=&as_eq=&as_occt=any&as_sauthors=Rinard", + "scraped_abstract": null, + "citation_best": 171 + }, + { + "paper": "2222782578", + "venue": "1199533187", + "year": "2004", + "title": "reasoning about partial goal satisfaction for requirements and design engineering", + "label": [ + "170130773", + "49937458", + "17777890", + "185172162", + "2777904410", + "199747065" + ], + "author": [ + "163926993", + "1227273176" + ], + "reference": [ + "1489227129", + "1504045958", + "1515119216", + "1624528677", + "1796487141", + "1801320017", + "1992580876", + "1992691094", + "1996832037", + "2005998857", + "2014626836", + "2021767672", + "2055888212", + "2080945770", + "2087422403", + "2105539612", + "2114775327", + "2115153124", + "2115179079", + "2117818414", + "2118926712", + "2124446794", + "2136463869", + "2139781850", + "2140796142", + "2146949683", + "2151451947", + "2158671725", + "2162943184", + "2168936936", + "2171627300", + "2301605390", + "2314870057", + "3003288124", + "3127288917" + ], + "abstract": "exploring alternative options is at the heart of the requirements and design processes different alternatives contribute to different degrees of achievement of non functional goals about system safety security performance usability and so forth such goals in general cannot be satisfied in an absolute clear cut sense various qualitative and quantitative frameworks have been proposed to support the assessment of alternatives for design decision making in general they lead to limited 
conclusions due to the lack of accuracy and measurability of goal formulations and the lack of impact propagation rules along goal contribution links the paper presents techniques for specifying partial degrees of goal satisfaction and for quantifying the impact of alternative system designs on the degree of goal satisfaction the approach consists in enriching goal refinement models with a probabilistic layer for reasoning about partial satisfaction within such models non functional goals are specified in a precise probabilistic way their specification is interpreted in terms of application specific measures impact of alternative goal refinements is evaluated in terms of refinement equations over random variables involved in the system s functional goals a systematic method is presented for guiding the elaboration of such models the latter can then be used to assess the impact of alternative decisions on the degree of goal satisfaction or to derive quantitative fine grained requirements on the software to achieve the higher level goals", + "title_raw": "Reasoning about partial goal satisfaction for requirements and design engineering", + "abstract_raw": "Exploring alternative options is at the heart of the requirements and design processes. Different alternatives contribute to different degrees of achievement of non-functional goals about system safety, security, performance, usability, and so forth. Such goals in general cannot be satisfied in an absolute, clear-cut sense. Various qualitative and quantitative frameworks have been proposed to support the assessment of alternatives for design decision making. In general they lead to limited conclusions due to the lack of accuracy and measurability of goal formulations and the lack of impact propagation rules along goal contribution links. The paper presents techniques for specifying partial degrees of goal satisfaction and for quantifying the impact of alternative system designs on the degree of goal satisfaction. 
The approach consists in enriching goal refinement models with a probabilistic layer for reasoning about partial satisfaction. Within such models, non-functional goals are specified in a precise, probabilistic way; their specification is interpreted in terms of application-specific measures; impact of alternative goal refinements is evaluated in terms of refinement equations over random variables involved in the system's functional goals. A systematic method is presented for guiding the elaboration of such models. The latter can then be used to assess the impact of alternative decisions on the degree of goal satisfaction or to derive quantitative, fine-grained requirements on the software to achieve the higher-level goals.", + "link": "https://www.semanticscholar.org/paper/38c52ba4fee560c1d1cd0f30e76c141d718eed70", + "scraped_abstract": null, + "citation_best": 81 + }, + { + "paper": "2157791244", + "venue": "1174403976", + "year": "2004", + "title": "sniafl towards a static non interactive approach to feature location", + "label": [ + "137287247", + "171981572", + "149091818", + "101814296", + "82214349", + "201515116", + "161743704", + "56909552", + "117447612", + "2776697782", + "7374053", + "123551368", + "4379982", + "101317890", + "43126263", + "46110900", + "529173508", + "124101348", + "52622490", + "76518257", + "186846655" + ], + "author": [ + "2698665668", + "2103091586", + "3199250783", + "2113724661", + "2157892594" + ], + "reference": [ + "1548254758", + "1557757161", + "1660390307", + "1822192794", + "1979346010", + "2001033929", + "2096012235", + "2099327151", + "2103473833", + "2106499802", + "2111643214", + "2117969453", + "2118944299", + "2121234902", + "2125682153", + "2128581098", + "2128990852", + "2136694367", + "2140792139", + "2147152072", + "2147777350", + "2148484494", + "2150231504", + "2150647484", + "2151318864", + "2153887189", + "2163960678" + ], + "abstract": "to facilitate software maintenance and evolution a helpfulstep is to 
locate features concerned in a particular maintenancetask in the literature both dynamic and interactive approacheshave been proposed for feature location in this paper wepresent a static and non interactive method for achieving thisobjective the main idea of our approach is to use theinformation retrieval ir technology to reveal the basicconnections between features and computational units in sourcecode due to the characteristics of the retrieved connections weuse a static representation of the source code named brcg tofurther recover both the relevant and the specific computationalunits for each feature furthermore we recover therelationships among the relevant units for each feature apremise of our approach is that programmers should usemeaningful names as identifiers we perform an experimentalstudy based on a gnu system to evaluate our approach in theexperimental study we present the detailed quantitativeexperimental data and give the qualitative analytical results", + "title_raw": "SNIAFL: towards a static non-interactive approach to feature location", + "abstract_raw": "To facilitate software maintenance and evolution, a helpfulstep is to locate features concerned in a particular maintenancetask. In the literature, both dynamic and interactive approacheshave been proposed for feature location. In this paper, wepresent a static and non-interactive method for achieving thisobjective. The main idea of our approach is to use theinformation retrieval (IR) technology to reveal the basicconnections between features and computational units in sourcecode. Due to the characteristics of the retrieved connections, weuse a static representation of the source code named BRCG tofurther recover both the relevant and the specific computationalunits for each feature. Furthermore, we recover therelationships among the relevant units for each feature. Apremise of our approach is that programmers should usemeaningful names as identifiers. 
We perform an experimentalstudy based on a GNU system to evaluate our approach. In theexperimental study, we present the detailed quantitativeexperimental data and give the qualitative analytical results.", + "link": "https://www.semanticscholar.org/paper/6bc8a70a655187e1f9fbcfbb8d592211eaf16b90", + "scraped_abstract": null, + "citation_best": 30 + }, + { + "paper": "2170830071", + "venue": "1174403976", + "year": "2004", + "title": "static checking of dynamically generated queries in database applications", + "label": [ + "35578498", + "98183937", + "2780685630", + "172722865", + "77088390", + "548217200", + "205295232", + "1462715", + "199360897", + "146658014", + "97686452", + "510870499" + ], + "author": [ + "2247030862", + "2102704429", + "1481757371" + ], + "reference": [ + "338991206", + "1491178396", + "1531064568", + "1809751277", + "1989058323", + "2002089154", + "2029786195", + "2033084853", + "2036134187", + "2057085278", + "2067949249", + "2069476565", + "2080573945", + "2084816051", + "2088968841", + "2096314727", + "2116098983", + "2124153277", + "2130466572", + "2131467114", + "2149237601" + ], + "abstract": "many data intensive applications dynamically constructqueries in response to client requests and execute them java servlets e g can create string representations ofsql queries and then send the queries using jdbc to adatabase server for execution the servlet programmer enjoysstatic checking via java s strong type system however the java type system does little to check for possible errorsin the dynamically generated sql query strings thus a type error in a generated selection query e g comparinga string attribute with an integer can result in an sqlruntime exception currently such defects must be rootedout through careful testing or worse might be found bycustomers at runtime in this paper we present a sound static program analysis technique to verify the correctnessof dynamically generated query strings we describe ouranalysis technique and 
provide soundness results for ourstatic analysis algorithm we also describe the details of aprototype tool based on the algorithm and present severalillustrative defects found in senior software engineeringstudent team projects online tutorial examples and a real worldpurchase order system written by one of the authors", + "title_raw": "Static checking of dynamically generated queries in database applications", + "abstract_raw": "Many data-intensive applications dynamically constructqueries in response to client requests and execute them.Java servlets, e.g., can create string representations ofSQL queries and then send the queries, using JDBC, to adatabase server for execution. The servlet programmer enjoysstatic checking via Java\u00fds strong type system. However,the Java type system does little to check for possible errorsin the dynamically generated SQL query strings. Thus,a type error in a generated selection query (e.g., comparinga string attribute with an integer) can result in an SQLruntime exception. Currently, such defects must be rootedout through careful testing, or (worse) might be found bycustomers at runtime. In this paper, we present a sound,static, program analysis technique to verify the correctnessof dynamically generated query strings. We describe ouranalysis technique and provide soundness results for ourstatic analysis algorithm. 
We also describe the details of aprototype tool based on the algorithm and present severalillustrative defects found in senior software-engineeringstudent-team projects, online tutorial examples, and a real-worldpurchase order system written by one of the authors.", + "link": "https://www.semanticscholar.org/paper/f9c7e21a3515b924e9d7bed2574a8ea79a3a79fc", + "scraped_abstract": null, + "citation_best": 75 + }, + { + "paper": "2105051842", + "venue": "1174403976", + "year": "2004", + "title": "a tool for writing and debugging algebraic specifications", + "label": [ + "170130773", + "22414024", + "548217200", + "168065819", + "122783720", + "2778514511", + "199360897", + "2777691520", + "116253237", + "136388014" + ], + "author": [ + "2137218346", + "2194186943" + ], + "reference": [ + "9614445", + "144724653", + "1536265389", + "1977912468", + "2035625685", + "2083411793", + "2129304537", + "2131958170", + "2134338128", + "2147543351", + "2148418519", + "2157888382", + "2162661340", + "2170108788", + "2171207016", + "2295399529", + "2613154132", + "3106729728" + ], + "abstract": "despite their benefits programmers rarely use formalspecifications because they are difficult to write and theyrequire an up front investment in time to address these issues we present a tool that helps programmers write anddebug algebraic specifications given an algebraic specification our tool instantiates a prototype that can be used just like any regular java class the tool can also modifyan existing application to use the prototype generatedby the interpreter instead of a hand coded implementation the tool improves the usability of algebraic specificationsin the following ways i a programmer can run an algebraicspecification to study its behavior the tool reportsin which way a specification is incomplete for a client application ii the tool can check whether a specification anda hand coded implementation behave the same for a particularrun of a client application iii a prototype can 
beused when a hand coded implementation is not yet available two case studies demonstrate how to use the tool", + "title_raw": "A tool for writing and debugging algebraic specifications", + "abstract_raw": "Despite their benefits, programmers rarely use formalspecifications, because they are difficult to write and theyrequire an up front investment in time. To address these issues,we present a tool that helps programmers write anddebug algebraic specifications. Given an algebraic specification, our tool instantiates a prototype that can be used just like any regular Java class. The tool can also modifyan existing application to use the prototype generatedby the interpreter instead of a hand-coded implementation.The tool improves the usability of algebraic specificationsin the following ways: (i) A programmer can \"run\" an algebraicspecification to study its behavior. The tool reportsin which way a specification is incomplete for a client application.(ii) The tool can check whether a specification anda hand-coded implementation behave the same for a particularrun of a client application. 
(iii) A prototype can beused when a hand-coded implementation is not yet available.Two case studies demonstrate how to use the tool.", + "link": "https://www.semanticscholar.org/paper/6bc0de4121854d36944dc58fcbb03e82fea2f972", + "scraped_abstract": null, + "citation_best": 13 + }, + { + "paper": "2145742469", + "venue": "1174403976", + "year": "2004", + "title": "extending the representational state transfer rest architectural style for decentralized systems", + "label": [ + "529173508", + "65399332", + "174683762", + "97382630", + "149091818", + "35869016", + "120314980", + "72280835" + ], + "author": [ + "2421131417", + "2113585066" + ], + "reference": [ + "23685451", + "46928291", + "54942354", + "83070612", + "123521928", + "175995597", + "331011677", + "356505800", + "846315960", + "1481078647", + "1483061398", + "1484739396", + "1491178396", + "1506905908", + "1508163308", + "1510320179", + "1515932031", + "1519197312", + "1522146512", + "1530955034", + "1544328988", + "1546468069", + "1548778571", + "1548991402", + "1560685296", + "1561433714", + "1569084524", + "1577231857", + "1584247638", + "1588904986", + "1590810061", + "1592740644", + "1596634728", + "1601376565", + "1603147766", + "1642192185", + "1649645444", + "1653446932", + "1661949323", + "1721796500", + "1895309945", + "1973501242", + "1978553093", + "1983587324", + "1987108387", + "1987800030", + "1991895580", + "1995875735", + "2001291669", + "2001474264", + "2005216655", + "2006688715", + "2014653795", + "2018501701", + "2020765652", + "2022907112", + "2026569863", + "2035362408", + "2039284421", + "2040311234", + "2051629332", + "2053903896", + "2054584570", + "2055988264", + "2057144245", + "2059557122", + "2067580069", + "2070136743", + "2078710802", + "2091981921", + "2093973064", + "2095343473", + "2096894832", + "2111427271", + "2112053513", + "2112578244", + "2115241120", + "2118828464", + "2120490947", + "2131185826", + "2131929623", + "2135044397", + "2137847645", + "2140251122", + 
"2141375288", + "2142822878", + "2144846366", + "2145220267", + "2150816008", + "2150889907", + "2151101658", + "2153718771", + "2153926311", + "2157765381", + "2158419323", + "2163059190", + "2169856608", + "2170362097", + "2170496240", + "2267707107", + "2296636214", + "2611515161", + "2613190526", + "2798777860", + "2913877777", + "2983995785", + "2988545412", + "3137092842", + "3160366303" + ], + "abstract": "because it takes time and trust to establish agreement traditional consensus based architectural styles cannotsafely accommodate resources that change faster than ittakes to transmit notification of that change nor resourcesthat must be shared across independent agencies the alternative is decentralization permitting independentagencies to make their own decisions ourdefinition contrasts with that of distribution in whichseveral agents share control of a single decision ultimately the physical limits of network latency and thesocial limits of independent agency call for solutions thatcan accommodate multiple values for the same variable our approach to this challenge is architectural proposingconstraints on the configuration of componentsand connectors to induce particular desired properties ofthe whole application specifically we present implement and evaluate variations of the world wide web srepresentational state transfer rest architecturalstyle that support distributed and decentralized systems", + "title_raw": "Extending the Representational State Transfer (REST) architectural style for decentralized systems", + "abstract_raw": "Because it takes time and trust to establish agreement,traditional consensus-based architectural styles cannotsafely accommodate resources that change faster than ittakes to transmit notification of that change, nor resourcesthat must be shared across independent agencies.The alternative is decentralization: permitting independentagencies to make their own decisions. 
Ourdefinition contrasts with that of distribution, in whichseveral agents share control of a single decision.Ultimately, the physical limits of network latency and thesocial limits of independent agency call for solutions thatcan accommodate multiple values for the same variable.Our approach to this challenge is architectural: proposingconstraints on the configuration of componentsand connectors to induce particular desired properties ofthe whole application. Specifically, we present, implement,and evaluate variations of the World Wide Web\u00fdsREpresentational State Transfer (REST) architecturalstyle that support distributed and decentralized systems.", + "link": "https://www.semanticscholar.org/paper/8e5b4f7c4947fbf6bdbd967c0f9ad38296262c09", + "scraped_abstract": null, + "citation_best": 89 + }, + { + "paper": "2058632086", + "venue": "1158363782", + "year": "2004", + "title": "trickle a self regulating algorithm for code propagation and maintenance in wireless sensor networks", + "label": [ + "31258907", + "167955471", + "158379750", + "2778335787", + "24590314", + "186353149", + "2778807401", + "2779317670", + "32295351", + "11413529", + "79403827" + ], + "author": [ + "2052109716", + "2173235739", + "348251026", + "719828399" + ], + "reference": [ + "151966512", + "1509885357", + "1580242306", + "1986805195", + "2014550373", + "2038562061", + "2091936226", + "2097562715", + "2102672769", + "2103662601", + "2104532741", + "2105338061", + "2113818210", + "2120454288", + "2124705135", + "2137358897", + "2144781367", + "2158416522", + "2167396179", + "2296427920" + ], + "abstract": "we present trickle an algorithm for propagating and maintaining code updates in wireless sensor networks borrowing techniques from the epidemic gossip scalable multicast and wireless broadcast literature trickle uses a polite gossip policy where motes periodically broadcast a code summary to local neighbors but stay quiet if they have recently heard a summary identical to theirs when 
a mote hears an older summary than its own it broadcasts an update instead of flooding a network with packets the algorithm controls the send rate so each mote hears a small trickle of packets just enough to stay up to date we show that with this simple mechanism trickle can scale to thousand fold changes in network density propagate new code in the order of seconds and impose a maintenance cost on the order of a few sends an hour", + "title_raw": "Trickle: a self-regulating algorithm for code propagation and maintenance in wireless sensor networks", + "abstract_raw": "We present Trickle, an algorithm for propagating and maintaining code updates in wireless sensor networks. Borrowing techniques from the epidemic/gossip, scalable multicast, and wireless broadcast literature, Trickle uses a \"polite gossip\" policy, where motes periodically broadcast a code summary to local neighbors but stay quiet if they have recently heard a summary identical to theirs. When a mote hears an older summary than its own, it broadcasts an update. Instead of flooding a network with packets, the algorithm controls the send rate so each mote hears a small trickle of packets, just enough to stay up to date. 
We show that with this simple mechanism, Trickle can scale to thousand-fold changes in network density, propagate new code in the order of seconds, and impose a maintenance cost on the order of a few sends an hour.", + "link": "https://scholar.google.com/scholar?as_q=&num=10&btnG=Search+Scholar&as_epq=Trickle:+A+Self-Regulating+Algorithm+for+Code+Propagation+and+Maintenance+in+Wireless+Sensor+Networks&as_oq=&as_eq=&as_occt=any&as_sauthors=Levis", + "scraped_abstract": null, + "citation_best": 1144 + }, + { + "paper": "2174598112", + "venue": "1185109434", + "year": "2004", + "title": "recovering device drivers", + "label": [ + "149635348", + "49585438", + "98234853", + "2779960059", + "144559511", + "167677733", + "136197465" + ], + "author": [ + "2304722209", + "2473372968", + "1988425031", + "737098973" + ], + "reference": [ + "19830081", + "1497542366", + "1515932031", + "1537929875", + "1558202157", + "1977773183", + "1979594720", + "2002915275", + "2026114446", + "2026527406", + "2076265406", + "2079029390", + "2098735855", + "2107635250", + "2108795876", + "2109739361", + "2114488210", + "2119892028", + "2122960384", + "2130934773", + "2144977275", + "2144984544", + "2145659622", + "2146878883", + "2151153404", + "2153531096", + "2153950928", + "2154817671", + "2159412780", + "2159477904", + "2160275438", + "2256687803" + ], + "abstract": "this paper presents a new mechanism that enables applications to run correctly when device drivers fail because device drivers are the principal failing component in most systems reducing driver induced failures greatly improves overall reliability earlier work has shown that an operating system can survive driver failures 33 but the applications that depend on them cannot thus while operating system reliability was greatly improved application reliability generally was not to remedy this situation we introduce a new operating system mechanism called a shadow driver a shadow driver monitors device drivers and transparently 
recovers from driver failures moreover it assumes the role of the failed driver during recovery in this way applications using the failed driver as well as the kernel itself continue to function as expected we implemented shadow drivers for the linux operating system and tested them on over a dozen device drivers our results show that applications and the os can indeed survive the failure of a variety of device drivers moreover shadow drivers impose minimal performance overhead lastly they can be introduced with only modest changes to the os kernel and with no changes at all to existing device drivers", + "title_raw": "Recovering device drivers", + "abstract_raw": "This paper presents a new mechanism that enables applications to run correctly when device drivers fail. Because device drivers are the principal failing component in most systems, reducing driver-induced failures greatly improves overall reliability. Earlier work has shown that an operating system can survive driver failures [33], but the applications that depend on them cannot. Thus, while operating system reliability was greatly improved, application reliability generally was not.\r\n\r\nTo remedy this situation, we introduce a new operating system mechanism called a shadow driver. A shadow driver monitors device drivers and transparently recovers from driver failures. Moreover, it assumes the role of the failed driver during recovery. In this way, applications using the failed driver, as well as the kernel itself, continue to function as expected.\r\n\r\nWe implemented shadow drivers for the Linux operating system and tested them on over a dozen device drivers. Our results show that applications and the OS can indeed survive the failure of a variety of device drivers. Moreover, shadow drivers impose minimal performance overhead. 
Lastly, they can be introduced with only modest changes to the OS kernel and with no changes at all to existing device drivers.", + "link": "https://www.semanticscholar.org/paper/92990fd84ab0f6824b47cd35f6693f23f18ae334", + "scraped_abstract": null, + "citation_best": 158 + }, + { + "paper": "2158600037", + "venue": "1127352206", + "year": "2004", + "title": "cloning based context sensitive pointer alias analysis using binary decision diagrams", + "label": [ + "139571649", + "2779758555", + "98183937", + "87468716", + "161969638", + "148230440", + "102379954", + "548217200", + "131843455", + "3309909", + "199360897", + "38754835", + "7263679", + "128838566", + "80444323" + ], + "author": [ + "2262305730", + "2136170053" + ], + "reference": [ + "4015096", + "10127936", + "19219339", + "1505616861", + "1514258760", + "1536098516", + "1543471126", + "1544565902", + "1576135198", + "1584814208", + "1691700263", + "1734116059", + "1992810975", + "1998162300", + "1998915208", + "2014530617", + "2018449188", + "2032504915", + "2069476565", + "2074954052", + "2075309900", + "2077850509", + "2078739669", + "2080267935", + "2080573945", + "2087612811", + "2095115578", + "2125403226", + "2126922738", + "2131135493", + "2136503680", + "2141472133", + "2151360539", + "2155036144", + "2156549049", + "2162621793", + "2171240827", + "2914074464" + ], + "abstract": "this paper presents the first scalable context sensitive inclusion based pointer alias analysis for java programs our approach to context sensitivity is to create a clone of a method for every context of interest and run a context insensitive algorithm over the expanded call graph to get context sensitive results for precision we generate a clone for every acyclic path through a program s call graph treating methods in a strongly connected component as a single node normally this formulation is hopelessly intractable as a call graph often has 10 14 acyclic paths or more we show that these exponential relations can be 
computed efficiently using binary decision diagrams bdds key to the scalability of the technique is a context numbering scheme that exposes the commonalities across contexts we applied our algorithm to the most popular applications available on sourceforge and found that the largest programs with hundreds of thousands of java bytecodes can be analyzed in under 20 minutes this paper shows that pointer analysis and many other queries and algorithms can be described succinctly and declaratively using datalog a logic programming language we have developed a system called bddbddb that automatically translates datalog programs into highly efficient bdd implementations we used this approach to develop a variety of context sensitive algorithms including side effect analysis type analysis and escape analysis", + "title_raw": "Cloning-based context-sensitive pointer alias analysis using binary decision diagrams", + "abstract_raw": "This paper presents the first scalable context-sensitive, inclusion-based pointer alias analysis for Java programs. Our approach to context sensitivity is to create a clone of a method for every context of interest, and run a context-insensitive algorithm over the expanded call graph to get context-sensitive results. For precision, we generate a clone for every acyclic path through a program's call graph, treating methods in a strongly connected component as a single node. Normally, this formulation is hopelessly intractable as a call graph often has 10 14 acyclic paths or more. We show that these exponential relations can be computed efficiently using binary decision diagrams (BDDs). Key to the scalability of the technique is a context numbering scheme that exposes the commonalities across contexts. 
We applied our algorithm to the most popular applications available on Sourceforge, and found that the largest programs, with hundreds of thousands of Java bytecodes, can be analyzed in under 20 minutes.This paper shows that pointer analysis, and many other queries and algorithms, can be described succinctly and declaratively using Datalog, a logic programming language. We have developed a system called bddbddb that automatically translates Datalog programs into highly efficient BDD implementations. We used this approach to develop a variety of context-sensitive algorithms including side effect analysis, type analysis, and escape analysis.", + "link": "https://www.semanticscholar.org/paper/6df5140b9ba8e109132166a35ec850d811190c14", + "scraped_abstract": null, + "citation_best": 54 + }, + { + "paper": "2092650892", + "venue": "1184151122", + "year": "2004", + "title": "conditional xpath the first order complete xpath dialect", + "label": [ + "61114434", + "13593490", + "2780213375", + "25016198", + "199360897", + "8797682" + ], + "author": [ + "2195082027" + ], + "reference": [ + "193724012", + "255293104", + "270849871", + "1496394707", + "1533614358", + "1540516609", + "1573061520", + "1583633027", + "1684503074", + "1971742702", + "1979514837", + "1984581643", + "2017658637", + "2023251419", + "2029764918", + "2029970296", + "2031641275", + "2088994048", + "2098171268", + "2103332648", + "2115309483", + "2122225808", + "2137931097", + "2157319504", + "2616076417", + "2912380172", + "3202227343" + ], + "abstract": "xpath is the w3c standard node addressing language for xml documents xpath is still under development and its technical aspects are intensively studied what is missing at present is a clear characterization of the expressive power of xpath be it either semantical or with reference to some well established existing logical formalism core xpath the logical core of xpath 1 0 defined by gottlob et al cannot express queries with conditional paths as 
exemplified by do a child step while test is true at the resulting node in a first order complete extension of core xpath such queries are expressible we add conditional axis relations to core xpath and show that the resulting language called conditional xpath is equally expressive as first order logic when interpreted on ordered trees both the result the extended xpath language and the proof are closely related to temporal logic specifically while core xpath may be viewed as a simple temporal logic conditional xpath extends this with counterparts of the since and until operators", + "title_raw": "Conditional XPath, the first order complete XPath dialect", + "abstract_raw": "XPath is the W3C -- standard node addressing language for XML documents. XPath is still under development and its technical aspects are intensively studied. What is missing at present is a clear characterization of the expressive power of XPath, be it either semantical or with reference to some well established existing (logical) formalism. Core XPath (the logical core of XPath 1.0 defined by Gottlob et al.) cannot express queries with conditional paths as exemplified by \"do a child step, while test is true at the resulting node.\" In a first-order complete extension of Core XPath, such queries are expressible, We add conditional axis relations to Core XPath and show that the resulting language, called conditional XPath, is equally expressive as first-order logic when interpreted on ordered trees. Both the result, the extended XPath language, and the proof are closely related to temporal logic. 
Specifically, while Core XPath may be viewed as a simple temporal logic, conditional XPath extends this with (counterparts of) the since and until operators.", + "link": "https://www.semanticscholar.org/paper/54be8c03e1fb81014a1167fdfe3cc85f6353f7d7", + "scraped_abstract": null, + "citation_best": 70 + }, + { + "paper": "2134557008", + "venue": "1140684652", + "year": "2004", + "title": "a formal study of information retrieval heuristics", + "label": [ + "127705205", + "194145944", + "177264268", + "149189445", + "23123220" + ], + "author": [ + "2618399871", + "2288867482", + "2152766206" + ], + "reference": [ + "8870360", + "1592871157", + "1833785989", + "1956559956", + "1965657003", + "1978394996", + "2014415866", + "2043909051", + "2047031127", + "2058089741", + "2078875869", + "2093390569", + "2100279283", + "2105157020", + "2111004121", + "2136542423", + "2136729221", + "2421218773" + ], + "abstract": "empirical studies of information retrieval methods show that good retrieval performance is closely related to the use of various retrieval heuristics such as tf idf weighting one basic research question is thus what exactly are these necessary heuristics that seem to cause good retrieval performance in this paper we present a formal study of retrieval heuristics we formally define a set of basic desirable constraints that any reasonable retrieval function should satisfy and check these constraints on a variety of representative retrieval functions we find that none of these retrieval functions satisfies all the constraints unconditionally empirical results show that when a constraint is not satisfied it often indicates non optimality of the method and when a constraint is satisfied only for a certain range of parameter values its performance tends to be poor when the parameter is out of the range in general we find that the empirical performance of a retrieval formula is tightly related to how well it satisfies these constraints thus the proposed constraints 
provide a good explanation of many empirical observations and make it possible to evaluate any existing or new retrieval formula analytically", + "title_raw": "A formal study of information retrieval heuristics", + "abstract_raw": "Empirical studies of information retrieval methods show that good retrieval performance is closely related to the use of various retrieval heuristics, such as TF-IDF weighting. One basic research question is thus what exactly are these \"necessary\" heuristics that seem to cause good retrieval performance. In this paper, we present a formal study of retrieval heuristics. We formally define a set of basic desirable constraints that any reasonable retrieval function should satisfy, and check these constraints on a variety of representative retrieval functions. We find that none of these retrieval functions satisfies all the constraints unconditionally. Empirical results show that when a constraint is not satisfied, it often indicates non-optimality of the method, and when a constraint is satisfied only for a certain range of parameter values, its performance tends to be poor when the parameter is out of the range. In general, we find that the empirical performance of a retrieval formula is tightly related to how well it satisfies these constraints. 
Thus the proposed constraints provide a good explanation of many empirical observations and make it possible to evaluate any existing or new retrieval formula analytically.", + "link": "https://www.semanticscholar.org/paper/c5445155baf89ad8d17a04fc82d0f9626558d5e6", + "scraped_abstract": null, + "citation_best": 346 + }, + { + "paper": "2116790783", + "venue": "1131589359", + "year": "2004", + "title": "on performance bounds for the integration of elastic and adaptive streaming flows", + "label": [ + "104122410", + "161765866", + "79403827" + ], + "author": [ + "99017749", + "2084777258" + ], + "reference": [ + "1506813753", + "1520666268", + "1529132671", + "1583757272", + "1965887263", + "1990160109", + "2032588037", + "2074637591", + "2106896172", + "2120163662", + "2123205020", + "2126880366", + "2139440703", + "2149000368", + "2149422460", + "2150297176", + "2150408145", + "2151927853", + "2161809383", + "2167922912" + ], + "abstract": "we consider a network model where bandwidth is fairly shared by a dynamic number of elastic and adaptive streaming flows elastic flows correspond to data transfers while adaptive streaming flows correspond to audio video applications with variable rate codecs in particular the former are characterized by a fixed size in bits while the latter are characterized by a fixed duration this flow level model turns out to be intractable in general in this paper we give performance bounds for both elastic and streaming traffic by means of sample path arguments these bounds present the practical interest of being insensitive to traffic characteristics like the distributions of elastic flow size and streaming flow duration", + "title_raw": "On performance bounds for the integration of elastic and adaptive streaming flows", + "abstract_raw": "We consider a network model where bandwidth is fairly shared by a dynamic number of elastic and adaptive streaming flows. 
Elastic flows correspond to data transfers while adaptive streaming flows correspond to audio/video applications with variable rate codecs. In particular, the former are characterized by a fixed size (in bits) while the latter are characterized by a fixed duration. This flow-level model turns out to be intractable in general. In this paper, we give performance bounds for both elastic and streaming traffic by means of sample-path arguments. These bounds present the practical interest of being insensitive to traffic characteristics like the distributions of elastic flow size and streaming flow duration.", + "link": "https://www.semanticscholar.org/paper/eb7f432d233ea637a17ff797d681372dc42a8c6c", + "scraped_abstract": null, + "citation_best": 51 + }, + { + "paper": "1993855803", + "venue": "1175089206", + "year": "2004", + "title": "indexing spatio temporal trajectories with chebyshev polynomials", + "label": [ + "171326582", + "120174047", + "196512905", + "21424316" + ], + "author": [ + "2408285798", + "2109621001" + ], + "reference": [ + "116902681", + "1487607052", + "1491931038", + "1499049447", + "1499117135", + "1510964588", + "1520144421", + "1541459201", + "1574593551", + "1587157435", + "1608072158", + "1610282999", + "1837913551", + "1906171242", + "1939649909", + "1979207937", + "1982042357", + "2001103857", + "2001670934", + "2002328435", + "2036557187", + "2042591571", + "2046144220", + "2059998776", + "2063646365", + "2064727561", + "2066796814", + "2067481201", + "2083888736", + "2084481683", + "2086086639", + "2096702528", + "2097597750", + "2097983034", + "2098001825", + "2099397891", + "2100315210", + "2101429492", + "2106642566", + "2121110504", + "2122594532", + "2128061541", + "2131620262", + "2133184712", + "2134423343", + "2138087993", + "2141394101", + "2143702666", + "2147880780", + "2148039410", + "2163203501", + "2163336863", + "2167035411", + "2167833054", + "2169057819", + "3004157836", + "3097169496" + ], + "abstract": "in this paper we 
attempt to approximate and index a d dimensional d 1 spatio temporal trajectory with a low order continuous polynomial there are many possible ways to choose the polynomial including continuous fourier transforms splines non linear regressino etc some of these possiblities have indeed been studied beofre we hypothesize that one of the best possibilities is the polynomial that minimizes the maximum deviation from the true value which is called the minimax polynomial minimax approximation is particularly meaningful for indexing because in a branch and bound search i e for finding nearest neighbours the smaller the maximum deviation the more pruning opportunities there exist however in general among all the polynomials of the same degree the optimal minimax polynomial is very hard to compute however it has been shown thta the chebyshev approximation is almost identical to the optimal minimax polynomial and is easy to compute 16 thus in this paper we explore how to use the chebyshev polynomials as a basis for approximating and indexing d dimenstional trajectories the key analytic result of this paper is the lower bounding lemma that is we show that the euclidean distance between two d dimensional trajectories is lower bounded by the weighted euclidean distance between the two vectors of chebyshev coefficients this lemma is not trivial to show and it ensures that indexing with chebyshev cofficients aedmits no false negatives to complement that analystic result we conducted comprehensive experimental evaluation with real and generated 1 dimensional to 4 dimensional data sets we compared the proposed schem with the adaptive piecewise constant approximation apca scheme our preliminary results indicate that in all situations we tested chebyshev indexing dominates apca in pruning power i o and cpu costs", + "title_raw": "Indexing spatio-temporal trajectories with Chebyshev polynomials", + "abstract_raw": "In this paper, we attempt to approximate and index a d- dimensional (d 
\u2265 1) spatio-temporal trajectory with a low order continuous polynomial. There are many possible ways to choose the polynomial, including (continuous)Fourier transforms, splines, non-linear regressino, etc. Some of these possiblities have indeed been studied beofre. We hypothesize that one of the best possibilities is the polynomial that minimizes the maximum deviation from the true value, which is called the minimax polynomial. Minimax approximation is particularly meaningful for indexing because in a branch-and-bound search (i.e., for finding nearest neighbours), the smaller the maximum deviation, the more pruning opportunities there exist. However, in general, among all the polynomials of the same degree, the optimal minimax polynomial is very hard to compute. However, it has been shown thta the Chebyshev approximation is almost identical to the optimal minimax polynomial, and is easy to compute [16]. Thus, in this paper, we explore how to use the Chebyshev polynomials as a basis for approximating and indexing d-dimenstional trajectories.The key analytic result of this paper is the Lower Bounding Lemma. that is, we show that the Euclidean distance between two d-dimensional trajectories is lower bounded by the weighted Euclidean distance between the two vectors of Chebyshev coefficients. this lemma is not trivial to show, and it ensures that indexing with Chebyshev cofficients aedmits no false negatives. To complement that analystic result, we conducted comprehensive experimental evaluation with real and generated 1-dimensional to 4-dimensional data sets. We compared the proposed schem with the Adaptive Piecewise Constant Approximation (APCA) scheme. 
Our preliminary results indicate that in all situations we tested, Chebyshev indexing dominates APCA in pruning power, I/O and CPU costs.", + "link": "https://www.semanticscholar.org/paper/d108d181417d8e18d3dde91d1f645179e95130ce", + "scraped_abstract": null, + "citation_best": 311 + }, + { + "paper": "1989450868", + "venue": "1166315290", + "year": "2004", + "title": "multi finger gestural interaction with 3d volumetric displays", + "label": [ + "2776977519", + "153083717", + "95020103", + "36464697", + "31972630", + "107750057", + "121684516" + ], + "author": [ + "2115951828", + "2065696548", + "2130130894" + ], + "reference": [ + "1502076088", + "1536474500", + "1554565390", + "1556910981", + "1967310439", + "1974223339", + "2023207621", + "2027164646", + "2039054456", + "2042751882", + "2042871795", + "2067870229", + "2108147168", + "2111204873", + "2113746821", + "2117043006", + "2122739566", + "2145946022" + ], + "abstract": "volumetric displays provide interesting opportunities and challenges for 3d interaction and visualization particularly when used in a highly interactive manner we explore this area through the design and implementation of techniques for interactive direct manipulation of objects with a 3d volumetric display motion tracking of the user s fingers provides for direct gestural interaction with the virtual objects through manipulations on and around the display s hemispheric enclosure our techniques leverage the unique features of volumetric displays including a 360 viewing volume that enables manipulation from any viewpoint around the display as well as natural and accurate perception of true depth information in the displayed 3d scene we demonstrate our techniques within a prototype 3d geometric model building application", + "title_raw": "Multi-finger gestural interaction with 3d volumetric displays", + "abstract_raw": "Volumetric displays provide interesting opportunities and challenges for 3D interaction and visualization, particularly 
when used in a highly interactive manner. We explore this area through the design and implementation of techniques for interactive direct manipulation of objects with a 3D volumetric display. Motion tracking of the user's fingers provides for direct gestural interaction with the virtual objects, through manipulations on and around the display's hemispheric enclosure. Our techniques leverage the unique features of volumetric displays, including a 360\u00b0 viewing volume that enables manipulation from any viewpoint around the display, as well as natural and accurate perception of true depth information in the displayed 3D scene. We demonstrate our techniques within a prototype 3D geometric model building application.", + "link": "https://www.semanticscholar.org/paper/e2c847e2e6f6c638dde922a6c77c691949184fbb", + "scraped_abstract": null, + "citation_best": 24 + }, + { + "paper": "2088761521", + "venue": "1164321581", + "year": "2005", + "title": "crossy a crossing based drawing application", + "label": [ + "37789001", + "137955351", + "25621077", + "121684516", + "2776799293" + ], + "author": [ + "1996478780", + "2289381742" + ], + "reference": [ + "20751697", + "44054103", + "1482422856", + "1515087561", + "1531523252", + "1989790238", + "1994610034", + "2010117363", + "2014788716", + "2033288247", + "2050037476", + "2054731618", + "2055465369", + "2076454692", + "2078175541", + "2092829503", + "2124783468", + "2131054109", + "2133495565", + "2141837194", + "2158391167", + "2161581092", + "2168162036", + "2293769110" + ], + "abstract": "we introduce crossy a simple drawing application developed as a benchmark to demonstrate the feasibility of goal crossing as the basis for a graphical user interface while crossing was previously identified as a potential substitute for the classic point and click interaction this work is the first to report on the practical aspects of implementing an interface solely based on goal crossing", + "title_raw": "CrossY: a crossing-based 
drawing application", + "abstract_raw": "We introduce CrossY, a simple drawing application developed as a benchmark to demonstrate the feasibility of goal-crossing as the basis for a graphical user interface. While crossing was previously identified as a potential substitute for the classic point-and-click interaction, this work is the first to report on the practical aspects of implementing an interface solely based on goal-crossing.", + "link": "https://www.semanticscholar.org/paper/f3b76ec872a8f21ac01e6f866ce3031139ba5da8", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2097995023", + "venue": "1133523790", + "year": "2004", + "title": "model driven data acquisition in sensor networks", + "label": [ + "119857082", + "49937458", + "114289077", + "50341643", + "24590314", + "555944384", + "184783062", + "124101348", + "163985040", + "137836250" + ], + "author": [ + "2002742946", + "1988556028", + "2139913828", + "2063640528", + "2763131346" + ], + "reference": [ + "151966512", + "1513861746", + "1568191923", + "1598443221", + "1603054560", + "1993482412", + "2042986967", + "2110849300", + "2112056262", + "2116687437", + "2121255383", + "2122410182", + "2126333155", + "2130990991", + "2134786002", + "2135252439", + "2148251644", + "2153259545", + "2153834102", + "2158390893", + "2168452204", + "2168865746", + "2171776999", + "2296677182" + ], + "abstract": "declarative queries are proving to be an attractive paradigm for ineracting with networks of wireless sensors the metaphor that the sensornet is a database is problematic however because sensors do not exhaustively represent the data in the real world in order to map the raw sensor readings onto physical reality a model of that reality is required to complement the readings in this paper we enrich interactive sensor querying with statistical modeling techniques we demonstrate that such models can help provide answers that are both more meaningful and by introducing approximations with 
probabilistic confidences significantly more efficient to compute in both time and energy utilizing the combination of a model and live data acquisition raises the challenging optimization problem of selecting the best sensor readings to acquire balancing the increase in the confidence of our answer against the communication and data acquisition costs in the network we describe an exponential time algorithm for finding the optimal solution to this optimization problem and a polynomial time heuristic for identifying solutions that perform well in practice we evaluate our approach on several real world sensor network data sets taking into account the real measured data and communication quality demonstrating that our model based approach provides a high fidelity representation of the real phenomena and leads to significant performance gains versus traditional data acquisition techniques", + "title_raw": "Model-driven data acquisition in sensor networks", + "abstract_raw": "Declarative queries are proving to be an attractive paradigm for ineracting with networks of wireless sensors. The metaphor that \"the sensornet is a database\" is problematic, however, because sensors do not exhaustively represent the data in the real world. In order to map the raw sensor readings onto physical reality, a model of that reality is required to complement the readings. In this paper, we enrich interactive sensor querying with statistical modeling techniques. We demonstrate that such models can help provide answers that are both more meaningful, and, by introducing approximations with probabilistic confidences, significantly more efficient to compute in both time and energy. Utilizing the combination of a model and live data acquisition raises the challenging optimization problem of selecting the best sensor readings to acquire, balancing the increase in the confidence of our answer against the communication and data acquisition costs in the network. 
We describe an exponential time algorithm for finding the optimal solution to this optimization problem, and a polynomial-time heuristic for identifying solutions that perform well in practice. We evaluate our approach on several real-world sensor-network data sets, taking into account the real measured data and communication quality, demonstrating that our model-based approach provides a high-fidelity representation of the real phenomena and leads to significant performance gains versus traditional data acquisition techniques.", + "link": "https://www.semanticscholar.org/paper/393c73fccc56fdeb480e4f1c980b14182711a69d", + "scraped_abstract": null, + "citation_best": 1053 + }, + { + "paper": "2064716575", + "venue": "1135342153", + "year": "2004", + "title": "automatic detection of fragments in dynamically generated web pages", + "label": [ + "21959979", + "183003079", + "100158260", + "124101348", + "115537543", + "162319229" + ], + "author": [ + "2183807529", + "1251078740", + "2125988131", + "202340837" + ], + "reference": [ + "51518710", + "89823361", + "158713739", + "1609518033", + "1748877974", + "1991548210", + "2013571142", + "2022261236", + "2026839093", + "2040075907", + "2084326726", + "2096122728", + "2099676315", + "2126577727", + "2128836931", + "2132069633", + "2132627996", + "2141378275", + "2152565070", + "2170708842", + "2965039799" + ], + "abstract": "dividing web pages into fragments has been shown to provide significant benefits for both content generation and caching in order for a web site to use fragment based content generation however good methods are needed for dividing web pages into fragments manual fragmentation of web pages is expensive error prone and unscalable this paper proposes a novel scheme to automatically detect and flag fragments that are cost effective cache units in web sites serving dynamic content we consider the fragments to be interesting if they are shared among multiple documents or they have different lifetime or 
personalization characteristics our approach has three unique features first we propose a hierarchical and fragment aware model of the dynamic web pages and a data structure that is compact and effective for fragment detection second we present an efficient algorithm to detect maximal fragments that are shared among multiple documents third we develop a practical algorithm that effectively detects fragments based on their lifetime and personalization characteristics we evaluate the proposed scheme through a series of experiments showing the benefits and costs of the algorithms we also study the impact of adopting the fragments detected by our system on disk space utilization and network bandwidth consumption", + "title_raw": "Automatic detection of fragments in dynamically generated web pages", + "abstract_raw": "Dividing web pages into fragments has been shown to provide significant benefits for both content generation and caching. In order for a web site to use fragment-based content generation, however, good methods are needed for dividing web pages into fragments. Manual fragmentation of web pages is expensive, error prone, and unscalable. This paper proposes a novel scheme to automatically detect and flag fragments that are cost-effective cache units in web sites serving dynamic content. We consider the fragments to be interesting if they are shared among multiple documents or they have different lifetime or personalization characteristics. Our approach has three unique features. First, we propose a hierarchical and fragment-aware model of the dynamic web pages and a data structure that is compact and effective for fragment detection. Second, we present an efficient algorithm to detect maximal fragments that are shared among multiple documents. Third, we develop a practical algorithm that effectively detects fragments based on their lifetime and personalization characteristics. 
We evaluate the proposed scheme through a series of experiments, showing the benefits and costs of the algorithms. We also study the impact of adopting the fragments detected by our system on disk space utilization and network bandwidth consumption.", + "link": "https://www.semanticscholar.org/paper/7151f607403425f283bcb226f37742e9d3b40ce8", + "scraped_abstract": null, + "citation_best": 80 + }, + { + "paper": "2113272343", + "venue": "1188739475", + "year": "2003", + "title": "towards a model of face to face grounding", + "label": [ + "56461940", + "100609095", + "40140605" + ], + "author": [ + "2713460036", + "2276842651", + "1912765636", + "2245648575" + ], + "reference": [ + "186044204", + "1484139912", + "1563178652", + "1568240964", + "1583314545", + "1907884286", + "2000343728", + "2003684257", + "2009266235", + "2062614785", + "2088956580", + "2110930288", + "2116971293", + "2156880591", + "2169749310", + "2170584777", + "2226647544" + ], + "abstract": "we investigate the verbal and nonverbal means for grounding and propose a design for embodied conversational agents that relies on both kinds of signals to establish common ground in human computer interaction we analyzed eye gaze head nods and attentional focus in the context of a direction giving task the distribution of nonverbal behaviors differed depending on the type of dialogue move being grounded and the overall pattern reflected a monitoring of lack of negative feedback based on these results we present an eca that uses verbal and nonverbal grounding acts to update dialogue state", + "title_raw": "Towards a Model of Face-to-Face Grounding", + "abstract_raw": "We investigate the verbal and nonverbal means for grounding, and propose a design for embodied conversational agents that relies on both kinds of signals to establish common ground in human-computer interaction. We analyzed eye gaze, head nods and attentional focus in the context of a direction-giving task. 
The distribution of nonverbal behaviors differed depending on the type of dialogue move being grounded, and the overall pattern reflected a monitoring of lack of negative feedback. Based on these results, we present an ECA that uses verbal and nonverbal grounding acts to update dialogue state.", + "link": "https://www.semanticscholar.org/paper/d2fa3412d4c9b2867f2952de9851f729761b8f47", + "scraped_abstract": null, + "citation_best": 204 + }, + { + "paper": "2097606805", + "venue": "1188739475", + "year": "2003", + "title": "accurate unlexicalized parsing", + "label": [ + "2777875368", + "206134035", + "2781018953", + "186644900", + "204321447" + ], + "author": [ + "2618175022", + "2149153931" + ], + "reference": [ + "199541590", + "1496659931", + "1535015163", + "1551104980", + "1567570606", + "1859173823", + "2047706513", + "2092654472", + "2096466920", + "2110882317", + "2111041233", + "2138607469", + "2153439141", + "2155693943", + "2157140289", + "2161204834", + "2170716495", + "3021928671" + ], + "abstract": "we demonstrate that an unlexicalized pcfg can parse much more accurately than previously shown by making use of simple linguistically motivated state splits which break down false independence assumptions latent in a vanilla treebank grammar indeed its performance of 86 36 lp lr f1 is better than that of early lexicalized pcfg models and surprisingly close to the current state of the art this result has potential uses beyond establishing a strong lower bound on the maximum possible accuracy of unlexicalized models an unlexicalized pcfg is much more compact easier to replicate and easier to interpret than more complex lexical models and the parsing algorithms are simpler more widely understood of lower asymptotic complexity and easier to optimize", + "title_raw": "Accurate Unlexicalized Parsing", + "abstract_raw": "We demonstrate that an unlexicalized PCFG can parse much more accurately than previously shown, by making use of simple, linguistically 
motivated state splits, which break down false independence assumptions latent in a vanilla treebank grammar. Indeed, its performance of 86.36% (LP/LR F1) is better than that of early lexicalized PCFG models, and surprisingly close to the current state-of-the-art. This result has potential uses beyond establishing a strong lower bound on the maximum possible accuracy of unlexicalized models: an unlexicalized PCFG is much more compact, easier to replicate, and easier to interpret than more complex lexical models, and the parsing algorithms are simpler, more widely understood, of lower asymptotic complexity, and easier to optimize.", + "link": "https://www.semanticscholar.org/paper/a600850ac0120cb09a0b7de7da80bb6a7a76de06", + "scraped_abstract": null, + "citation_best": 3113 + }, + { + "paper": "2121376435", + "venue": "1199533187", + "year": "2003", + "title": "predicting problems caused by component upgrades", + "label": [ + "71151206", + "21491501", + "49585438", + "40878169", + "3247490", + "174683762", + "149091818" + ], + "author": [ + "2271153937", + "2235702021" + ], + "reference": [ + "47804331", + "137443411", + "199352148", + "303139982", + "1505144991", + "1521161646", + "1521711401", + "1583826417", + "1637866372", + "1638559933", + "1787500456", + "1848617919", + "1970495662", + "1972245406", + "1975255815", + "1983119041", + "1992371286", + "1992431017", + "1993836075", + "1995008247", + "2004309454", + "2006993239", + "2011958337", + "2036656026", + "2038497675", + "2040015657", + "2040856861", + "2051632385", + "2055477538", + "2059295282", + "2066210260", + "2080573945", + "2097895505", + "2101307187", + "2103318645", + "2109452325", + "2116973066", + "2130175237", + "2134716336", + "2135032959", + "2136671373", + "2138059712", + "2153131284", + "2154575443", + "2166944884", + "2171339374", + "2173184982", + "2197622446", + "2230407679", + "2494946951", + "3152134618" + ], + "abstract": "we present a new automatic technique to assess whether 
replacing a component of a software system by a purportedly compatible component may change the behavior of the system the technique operates before integrating the new component into the system or running system tests permitting quicker and cheaper identification of problems it takes into account the system s use of the component because a particular component upgrade may be desirable in one context but undesirable in another no formal specifications are required permitting detection of problems due either to errors in the component or to errors in the system both external and internal behaviors can be compared enabling detection of problems that are not immediately reflected in the output the technique generates an operational abstraction for the old component in the context of the system and generates an operational abstraction for the new component in the context of its test suite an operational abstraction is a set of program properties that generalizes over observed run time behavior if automated logical comparison indicates that the new component does not make all the guarantees that the old one did then the upgrade may affect system behavior and should not be performed without further scrutiny in case studies the technique identified several incompatibilities among software components", + "title_raw": "Predicting problems caused by component upgrades", + "abstract_raw": "We present a new, automatic technique to assess whether replacing a component of a software system by a purportedly compatible component may change the behavior of the system. The technique operates before integrating the new component into the system or running system tests, permitting quicker and cheaper identification of problems. It takes into account the system's use of the component, because a particular component upgrade may be desirable in one context but undesirable in another. 
No formal specifications are required, permitting detection of problems due either to errors in the component or to errors in the system. Both external and internal behaviors can be compared, enabling detection of problems that are not immediately reflected in the output.The technique generates an operational abstraction for the old component in the context of the system and generates an operational abstraction for the new component in the context of its test suite; an operational abstraction is a set of program properties that generalizes over observed run-time behavior. If automated logical comparison indicates that the new component does not make all the guarantees that the old one did, then the upgrade may affect system behavior and should not be performed without further scrutiny. In case studies, the technique identified several incompatibilities among software components.", + "link": "https://scholar.google.com/scholar?as_q=&num=10&btnG=Search+Scholar&as_epq=Predicting+Problems+Caused+by+Component+Upgrades&as_oq=&as_eq=&as_occt=any&as_sauthors=McCamant", + "scraped_abstract": null, + "citation_best": 5 + }, + { + "paper": "2131467114", + "venue": "1199533187", + "year": "2003", + "title": "a family of test adequacy criteria for database driven applications", + "label": [ + "96324660", + "100850083", + "77088390", + "124101348", + "2777904410", + "148840519", + "5655090", + "30775581" + ], + "author": [ + "233550907", + "686913843" + ], + "reference": [ + "338991206", + "1512840853", + "1513107709", + "1523736726", + "1564381193", + "1591521007", + "1966188439", + "1985551847", + "2004378344", + "2009285751", + "2014806198", + "2024795941", + "2034334856", + "2060795628", + "2074432970", + "2103326816", + "2109322951", + "2121669067", + "2128363840", + "2134691366", + "2140297147", + "2154897437" + ], + "abstract": "although a software application always executes within a particular environment current testing methods have largely ignored these environmental 
factors many applications execute in an environment that contains a database in this paper we propose a family of test adequacy criteria that can be used to assess the quality of test suites for database driven applications our test adequacy criteria use dataflow information that is associated with the entities in a relational database furthermore we develop a unique representation of a database driven application that facilitates the enumeration of database interaction associations these associations can reflect an application s definition and use of database entities at multiple levels of granularity the usage of a tool to calculate intraprocedural database interaction associations for two case study applications indicates that our adequacy criteria can be computed with an acceptable time and space overhead", + "title_raw": "A family of test adequacy criteria for database-driven applications", + "abstract_raw": "Although a software application always executes within a particular environment, current testing methods have largely ignored these environmental factors. Many applications execute in an environment that contains a database. In this paper, we propose a family of test adequacy criteria that can be used to assess the quality of test suites for database-driven applications. Our test adequacy criteria use dataflow information that is associated with the entities in a relational database. Furthermore, we develop a unique representation of a database-driven application that facilitates the enumeration of database interaction associations. These associations can reflect an application's definition and use of database entities at multiple levels of granularity. 
The usage of a tool to calculate intraprocedural database interaction associations for two case study applications indicates that our adequacy criteria can be computed with an acceptable time and space overhead.", + "link": "https://www.semanticscholar.org/paper/8d8068dd838dd060fc1eb819a2199517172ecd66", + "scraped_abstract": null, + "citation_best": 27 + }, + { + "paper": "2142038400", + "venue": "1199533187", + "year": "2003", + "title": "eliminating redundancies with a composition with adaptation meta programming technique", + "label": [ + "35390924", + "167955471", + "548217200", + "199360897", + "89187990", + "151578736" + ], + "author": [ + "1937785728", + "2229015836" + ], + "reference": [ + "1500250067", + "1832968370", + "1974967044", + "1991604845", + "2010619212", + "2029414465", + "2058486846", + "2102846053", + "2115703981", + "2118989541", + "2128419413", + "2143238865", + "2165875645" + ], + "abstract": "redundant code obstructs program understanding and contributes to high maintenance costs while most experts agree on that opinions on how serious the problem of redundancies really is and how to tackle it differ in this paper we present the study of redundancies in the java buffer library jdk 1 4 1 which was recently released by sun we found that at least 68 of code in the buffer library is redundant in the sense that it recurs in many classes in the same or slightly modified form we effectively eliminated that 68 of code at the meta level using a technique based on composition with adaptation called xvcl we argue that such a program solution is easier to maintain than buffer classes with redundant code in this experiment we have designed our meta representation so that we could produce buffer classes in exactly the same form as they appear in the original buffer library while we have been tempted to re design the buffer classes we chose not to do so in order to allow for the seamless integration of the xvcl solution into contemporary programming 
methodologies and systems this decision has not affected the essential results reported in this paper", + "title_raw": "Eliminating redundancies with a \"composition with adaptation\" meta-programming technique", + "abstract_raw": "Redundant code obstructs program understanding and contributes to high maintenance costs. While most experts agree on that, opinions - on how serious the problem of redundancies really is and how to tackle it - differ. In this paper, we present the study of redundancies in the Java Buffer library, JDK 1.4.1, which was recently released by Sun. We found that at least 68% of code in the Buffer library is redundant in the sense that it recurs in many classes in the same or slightly modified form. We effectively eliminated that 68% of code at the meta-level using a technique based on \"composition with adaptation\" called XVCL. We argue that such a program solution is easier to maintain than buffer classes with redundant code. In this experiment, we have designed our meta-representation so that we could produce buffer classes in exactly the same form as they appear in the original Buffer library. While we have been tempted to re-design the buffer classes, we chose not to do so, in order to allow for the seamless integration of the XVCL solution into contemporary programming methodologies and systems. 
This decision has not affected the essential results reported in this paper.", + "link": "https://scholar.google.com/scholar?as_q=&num=10&btnG=Search+Scholar&as_epq=Eliminating+redundancies+with+a+'composition+with+adaptation'+meta-programming+technique&as_oq=&as_eq=&as_occt=any&as_sauthors=Jarzabek", + "scraped_abstract": null, + "citation_best": 4 + }, + { + "paper": "2171048379", + "venue": "1164975091", + "year": "2003", + "title": "image parsing unifying segmentation detection and recognition", + "label": [ + "124504099", + "49937458", + "89600930", + "141404830", + "98763669", + "31510193", + "2776151529", + "31972630", + "97931131", + "64876066", + "178980831" + ], + "author": [ + "2134014566", + "2148313454", + "2682580105", + "2172768145" + ], + "reference": [ + "109881820", + "168365783", + "995727756", + "1574225613", + "1574901103", + "1575129940", + "1592925889", + "1600276254", + "1601741105", + "1772705656", + "1934863104", + "1964443764", + "1972774730", + "1975559841", + "1997011019", + "2017115857", + "2024046085", + "2026799324", + "2027833538", + "2041898787", + "2053887662", + "2055505161", + "2056760934", + "2057175746", + "2063252599", + "2063266501", + "2069739265", + "2091845343", + "2095783300", + "2098152234", + "2098355199", + "2098678088", + "2099111195", + "2103443043", + "2104955141", + "2106110775", + "2106706098", + "2107333657", + "2108284981", + "2112076978", + "2114202040", + "2121927366", + "2121947440", + "2128716185", + "2129882630", + "2138309709", + "2138451337", + "2141376824", + "2141658770", + "2145023731", + "2151103935", + "2152826865", + "2155511848", + "2158355041", + "2164302083", + "2165657641", + "2166501286", + "2167828171", + "2168020168", + "2217896605", + "2295106276", + "2765543176" + ], + "abstract": "we propose a general framework for parsing images into regions and objects in this framework the detection and recognition of objects proceed simultaneously with image segmentation in a competitive and 
cooperative manner we illustrate our approach on natural images of complex city scenes where the objects of primary interest are faces and text this method makes use of bottom up proposals combined with top down generative models using the data driven markov chain monte carlo ddmcmc algorithm which is guaranteed to converge to the optimal estimate asymptotically more precisely we define generative models for faces text and generic regions e g shading texture and clutter these models are activated by bottom up proposals the proposals for faces and text are learnt using a probabilistic version of adaboost the ddmcmc combines reversible jump and diffusion dynamics to enable the generative models to explain the input images in a competitive and cooperative manner our experiments illustrate the advantages and importance of combining bottom up and top down models and of performing segmentation and object detection recognition simultaneously", + "title_raw": "Image parsing: unifying segmentation, detection, and recognition", + "abstract_raw": "We propose a general framework for parsing images into regions and objects. In this framework, the detection and recognition of objects proceed simultaneously with image segmentation in a competitive and cooperative manner. We illustrate our approach on natural images of complex city scenes where the objects of primary interest are faces and text. This method makes use of bottom-up proposals combined with top-down generative models using the data driven Markov chain Monte Carlo (DDMCMC) algorithm, which is guaranteed to converge to the optimal estimate asymptotically. More precisely, we define generative models for faces, text, and generic regions- e.g. shading, texture, and clutter. These models are activated by bottom-up proposals. The proposals for faces and text are learnt using a probabilistic version of AdaBoost. 
The DDMCMC combines reversible jump and diffusion dynamics to enable the generative models to explain the input images in a competitive and cooperative manner. Our experiments illustrate the advantages and importance of combining bottom-up and top-down models and of performing segmentation and object detection/recognition simultaneously.", + "link": "https://www.semanticscholar.org/paper/cca9200d9da958b7f90eab901b2f30c04f1e0e9c", + "scraped_abstract": null, + "citation_best": 67 + }, + { + "paper": "1992825118", + "venue": "1164975091", + "year": "2003", + "title": "detecting pedestrians using patterns of motion and appearance", + "label": [ + "2780142956", + "160633673", + "141404830", + "94915269", + "31972630", + "46686674" + ], + "author": [ + "2639312569", + "2140427449", + "2663532111" + ], + "reference": [ + "1522061046", + "1570914610", + "1988790447", + "2032210760", + "2089181482", + "2107853414", + "2115763357", + "2143023146", + "2143425433", + "2145073242", + "2152668169", + "2155511848", + "2164598857", + "2217896605" + ], + "abstract": "this paper describes a pedestrian detection system that integrates image intensity information with motion information we use a detection style algorithm that scans a detector over two consecutive frames of a video sequence the detector is trained using adaboost to take advantage of both motion and appearance information to detect a walking person past approaches have built detectors based on appearance information but ours is the first to combine both sources of information in a single detector the implementation described runs at about 4 frames second detects pedestrians at very small scales as small as 20 spl times 15 pixels and has a very low false positive rate our approach builds on the detection work of viola and jones novel contributions of this paper include i development of a representation of image motion which is extremely efficient and ii implementation of a state of the art pedestrian detection system 
which operates on low resolution images under difficult conditions such as rain and snow", + "title_raw": "Detecting pedestrians using patterns of motion and appearance", + "abstract_raw": "This paper describes a pedestrian detection system that integrates image intensity information with motion information. We use a detection style algorithm that scans a detector over two consecutive frames of a video sequence. The detector is trained (using AdaBoost) to take advantage of both motion and appearance information to detect a walking person. Past approaches have built detectors based on appearance information, but ours is the first to combine both sources of information in a single detector. The implementation described runs at about 4 frames/second, detects pedestrians at very small scales (as small as 20/spl times/15 pixels), and has a very low false positive rate. Our approach builds on the detection work of Viola and Jones. Novel contributions of this paper include: i) development of a representation of image motion which is extremely efficient, and ii) implementation of a state of the art pedestrian detection system which operates on low resolution images under difficult conditions (such as rain and snow).", + "link": "https://www.semanticscholar.org/paper/4521ac382772e90607c23af1cff34a04df37e6c4", + "scraped_abstract": null, + "citation_best": 2103 + }, + { + "paper": "2160510992", + "venue": "1174403976", + "year": "2003", + "title": "precise dynamic slicing algorithms", + "label": [ + "34736171", + "91071405", + "179799912", + "136134403", + "2776190703", + "140745168", + "11413529" + ], + "author": [ + "2157206324", + "2104865906", + "2136599468" + ], + "reference": [ + "303139982", + "1535300117", + "1984248430", + "1984846967", + "2024205760", + "2061608960", + "2110019745", + "2110066339", + "2120308515", + "2293624369" + ], + "abstract": "dynamic slicing algorithms can greatly reduce the de bugging effort by focusing the attention of the user on a 
relevant subset of program statements in this paper we present the design and evaluation of three precise dynamic slicing algorithms called the full preprocessing fp no preprocessing np and limited preprocessing lp algorithms the algorithms differ in the relative timing of constructing the dynamic data dependence graph and its traversal for computing requested dynamic slices our experiments show that the lp algorithm is a fast and practical precise slicing algorithm in fact we show that while precise slices can be orders of magnitude smaller than imprecise dynamic slices for small number of slicing requests the lp algorithm is faster than an imprecise dynamic slicing algorithm proposed by agrawal and horgan", + "title_raw": "Precise dynamic slicing algorithms", + "abstract_raw": "Dynamic slicing algorithms can greatly reduce the de bugging effort by focusing the attention of the user on a relevant subset of program statements. In this paper we present the design and evaluation of three precise dynamic slicing algorithms called the full preprocessing (FP), no preprocessing (NP) and limited preprocessing (LP) algorithms. The algorithms differ in the relative timing of constructing the dynamic data dependence graph and its traversal for computing requested dynamic slices. Our experiments show that the LP algorithm is a fast and practical precise slicing algorithm. 
In fact we show that while precise slices can be orders of magnitude smaller than imprecise dynamic slices, for small number of slicing requests, the LP algorithm is faster than an imprecise dynamic slicing algorithm proposed by Agrawal and Horgan.", + "link": "https://www.semanticscholar.org/paper/9a383a922be09960d8399c8cb44d2b578fceb916", + "scraped_abstract": null, + "citation_best": 69 + }, + { + "paper": "2112243500", + "venue": "1174403976", + "year": "2003", + "title": "modular verification of software components in c", + "label": [ + "2779907942", + "52913732", + "206880738", + "167822520", + "33054407", + "199360897", + "149091818", + "174683762", + "116253237", + "145644426", + "6943359", + "80444323", + "43126263" + ], + "author": [ + "2112745185", + "2129477832", + "1965603299", + "2193269139", + "1976817768" + ], + "reference": [ + "136416930", + "153567890", + "1497571013", + "1503973138", + "1511155470", + "1531082202", + "1547620028", + "1553858749", + "1582571943", + "1593025666", + "1603799276", + "1623229847", + "1756958128", + "1787074469", + "1979812412", + "1985111476", + "1989884524", + "2011992362", + "2032577199", + "2039549168", + "2040060046", + "2048051309", + "2060910670", + "2066859698", + "2074845051", + "2080593426", + "2089139117", + "2099529102", + "2106972913", + "2107308446", + "2111692334", + "2116181031", + "2117189826", + "2122875187", + "2130175237", + "2138653344", + "2142785340", + "2158395308", + "2161660684", + "2162284942", + "2176300081", + "2295903414", + "2340735175", + "2611025373", + "2863264816", + "2913459036", + "2987907651", + "3045969830", + "3146075203" + ], + "abstract": "we present a new methodology for automatic verification of c programs against finite state machine specifications our approach is compositional naturally enabling us to decompose the verification of large software system into subproblems of manageable complexity the decomposition reflects the modularity in the software design we use weak 
simulation as the notion of conformance between the program and its specification following the abstract verify refine paradigm our tool magic first extracts a finite model from c source code using predicate abstraction and theorem proving subsequently simulation is checked via a reduction to boolean satisfiability magic is able to interface with several publicly available theorem provers and sat solvers we report experimental results with procedures from the linux kernel and the openssl toolkit", + "title_raw": "Modular verification of software components in C", + "abstract_raw": "We present a new methodology for automatic verification of C programs against finite state machine specifications. Our approach is compositional, naturally enabling us to decompose the verification of large software system into subproblems of manageable complexity. The decomposition reflects the modularity in the software design. We use weak simulation as the notion of conformance between the program and its specification. Following the abstract-verify-refine paradigm, our tool MAGIC first extracts a finite model from C source code using predicate abstraction and theorem proving. Subsequently, simulation is checked via a reduction to Boolean satisfiability. MAGIC is able to interface with several publicly available theorem provers and SAT solvers. 
We report experimental results with procedures from the Linux kernel and the OpenSSL toolkit.", + "link": "https://www.semanticscholar.org/paper/3b2534f8042d85ab39df2a09da9c7c174f4fcd9e", + "scraped_abstract": null, + "citation_best": 125 + }, + { + "paper": "1664963465", + "venue": "1203999783", + "year": "2003", + "title": "approximating game theoretic optimal strategies for full scale poker", + "label": [ + "154945302", + "45374587", + "97824396", + "2776229418", + "41045048", + "200632571", + "2781039887" + ], + "author": [ + "2114626649", + "1975621190", + "2133609849", + "2000740313", + "2278512575", + "2090602626", + "2130120576" + ], + "reference": [ + "169877102", + "356658040", + "1554808787", + "1980717955", + "2023373582", + "2040217797", + "2067050450", + "2083347533", + "2101861158", + "2144846366" + ], + "abstract": "the computation of the first complete approximations of game theoretic optimal strategies for full scale poker is addressed several abstraction techniques are combined to represent the game of 2 player texas hold em having size o 1018 using closely related models each having size o 1o7 despite the reduction in size by a factor of 100 billion the resulting models retain the key properties and structure of the real game linear programming solutions to the abstracted game are used to create substantially improved poker playing programs able to defeat strong human players and be competitive against world class opponents", + "title_raw": "Approximating game-theoretic optimal strategies for full-scale poker", + "abstract_raw": "The computation of the first complete approximations of game-theoretic optimal strategies for full-scale poker is addressed. Several abstraction techniques are combined to represent the game of 2-player Texas Hold'em, having size O(1018), using closely related models each having size O(1O7). 
Despite the reduction in size by a factor of 100 billion, the resulting models retain the key properties and structure of the real game. Linear programming solutions to the abstracted game are used to create substantially improved poker-playing programs, able to defeat strong human players and be competitive against world-class opponents.", + "link": "https://www.semanticscholar.org/paper/ce32b559b429f2549d38d67046409399a22bf91f", + "scraped_abstract": null, + "citation_best": 217 + }, + { + "paper": "2061820396", + "venue": "1130985203", + "year": "2003", + "title": "maximizing the spread of influence through a social network", + "label": [ + "187008535", + "114713312", + "148764684", + "86256295", + "127705205" + ], + "author": [ + "2137221145", + "2261367123", + "2009277913" + ], + "reference": [ + "1481080069", + "1495750374", + "1521233381", + "1566478939", + "1586505959", + "1680189815", + "1964473994", + "1968143987", + "1971526329", + "1971788485", + "1971842701", + "1972178529", + "1972645710", + "1973315504", + "1990513740", + "2002863780", + "2041157860", + "2042123098", + "2056609785", + "2059092337", + "2065769502", + "2066862787", + "2079434400", + "2097264805", + "2114696370", + "2125315567", + "2142925354", + "2143401113", + "2157067735", + "2167951823", + "2170502901", + "2187996512", + "2265720734", + "2332241897" + ], + "abstract": "models for the processes by which ideas and influence propagate through a social network have been studied in a number of domains including the diffusion of medical and technological innovations the sudden and widespread adoption of various strategies in game theoretic settings and the effects of word of mouth in the promotion of new products recently motivated by the design of viral marketing strategies domingos and richardson posed a fundamental algorithmic problem for such social network processes if we can try to convince a subset of individuals to adopt a new product or innovation and the goal is to trigger a 
large cascade of further adoptions which set of individuals should we target we consider this problem in several of the most widely studied models in social network analysis the optimization problem of selecting the most influential nodes is np hard here and we provide the first provable approximation guarantees for efficient algorithms using an analysis framework based on submodular functions we show that a natural greedy strategy obtains a solution that is provably within 63 of optimal for several classes of models our framework suggests a general approach for reasoning about the performance guarantees of algorithms for these types of influence problems in social networks we also provide computational experiments on large collaboration networks showing that in addition to their provable guarantees our approximation algorithms significantly out perform node selection heuristics based on the well studied notions of degree centrality and distance centrality from the field of social networks", + "title_raw": "Maximizing the spread of influence through a social network", + "abstract_raw": "Models for the processes by which ideas and influence propagate through a social network have been studied in a number of domains, including the diffusion of medical and technological innovations, the sudden and widespread adoption of various strategies in game-theoretic settings, and the effects of \"word of mouth\" in the promotion of new products. Recently, motivated by the design of viral marketing strategies, Domingos and Richardson posed a fundamental algorithmic problem for such social network processes: if we can try to convince a subset of individuals to adopt a new product or innovation, and the goal is to trigger a large cascade of further adoptions, which set of individuals should we target?We consider this problem in several of the most widely studied models in social network analysis. 
The optimization problem of selecting the most influential nodes is NP-hard here, and we provide the first provable approximation guarantees for efficient algorithms. Using an analysis framework based on submodular functions, we show that a natural greedy strategy obtains a solution that is provably within 63% of optimal for several classes of models; our framework suggests a general approach for reasoning about the performance guarantees of algorithms for these types of influence problems in social networks.We also provide computational experiments on large collaboration networks, showing that in addition to their provable guarantees, our approximation algorithms significantly out-perform node-selection heuristics based on the well-studied notions of degree centrality and distance centrality from the field of social networks.", + "link": "https://www.semanticscholar.org/paper/13df42f24714d1d0c365a2ca6f62dd5cac9534de", + "scraped_abstract": null, + "citation_best": 6739 + }, + { + "paper": "2125346056", + "venue": "1127352206", + "year": "2003", + "title": "automatically proving the correctness of compiler optimizations", + "label": [ + "96324660", + "106663253", + "55439883", + "135257023", + "161969638", + "145272703", + "206880738", + "202096789", + "190902152", + "169590947", + "199360897", + "160191386", + "2777027219" + ], + "author": [ + "2223743999", + "294596595", + "2133284789" + ], + "reference": [ + "127506714", + "1519861553", + "1529785310", + "1590315663", + "1593280365", + "1659048632", + "1964193719", + "1967031800", + "1992347193", + "1993836075", + "2000178662", + "2003848320", + "2007146206", + "2008609084", + "2012426706", + "2014764321", + "2034711041", + "2035030296", + "2040856861", + "2043100293", + "2051632385", + "2058360616", + "2065635796", + "2069107692", + "2087086761", + "2110423393", + "2157319504", + "2164778826", + "2171691057", + "2527757871", + "2911450990" + ], + "abstract": "we describe a technique for automatically proving 
compiler optimizations sound meaning that their transformations are always semantics preserving we first present a domain specific language called cobalt for implementing optimizations as guarded rewrite rules cobalt optimizations operate over a c like intermediate representation including unstructured control flow pointers to local variables and dynamically allocated memory and recursive procedures then we describe a technique for automatically proving the soundness of cobalt optimizations our technique requires an automatic theorem prover to discharge a small set of simple optimization specific proof obligations for each optimization we have written a variety of forward and backward intraprocedural dataflow optimizations in cobalt including constant propagation and folding branch folding full and partial redundancy elimination full and partial dead assignment elimination and simple forms of points to analysis we implemented our soundness checking strategy using the simplify automatic theorem prover and we have used this implementation to automatically prove our optimizations correct our checker found many subtle bugs during the course of developing our optimizations we also implemented an execution engine for cobalt optimizations as part of the whirlwind compiler infrastructure", + "title_raw": "Automatically proving the correctness of compiler optimizations", + "abstract_raw": "We describe a technique for automatically proving compiler optimizations sound, meaning that their transformations are always semantics-preserving. We first present a domain-specific language, called Cobalt, for implementing optimizations as guarded rewrite rules. Cobalt optimizations operate over a C-like intermediate representation including unstructured control flow, pointers to local variables and dynamically allocated memory, and recursive procedures. Then we describe a technique for automatically proving the soundness of Cobalt optimizations. 
Our technique requires an automatic theorem prover to discharge a small set of simple, optimization-specific proof obligations for each optimization. We have written a variety of forward and backward intraprocedural dataflow optimizations in Cobalt, including constant propagation and folding, branch folding, full and partial redundancy elimination, full and partial dead assignment elimination, and simple forms of points-to analysis. We implemented our soundness-checking strategy using the Simplify automatic theorem prover, and we have used this implementation to automatically prove our optimizations correct. Our checker found many subtle bugs during the course of developing our optimizations. We also implemented an execution engine for Cobalt optimizations as part of the Whirlwind compiler infrastructure.", + "link": "https://www.semanticscholar.org/paper/c685c3c47d797b446472e17a87b27e93a399eb3a", + "scraped_abstract": null, + "citation_best": 115 + }, + { + "paper": "1971778458", + "venue": "1184151122", + "year": "2003", + "title": "an information theoretic approach to normal forms for relational and xml data", + "label": [ + "162984825", + "5655090", + "34716815", + "177264268", + "124101348", + "8797682", + "55348073", + "148840519", + "80444323", + "67186912" + ], + "author": [ + "2117861229", + "1971151415" + ], + "reference": [ + "1558832481", + "1566656512", + "1579126536", + "1716383435", + "1874705449", + "1983304353", + "1985581502", + "1995875735", + "2000773957", + "2008396232", + "2013026082", + "2020958835", + "2025151754", + "2035020007", + "2041404167", + "2041601261", + "2057852703", + "2071374120", + "2078015341", + "2086065621", + "2099111195", + "2110810394", + "2125552274", + "2139238107", + "2293122579", + "2294548604", + "2397160827" + ], + "abstract": "normalization as a way of producing good database designs is a well understood topic however the same problem of distinguishing well designed databases from poorly designed ones arises in 
other data models in particular xml while in the relational world the criteria for being well designed are usually very intuitive and clear to state they become more obscure when one moves to more complex data models our goal is to provide a set of tools for testing when a condition on a database design specified by a normal form corresponds to a good design we use techniques of information theory and define a measure of information content of elements in a database with respect to a set of constraints we first test this measure in the relational context providing information theoretic justification for familiar normal forms such as bcnf 4nf pj nf 5nfr dk nf we then show that the same measure applies in the xml context which gives us a characterization of a recently introduced xml normal form called xnf finally we look at information theoretic criteria for justifying normalization algorithms", + "title_raw": "An information-theoretic approach to normal forms for relational and XML data", + "abstract_raw": "Normalization as a way of producing good database designs is a well-understood topic. However, the same problem of distinguishing well-designed databases from poorly designed ones arises in other data models, in particular, XML. While in the relational world the criteria for being well-designed are usually very intuitive and clear to state, they become more obscure when one moves to more complex data models.Our goal is to provide a set of tools for testing when a condition on a database design, specified by a normal form, corresponds to a good design. We use techniques of information theory, and define a measure of information content of elements in a database with respect to a set of constraints. We first test this measure in the relational context, providing information-theoretic justification for familiar normal forms such as BCNF, 4NF, PJ/NF, 5NFR, DK/NF. 
We then show that the same measure applies in the XML context, which gives us a characterization of a recently introduced XML normal form called XNF. Finally, we look at information-theoretic criteria for justifying normalization algorithms.", + "link": "https://www.semanticscholar.org/paper/3bd1e2b0c5fdc1c08304e1cfb6d05eeb3692ea74", + "scraped_abstract": null, + "citation_best": 32 + }, + { + "paper": "2125596620", + "venue": "1140684652", + "year": "2003", + "title": "re examining the potential effectiveness of interactive query expansion", + "label": [ + "96956885", + "164120249", + "192939062", + "118689300", + "192028432", + "97854310", + "99016210", + "157692150", + "23123220" + ], + "author": [ + "1993639548" + ], + "reference": [ + "1495980316", + "1557757161", + "1982451429", + "1991546151", + "2011563572", + "2043909051", + "2065096648", + "2065432550", + "2066395238", + "2066423708", + "2090805977", + "2154724067" + ], + "abstract": "much attention has been paid to the relative effectiveness of interactive query expansion versus automatic query expansion although interactive query expansion has the potential to be an effective means of improving a search in this paper we show that on average human searchers are less likely than systems to make good expansion decisions to enable good expansion decisions searchers must have adequate instructions on how to use interactive query expansion functionalities we show that simple instructions on using interactive query expansion do not necessarily help searchers make good expansion decisions and discuss difficulties found in making query expansion decisions", + "title_raw": "Re-examining the potential effectiveness of interactive query expansion", + "abstract_raw": "Much attention has been paid to the relative effectiveness of interactive query expansion versus automatic query expansion. 
Although interactive query expansion has the potential to be an effective means of improving a search, in this paper we show that, on average, human searchers are less likely than systems to make good expansion decisions. To enable good expansion decisions, searchers must have adequate instructions on how to use interactive query expansion functionalities. We show that simple instructions on using interactive query expansion do not necessarily help searchers make good expansion decisions and discuss difficulties found in making query expansion decisions.", + "link": "https://www.semanticscholar.org/paper/c5b9096e0945e779dd092a58937646d0563be0b8", + "scraped_abstract": null, + "citation_best": 182 + }, + { + "paper": "2102951036", + "venue": "1175089206", + "year": "2003", + "title": "spreadsheets in rdbms for olap", + "label": [ + "24394798", + "203763787", + "201932085", + "2777032813", + "103000020", + "199360897", + "2778692605", + "40207289", + "80444323", + "123593499", + "510870499", + "2780075982" + ], + "author": [ + "2148799853", + "1826152957", + "2619896389", + "1989712085", + "2060644445", + "2408915704", + "2441819755", + "2242934804" + ], + "reference": [ + "1483324348", + "1534906344", + "1539088309", + "2012670464", + "2039491213", + "2049859567", + "2102387332", + "2106181436", + "2118269922", + "2118382442", + "2125759315", + "2132582487", + "2144829796", + "2151135734", + "2160967997", + "2164219265", + "2509242913" + ], + "abstract": "one of the critical deficiencies of sql is lack of support for n dimensional array based computations which are frequent in olap environments relational olap rolap applications have to emulate them using joins recently introduced sql window functions 18 and complex and inefficient case expressions the designated place in sql for specifying calculations is the select clause which is extremely limiting and forces the user to generate queries using nested views subqueries and complex joins furthermore sql query 
optimizer is pre occupied with determining efficient join orders and choosing optimal access methods and largely disregards optimization of complex numerical formulas execution methods concentrated on efficient computation of a cube 11 16 rather than on random access structures for inter row calculations this has created a gap that has been filled by spreadsheets and specialized molap engines which are good at formulas for mathematical modeling but lack the formalism of the relational model are difficult to manage and exhibit scalability problems this paper presents sql extensions involving array based calculations for complex modeling in addition we present optimizations access structures and execution models for processing them efficiently", + "title_raw": "Spreadsheets in RDBMS for OLAP", + "abstract_raw": "One of the critical deficiencies of SQL is lack of support for n-dimensional array-based computations which are frequent in OLAP environments. Relational OLAP (ROLAP) applications have to emulate them using joins, recently introduced SQL Window Functions [18] and complex and inefficient CASE expressions. The designated place in SQL for specifying calculations is the SELECT clause, which is extremely limiting and forces the user to generate queries using nested views, subqueries and complex joins. Furthermore, SQL-query optimizer is pre-occupied with determining efficient join orders and choosing optimal access methods and largely disregards optimization of complex numerical formulas. Execution methods concentrated on efficient computation of a cube [11], [16] rather than on random access structures for inter-row calculations. This has created a gap that has been filled by spreadsheets and specialized MOLAP engines, which are good at formulas for mathematical modeling but lack the formalism of the relational model, are difficult to manage, and exhibit scalability problems. 
This paper presents SQL extensions involving array based calculations for complex modeling. In addition, we present optimizations, access structures and execution models for processing them efficiently.", + "link": "https://www.semanticscholar.org/paper/be278ebf93c1ad910152f3aba00b45d97780bb09", + "scraped_abstract": null, + "citation_best": 72 + }, + { + "paper": "2127784167", + "venue": "1171178643", + "year": "2003", + "title": "improving the reliability of commodity operating systems", + "label": [ + "63540848", + "149635348", + "111919701", + "76399640", + "144240696", + "183469790", + "2780940931", + "167677733", + "32833848", + "196697905" + ], + "author": [ + "2304722209", + "1988425031", + "737098973" + ], + "reference": [ + "19830081", + "46191940", + "46382711", + "67472179", + "100273451", + "198325753", + "207759855", + "323988595", + "1485749815", + "1489110502", + "1492360400", + "1492409500", + "1497542366", + "1504127697", + "1511219199", + "1520961854", + "1537929875", + "1556462035", + "1569570229", + "1573399142", + "1580634328", + "1581005283", + "1597133564", + "1644882639", + "1650006494", + "1809664600", + "1813040609", + "1825457006", + "1829813581", + "1861383510", + "1945100066", + "1959256509", + "1971991620", + "1977773183", + "1979594720", + "1987507348", + "1996565109", + "1998070736", + "2002915275", + "2005383603", + "2015739534", + "2026114446", + "2026527406", + "2029414465", + "2036548030", + "2040015657", + "2042559279", + "2042851811", + "2045719854", + "2047924755", + "2055578361", + "2055631879", + "2056203846", + "2059652525", + "2060468294", + "2066049459", + "2066660519", + "2066859698", + "2073275508", + "2076265406", + "2077631114", + "2079029390", + "2081409107", + "2082000355", + "2083469471", + "2089138728", + "2089139117", + "2096165352", + "2096732299", + "2097589646", + "2097990218", + "2098735855", + "2101307187", + "2101669841", + "2103552508", + "2105874953", + "2106035061", + "2107082099", + "2107635250", + 
"2108795876", + "2109387036", + "2109739361", + "2110881009", + "2113350481", + "2114488210", + "2115309705", + "2117009500", + "2120185818", + "2121251946", + "2122960384", + "2124731241", + "2125951560", + "2126087831", + "2126523385", + "2126721920", + "2128336546", + "2130934773", + "2131726714", + "2133201251", + "2134119432", + "2134138865", + "2137164720", + "2137622193", + "2141071440", + "2141081008", + "2143443796", + "2144977275", + "2144984544", + "2145659622", + "2146878883", + "2148165526", + "2148577665", + "2148602057", + "2149356814", + "2150206209", + "2150769115", + "2153531096", + "2153649450", + "2155066383", + "2156205360", + "2159477904", + "2160275438", + "2161407365", + "2162819069", + "2166389164", + "2241531943", + "2293797831", + "2575328986", + "2751601659", + "2902140019", + "3149170319" + ], + "abstract": "despite decades of research in extensible operating system technology extensions such as device drivers remain a significant cause of system failures in windows xp for example drivers account for 85 of recently reported failures this paper describes nooks a reliability subsystem that seeks to greatly enhance os reliability by isolating the os from driver failures the nooks approach is practical rather than guaranteeing complete fault tolerance through a new and incompatible os or driver architecture our goal is to prevent the vast majority of driver caused crashes with little or no change to existing driver and system code to achieve this nooks isolates drivers within lightweight protection domains inside the kernel address space where hardware and software prevent them from corrupting the kernel nooks also tracks a driver s use of kernel resources to hasten automatic clean up during recovery to prove the viability of our approach we implemented nooks in the linux operating system and used it to fault isolate several device drivers our results show that nooks offers a substantial increase in the reliability of operating systems 
catching and quickly recovering from many faults that would otherwise crash the system in a series of 2000 fault injection tests nooks recovered automatically from 99 of the faults that caused linux to crash while nooks was designed for drivers our techniques generalize to other kernel extensions as well we demonstrate this by isolating a kernel mode file system and an in kernel internet service overall because nooks supports existing c language extensions runs on a commodity operating system and hardware and enables automated recovery it represents a substantial step beyond the specialized architectures and type safe languages required by previous efforts directed at safe extensibility", + "title_raw": "Improving the reliability of commodity operating systems", + "abstract_raw": "Despite decades of research in extensible operating system technology, extensions such as device drivers remain a significant cause of system failures. In Windows XP, for example, drivers account for 85% of recently reported failures. This paper describes Nooks, a reliability subsystem that seeks to greatly enhance OS reliability by isolating the OS from driver failures. The Nooks approach is practical: rather than guaranteeing complete fault tolerance through a new (and incompatible) OS or driver architecture, our goal is to prevent the vast majority of driver-caused crashes with little or no change to existing driver and system code. To achieve this, Nooks isolates drivers within lightweight protection domains inside the kernel address space, where hardware and software prevent them from corrupting the kernel. Nooks also tracks a driver's use of kernel resources to hasten automatic clean-up during recovery.To prove the viability of our approach, we implemented Nooks in the Linux operating system and used it to fault-isolate several device drivers. 
Our results show that Nooks offers a substantial increase in the reliability of operating systems, catching and quickly recovering from many faults that would otherwise crash the system. In a series of 2000 fault-injection tests, Nooks recovered automatically from 99% of the faults that caused Linux to crash.While Nooks was designed for drivers, our techniques generalize to other kernel extensions, as well. We demonstrate this by isolating a kernel-mode file system and an in-kernel Internet service. Overall, because Nooks supports existing C-language extensions, runs on a commodity operating system and hardware, and enables automated recovery, it represents a substantial step beyond the specialized architectures and type-safe languages required by previous efforts directed at safe extensibility.", + "link": "https://www.semanticscholar.org/paper/eb1ff0b8282340fb617f80a35cc40053b47a0a06", + "scraped_abstract": null, + "citation_best": 188 + }, + { + "paper": "2144552569", + "venue": "1171178643", + "year": "2003", + "title": "preserving peer replicas by rate limited sampled voting", + "label": [ + "91062100", + "35525427", + "38652104", + "2778674798", + "167677733", + "70440993" + ], + "author": [ + "1992922445", + "2024516279", + "2170952340", + "2473486677", + "2974797005", + "362712741" + ], + "reference": [ + "18751748", + "77534746", + "125192817", + "161179119", + "1498585374", + "1515582328", + "1520914943", + "1563061804", + "1580004440", + "1601379374", + "1638287885", + "1675137049", + "1677206658", + "1805165030", + "1980766218", + "1998350371", + "2024995040", + "2049913483", + "2064360543", + "2080288192", + "2104210894", + "2123820820", + "2126087831", + "2131627661", + "2147504831", + "2147587452", + "2150676586", + "2156186849", + "2159919478", + "2162505784", + "2174507869", + "2296427920", + "2394489512" + ], + "abstract": "the lockss project has developed and deployed in a world wide test a peer to peer system for preserving access to journals 
and other archival information published on the web it consists of a large number of independent low cost persistent web caches that cooperate to detect and repair damage to their content by voting in opinion polls based on this experience we present a design for and simulations of a novel protocol for voting in systems of this kind it incorporates rate limitation and intrusion detection to ensure that even some very powerful adversaries attacking over many years have only a small probability of causing irrecoverable damage before being detected", + "title_raw": "Preserving peer replicas by rate-limited sampled voting", + "abstract_raw": "The LOCKSS project has developed and deployed in a world-wide test a peer-to-peer system for preserving access to journals and other archival information published on the Web. It consists of a large number of independent, low-cost, persistent web caches that cooperate to detect and repair damage to their content by voting in \"opinion polls.\" Based on this experience, we present a design for and simulations of a novel protocol for voting in systems of this kind. 
It incorporates rate limitation and intrusion detection to ensure that even some very powerful adversaries attacking over many years have only a small probability of causing irrecoverable damage before being detected.", + "link": "https://www.semanticscholar.org/paper/8bf795cdbae5725d84c88466bf9e29a231c45b50", + "scraped_abstract": null, + "citation_best": 106 + }, + { + "paper": "2295705535", + "venue": "1171178643", + "year": "2003", + "title": "backtracking intrusions", + "label": [ + "16311509", + "100850083", + "111919701", + "556601545", + "2777884278", + "107054480", + "156884757", + "191267431" + ], + "author": [ + "2628906953", + "2101505567" + ], + "reference": [ + "24839522", + "303139982", + "1502173919", + "1600965014", + "1641762327", + "2094873755", + "2099137371", + "2103499520", + "2106388306", + "2117115928", + "2126058759", + "2128217000", + "2142892618", + "2144801589", + "2150042079", + "2154081981", + "3137220996" + ], + "abstract": "analyzing intrusions today is an arduous largely manual task because system administrators lack the information and tools needed to understand easily the sequence of steps that occurred in an attack the goal of backtracker is to identify automatically potential sequences of steps that occurred in an intrusion starting with a single detection point e g a suspicious file backtracker identifies files and processes that could have affected that detection point and displays chains of events in a dependency graph we use backtracker to analyze several real attacks against computers that we set up as honeypots in each case backtracker is able to highlight effectively the entry point used to gain access to the system and the sequence of steps from that entry point to the point at which we noticed the intrusion the logging required to support backtracker added 9 overhead in running time and generated 1 2 gb per day of log data for an operating system intensive workload", + "title_raw": "Backtracking intrusions", + 
"abstract_raw": "Analyzing intrusions today is an arduous, largely manual task because system administrators lack the information and tools needed to understand easily the sequence of steps that occurred in an attack. The goal of BackTracker is to identify automatically potential sequences of steps that occurred in an intrusion. Starting with a single detection point (e.g., a suspicious file), BackTracker identifies files and processes that could have affected that detection point and displays chains of events in a dependency graph. We use BackTracker to analyze several real attacks against computers that we set up as honeypots. In each case, BackTracker is able to highlight effectively the entry point used to gain access to the system and the sequence of steps from that entry point to the point at which we noticed the intrusion. The logging required to support BackTracker added 9% overhead in running time and generated 1.2 GB per day of log data for an operating-system intensive workload.", + "link": "https://www.semanticscholar.org/paper/1e4e963670719ee1cb6bff8f87a33edc39c69e21", + "scraped_abstract": null, + "citation_best": 303 + }, + { + "paper": "1971549254", + "venue": "1164321581", + "year": "2004", + "title": "perceptually supported image editing of text and graphics", + "label": [ + "2776674983", + "21442007", + "121684516", + "2780315633" + ], + "author": [ + "207967185", + "2157904231", + "2154476044", + "2115534364" + ], + "reference": [ + "1544458978", + "1545423298", + "1748744376", + "1971117094", + "1989790238", + "1997461518", + "2024196580", + "2066221198", + "2068091162", + "2075056807", + "2131054109", + "2131666595", + "2138566277", + "2143877328", + "2147271913", + "2150411098", + "2160726327", + "2177645021" + ], + "abstract": "this extended abstract reprises our uist 03 paper on perceptually supported image editing of text and graphics we introduce a novel image editing program called scanscribe that emphasizes easy selection and 
manipulation of material found in informal casual documents such as sketches handwritten notes whiteboard images screen snapshots and scanned documents", + "title_raw": "Perceptually-supported image editing of text and graphics", + "abstract_raw": "This extended abstract reprises our UIST '03 paper on \"Perceptually-Supported Image Editing of Text and Graphics.\" We introduce a novel image editing program, called ScanScribe, that emphasizes easy selection and manipulation of material found in informal, casual documents such as sketches, handwritten notes, whiteboard images, screen snapshots, and scanned documents.", + "link": "https://www.semanticscholar.org/paper/d3ba81328edfc1db1a49c9e72673264dc38ce4f1", + "scraped_abstract": null, + "citation_best": 77 + }, + { + "paper": "2069153192", + "venue": "1135342153", + "year": "2003", + "title": "scaling personalized web search", + "label": [ + "2780373767", + "48044578", + "2779172887", + "136134403", + "157484941", + "80444323" + ], + "author": [ + "2780464597", + "671527850" + ], + "reference": [ + "1562405179", + "1671881141", + "1854214752", + "1981202432", + "2096041903", + "2117831564", + "2130610812", + "2170344111", + "2295428206" + ], + "abstract": "recent web search techniques augment traditional text matching with a global notion of importance based on the linkage structure of the web such as in google s pagerank algorithm for more refined searches this global notion of importance can be specialized to create personalized views of importance for example importance scores can be biased according to a user specified set of initially interesting pages computing and storing all possible personalized views in advance is impractical as is computing personalized views at query time since the computation of each view requires an iterative computation over the web graph we present new graph theoretical results and a new technique based on these results that encode personalized views as partial vectors partial 
vectors are shared across multiple personalized views and their computation and storage costs scale well with the number of views our approach enables incremental computation so that the construction of personalized views from partial vectors is practical at query time we present efficient dynamic programming algorithms for computing partial vectors an algorithm for constructing personalized views from partial vectors and experimental results demonstrating the effectiveness and scalability of our techniques", + "title_raw": "Scaling personalized web search", + "abstract_raw": "Recent web search techniques augment traditional text matching with a global notion of \"importance\" based on the linkage structure of the web, such as in Google's PageRank algorithm. For more refined searches, this global notion of importance can be specialized to create personalized views of importance--for example, importance scores can be biased according to a user-specified set of initially-interesting pages. Computing and storing all possible personalized views in advance is impractical, as is computing personalized views at query time, since the computation of each view requires an iterative computation over the web graph. We present new graph-theoretical results, and a new technique based on these results, that encode personalized views as partial vectors. Partial vectors are shared across multiple personalized views, and their computation and storage costs scale well with the number of views. Our approach enables incremental computation, so that the construction of personalized views from partial vectors is practical at query time. 
We present efficient dynamic programming algorithms for computing partial vectors, an algorithm for constructing personalized views from partial vectors, and experimental results demonstrating the effectiveness and scalability of our techniques.", + "link": "https://www.semanticscholar.org/paper/4d1ebc191df29fa577b8c8bc2f85c15202e9df20", + "scraped_abstract": null, + "citation_best": 140 + }, + { + "paper": "2169463693", + "venue": "1135342153", + "year": "2003", + "title": "semtag and seeker bootstrapping the semantic web via automated semantic annotation", + "label": [ + "166423231", + "103692084", + "167379230", + "6881194", + "93518851", + "192800085", + "57298352", + "534406577", + "21959979", + "173862523", + "2129575", + "511149849", + "148792806", + "71472368", + "110903229", + "162005631", + "71695816", + "23123220" + ], + "author": [ + "2141345681", + "2402050362", + "2137779811", + "2008480993", + "2302699734", + "2310902222", + "1248537349", + "2059781006", + "2130754085", + "2090818239", + "2282406454" + ], + "reference": [ + "23685451", + "51686446", + "93195387", + "168525779", + "192304934", + "192438917", + "1514962553", + "1533023826", + "1553019137", + "1557256174", + "1564124598", + "1577793635", + "1580236550", + "1608048669", + "1684311429", + "1691409177", + "1710641084", + "1765465858", + "1832601430", + "1930023685", + "1979566122", + "2023726828", + "2059275719", + "2066736098", + "2087060113", + "2090656662", + "2115470070", + "2130337399", + "2130610812", + "2133109597", + "2156877380", + "2164947858", + "2780912412" + ], + "abstract": "this paper describes seeker a platform for large scale text analytics and semtag an application written on the platform to perform automated semantic tagging of large corpora we apply semtag to a collection of approximately 264 million web pages and generate approximately 434 million automatically disambiguated semantic tags published to the web as a label bureau providing metadata regarding the 434 
million annotations to our knowledge this is the largest scale semantic tagging effort to date we describe the seeker platform discuss the architecture of the semtag application describe a new disambiguation algorithm specialized to support ontological disambiguation of large scale data evaluate the algorithm and present our final results with information about acquiring and making use of the semantic tags we argue that automated large scale semantic tagging of ambiguous content can bootstrap and accelerate the creation of the semantic web", + "title_raw": "SemTag and seeker: bootstrapping the semantic web via automated semantic annotation", + "abstract_raw": "This paper describes Seeker, a platform for large-scale text analytics, and SemTag, an application written on the platform to perform automated semantic tagging of large corpora. We apply SemTag to a collection of approximately 264 million web pages, and generate approximately 434 million automatically disambiguated semantic tags, published to the web as a label bureau providing metadata regarding the 434 million annotations. To our knowledge, this is the largest scale semantic tagging effort to date.We describe the Seeker platform, discuss the architecture of the SemTag application, describe a new disambiguation algorithm specialized to support ontological disambiguation of large-scale data, evaluate the algorithm, and present our final results with information about acquiring and making use of the semantic tags. 
We argue that automated large scale semantic tagging of ambiguous content can bootstrap and accelerate the creation of the semantic web.", + "link": "https://www.semanticscholar.org/paper/f60ff1e240be1b1e74c164d0ef4fb2d67f0cbdc3", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "1964376592", + "venue": "1184914352", + "year": "2002", + "title": "on computing all abductive explanations", + "label": [ + "179799912", + "45374587", + "177264268", + "11413529" + ], + "author": [ + "420267153", + "2343481990" + ], + "reference": [ + "1546514315", + "1569596742", + "1965184317", + "1969172463", + "1999410326", + "2024768126", + "2031661891", + "2035824165", + "2058269520", + "2063645305", + "2065895258", + "2080947358", + "2100657934", + "2124301168", + "2156018937", + "2165144647" + ], + "abstract": "we consider the computation of all respectively a polynomial subset of the explanations of an abductive query from a horn theory and pay particular attention to whether the query is a positive or negative letter the explanation is based on literals from an assumption set and the horn theory is represented in terms of formulas or characteristic models we derive tractability results one of which refutes a conjecture by selman and levesque as well as intractability results and furthermore also semi tractability results in terms of solvability in quasi polynomial time our results complement previous results in the literature and elucidate the computational complexity of generating the set of explanations", + "title_raw": "On computing all abductive explanations", + "abstract_raw": "We consider the computation of all respectively a polynomial subset of the explanations of an abductive query from a Horn theory, and pay particular attention to whether the query is a positive or negative letter, the explanation is based on literals from an assumption set, and the Horn theory is represented in terms of formulas or characteristic models. 
We derive tractability results, one of which refutes a conjecture by Selman and Levesque, as well as intractability results, and furthermore also semi-tractability results in terms of solvability in quasi-polynomial time. Our results complement previous results in the literature, and elucidate the computational complexity of generating the set of explanations.", + "link": "https://www.semanticscholar.org/paper/2b8b6a760d8482786007f5b269a9468a6159c02e", + "scraped_abstract": null, + "citation_best": 46 + }, + { + "paper": "2154124206", + "venue": "1188739475", + "year": "2002", + "title": "discriminative training and maximum entropy models for statistical machine translation", + "label": [ + "2777530160", + "9679016", + "24687705", + "178980831", + "196956702", + "203005215", + "53893814", + "2776866485", + "130597682", + "97931131", + "204321447", + "39608478", + "195324797" + ], + "author": [ + "2233053262", + "2293758362" + ], + "reference": [ + "1517947178", + "1518357715", + "1529616844", + "1562694524", + "1585396714", + "1877570817", + "1979102019", + "2001792610", + "2006969979", + "2095743640", + "2096175520", + "2101105183", + "2117428648", + "2129545859", + "2500011979" + ], + "abstract": "we present a framework for statistical machine translation of natural languages based on direct maximum entropy models which contains the widely used source channel approach as a special case all knowledge sources are treated as feature functions which depend on the source language sentence the target language sentence and possible hidden variables this approach allows a baseline machine translation system to be extended easily by adding new feature functions we show that a baseline statistical machine translation system is significantly improved using this approach", + "title_raw": "Discriminative Training and Maximum Entropy Models for Statistical Machine Translation", + "abstract_raw": "We present a framework for statistical machine translation of natural languages 
based on direct maximum entropy models, which contains the widely used source-channel approach as a special case. All knowledge sources are treated as feature functions, which depend on the source language sentence, the target language sentence and possible hidden variables. This approach allows a baseline machine translation system to be extended easily by adding new feature functions. We show that a baseline statistical machine translation system is significantly improved using this approach.", + "link": "https://www.semanticscholar.org/paper/37fadfb6d60e83e24c72d8a90da5644b39d6e8f0", + "scraped_abstract": null, + "citation_best": 1063 + }, + { + "paper": "2098121410", + "venue": "1150208541", + "year": "2002", + "title": "constant round coin tossing with a man in the middle or realizing the shared random string model", + "label": [ + "91062100", + "33884865", + "1462715", + "178489894", + "196491621", + "91399829", + "80444323" + ], + "author": [ + "2199786071" + ], + "reference": [ + "103647506", + "115629558", + "1484557542", + "1490590996", + "1502416796", + "1532874975", + "1548880861", + "1588049796", + "1621317130", + "1626447266", + "1867273832", + "1970606468", + "1979215153", + "1995926987", + "1996888795", + "1998918799", + "2012045293", + "2021736779", + "2028554387", + "2064627910", + "2065722310", + "2067596507", + "2073086835", + "2079913403", + "2102526737", + "2103230338", + "2126661026", + "2129086113", + "2130993035", + "2134475940", + "2137883105", + "2149634854", + "2153825963", + "2159339434", + "2163172859", + "2293021588", + "2788224102", + "2911777915", + "2949488597", + "3146762596" + ], + "abstract": "we present the first constant round non malleable commitment scheme and the first constant round non malleable zero knowledge argument system as defined by dolev dwork and naor 1991 previous constructions either used a non constant number of rounds or were only secure under stronger setup assumptions an example of such an assumption is the 
shared random string model where we assume all parties have access to a reference string that was chosen uniformly at random by a trusted dealer we obtain these results by defining an adequate notion of non malleable coin tossing and presenting a constant round protocol that satisfies it this protocol allows us to transform protocols that are non malleable in a modified notion of the shared random string model into protocols that are non malleable in the plain model without any trusted dealer or setup assumptions observing that known constructions of a non interactive non malleable zero knowledge argument systems in the shared random string model de santis et al 2001 are in fact non malleable in the modified model and combining them with our coin tossing protocol we obtain the results mentioned above the techniques we use are different from those used in previous constructions of non malleable protocols in particular our protocol uses diagonalization and a non black box proof of security in a sense similar to barak s zero knowledge argument 2001", + "title_raw": "Constant-round coin-tossing with a man in the middle or realizing the shared random string model", + "abstract_raw": "We present the first constant-round non-malleable commitment scheme and the first constant-round non-malleable zero-knowledge argument system, as defined by Dolev, Dwork and Naor (1991). Previous constructions either used a non-constant number of rounds, or were only secure under stronger setup assumptions. An example of such an assumption is the shared random string model where we assume all parties have access to a reference string that was chosen uniformly at random by a trusted dealer. We obtain these results by defining an adequate notion of non-malleable coin-tossing, and presenting a constant-round protocol that satisfies it. 
This protocol allows us to transform protocols that are non-malleable in (a modified notion of) the shared random string model into protocols that are non-malleable in the plain model (without any trusted dealer or setup assumptions). Observing that known constructions of a non-interactive non-malleable zero-knowledge argument systems in the shared random string model (De Santis et. al., 2001) are in fact non-malleable in the modified model, and combining them with our coin-tossing protocol we obtain the results mentioned above. The techniques we use are different from those used in previous constructions of non-malleable protocols. In particular our protocol uses diagonalization and a non-black-box proof of security (in a sense similar to Barak's zero-knowledge argument (2001)).", + "link": "https://www.semanticscholar.org/paper/3824615f567952fc593123fc7f5a81884437a6c0", + "scraped_abstract": null, + "citation_best": 163 + }, + { + "paper": "2145818650", + "venue": "1150208541", + "year": "2002", + "title": "minimizing congestion in general networks", + "label": [ + "199845137", + "51675839", + "31258907", + "196921405", + "2775973920", + "2780513914", + "120314980", + "22592002" + ], + "author": [ + "2066115779" + ], + "reference": [ + "1512794094", + "1719880865", + "1988294273", + "2010442238", + "2024158969", + "2027920448", + "2042899161", + "2052416052", + "2057399436", + "2069489095", + "2107881198", + "2125278290", + "2170832776", + "2173141774", + "2211934910" + ], + "abstract": "a principle task in parallel and distributed systems is to reduce the communication load in the interconnection network as this is usually the major bottleneck for the performance of distributed applications we introduce a framework for solving online problems that aim to minimize the congestion i e the maximum load of a network link in general topology networks we apply this framework to the problem of online routing of virtual circuits and to a dynamic data management problem 
for both scenarios we achieve a competitive ratio of o log sup 3 n with respect to the congestion of the network links our online algorithm for the routing problem has the remarkable property that it is oblivious i e the path chosen for a virtual circuit is independent of the current network load oblivious routing strategies can easily be implemented in distributed environments and have therefore been intensively studied for certain network topologies as e g meshes tori and hypercubic networks this is the first oblivious path selection algorithm that achieves a polylogarithmic competitive ratio in general networks", + "title_raw": "Minimizing congestion in general networks", + "abstract_raw": "A principle task in parallel and distributed systems is to reduce the communication load in the interconnection network, as this is usually the major bottleneck for the performance of distributed applications. We introduce a framework for solving online problems that aim to minimize the congestion (i.e. the maximum load of a network link) in general topology networks. We apply this framework to the problem of online routing of virtual circuits and to a dynamic data management problem. For both scenarios we achieve a competitive ratio of O(log/sup 3/ n) with respect to the congestion of the network links. Our online algorithm for the routing problem has the remarkable property that it is oblivious, i.e., the path chosen for a virtual circuit is independent of the current network load. Oblivious routing strategies can easily be implemented in distributed environments and have therefore been intensively studied for certain network topologies as e.g. meshes, tori and hypercubic networks. 
This is the first oblivious path selection algorithm that achieves a polylogarithmic competitive ratio in general networks.", + "link": "https://www.semanticscholar.org/paper/d146946631dfef74b5f3a80e676785e290bc6696", + "scraped_abstract": null, + "citation_best": 261 + }, + { + "paper": "2121081915", + "venue": "1199533187", + "year": "2002", + "title": "isolating cause effect chains from computer programs", + "label": [ + "98183937", + "138673069", + "169590947", + "48103436", + "183469790", + "199360897", + "136388014", + "2777561058" + ], + "author": [ + "2106620385" + ], + "reference": [ + "303139982", + "1514468887", + "1543287511", + "1984248430", + "2020538887", + "2026117800", + "2068379792", + "2079027980", + "2079055089", + "2095064458", + "2116007667", + "2116409384", + "2124796303", + "2134691366", + "2141109493", + "2141442517", + "2148329403", + "2157402204", + "2162045655", + "2170224888", + "2912071127", + "3106729728" + ], + "abstract": "consider the execution of a failing program as a sequence of program states each state induces the following state up to the failure which variables and values of a program state are relevant for the failure we show how the delta debugging algorithm isolates the relevant variables and values by systematically narrowing the state difference between a passing run and a failing run by assessing the outcome of altered executions to determine wether a change in the program state makes a difference in the test outcome applying delta debugging to multiple states of the program automatically reveals the cause effect chain of the failure that is the variables and values that caused the failure in a case study our prototype implementation successfully isolated the cause effect chain for a failure of the gnu c compiler initially the c program to be compiled contained an addition of 1 0 this caused an addition operator in the intermediate rtl representation this caused a cycle in the rtl tree and this caused the compiler to 
crash", + "title_raw": "Isolating cause-effect chains from computer programs", + "abstract_raw": "Consider the execution of a failing program as a sequence of program states. Each state induces the following state, up to the failure. Which variables and values of a program state are relevant for the failure? We show how the Delta Debugging algorithm isolates the relevant variables and values by systematically narrowing the state difference between a passing run and a failing run---by assessing the outcome of altered executions to determine wether a change in the program state makes a difference in the test outcome. Applying Delta Debugging to multiple states of the program automatically reveals the cause-effect chain of the failure---that is, the variables and values that caused the failure.In a case study, our prototype implementation successfully isolated the cause-effect chain for a failure of the GNU C compiler: \"Initially, the C program to be compiled contained an addition of 1.0; this caused an addition operator in the intermediate RTL representation; this caused a cycle in the RTL tree---and this caused the compiler to crash.\"", + "link": "https://www.semanticscholar.org/paper/7c10b26af2f9f4181a7a3949af37af6e0b1657bf", + "scraped_abstract": null, + "citation_best": 530 + }, + { + "paper": "2045747317", + "venue": "1130985203", + "year": "2002", + "title": "pattern discovery in sequences under a markov assumption", + "label": [ + "119857082", + "143809311", + "207201462", + "189973286", + "5274069", + "105445830" + ], + "author": [ + "1982595335", + "2137074633" + ], + "reference": [ + "1490430289", + "1580213736", + "1949998359", + "1990762432", + "1998300401", + "2013570924", + "2032231928", + "2056002855", + "2073850887", + "2077013196", + "2089414039", + "2117812871", + "2129476886", + "2148603752", + "2155976638", + "2156909104", + "2159883877", + "2163017429", + "3150945973" + ], + "abstract": "in this paper we investigate the general problem of 
discovering recurrent patterns that are embedded in categorical sequences an important real world problem of this nature is motif discovery in dna sequences we investigate the fundamental aspects of this data mining problem that can make discovery easy or hard we present a general framework for characterizing learning in this context by deriving the bayes error rate for this problem under a markov assumption the bayes error framework demonstrates why certain patterns are much harder to discover than others it also explains the role of different parameters such as pattern length and pattern frequency in sequential discovery we demonstrate how the bayes error can be used to calibrate existing discovery algorithms providing a lower bound on achievable performance we discuss a number of fundamental issues that characterize sequential pattern discovery in this context present a variety of empirical results to complement and verify the theoretical analysis and apply our methodology to real world motif discovery problems in computational biology", + "title_raw": "Pattern discovery in sequences under a Markov assumption", + "abstract_raw": "In this paper we investigate the general problem of discovering recurrent patterns that are embedded in categorical sequences. An important real-world problem of this nature is motif discovery in DNA sequences. We investigate the fundamental aspects of this data mining problem that can make discovery \"easy\" or \"hard.\" We present a general framework for characterizing learning in this context by deriving the Bayes error rate for this problem under a Markov assumption. The Bayes error framework demonstrates why certain patterns are much harder to discover than others. It also explains the role of different parameters such as pattern length and pattern frequency in sequential discovery. We demonstrate how the Bayes error can be used to calibrate existing discovery algorithms, providing a lower bound on achievable performance. 
We discuss a number of fundamental issues that characterize sequential pattern discovery in this context, present a variety of empirical results to complement and verify the theoretical analysis, and apply our methodology to real-world motif-discovery problems in computational biology.", + "link": "https://www.semanticscholar.org/paper/cf640511fb17f544b99a80470b018b7ea0d7b7b4", + "scraped_abstract": null, + "citation_best": 37 + }, + { + "paper": "2121542813", + "venue": "1185109434", + "year": "2002", + "title": "memory resource management in vmware esx server", + "label": [ + "2777783080", + "188873839", + "193343404", + "25344961", + "111919701", + "76399640", + "2779760088", + "142355369", + "33925742" + ], + "author": [ + "2113042477" + ], + "reference": [ + "141391869", + "178090542", + "1506996919", + "1829813581", + "1916709771", + "1976029653", + "1978005952", + "2002175374", + "2006816934", + "2061031304", + "2064359039", + "2071582233", + "2076265406", + "2088837715", + "2089821795", + "2092537111", + "2104276806", + "2111087562", + "2115078506", + "2124516381", + "2125576935", + "2125895608", + "2141253292", + "2150709314", + "2159257711", + "2160992991", + "2162655049", + "2164136586", + "2167290952", + "2168886965" + ], + "abstract": "vmware esx server is a thin software layer designed to multiplex hardware resources efficiently among virtual machines running unmodified commodity operating systems this paper introduces several novel esx server mechanisms and policies for managing memory a ballooning technique reclaims the pages considered least valuable by the operating system running in a virtual machine an idle memory tax achieves efficient memory utilization while maintaining performance isolation guarantees content based page sharing and hot i o page remapping exploit transparent page remapping to eliminate redundancy and reduce copying overheads these techniques are combined to efficiently support virtual machine workloads that overcommit 
memory", + "title_raw": "Memory resource management in VMware ESX server", + "abstract_raw": "VMware ESX Server is a thin software layer designed to multiplex hardware resources efficiently among virtual machines running unmodified commodity operating systems. This paper introduces several novel ESX Server mechanisms and policies for managing memory. A ballooning technique reclaims the pages considered least valuable by the operating system running in a virtual machine. An idle memory tax achieves efficient memory utilization while maintaining performance isolation guarantees. Content-based page sharing and hot I/O page remapping exploit transparent page remapping to eliminate redundancy and reduce copying overheads. These techniques are combined to efficiently support virtual machine workloads that overcommit memory.", + "link": "https://www.semanticscholar.org/paper/242b5b545bb17879a73161134bc84d5ba3e3cf35", + "scraped_abstract": null, + "citation_best": 1215 + }, + { + "paper": "2003304137", + "venue": "1184151122", + "year": "2002", + "title": "monadic datalog and the expressive power of languages for web information extraction", + "label": [ + "195807954", + "2777857634", + "148230440", + "163797641", + "21959979", + "199360897", + "2776235265", + "80444323", + "3270621" + ], + "author": [ + "1873538321", + "2131931065" + ], + "reference": [ + "52840101", + "237884759", + "1482193060", + "1509428113", + "1527664624", + "1540516609", + "1558832481", + "1560050980", + "1569375617", + "1578270647", + "1583633027", + "1591926286", + "1605153944", + "1816620374", + "1819973606", + "1821155018", + "1927338256", + "1969965298", + "1970599921", + "1985581502", + "1991958955", + "1992810975", + "1995214664", + "2002089154", + "2008029457", + "2011992362", + "2013618518", + "2029764918", + "2029970296", + "2032386524", + "2033176822", + "2035020702", + "2042853248", + "2052409393", + "2053045757", + "2059275719", + "2072936489", + "2073663206", + "2079107830", + 
"2083317499", + "2091843304", + "2114240051", + "2122225808", + "2124812961", + "2135105491", + "2136451095", + "2137931097", + "2145404631", + "2148210463", + "2156049581", + "2162621793", + "2911295106", + "2913383557" + ], + "abstract": "research on information extraction from web pages wrapping has seen much activity in recent times particularly systems implementations but little work has been done on formally studying the expressiveness of the formalisms proposed or on the theoretical foundations of wrapping in this paper we first study monadic datalog as a wrapping language over ranked or unranked tree structures using previous work by neven and schwentick we show that this simple language is equivalent to full monadic second order logic mso in its ability to specify wrappers we believe that mso has the right expressiveness required for web information extraction and thus propose mso as a yardstick for evaluating and comparing wrappers using the above result we study the kernel fragment elog of the elog wrapping language used in the lixto system a visual wrapper generator the striking fact here is that elog exactly captures mso yet is easier to use indeed programs in this language can be entirely visually specified we also formally compare elog to other wrapping languages proposed in the literature", + "title_raw": "Monadic datalog and the expressive power of languages for web information extraction", + "abstract_raw": "Research on information extraction from Web pages (wrapping) has seen much activity in recent times (particularly systems implementations), but little work has been done on formally studying the expressiveness of the formalisms proposed or on the theoretical foundations of wrapping.In this paper, we first study monadic datalog as a wrapping language (over ranked or unranked tree structures). 
Using previous work by Neven and Schwentick, we show that this simple language is equivalent to full monadic second order logic (MSO) in its ability to specify wrappers. We believe that MSO has the right expressiveness required for Web information extraction and thus propose MSO as a yardstick for evaluating and comparing wrappers.Using the above result, we study the kernel fragment Elog- of the Elog wrapping language used in the Lixto system (a visual wrapper generator). The striking fact here is that Elog- exactly captures MSO, yet is easier to use. Indeed, programs in this language can be entirely visually specified. We also formally compare Elog to other wrapping languages proposed in the literature.", + "link": "https://www.semanticscholar.org/paper/48e94dbedf4924b537d4a0713b7dad88648b2228", + "scraped_abstract": null, + "citation_best": 58 + }, + { + "paper": "1981825277", + "venue": "1140684652", + "year": "2002", + "title": "novelty and redundancy detection in adaptive filtering", + "label": [ + "137293760", + "83702630", + "178674793", + "102248274", + "124101348", + "2780762811", + "152124472", + "178980831" + ], + "author": [ + "2671671868", + "2148123616", + "172536002" + ], + "reference": [ + "66569044", + "1523949738", + "1964348731", + "1979663447", + "2011516515", + "2036139138", + "2064988570", + "2086925418", + "2092929923", + "2095368471", + "2120084270", + "2135909747", + "2136542423", + "2140011080" + ], + "abstract": "this paper addresses the problem of extending an adaptive information filtering system to make decisions about the novelty and redundancy of relevant documents it argues that relevance and redundance should each be modelled explicitly and separately a set of five redundancy measures are proposed and evaluated in experiments with and without redundancy thresholds the experimental results demonstrate that the cosine similarity metric and a redundancy measure based on a mixture of language models are both effective for identifying 
redundant documents", + "title_raw": "Novelty and redundancy detection in adaptive filtering", + "abstract_raw": "This paper addresses the problem of extending an adaptive information filtering system to make decisions about the novelty and redundancy of relevant documents. It argues that relevance and redundance should each be modelled explicitly and separately. A set of five redundancy measures are proposed and evaluated in experiments with and without redundancy thresholds. The experimental results demonstrate that the cosine similarity metric and a redundancy measure based on a mixture of language models are both effective for identifying redundant documents.", + "link": "https://www.semanticscholar.org/paper/2f7f81529c67e9d98722f4ee17fe3808f0752b5e", + "scraped_abstract": null, + "citation_best": 441 + }, + { + "paper": "2295284025", + "venue": "1164321581", + "year": "2003", + "title": "clothing manipulation", + "label": [ + "177264268", + "89505385", + "121684516" + ], + "author": [ + "2152110089", + "3186889977" + ], + "reference": [ + "3788360", + "1492940985", + "1496369700", + "1967934102", + "1969561071", + "1978235861", + "1984448916", + "1996832320", + "2003563812", + "2026687385", + "2046911213", + "2072005677", + "2143173427", + "2235036220", + "2296630161" + ], + "abstract": "this paper presents interaction techniques and the underlying implementations for putting clothes on a 3d character and manipulating them the user paints freeform marks on the clothes and corresponding marks on the 3d character the system then puts the clothes around the body so that corresponding marks match internally the system grows the clothes on the body surface around the marks while maintaining basic cloth constraints via simple relaxation steps the entire computation takes a few seconds after that the user can adjust the placement of the clothes by an enhanced dragging operation unlike standard dragging where the user moves a set of vertices in a single direction in 
3d space our dragging operation moves the cloth along the body surface to make possible more flexible operations the user can apply pushpins to fix certain cloth points during dragging the techniques are ideal for specifying an initial cloth configuration before applying a more sophisticated cloth simulation", + "title_raw": "Clothing manipulation", + "abstract_raw": "This paper presents interaction techniques (and the underlying implementations) for putting clothes on a 3D character and manipulating them. The user paints freeform marks on the clothes and corresponding marks on the 3D character; the system then puts the clothes around the body so that corresponding marks match. Internally, the system grows the clothes on the body surface around the marks while maintaining basic cloth constraints via simple relaxation steps. The entire computation takes a few seconds. After that, the user can adjust the placement of the clothes by an enhanced dragging operation. Unlike standard dragging where the user moves a set of vertices in a single direction in 3D space, our dragging operation moves the cloth along the body surface to make possible more flexible operations. The user can apply pushpins to fix certain cloth points during dragging. 
The techniques are ideal for specifying an initial cloth configuration before applying a more sophisticated cloth simulation.", + "link": "https://www.semanticscholar.org/paper/52655049da787dc623ed139c453a0c9e099402b7", + "scraped_abstract": null, + "citation_best": 3 + }, + { + "paper": "2006591097", + "venue": "1135342153", + "year": "2002", + "title": "abstracting application level web security", + "label": [ + "117110713", + "22111027", + "38652104", + "109297577", + "62913178", + "76178495", + "13159133", + "527648132", + "86844869", + "103377522", + "6353995", + "130436687", + "195518309", + "154908896", + "114869243", + "174683762", + "29983905", + "136764020", + "184842701", + "79373723", + "59241245", + "121822524", + "48044578", + "77109596", + "131275738" + ], + "author": [ + "2294836853", + "2161161904" + ], + "reference": [ + "1535072080", + "1546727036", + "1583044117", + "1585665690", + "1596171380", + "1596447773", + "1829244603", + "1970104544", + "2131300413", + "2161433768", + "2166822586", + "2343161693", + "2523075099", + "2725758462", + "3145042860" + ], + "abstract": "application level web security refers to vulnerabilities inherent in the code of a web application itself irrespective of the technologies in which it is implemented or the security of the web server back end database on which it is built in the last few months application level vulnerabilities have been exploited with serious consequences hackers have tricked e commerce sites into shipping goods for no charge user names and passwords have been harvested and condential information such as addresses and credit card numbers has been leaked in this paper we investigate new tools and techniques which address the problem of application level web security we i describe a scalable structuring mechanism facilitating the abstraction of security policies from large web applications developed in heterogenous multi platform environments ii present a tool which assists programmers develop 
secure applications which are resilient to a wide range of common attacks and iii report results and experience arising from our implementation of these techniques", + "title_raw": "Abstracting application-level web security", + "abstract_raw": "Application-level web security refers to vulnerabilities inherent in the code of a web-application itself (irrespective of the technologies in which it is implemented or the security of the web-server/back-end database on which it is built). In the last few months application-level vulnerabilities have been exploited with serious consequences: hackers have tricked e-commerce sites into shipping goods for no charge, user-names and passwords have been harvested and condential information (such as addresses and credit-card numbers) has been leaked.In this paper we investigate new tools and techniques which address the problem of application-level web security. We (i) describe a scalable structuring mechanism facilitating the abstraction of security policies from large web-applications developed in heterogenous multi-platform environments; (ii) present a tool which assists programmers develop secure applications which are resilient to a wide range of common attacks; and (iii) report results and experience arising from our implementation of these techniques.", + "link": "https://www.semanticscholar.org/paper/149fadb68c5d7ea6d43eb2cbc1a27aa34b4f5660", + "scraped_abstract": null, + "citation_best": 35 + }, + { + "paper": "2139403546", + "venue": "1188739475", + "year": "2001", + "title": "fast decoding and optimal decoding for machine translation", + "label": [ + "177264268", + "204397858", + "159363923", + "57273362", + "185588885", + "11413529", + "193969084", + "203005215", + "137836250" + ], + "author": [ + "2099510583", + "2277515789", + "2330030585", + "1853234685", + "2177616917" + ], + "reference": [ + "1525605957", + "1555286493", + "1650993530", + "1667614912", + "1935980843", + "2006969979", + "2011039300", + 
"2012511220", + "2035227369", + "2146418175", + "2158164089" + ], + "abstract": "a good decoding algorithm is critical to the success of any statistical machine translation system the decoder s job is to find the translation that is most likely according to set of previously learned parameters and a formula for combining them since the space of possible translations is extremely large typical decoding algorithms are only able to examine a portion of it thus risking to miss good solutions in this paper we compare the speed and output quality of a traditional stack based decoding algorithm with two new decoders a fast greedy decoder and a slow but optimal decoder that treats decoding as an integer programming optimization problem", + "title_raw": "Fast Decoding and Optimal Decoding for Machine Translation", + "abstract_raw": "A good decoding algorithm is critical to the success of any statistical machine translation system. The decoder's job is to find the translation that is most likely according to set of previously learned parameters (and a formula for combining them). Since the space of possible translations is extremely large, typical decoding algorithms are only able to examine a portion of it, thus risking to miss good solutions. 
In this paper, we compare the speed and output quality of a traditional stack-based decoding algorithm with two new decoders: a fast greedy decoder and a slow but optimal decoder that treats decoding as an integer-programming optimization problem.", + "link": "https://www.semanticscholar.org/paper/dd5514876b7e1c09b6d2f931d90bb34aa3501441", + "scraped_abstract": null, + "citation_best": 233 + }, + { + "paper": "2155693943", + "venue": "1188739475", + "year": "2001", + "title": "immediate head parsing for language models", + "label": [ + "100279451", + "137293760", + "42560504", + "118364021", + "168740440", + "166651950", + "115355334", + "186644900", + "204321447", + "35164859", + "143613975", + "137546455" + ], + "author": [ + "103415842" + ], + "reference": [ + "186828541", + "1535015163", + "1549364818", + "1632114991", + "1953828586", + "1955233831", + "1986543644", + "2092654472", + "2103005629", + "2104399512", + "2108321481", + "2119659342", + "2123893795", + "2135375068", + "2153439141", + "2161204834", + "2949237929", + "3021452258" + ], + "abstract": "we present two language models based upon an immediate head parser our name for a parser that conditions all events below a constituent c upon the head of c while all of the most accurate statistical parsers are of the immediate head variety no previous grammatical language model uses this technology the perplexity for both of these models significantly improve upon the trigram model base line as well as the best previous grammar based language model for the better of our two models these improvements are 24 and 14 respectively we also suggest that improvement of the underlying parser should significantly improve the model s perplexity and that even in the near term there is a lot of potential for improvement in immediate head language models", + "title_raw": "Immediate-Head Parsing for Language Models", + "abstract_raw": "We present two language models based upon an \"immediate-head\" parser --- our name 
for a parser that conditions all events below a constituent c upon the head of c. While all of the most accurate statistical parsers are of the immediate-head variety, no previous grammatical language model uses this technology. The perplexity for both of these models significantly improve upon the trigram model base-line as well as the best previous grammar-based language model. For the better of our two models these improvements are 24% and 14% respectively. We also suggest that improvement of the underlying parser should significantly improve the model's perplexity and that even in the near term there is a lot of potential for improvement in immediate-head language models.", + "link": "https://www.semanticscholar.org/paper/71093afbe9da56e599fd7af8a5d4832ad0b15ded", + "scraped_abstract": null, + "citation_best": 308 + }, + { + "paper": "1960231166", + "venue": "1203999783", + "year": "2001", + "title": "complexity results for structure based causality", + "label": [ + "49937458", + "179799912", + "127964446", + "164215192" + ], + "author": [ + "420267153", + "297530023" + ], + "reference": [ + "81871881", + "133789137", + "1504628537", + "1511107244", + "1964821516", + "1989159237", + "2000805093", + "2012357101", + "2071894919", + "2078578754", + "2795329138", + "2963412070", + "3133236490" + ], + "abstract": "", + "title_raw": "Complexity results for structure-based causality", + "abstract_raw": "", + "link": "https://www.semanticscholar.org/paper/dfcc94113515dc7108835e3afce9b89d7e4cb740", + "scraped_abstract": null, + "citation_best": 88 + }, + { + "paper": "2134206624", + "venue": "1184151122", + "year": "2001", + "title": "optimal aggregation algorithms for middleware", + "label": [ + "135802936", + "64729616", + "101722063", + "162319229", + "11413529" + ], + "author": [ + "2103086064", + "2223366040", + "1023361224" + ], + "reference": [ + "64531727", + "1498726528", + "1552828154", + "1570542661", + "1600580971", + "1833785989", + "1850437073", + 
"1990313671", + "2007771077", + "2020007061", + "2020919487", + "2039407144", + "2041645394", + "2053550438", + "2086174602", + "2093191240", + "2098025050", + "2099797738", + "2114909350", + "2141649964", + "2164520297", + "2168605051", + "2295428206", + "2912565176" + ], + "abstract": "assume that each object in a database has m grades or scores one for each of m attributes for example an object can have a color grade that tells how red it is and a shape grade that tells how round it is for each attribute there is a sorted list which lists each object and its grade under that attribute sorted by grade highest grade first there is some monotone aggregation function or combining rule such as min or average that combines the individual grades to obtain an overall grade to determine objects that have the best overall grades the naive algorithm must access every object in the database to find its grade under each attribute fagin has given an algorithm fagin s algorithm or fa that is much more efficient for some distributions on grades and for some monotone aggregation functions fa is optimal in a high probability sense we analyze an elegant and remarkably simple algorithm the threshold algorithm or ta that is optimal in a much stronger sense than fa we show that ta is essentially optimal not just for some monotone aggregation functions but for all of them and not just in a high probability sense but over every database unlike fa which requires large buffers whose size may grow unboundedly as the database size grows ta requires only a small constant size buffer we distinguish two types of access sorted access where the middleware system obtains the grade of an object in some sorted list by proceeding through the list sequentially from the top and random access where the middleware system requests the grade of object in a list and obtains it in one step we consider the scenarios where random access is either impossible or expensive relative to sorted access and provide 
algorithms that are essentially optimal for these cases as well", + "title_raw": "Optimal aggregation algorithms for middleware", + "abstract_raw": "Assume that each object in a database has m grades, or scores, one for each of m attributes. For example, an object can have a color grade, that tells how red it is, and a shape grade, that tells how round it is. For each attribute, there is a sorted list, which lists each object and its grade under that attribute, sorted by grade (highest grade first). There is some monotone aggregation function, or combining rule, such as min or average, that combines the individual grades to obtain an overall grade. To determine objects that have the best overall grades, the naive algorithm must access every object in the database, to find its grade under each attribute. Fagin has given an algorithm (\u201cFagin's Algorithm\u201d, or FA) that is much more efficient. For some distributions on grades, and for some monotone aggregation functions, FA is optimal in a high-probability sense. We analyze an elegant and remarkably simple algorithm (\u201cthe threshold algorithm\u201d, or TA) that is optimal in a much stronger sense than FA. We show that TA is essentially optimal, not just for some monotone aggregation functions, but for all of them, and not just in a high-probability sense, but over every database. Unlike FA, which requires large buffers (whose size may grow unboundedly as the database size grows), TA requires only a small, constant-size buffer. We distinguish two types of access: sorted access (where the middleware system obtains the grade of an object in some sorted list by proceeding through the list sequentially from the top), and random access (where the middleware system requests the grade of object in a list, and obtains it in one step). 
We consider the scenarios where random access is either impossible, or expensive relative to sorted access, and provide algorithms that are essentially optimal for these cases as well.", + "link": "https://www.semanticscholar.org/paper/4bd13c8c3086ec060885081cfd76286749098683", + "scraped_abstract": null, + "citation_best": 1085 + }, + { + "paper": "2094661073", + "venue": "1140684652", + "year": "2001", + "title": "temporal summaries of new topics", + "label": [ + "5366617", + "204321447", + "170858558", + "23123220" + ], + "author": [ + "2097030689", + "2428762169", + "2100891832" + ], + "reference": [ + "219285816", + "1521682831", + "1574901103", + "1577877742", + "1588495139", + "1974339500", + "1978672522", + "1980813323", + "1996620594", + "1999321610", + "2005492507", + "2026162439", + "2029869027", + "2031275836", + "2034771957", + "2038316483", + "2039373390", + "2051618067", + "2055452728", + "2062432826", + "2067879405", + "2083305840", + "2093390569", + "2101390659", + "2115541970", + "2128672521", + "2135909747", + "2158291389", + "2166347079", + "3138773240" + ], + "abstract": "we discuss technology to help a person monitor changes in news coverage over time we define temporal summaries of news stories as extracting a single sentence from each event within a news topic where the stories are presented one at a time and sentences from a story must be ranked before the next story can be considered we explain a method for evaluation and describe an evaluation corpus that we have built we also propose several methods for constructing temporal summaries and evaluate their effectiveness in comparison to degenerate cases we show that simple approaches are effective but that the problem is far from solved", + "title_raw": "Temporal summaries of new topics", + "abstract_raw": "We discuss technology to help a person monitor changes in news coverage over time. 
We define temporal summaries of news stories as extracting a single sentence from each event within a news topic, where the stories are presented one at a time and sentences from a story must be ranked before the next story can be considered. We explain a method for evaluation, and describe an evaluation corpus that we have built. We also propose several methods for constructing temporal summaries and evaluate their effectiveness in comparison to degenerate cases. We show that simple approaches are effective, but that the problem is far from solved.", + "link": "https://www.semanticscholar.org/paper/b3365e64e4ce52823e25eea0fa16096d6cf383f0", + "scraped_abstract": null, + "citation_best": 261 + }, + { + "paper": "2163336863", + "venue": "1175089206", + "year": "2001", + "title": "locally adaptive dimensionality reduction for indexing large time series databases", + "label": [ + "70518039", + "75165309", + "77088390", + "22789450", + "120174047", + "21809047", + "2639959", + "116738811", + "46286280" + ], + "author": [ + "2170070822", + "2124676925", + "1996789426", + "2201039448" + ], + "reference": [ + "151353940", + "151863654", + "1484672141", + "1494985239", + "1499049447", + "1499117135", + "1509578651", + "1512584621", + "1515412672", + "1577640466", + "1587157435", + "1723433588", + "1864972570", + "1965627686", + "1969357402", + "1978572064", + "2001103857", + "2002328435", + "2014122208", + "2016876497", + "2031435600", + "2036557187", + "2042591571", + "2046144220", + "2053596628", + "2062618908", + "2066796814", + "2080801975", + "2083888736", + "2084481683", + "2086086639", + "2097983034", + "2101005720", + "2106626784", + "2109225550", + "2116076814", + "2118269922", + "2118314386", + "2128061541", + "2134423343", + "2134627110", + "2143702666", + "2145910025", + "2148039410", + "2155301533", + "2159113403", + "2163976215", + "2167035411", + "2167833054" + ], + "abstract": "similarity search in large time series databases has attracted much research 
interest recently it is a difficult problem because of the typically high dimensionality of the data the most promising solutions involve performing dimensionality reduction on the data then indexing the reduced data with a multidimensional index structure many dimensionality reduction techniques have been proposed including singular value decomposition svd the discrete fourier transform dft and the discrete wavelet transform dwt in this work we introduce a new dimensionality reduction technique which we call adaptive piecewise constant approximation apca while previous techniques e g svd dft and dwt choose a common representation for all the items in the database that minimizes the global reconstruction error apca approximates each time series by a set of constant value segments of varying lengths such that their individual reconstruction errors are minimal we show how apca can be indexed using a multidimensional index structure we propose two distance measures in the indexed space that exploit the high fidelity of apca for fast searching a lower bounding euclidean distance approximation and a non lower bounding but very tight euclidean distance approximation and show how they can support fast exact searching and even faster approximate searching on the same index structure we theoretically and empirically compare apca to all the other techniques and demonstrate its superiority", + "title_raw": "Locally adaptive dimensionality reduction for indexing large time series databases", + "abstract_raw": "Similarity search in large time series databases has attracted much research interest recently. It is a difficult problem because of the typically high dimensionality of the data.. The most promising solutions involve performing dimensionality reduction on the data, then indexing the reduced data with a multidimensional index structure. 
Many dimensionality reduction techniques have been proposed, including Singular Value Decomposition (SVD), the Discrete Fourier transform (DFT), and the Discrete Wavelet Transform (DWT). In this work we introduce a new dimensionality reduction technique which we call Adaptive Piecewise Constant Approximation (APCA). While previous techniques (e.g., SVD, DFT and DWT) choose a common representation for all the items in the database that minimizes the global reconstruction error, APCA approximates each time series by a set of constant value segments of varying lengths such that their individual reconstruction errors are minimal. We show how APCA can be indexed using a multidimensional index structure. We propose two distance measures in the indexed space that exploit the high fidelity of APCA for fast searching: a lower bounding Euclidean distance approximation, and a non-lower bounding, but very tight Euclidean distance approximation and show how they can support fast exact searching, and even faster approximate searching on the same index structure. 
We theoretically and empirically compare APCA to all the other techniques and demonstrate its superiority.", + "link": "https://www.semanticscholar.org/paper/728211a7edfe536299670f34714de1b426d969dc", + "scraped_abstract": null, + "citation_best": 867 + }, + { + "paper": "2161168778", + "venue": "1171178643", + "year": "2001", + "title": "untrusted hosts and confidentiality secure program partitioning", + "label": [ + "38652104", + "45374587", + "71745522" + ], + "author": [ + "1994212081", + "2158099478", + "2119228451", + "2141746463" + ], + "reference": [ + "303139982", + "1215342543", + "1526649648", + "1560855680", + "1562978232", + "1581472742", + "1589806443", + "1591919839", + "1640592531", + "1864248001", + "1985439562", + "1991895580", + "1993941352", + "1997775274", + "2011333687", + "2016017569", + "2034711041", + "2036910349", + "2053409086", + "2057503672", + "2067012475", + "2069107692", + "2070061575", + "2080914957", + "2094873755", + "2120350928", + "2121134342", + "2122522758", + "2129278597", + "2134296086", + "2146749986", + "2147029498", + "2148144728", + "2150174204", + "2154564703", + "2158126684", + "2158196184", + "2168686464", + "2168753065", + "2170350256", + "2295903522", + "2997271062" + ], + "abstract": "this paper presents secure program partitioning a language based technique for protecting confidential data during computation in distributed systems containing mutually untrusted hosts confidentiality and integrity policies can be expressed by annotating programs with security types that constrain information flow these programs can then be partitioned automatically to run securely on heterogeneously trusted hosts the resulting communicating subprograms collectively implement the original program yet the system as a whole satisfies the security requirements of participating principals without requiring a universally trusted host machine the experience in applying this methodology and the performance of the resulting distributed code 
suggest that this is a promising way to obtain secure distributed computation", + "title_raw": "Untrusted hosts and confidentiality: secure program partitioning", + "abstract_raw": "This paper presents secure program partitioning, a language-based technique for protecting confidential data during computation in distributed systems containing mutually untrusted hosts. Confidentiality and integrity policies can be expressed by annotating programs with security types that constrain information flow; these programs can then be partitioned automatically to run securely on heterogeneously trusted hosts. The resulting communicating subprograms collectively implement the original program, yet the system as a whole satisfies the security requirements of participating principals without requiring a universally trusted host machine. The experience in applying this methodology and the performance of the resulting distributed code suggest that this is a promising way to obtain secure distributed computation.", + "link": "https://www.semanticscholar.org/paper/59c2cb4951804445257aa25048c3baa616f6bab8", + "scraped_abstract": null, + "citation_best": 13 + }, + { + "paper": "2053903896", + "venue": "1171178643", + "year": "2001", + "title": "base using abstraction to improve fault tolerance", + "label": [ + "63540848", + "26713055", + "149635348", + "168021876", + "50712370", + "2780940931", + "2777904410", + "120314980" + ], + "author": [ + "2121681675", + "2096975672", + "2084056563" + ], + "reference": [ + "54838500", + "1496288774", + "1515932031", + "1523720513", + "1540879848", + "1549404527", + "1552460860", + "1627303300", + "1661326830", + "1945100066", + "1952581049", + "1982431534", + "1985349143", + "2005373714", + "2015121269", + "2016646807", + "2020181081", + "2055631879", + "2067266271", + "2077409330", + "2088221489", + "2105309920", + "2109192777", + "2110026634", + "2110861182", + "2113741883", + "2114579022", + "2126087831", + "2126924915", + "2127351510", + 
"2147853062", + "2150348590", + "2150543724", + "2151791866", + "2152465173", + "2152893862", + "2156320870", + "2162064942", + "2228835857", + "2341872891", + "2613733113", + "3137220996", + "3151869053" + ], + "abstract": "software errors are a major cause of outages and they are increasingly exploited in malicious attacks byzantine fault tolerance allows replicated systems to mask some software errors but it is expensive to deploy this paper describes a replication technique base which uses abstraction to reduce the cost of byzantine fault tolerance and to improve its ability to mask software errors base reduces cost because it enables reuse of off the shelf service implementations it improves availability because each replica can be repaired periodically using an abstract view of the state stored by correct replicas and because each replica can run distinct or non deterministic service implementations which reduces the probability of common mode failures we built an nfs service where each replica can run a different off the shelf file system implementation and an object oriented database where the replicas ran the same non deterministic implementation these examples suggest that our technique can be used in practice in both cases the implementation required only a modest amount of new code and our performance results indicate that the replicated services perform comparably to the implementations that they reuse", + "title_raw": "BASE: using abstraction to improve fault tolerance", + "abstract_raw": "Software errors are a major cause of outages and they are increasingly exploited in malicious attacks. Byzantine fault tolerance allows replicated systems to mask some software errors but it is expensive to deploy. This paper describes a replication technique, BASE, which uses abstraction to reduce the cost of Byzantine fault tolerance and to improve its ability to mask software errors. 
BASE reduces cost because it enables reuse of off-the-shelf service implementations. It improves availability because each replica can be repaired periodically using an abstract view of the state stored by correct replicas, and because each replica can run distinct or non-deterministic service implementations, which reduces the probability of common mode failures. We built an NFS service where each replica can run a different off-the-shelf file system implementation, and an object-oriented database where the replicas ran the same, non-deterministic implementation. These examples suggest that our technique can be used in practice --- in both cases, the implementation required only a modest amount of new code, and our performance results indicate that the replicated services perform comparably to the implementations that they reuse.", + "link": "https://www.semanticscholar.org/paper/0d046054b4d2d9cf541c9bf6c9fc4a6cc0b47cdb", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "1994547327", + "venue": "1166315290", + "year": "2001", + "title": "phidgets easy development of physical interfaces through physical widgets", + "label": [ + "37789001", + "48677424", + "107457646", + "168065819", + "48103436", + "2778514511", + "89505385", + "2777904410", + "25621077" + ], + "author": [ + "2120666348", + "2004402387" + ], + "reference": [ + "135070850", + "1492939098", + "1596703051", + "1965415330", + "1984402152", + "2007202387", + "2013608024", + "2018759273", + "2061530813", + "2076352443", + "2092481071", + "2092493253", + "2138683923", + "2149891956", + "2151440594", + "2163419627", + "2544582816", + "3145637508" + ], + "abstract": "physical widgets or phidgets are to physical user interfaces what widgets are to graphical user interfaces similar to widgets phidgets abstract and package input and output devices they hide implementation and construction details they expose functionality through a well defined api and they have an optional on screen 
interactive interface for displaying and controlling device state unlike widgets phidgets also require a connection manager to track how devices appear on line a way to link a software phidget with its physical counterpart and a simulation mode to allow the programmer to develop debug and test a physical interface even when no physical device is present our evaluation shows that everyday programmers using phidgets can rapidly develop physical interfaces", + "title_raw": "Phidgets: easy development of physical interfaces through physical widgets", + "abstract_raw": "Physical widgets or phidgets are to physical user interfaces what widgets are to graphical user interfaces. Similar to widgets, phidgets abstract and package input and output devices: they hide implementation and construction details, they expose functionality through a well-defined API, and they have an (optional) on-screen interactive interface for displaying and controlling device state. Unlike widgets, phidgets also require: a connection manager to track how devices appear on-line; a way to link a software phidget with its physical counterpart; and a simulation mode to allow the programmer to develop, debug and test a physical interface even when no physical device is present. 
Our evaluation shows that everyday programmers using phidgets can rapidly develop physical interfaces.", + "link": "https://www.semanticscholar.org/paper/ca69858486184001559ac69f7f447dd1e9e99ffe", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2130642985", + "venue": "1133523790", + "year": "2001", + "title": "weaving relations for cache performance", + "label": [ + "157547923", + "59687516", + "51185590", + "141822785", + "201148951", + "107814850", + "120348434", + "113166858", + "25536678", + "167713795", + "173608175", + "36340418", + "120936851", + "115537543", + "199979278", + "38556500", + "188045654", + "59689275", + "189783530" + ], + "author": [ + "1787861840", + "1979791317", + "2112660313", + "1210403672" + ], + "reference": [ + "163898659", + "1487130678", + "1497748148", + "1538592187", + "1541475832", + "1555915743", + "1570477871", + "1645807896", + "1671280358", + "1987981153", + "2016622853", + "2019358506", + "2038057969", + "2039795745", + "2055468282", + "2063296107", + "2104954161", + "2110631345", + "2111967617", + "2118269922", + "2125233616", + "2153329411", + "2153691881", + "2155651153", + "2156000104", + "2157979971", + "2753710282" + ], + "abstract": "relational database systems have traditionally optimzed for i o performance and organized records sequentially on disk pages using the n ary storage model nsm a k a slotted pages recent research however indicates that cache utilization and performance is becoming increasingly important on modern platforms in this paper we first demonstrate that in page data placement is the key to high cache performance and that nsm exhibits low cache utilization on modern platforms next we propose a new data organization model called pax partition attributes across that significantly improves cache performance by grouping together all values of each attribute within each page because pax only affects layout inside the pages it incurs no storage penalty and does not affect i o 
behavior according to our experimental results when compared to nsm a pax exhibits superior cache and memory bandwidth utilization saving at least 75 of nsm s stall time due to data cache accesses b range selection queries and updates on memoryresident relations execute 17 25 faster and c tpc h queries involving i o execute 11 48 faster", + "title_raw": "Weaving Relations for Cache Performance", + "abstract_raw": "Relational database systems have traditionally optimzed for I/O performance and organized records sequentially on disk pages using the N-ary Storage Model (NSM) (a.k.a., slotted pages). Recent research, however, indicates that cache utilization and performance is becoming increasingly important on modern platforms. In this paper, we first demonstrate that in-page data placement is the key to high cache performance and that NSM exhibits low cache utilization on modern platforms. Next, we propose a new data organization model called PAX (Partition Attributes Across), that significantly improves cache performance by grouping together all values of each attribute within each page. Because PAX only affects layout inside the pages, it incurs no storage penalty and does not affect I/O behavior. 
According to our experimental results, when compared to NSM (a) PAX exhibits superior cache and memory bandwidth utilization, saving at least 75% of NSM\u2019s stall time due to data cache accesses, (b) range selection queries and updates on memoryresident relations execute 17-25% faster, and (c) TPC-H queries involving I/O execute 11-48% faster.", + "link": "https://www.semanticscholar.org/paper/0109b8d4f75feed4ffbd4b5d555bac1e2d27815d", + "scraped_abstract": null, + "citation_best": 327 + }, + { + "paper": "1985727586", + "venue": "1135342153", + "year": "2001", + "title": "engineering server driven consistency for large scale dynamic web services", + "label": [ + "31258907", + "77088390", + "48044578", + "100158260", + "197298091", + "516187249", + "115537543", + "93996380", + "110875604", + "162005631" + ], + "author": [ + "2798365388", + "2292936287", + "2051666199", + "1251078740" + ], + "reference": [ + "21274465", + "142559593", + "147895047", + "1253666328", + "1507801529", + "1533813939", + "1779735989", + "1918225299", + "1974923925", + "2005373714", + "2012481411", + "2016403536", + "2022261236", + "2066955419", + "2087912304", + "2096122728", + "2107460938", + "2109293745", + "2125264097", + "2125387478", + "2127234011", + "2127440409", + "2127768834", + "2147740195", + "2166868187" + ], + "abstract": "many researchers have shown that server driven consistency protocols can potentially reduce read latency server driven consistency protocols are particularly attractive for largescale dynamic web workloads because dynamically generated data can change rapidly and unpredictably however there have been no reports on engineering server driven consistency for such a workload this paper reports our experience in engineering server driven consistency for a sporting and event web site hosted by ibm one of the most popular web sites on the internet for the duration of the event our study focuses on scalability and cachability of dynamic content to assess 
scalability we measure both the amount of state that a server needs to maintain to ensure consistency and the bursts of load that a server sustains to send out invalidation messages when a popular object is modi ed we nd that it is possible to limit the size of the server s state without signi cant performance costs and that bursts of load can be smoothed out with minimal impact on the consistency guarantees to improve performance we systematically investigate several design issues for which prior research has suggested widely di erent solutions including how long servers should send invalidations to idle clients finally we quantify the performance impact of caching dynamic data with server driven consistency protocols and nd that it can reduce read latency by more than 10 we have implemented a prototype of a server driven consistency protocol based on our ndings on top of the popular squid cache", + "title_raw": "Engineering server-driven consistency for large scale dynamic Web services", + "abstract_raw": "Many researchers have shown that server-driven consistency protocols can potentially reduce read latency. Server-driven consistency protocols are particularly attractive for largescale dynamic web workloads because dynamically generated data can change rapidly and unpredictably. However, there have been no reports on engineering server-driven consistency for such a workload. This paper reports our experience in engineering server-driven consistency for a Sporting and Event web site hosted by IBM, one of the most popular web sites on the Internet for the duration of the event. Our study focuses on scalability and cachability of dynamic content. To assess scalability, we measure both the amount of state that a server needs to maintain to ensure consistency and the bursts of load that a server sustains to send out invalidation messages when a popular object is modi ed. 
We nd that it is possible to limit the size of the server's state without signi cant performance costs and that bursts of load can be smoothed out with minimal impact on the consistency guarantees. To improve performance, we systematically investigate several design issues for which prior research has suggested widely di erent solutions, including how long servers should send invalidations to idle clients. Finally, we quantify the performance impact of caching dynamic data with server-driven consistency protocols and nd that it can reduce read latency by more than 10%. We have implemented a prototype of a server-driven consistency protocol based on our ndings on top of the popular Squid cache.", + "link": "https://www.semanticscholar.org/paper/790b580d44a287de8682d09bc3c8bd37112856e8", + "scraped_abstract": null, + "citation_best": 66 + }, + { + "paper": "2096678443", + "venue": "1184914352", + "year": "2000", + "title": "the game of hex an automatic theorem proving approach to game programming", + "label": [ + "46149586", + "109347269", + "154945302", + "170828538", + "80444323", + "93826744" + ], + "author": [ + "1898342490" + ], + "reference": [ + "1636614024", + "1785811943", + "1973474784", + "1990221494", + "2032313369", + "2047008100", + "2104125501", + "2894564506" + ], + "abstract": "the game of hex is a two player game with simple rules a deep underlying mathematical beauty and a strategic complexity comparable to that of chess and go the massive game tree search techniques developed mostly for chess and successfully used for checkers othello and a number of other games become less useful for games with large branching factors like go and hex we offer a new approach which results in superior playing strength this approach emphasizes deep analysis of relatively few game positions in order to reach this goal we develop an automatic theorem proving technique for topological analysis of hex positions we also discuss in detail an idea of modeling hex positions 
with electrical resistor circuits we explain how this approach is implemented in hexy the strongest known hex playing computer program able to compete with best human players", + "title_raw": "The Game of Hex: An Automatic Theorem Proving Approach to Game Programming", + "abstract_raw": "The game of Hex is a two-player game with simple rules, a deep underlying mathematical beauty, and a strategic complexity comparable to that of Chess and Go. The massive game-tree search techniques developed mostly for Chess, and successfully used for Checkers, Othello, and a number of other games, become less useful for games with large branching factors like Go and Hex. We offer a new approach, which results in superior playing strength. This approach emphasizes deep analysis of relatively few game positions. In order to reach this goal, we develop an automatic theorem proving technique for topological analysis of Hex positions. We also discuss in detail an idea of modeling Hex positions with electrical resistor circuits. 
We explain how this approach is implemented in Hexy - the strongest known Hex-playing computer program, able to compete with best human players.", + "link": "https://www.semanticscholar.org/paper/7c7144e79a391c1c3577f1674874b469aa7072da", + "scraped_abstract": null, + "citation_best": 37 + }, + { + "paper": "2284662111", + "venue": "1184914352", + "year": "2000", + "title": "automatic invention of integer sequences", + "label": [ + "62764039", + "11413529" + ], + "author": [ + "2107862633", + "2000084407", + "2156731892" + ], + "reference": [ + "118351535", + "133258627", + "1498005597", + "1890599079", + "1968523464", + "1983552426", + "2072892619", + "2125330525", + "2149584320", + "2156769044", + "2319309530", + "3019648277" + ], + "abstract": "we report on the application of the hr program colton bundy walsh 1999 to the problem of automatically inventing integer sequences seventeen sequences invented by hr are interesting enough to have been accepted into the encyclopedia of integer sequences sloane 2000 and all were supplied with interesting conjectures about their nature also discovered by hr by extending hr we have enabled it to perform a two stage process of invention and investigation this involves generating both the definition and terms of a new sequence relating it to sequences already in the encyclopedia and pruning the output to help identify the most surprising and interesting results", + "title_raw": "Automatic Invention of Integer Sequences", + "abstract_raw": "We report on the application of the HR program (Colton, Bundy, & Walsh 1999) to the problem of automatically inventing integer sequences. Seventeen sequences invented by HR are interesting enough to have been accepted into the Encyclopedia of Integer Sequences (Sloane 2000) and all were supplied with interesting conjectures about their nature, also discovered by HR. By extending HR, we have enabled it to perform a two stage process of invention and investigation. 
This involves generating both the definition and terms of a new sequence, relating it to sequences already in the Encyclopedia and pruning the output to help identify the most surprising and interesting results.", + "link": "https://www.aaai.org/Library/AAAI/2000/aaai00-085.php", + "scraped_abstract": null, + "citation_best": 34 + }, + { + "paper": "2118119027", + "venue": "1184914352", + "year": "2000", + "title": "statistics based summarization step one sentence compression", + "label": [ + "204321447", + "2780719617", + "109364899", + "170858558" + ], + "author": [ + "2330030585", + "1853234685" + ], + "reference": [ + "99221312", + "219285816", + "1508165687", + "1537217019", + "1577877742", + "1658658360", + "2006969979", + "2061271742", + "2062270497", + "2066972299", + "2110017317", + "2118733980", + "2125055259", + "2153439141", + "2913739034" + ], + "abstract": "when humans produce summaries of documents they do not simply extract sentences and concatenate them rather they create new sentences that are grammatical that cohere with one another and that capture the most salient pieces of information in the original document given that large collections of text abstract pairs are available online it is now possible to envision algorithms that are trained to mimic this process in this paper we focus on sentence compression a simpler version of this larger challenge we aim to achieve two goals simultaneously our compressions should be grammatical and they should retain the most important pieces of information these two goals can conflict we devise both noisy channel and decision tree approaches to the problem and we evaluate results against manual compressions and a simple baseline", + "title_raw": "Statistics-Based Summarization - Step One: Sentence Compression", + "abstract_raw": "When humans produce summaries of documents, they do not simply extract sentences and concatenate them. 
Rather, they create new sentences that are grammatical, that cohere with one another, and that capture the most salient pieces of information in the original document. Given that large collections of text/abstract pairs are available online, it is now possible to envision algorithms that are trained to mimic this process. In this paper, we focus on sentence compression, a simpler version of this larger challenge. We aim to achieve two goals simultaneously:our compressions should be grammatical, and they should retain the most important pieces of information. These two goals can conflict. We devise both noisy-channel and decision-tree approaches to the problem, and we evaluate results against manual compressions and a simple baseline.", + "link": "https://www.aaai.org/Papers/AAAI/2000/AAAI00-108.pdf", + "scraped_abstract": null, + "citation_best": 410 + }, + { + "paper": "2103552898", + "venue": "1184914352", + "year": "2000", + "title": "local search characteristics of incomplete sat procedures", + "label": [ + "19889080", + "124145224", + "85522705", + "90189156", + "2780043526", + "116149140" + ], + "author": [ + "1817936516", + "1991776605" + ], + "reference": [ + "13289598", + "14839205", + "30950413", + "37101401", + "83818278", + "125178576", + "127040706", + "139325465", + "1507065364", + "1524408330", + "1526749820", + "1537595897", + "1574977213", + "1600919542", + "1667614912", + "1700222499", + "2095709533", + "2106440199", + "2108637261", + "2119190805", + "2157280385", + "2160444875", + "2165372647", + "2167658266" + ], + "abstract": "", + "title_raw": "Local Search Characteristics of Incomplete SAT Procedures", + "abstract_raw": "", + "link": "https://www.aaai.org/Library/AAAI/2000/aaai00-046.php", + "scraped_abstract": null, + "citation_best": 78 + }, + { + "paper": "2159128898", + "venue": "1158167855", + "year": "2000", + "title": "real time tracking of non rigid objects using mean shift", + "label": [ + "2777210071", + "132094186", + "48548287", + 
"31972630", + "24145651", + "3270621" + ], + "author": [ + "2054682710", + "2140766645", + "2568784767" + ], + "reference": [ + "204885769", + "1513763184", + "1687797484", + "1845517820", + "1964443764", + "2033009866", + "2072204744", + "2100548006", + "2104755791", + "2105238356", + "2106792145", + "2123947647", + "2125767025", + "2131656657", + "2131741173", + "2135346934", + "2140235142", + "2140935196", + "2149197198", + "2150117517", + "2161406034", + "2168682262", + "2413295641", + "2914885528" + ], + "abstract": "a new method for real time tracking of non rigid objects seen from a moving camera is proposed the central computational module is based on the mean shift iterations and finds the most probable target position in the current frame the dissimilarity between the target model its color distribution and the target candidates is expressed by a metric derived from the bhattacharyya coefficient the theoretical analysis of the approach shows that it relates to the bayesian framework while providing a practical fast and efficient solution the capability of the tracker to handle in real time partial occlusions significant clutter and target scale variations is demonstrated for several image sequences", + "title_raw": "Real-time tracking of non-rigid objects using mean shift", + "abstract_raw": "A new method for real time tracking of non-rigid objects seen from a moving camera is proposed. The central computational module is based on the mean shift iterations and finds the most probable target position in the current frame. The dissimilarity between the target model (its color distribution) and the target candidates is expressed by a metric derived from the Bhattacharyya coefficient. The theoretical analysis of the approach shows that it relates to the Bayesian framework while providing a practical, fast and efficient solution. 
The capability of the tracker to handle in real time partial occlusions, significant clutter, and target scale variations, is demonstrated for several image sequences.", + "link": "https://www.semanticscholar.org/paper/2cfa006b33084abe8160b001f9a24944cda25d05", + "scraped_abstract": null, + "citation_best": 2918 + }, + { + "paper": "1991962718", + "venue": "1130985203", + "year": "2000", + "title": "hancock a language for extracting signatures from data streams", + "label": [ + "77088390", + "89198739" + ], + "author": [ + "2134830209", + "2134374908", + "2949760587", + "2287929299" + ], + "reference": [ + "1517113043", + "2078599356", + "2149350443", + "2150847526", + "2166091981", + "2914011927" + ], + "abstract": "", + "title_raw": "Hancock: a language for extracting signatures from data streams", + "abstract_raw": "", + "link": "https://www.semanticscholar.org/paper/11da9ae15aa44c443df7688724e68396a3ab1aea", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2066859698", + "venue": "1185109434", + "year": "2000", + "title": "checking system rules using system specific programmer written compiler extensions", + "label": [ + "154488198", + "55439883", + "167955471", + "111919701", + "12096594", + "169590947", + "199360897", + "2777904410", + "172644921", + "199519371" + ], + "author": [ + "2163716051", + "2093545088", + "2112890893", + "2069701982" + ], + "reference": [ + "42002036", + "84258820", + "98763178", + "1527793496", + "1541457918", + "1556133362", + "1595810432", + "1965348053", + "1972544179", + "1974940962", + "1978220811", + "2029414465", + "2047226031", + "2069300761", + "2070869623", + "2107995193", + "2135274583", + "2138990624", + "2152056423", + "2159890891", + "2167345029", + "2169476734" + ], + "abstract": "systems software such as os kernels embedded systems and libraries must obey many rules for both correctness and performance common examples include accesses to variable a must be guarded by lock b system calls must 
check user pointers for validity before using them and message handlers should free their buffers as quickly as possible to allow greater parallelism unfortunately adherence to these rules is largely unchecked this paper attacks this problem by showing how system implementors can use meta level compilation mc to write simple system specific compiler extensions that automatically check their code for rule violations by melding domain specific knowledge with the automatic machinery of compilers mc brings the benefits of language level checking and optimizing to the higher meta level of the systems implemented in these languages this paper demonstrates the effectiveness of the mc approach by applying it to four complex real systems linux openbsd the xok exokernel and the flash machine s embedded software mc extensions found roughly 500 errors in these systems and led to numerous kernel patches most extensions were less than a hundred lines of code and written by implementors who had a limited understanding of the systems checked", + "title_raw": "Checking system rules using system-specific, programmer-written compiler extensions", + "abstract_raw": "Systems software such as OS kernels, embedded systems, and libraries must obey many rules for both correctness and performance. Common examples include \"accesses to variable A must be guarded by lock B,\" \"system calls must check user pointers for validity before using them,\" and \"message handlers should free their buffers as quickly as possible to allow greater parallelism.\" Unfortunately, adherence to these rules is largely unchecked. This paper attacks this problem by showing how system implementors can use meta-level compilation (MC) to write simple, system-specific compiler extensions that automatically check their code for rule violations. 
By melding domain-specific knowledge with the automatic machinery of compilers, MC brings the benefits of language-level checking and optimizing to the higher, \"meta\" level of the systems implemented in these languages. This paper demonstrates the effectiveness of the MC approach by applying it to four complex, real systems: Linux, OpenBSD, the Xok exokernel, and the FLASH machine's embedded software. MC extensions found roughly 500 errors in these systems and led to numerous kernel patches. Most extensions were less than a hundred lines of code and written by implementors who had a limited understanding of the systems checked.", + "link": "https://www.semanticscholar.org/paper/603860e3fbc6976148f7cdb88f942635f1d64ced", + "scraped_abstract": null, + "citation_best": 171 + }, + { + "paper": "2072737419", + "venue": "1127352206", + "year": "2000", + "title": "dynamo a transparent dynamic optimization system", + "label": [ + "149635348", + "50450317", + "169590947", + "118530786", + "137955351", + "67953723", + "173608175", + "2778971978", + "2777904410" + ], + "author": [ + "1952917645", + "2675049322", + "2173125747" + ], + "reference": [ + "56691616", + "1937689195", + "1991751536", + "1993318777", + "1999421692", + "2000504947", + "2009418036", + "2011669306", + "2018016444", + "2021951417", + "2063255488", + "2071568619", + "2084475036", + "2088494048", + "2089131124", + "2098771596", + "2101134669", + "2112768459", + "2139859676", + "2147121609", + "2157074753", + "2160796062", + "2163935347", + "2165423885", + "2167651816", + "2339669378", + "2798737326", + "3142323636" + ], + "abstract": "we describe the design and implementation of dynamo a software dynamic optimization system that is capable of transparently improving the performance of a native instruction stream as it executes on the processor the input native instruction stream to dynamo can be dynamically generated by a jit for example or it can come from the execution of a statically compiled native 
binary this paper evaluates the dynamo system in the latter more challenging situation in order to emphasize the limits rather than the potential of the system our experiments demonstrate that even statically optimized native binaries can be accelerated dynamo and often by a significant degree for example the average performance of o optimized specint95 benchmark binaries created by the hp product c compiler is improved to a level comparable to their o4 optimized version running without dynamo dynamo achieves this by focusing its efforts on optimization opportunities that tend to manifest only at runtime and hence opportunities that might be difficult for a static compiler to exploit dynamo s operation is transparent in the sense that it does not depend on any user annotations or binary instrumentation and does not require multiple runs or any special compiler operating system or hardware support the dynamo prototype presented here is a realistic implementation running on an hp pa 8000 workstation under the hpux 10 20 operating system", + "title_raw": "Dynamo: a transparent dynamic optimization system", + "abstract_raw": "We describe the design and implementation of Dynamo, a software dynamic optimization system that is capable of transparently improving the performance of a native instruction stream as it executes on the processor. The input native instruction stream to Dynamo can be dynamically generated (by a JIT for example), or it can come from the execution of a statically compiled native binary. This paper evaluates the Dynamo system in the latter, more challenging situation, in order to emphasize the limits, rather than the potential, of the system. Our experiments demonstrate that even statically optimized native binaries can be accelerated Dynamo, and often by a significant degree. 
For example, the average performance of -O optimized SpecInt95 benchmark binaries created by the HP product C compiler is improved to a level comparable to their -O4 optimized version running without Dynamo. Dynamo achieves this by focusing its efforts on optimization opportunities that tend to manifest only at runtime, and hence opportunities that might be difficult for a static compiler to exploit. Dynamo's operation is transparent in the sense that it does not depend on any user annotations or binary instrumentation, and does not require multiple runs, or any special compiler, operating system or hardware support. The Dynamo prototype presented here is a realistic implementation running on an HP PA-8000 workstation under the HPUX 10.20 operating system.", + "link": "https://www.semanticscholar.org/paper/6fdeb62011816cb7fb3be5a70e2e00e33af6ddac", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2035071899", + "venue": "1184151122", + "year": "2000", + "title": "auditing boolean attributes", + "label": [ + "2778861586", + "2779287364", + "137822555", + "80444323" + ], + "author": [ + "2261367123", + "2220829341", + "2195048431" + ], + "reference": [ + "1963547452", + "1981961329", + "1989799419", + "2011095877", + "2041025307", + "2044307594", + "2055725320", + "2062882367", + "2067858436", + "2113427031", + "2157314903", + "2752885492" + ], + "abstract": "we study the problem of auditing databases which support statistical sum queries to protect the security of sensitive information we focus on the special case in which the sensitive information is boolean principles and techniques developed for the security of statistical database in the case of continuous attributes do not apply here we prove certain strong complexity results suggesting that there is no general efficient solution for the auditing problem in this case we propose two efficient algorithms the first is applicable when the sum queries are one dimensional range queries we prove 
that the problem is np hard even in the two dimensional case the second is an approximate algorithm that maintains security although it may be too restrictive finally we consider a dual variant with continuous data but an aggregate function that is combinatorial in nature specifically we provide algorithms for two natural definitions of the auditing condition when the aggregate function is max", + "title_raw": "Auditing Boolean attributes", + "abstract_raw": "We study the problem of auditing databases which support statistical sum queries to protect the security of sensitive information; we focus on the special case in which the sensitive information is Boolean. Principles and techniques developed for the security of statistical database in the case of continuous attributes do not apply here. We prove certain strong complexity results suggesting that there is no general efficient solution for the auditing problem in this case. We propose two efficient algorithms: The first is applicable when the sum queries are one-dimensional range queries (we prove that the problem is NP-hard even in the two-dimensional case). The second is an approximate algorithm that maintains security, although it may be too restrictive. Finally, we consider a \u201cdual\u201d variant, with continuous data but an aggregate function that is combinatorial in nature. 
Specifically, we provide algorithms for two natural definitions of the auditing condition when the aggregate function is MAX.", + "link": "https://www.semanticscholar.org/paper/333a016bc00ff74da51ebf51f1c4f80298db7122", + "scraped_abstract": null, + "citation_best": 79 + }, + { + "paper": "1985554184", + "venue": "1140684652", + "year": "2000", + "title": "ir evaluation methods for retrieving highly relevant documents", + "label": [ + "189430467", + "2780667362", + "2777309117", + "97854310", + "87546605", + "197927960", + "23123220" + ], + "author": [ + "54234561", + "1949934518" + ], + "reference": [ + "84158582", + "103650626", + "119444765", + "1555108314", + "1564227049", + "1598520654", + "1974035951", + "1991466808", + "2017001830", + "2019976352", + "2039433449", + "2042204200", + "2049312520", + "2051497960", + "2054805201", + "2081477775", + "2083605078", + "2151012289", + "2171694224", + "2401023160" + ], + "abstract": "this paper proposes evaluation methods based on the use of non dichotomous relevance judgements in ir experiments it is argued that evaluation methods should credit ir methods for their ability to retrieve highly relevant documents this is desirable from the user point of view in modem large ir environments the proposed methods are 1 a novel application of p r curves and average precision computations based on separate recall bases for documents of different degrees of relevance and 2 two novel measures computing the cumulative gain the user obtains by examining the retrieval result up to a given ranked position we then demonstrate the use of these evaluation methods in a case study on the effectiveness of query types based on combinations of query structures and expansion in retrieving documents of various degrees of relevance the test was run with a best match retrieval system in query i in a text database consisting of newspaper articles the results indicate that the tested strong query structures are most effective in retrieving 
highly relevant documents the differences between the query types are practically essential and statistically significant more generally the novel evaluation methods and the case demonstrate that non dichotomous relevance assessments are applicable in ir experiments may reveal interesting phenomena and allow harder testing of ir methods", + "title_raw": "IR evaluation methods for retrieving highly relevant documents", + "abstract_raw": "This paper proposes evaluation methods based on the use of non-dichotomous relevance judgements in IR experiments. It is argued that evaluation methods should credit IR methods for their ability to retrieve highly relevant documents. This is desirable from the user point of view in modem large IR environments. The proposed methods are (1) a novel application of P-R curves and average precision computations based on separate recall bases for documents of different degrees of relevance, and (2) two novel measures computing the cumulative gain the user obtains by examining the retrieval result up to a given ranked position. We then demonstrate the use of these evaluation methods in a case study on the effectiveness of query types, based on combinations of query structures and expansion, in retrieving documents of various degrees of relevance. The test was run with a best match retrieval system (In- Query I) in a text database consisting of newspaper articles. The results indicate that the tested strong query structures are most effective in retrieving highly relevant documents. The differences between the query types are practically essential and statistically significant. 
More generally, the novel evaluation methods and the case demonstrate that non-dichotomous relevance assessments are applicable in IR experiments, may reveal interesting phenomena, and allow harder testing of IR methods.", + "link": "https://www.semanticscholar.org/paper/93b9d22e6b1f3fc05feba3c5c3922a23dce09ea9", + "scraped_abstract": null, + "citation_best": 934 + }, + { + "paper": "2202508117", + "venue": "1175089206", + "year": "2000", + "title": "xmill an efficient compressor for xml data", + "label": [ + "77088390", + "44883583", + "183068750", + "15845906", + "11508877", + "138958017" + ], + "author": [ + "1179716466", + "1986159374" + ], + "reference": [ + "109947125", + "1569584939", + "1600355623", + "1632114991", + "1970231722", + "2002089154", + "2091009885", + "2107745473", + "2129803206", + "2134356404", + "2155729921", + "2156000104", + "2161488606", + "2165025417", + "2166099525", + "2341510490", + "2611071497", + "2913251172" + ], + "abstract": "we describe a tool for compressing xml data with applications in data exchange and archiving which usually achieves about twice the compression ratio of gzip at roughly the same speed the compressor called xmill incorporates and combines existing compressors in order to apply them to heterogeneous xml data it uses zlib the library function for gzip a collection of datatype specific compressors for simple data types and possibly user defined compressors for application specific data types", + "title_raw": "XMill: an efficient compressor for XML data", + "abstract_raw": "We describe a tool for compressing XML data, with applications in data exchange and archiving, which usually achieves about twice the compression ratio of gzip at roughly the same speed. 
The compressor, called XMill, incorporates and combines existing compressors in order to apply them to heterogeneous XML data: it uses zlib, the library function for gzip, a collection of datatype specific compressors for simple data types, and, possibly, user defined compressors for application specific data types.", + "link": "https://www.semanticscholar.org/paper/141e8528f0d3cdcf5dcbfe3ba5be5af494832c09", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2169732913", + "venue": "1166315290", + "year": "2000", + "title": "sensing techniques for mobile interaction", + "label": [ + "170130773", + "115121344", + "59046462", + "107457646", + "144543869", + "49774154", + "121449826", + "186967261" + ], + "author": [ + "1560725665", + "2121099458", + "2154671057", + "1970391018" + ], + "reference": [ + "1226869406", + "1490670090", + "1493876111", + "1576835806", + "1577936768", + "1778770750", + "1928061009", + "1983239853", + "1985951810", + "2007580360", + "2012573326", + "2017310186", + "2023331673", + "2025805571", + "2037717243", + "2040048368", + "2051572365", + "2059216172", + "2060069377", + "2061121279", + "2095082282", + "2103798636", + "2108715885", + "2116207401", + "2126434504", + "2128026023", + "2128590751", + "2128629242", + "2130306162", + "2130351016", + "2138727017", + "2145550961", + "2148766288", + "2156706552", + "2158882008", + "2160170050", + "2165195887", + "2630105337" + ], + "abstract": "we describe sensing techniques motivated by unique aspects of human computer interaction with handheld devices in mobile settings special features of mobile interaction include changing orientation and position changing venues the use of computing as auxiliary to ongoing real world activities like talking to a colleague and the general intimacy of use for such devices we introduce and integrate a set of sensors into a handheld device and demonstrate several new functionalities engendered by the sensors such as recording memos when the 
device is held like a cell phone switching between portrait and landscape display modes by holding the device in the desired orientation automatically powering up the device when the user picks it up the device to start using it and scrolling the display using tilt we present an informal experiment initial usability testing results and user reactions to these techniques", + "title_raw": "Sensing techniques for mobile interaction", + "abstract_raw": "We describe sensing techniques motivated by unique aspects of human-computer interaction with handheld devices in mobile settings. Special features of mobile interaction include changing orientation and position, changing venues, the use of computing as auxiliary to ongoing, real-world activities like talking to a colleague, and the general intimacy of use for such devices. We introduce and integrate a set of sensors into a handheld device, and demonstrate several new functionalities engendered by the sensors, such as recording memos when the device is held like a cell phone, switching between portrait and landscape display modes by holding the device in the desired orientation, automatically powering up the device when the user picks it up the device to start using it, and scrolling the display using tilt. 
We present an informal experiment, initial usability testing results, and user reactions to these techniques.", + "link": "https://www.semanticscholar.org/paper/1cfa915f1f86da52c92aff8d60cced0924b6a344", + "scraped_abstract": null, + "citation_best": 541 + }, + { + "paper": "2175110005", + "venue": "1135342153", + "year": "2000", + "title": "graph structure in the web", + "label": [ + "105606406", + "136134403", + "176225458", + "136764020", + "2777569578" + ], + "author": [ + "1970098533", + "3029771320", + "73889060", + "2195048431", + "2059781006", + "2028182399", + "2130754085", + "2665851981" + ], + "reference": [ + "1479910094", + "1506099691", + "1968642292", + "1976232673", + "1976969221", + "1981202432", + "1987272746", + "2006119904", + "2008620264", + "2018355987", + "2028406909", + "2036671379", + "2037498077", + "2048392910", + "2059276410", + "2066636486", + "2079656678", + "2079672501", + "2082398795", + "2085950095", + "2089192108", + "2097147952", + "2134122907", + "2135282325", + "2145748623", + "2769133055", + "2799004609", + "3007065685", + "3125161049", + "3140662957" + ], + "abstract": "the study of the web as a graph is not only fascinating in its own right but also yields valuable insight into web algorithms for crawling searching and community discovery and the sociological phenomena which characterize its evolution we report on experiments on local and global properties of the web graph using two altavista crawls each with over 200 million pages and 1 5 billion links our study indicates that the macroscopic structure of the web is considerably more intricate than suggested by earlier experiments on a smaller scale", + "title_raw": "Graph structure in the Web", + "abstract_raw": "The study of the web as a graph is not only fascinating in its own right, but also yields valuable insight into web algorithms for crawling, searching and community discovery, and the sociological phenomena which characterize its evolution. 
We report on experiments on local and global properties of the web graph using two Altavista crawls each with over 200 million pages and 1.5 billion links. Our study indicates that the macroscopic structure of the web is considerably more intricate than suggested by earlier experiments on a smaller scale.", + "link": "https://www.semanticscholar.org/paper/b59aad57ca1baeca15f497ead7bbb2db01b3c952", + "scraped_abstract": null, + "citation_best": 35 + }, + { + "paper": "1483940455", + "venue": "1184914352", + "year": "1999", + "title": "proverb the probabilistic cruciverbalist", + "label": [ + "49937458", + "154945302", + "50341643", + "177264268", + "204321447" + ], + "author": [ + "2101795779", + "2496873187", + "1075592465", + "2284852570", + "2283549412", + "2567391742", + "2280115039", + "2343276469", + "2983190058", + "2774793119" + ], + "reference": [ + "164549", + "88959009", + "1577626262", + "1956559956", + "1983578042", + "2102381086" + ], + "abstract": "we attacked the problem of solving crossword puzzles by computer given a set of clues and a crossword grid try to maximize the number of words correctly filled in in our system expert modules specialize in solving specific types of clues drawing on ideas from information retrieval database search and machine learning each expert module generates a possibly empty candidate list for each clue and the lists are merged together and placed into the grid by a centralized solver we used a probabilistic representation throughout the system as a common interchange language between subsystems and to drive the search for an optimal solution proverb the complete system averages 95 3 words correct and 98 1 letters correct in under 15 minutes per puzzle on a sample of 370 puzzles taken from the new york times and several other puzzle sources this corresponds to missing roughly 3 words or 4 letters on a daily 15 15 puzzle making proverb a better than average cruciverbalist crossword solver", + "title_raw": "Proverb: the 
probabilistic cruciverbalist", + "abstract_raw": "We attacked the problem of solving crossword puzzles by computer: given a set of clues and a crossword grid, try to maximize the number of words correctly filled in. In our system, \"expert modules\" specialize in solving specific types of clues, drawing on ideas from information retrieval, database search, and machine learning. Each expert module generates a (possibly empty) candidate list for each clue, and the lists are merged together and placed into the grid by a centralized solver. We used a probabilistic representation throughout the system as a common interchange language between subsystems and to drive the search for an optimal solution. PROVERB, the complete system, averages 95.3% words correct and 98.1 % letters correct in under 15 minutes per puzzle on a sample of 370 puzzles taken from the New York Times and several other puzzle sources. This corresponds to missing roughly 3 words or 4 letters on a daily 15 \u00d7 15 puzzle, making PROVERB a better-than-average cruciverbalist (crossword solver).", + "link": "https://www.semanticscholar.org/paper/bd417d347a18e52598b3ccafcf5d8031f45c0f41", + "scraped_abstract": null, + "citation_best": 39 + }, + { + "paper": "2127466278", + "venue": "1203999783", + "year": "1999", + "title": "a distributed case based reasoning application for engineering sales support", + "label": [ + "14414571", + "20162079", + "12269588", + "8797682", + "136764020", + "15524039" + ], + "author": [ + "1508472518", + "170697772" + ], + "reference": [ + "60976464", + "73774838", + "1498118703", + "1509235435", + "1532750478", + "1567283344", + "1840591879", + "1895754424", + "2088777431", + "2161133420" + ], + "abstract": "this paper describes the implementation of a distributed case based reasoning application that supports engineering sales staff the application operates on the world wide web and uses the xml standard as a communications protocol between client and server side java 
applets the paper describes the distributed architecture of the application the two case retrieval techniques used its implementation trial roll out and subsequent improvements to its architecture and retrieval techniques using introspective reasoning to improve retrieval efficiency the benefits it has provided to the company are detailed", + "title_raw": "A Distributed Case-Based Reasoning Application for Engineering Sales Support", + "abstract_raw": "This paper describes the implementation of a distributed case-based reasoning application that supports engineering sales staff. The application operates on the world wide web and uses the XML standard as a communications protocol between client and server side Java applets. The paper describes the distributed architecture of the application, the two case retrieval techniques used, its implementation, trial, roll-out and subsequent improvements to its architecture and retrieval techniques using introspective reasoning to improve retrieval efficiency. 
The benefits it has provided to the company are detailed.", + "link": "https://www.semanticscholar.org/paper/b2c139a66b36c4daf3c9487d26b958643776e539", + "scraped_abstract": null, + "citation_best": 47 + }, + { + "paper": "1774901127", + "venue": "1203999783", + "year": "1999", + "title": "learning in natural language", + "label": [ + "119857082", + "58973888", + "49937458", + "167966045", + "9679016", + "97970142", + "207201462", + "163836022", + "52001869", + "136389625", + "32254414", + "112972136", + "195324797", + "2781376004" + ], + "author": [ + "2122007671" + ], + "reference": [ + "1530699444", + "1597379537", + "1648417313", + "1977182536", + "1982381767", + "1984424319", + "2001792610", + "2015042937", + "2018490621", + "2019363670", + "2039533113", + "2051027583", + "2056957301", + "2071927551", + "2072631813", + "2084544490", + "2100796029", + "2112792378", + "2118996379", + "2125838338", + "2156909104", + "2170206653", + "2296427152", + "2963847008", + "3017143921" + ], + "abstract": "statistics based classifiers in natural language are developed typically by assuming a generative model for the data estimating its parameters from training data and then using bayes rule to obtain a classifier for many problems the assumptions made by the generative models are evidently wrong leaving open the question of why these approaches work this paper presents a learning theory account of the major statistical approaches to learning in natural language a class of linear statistical queries lsq hypotheses is defined and learning with it is shown to exhibit some robustness properties many statistical learners used in natural language including naive bayes markov models and maximum entropy models are shown to be lsq hypotheses explaining the robustness of these predictors even when the underlying probabilistic assumptions do not hold this coherent view of when and why learning approaches work in this context may help to develop better learning methods and an 
understanding of the role of learning in natural language inferences", + "title_raw": "Learning in Natural Language", + "abstract_raw": "Statistics-based classifiers in natural language are developed typically by assuming a generative model for the data, estimating its parameters from training data and then using Bayes rule to obtain a classifier. For many problems the assumptions made by the generative models are evidently wrong, leaving open the question of why these approaches work.\r\n\r\nThis paper presents a learning theory account of the major statistical approaches to learning in natural language. A class of Linear Statistical Queries (LSQ) hypotheses is defined and learning with it is shown to exhibit some robustness properties. Many statistical learners used in natural language, including naive Bayes, Markov Models and Maximum Entropy models are shown to be LSQ hypotheses, explaining the robustness of these predictors even when the underlying probabilistic assumptions do not hold. 
This coherent view of when and why learning approaches work in this context may help to develop better learning methods and an understanding of the role of learning in natural language inferences.", + "link": "https://www.semanticscholar.org/paper/ca732052b6893b87c1c13c9f1f70809af39f55d1", + "scraped_abstract": null, + "citation_best": 12 + }, + { + "paper": "2058732827", + "venue": "1130985203", + "year": "1999", + "title": "metacost a general method for making classifiers cost sensitive", + "label": [ + "119857082", + "124101348", + "2781376004", + "95623464" + ], + "author": [ + "2169012919" + ], + "reference": [ + "61567823", + "66185676", + "98862427", + "145450961", + "152305382", + "1523293200", + "1524761913", + "1554663460", + "1573228426", + "1588833628", + "1594031697", + "1610836425", + "1856927991", + "1970406594", + "2084812512", + "2112076978", + "2114350817", + "2125055259", + "2140785063", + "2142771821", + "2147169507", + "2156049106", + "2912934387", + "3017143921", + "3085162807" + ], + "abstract": "research in machine learning statistics and related fields has produced a wide variety of algorithms for classification however most of these algorithms assume that all errors have the same cost which is seldom the case in kdd problems individually making each classification learner costsensitive is laborious and often non trivial in this paper we propose a principled method for making an arbitrary classifier cost sensitive by wrapping a cost minimizing procedure around it this procedure called metacost treats the underlying classifier as a black box requiring no knowledge of its functioning or change to it unlike stratification metacost is applicable to any number of classes and to arbitrary cost matrices empirical trials on a large suite of benchmark databases show that metacost almost always produces large cost reductions compared to the cost blind classifier used c4 5rules and to two forms of stratification further tests identify the key 
components of metacost and those that can be varied without substantial loss experiments on a larger database indicate that metacost scales well", + "title_raw": "MetaCost: a general method for making classifiers cost-sensitive", + "abstract_raw": "Research in machine learning, statistics and related fields has produced a wide variety of algorithms for classification. However, most of these algorithms assume that all errors have the same cost, which is seldom the case in KDD problems. Individually making each classification learner costsensitive is laborious, and often non-trivial. In this paper we propose a principled method for making an arbitrary classifier cost-sensitive by wrapping a cost-minimizing procedure around it. This procedure, called MetaCost, treats the underlying classifier as a black box, requiring no knowledge of its functioning or change to it. Unlike stratification, MetaCost, is applicable to any number of classes and to arbitrary cost matrices. Empirical trials on a large suite of benchmark databases show that MetaCost almost always produces large cost reductions compared to the cost-blind classifier used (C4.5RULES) and to two forms of stratification. Further tests identify the key components of MetaCost and those that can be varied without substantial loss. 
Experiments on a larger database indicate that MetaCost scales well.", + "link": "https://www.semanticscholar.org/paper/36bca41eba5a7cea8d69a89ee7bc24923bc380ba", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2151200745", + "venue": "1185109434", + "year": "1999", + "title": "io lite a unified i o buffering and caching system", + "label": [ + "111919701", + "162100846", + "189783530", + "11392498", + "527821871", + "115537543", + "204156049", + "196697905" + ], + "author": [ + "2303316918", + "62967857", + "2290999210" + ], + "reference": [ + "1552041766", + "1581274276", + "1595668546", + "1622463472", + "1918432491", + "2045808511", + "2052840314", + "2078074976", + "2083469471", + "2092537111", + "2108580089", + "2111673261", + "2122960384", + "2134323418", + "2138381338", + "2148620466", + "2159477904", + "2159890891", + "2160992991", + "2169505235" + ], + "abstract": "this paper presents the design implementation and evaluation of io lite a unified i o buffering and caching system for general purpose operating systems io lite unifies all buffering and caching in the system to the extent permitted by the hardware in particular it allows applications interprocess communication the filesystem the file cache and the network subsystem to share a single physical copy of the data safely and concurrently protection and security are maintained through a combination of access control and read only sharing the various subsystems use mutable buffer aggregates to access the data according to their needs io lite eliminates all copying and multiple buffering of i o data and enables various cross subsystem optimizations experiments with a web server on io lite show performance improvements between 40 and 80 on real workloads", + "title_raw": "IO-lite: a unified I/O buffering and caching system", + "abstract_raw": "This paper presents the design, implementation and evaluation of IO-Lite, a unified I/O buffering and caching system for general-purpose 
operating systems. IO-Lite unifies all buffering and caching in the system, to the extent permitted by the hardware. In particular, it allows applications, interprocess communication, the filesystem, the file cache, and the network subsystem to share a single physical copy of the data safely and concurrently. Protection and security are maintained through a combination of access control and read-only sharing. The various subsystems use (mutable) buffer aggregates to access the data according to their needs. IO-Lite eliminates all copying and multiple buffering of I/O data, and enables various cross-subsystem optimizations. Experiments with a Web server on IO-Lite show performance improvements between 40 and 80% on real workloads.", + "link": "https://www.semanticscholar.org/paper/4747b17f3c79ffc1821e573b6b52fe4967144da5", + "scraped_abstract": null, + "citation_best": 107 + }, + { + "paper": "2108112890", + "venue": "1127352206", + "year": "1999", + "title": "whole program paths", + "label": [ + "190902152", + "187191949", + "2777138346", + "173608175", + "160191386" + ], + "author": [ + "635023701" + ], + "reference": [ + "1480780803", + "1487130678", + "1561389604", + "1965341318", + "1988927353", + "1990349513", + "1996431959", + "2013137740", + "2036918610", + "2040183246", + "2044951665", + "2047226031", + "2059504991", + "2066688923", + "2067836954", + "2078944436", + "2101134669", + "2106861521", + "2117189826", + "2129962996", + "2133304975", + "2143160644", + "2146151266", + "2146210875", + "2162777718", + "2165423885", + "2178429314", + "2294451725" + ], + "abstract": "whole program paths wpp are a new approach to capturing and representing a program s dynamic actually executed control flow unlike other path profiling techniques which record intraprocedural or acyclic paths wpps produce a single compact description of a program s entire control flow including loop iteration and interprocedural paths this paper explains how to collect and represent wpps it 
also shows how to use wpps to find hot subpaths which are the heavily executed sequences of code that should be the focus of performance tuning and compiler optimization", + "title_raw": "Whole program paths", + "abstract_raw": "Whole program paths (WPP) are a new approach to capturing and representing a program's dynamic---actually executed---control flow. Unlike other path profiling techniques, which record intraprocedural or acyclic paths, WPPs produce a single, compact description of a program's entire control flow, including loop iteration and interprocedural paths.This paper explains how to collect and represent WPPs. It also shows how to use WPPs to find hot subpaths, which are the heavily executed sequences of code that should be the focus of performance tuning and compiler optimization.", + "link": "https://www.semanticscholar.org/paper/63281ffa177c6d88841ddc6e01d1b0a74ea853e0", + "scraped_abstract": null, + "citation_best": 34 + }, + { + "paper": "2043778692", + "venue": "1184151122", + "year": "1999", + "title": "exact and approximate aggregation in constraint query languages", + "label": [ + "119857082", + "192939062", + "192028432", + "157692150", + "80444323" + ], + "author": [ + "2016987025", + "1971151415" + ], + "reference": [ + "1481001647", + "1546328101", + "1551066637", + "1558832481", + "1572511250", + "1596887828", + "1822440022", + "1964084160", + "1984534637", + "1985813404", + "1987679122", + "1989267168", + "1993482412", + "2004405116", + "2010757891", + "2012348391", + "2019671617", + "2019782055", + "2037381552", + "2042690606", + "2063342497", + "2068589781", + "2077328395", + "2077459175", + "2080937655", + "2084875013", + "2090832823", + "2094548115", + "2100440346", + "2129879687", + "2154952480", + "2160312889", + "2162052349", + "2177603603", + "2215180673", + "2296677182" + ], + "abstract": "", + "title_raw": "Exact and approximate aggregation in constraint query languages", + "abstract_raw": "", + "link": 
"https://www.semanticscholar.org/paper/cf673ce1ecdc97e7d5fc306690eef560d792e431", + "scraped_abstract": null, + "citation_best": 18 + }, + { + "paper": "2024181699", + "venue": "1140684652", + "year": "1999", + "title": "cross language information retrieval based on parallel texts and automatic mining of parallel texts from the web", + "label": [ + "49937458", + "114289077", + "24687705", + "2778842860", + "203005215", + "136764020", + "2780801425", + "71472368", + "204321447", + "159363923", + "110046852", + "23123220" + ], + "author": [ + "2149813270", + "2151256176", + "2463755004", + "2429303833" + ], + "reference": [ + "1489181569", + "1514971736", + "1543039027", + "1560575414", + "1636405317", + "1759973002", + "1956559956", + "2006969979", + "2021822930", + "2062630764", + "2161935199", + "3148327847" + ], + "abstract": "this paper describes the use of a probabilistic translation model to cross language ir clir the performance of this approach is compared with that using machine translation mt it is shown that using a probabilistic model we are able to obtain performances close to those using an mt system in addition we also investigated the possibility of automatically gather parallel texts from the web in an attempt to construct a reasonable training corpus the result is very encouraging we showed that in several tests such a training corpus is as good as a manually constructed one for clir purposes", + "title_raw": "Cross-language information retrieval based on parallel texts and automatic mining of parallel texts from the Web", + "abstract_raw": "This paper describes the use of a probabilistic translation model to cross-language IR (CLIR). The performance of this approach is compared with that using machine translation (MT). It is shown that using a probabilistic model, we are able to obtain performances close to those using an MT system. 
In addition, we also investigated the possibility of automatically gather parallel texts from the Web in an attempt to construct a reasonable training corpus. The result is very encouraging. We showed that in several tests, such a training corpus is as good as a manually constructed one for CLIR purposes.", + "link": "http://doi.acm.org/10.1145/312624.312656", + "scraped_abstract": null, + "citation_best": 315 + }, + { + "paper": "2164049396", + "venue": "1175089206", + "year": "1999", + "title": "dynamat a dynamic view management system for data warehouses", + "label": [ + "2776012861", + "77088390", + "135572916", + "98199447", + "177264268", + "2780632077", + "124101348", + "2780009758" + ], + "author": [ + "306802008", + "2918468806" + ], + "reference": [ + "38346608", + "1538267625", + "1549717312", + "1563590781", + "1581670235", + "1581805425", + "1584914510", + "1596620276", + "1835416193", + "1973382888", + "1991383761", + "1998147193", + "1999276015", + "2001852222", + "2007625694", + "2012670464", + "2044240774", + "2054170837", + "2103201239", + "2113888164", + "2114712170", + "2132582487", + "2143401113", + "2150950420", + "2157219191", + "2248652221", + "2918864124" + ], + "abstract": "pre computation and materialization of views with aggregate functions is a common technique in data warehouses due to the complex structure of the warehouse and the different profiles of the users who submit queries there is need for tools that will automate the selection and management of the materialized data in this paper we present dynamat a system that dynamically materializes information at multiple levels of granularity in order to match the demand workload but also takes into account the maintenance restrictions for the warehouse such as down time to update the views and space availability dynamat unifies the view selection and the view maintenance problems under a single framework using a novel goodness measure for the materialized views dynamat constantly 
monitors incoming queries and materializes the best set of views subject to the space constraints during updates dynamat reconciles the current materialized view selection and refreshes the most beneficial subset of it within a given maintenance window we compare dynamat against a system that is given all queries in advance and the pre computed optimal static view selection the comparison is made based on a new metric the detailed cost savings ratio introduced for quantifying the benefits of view materialization against incoming queries these experiments show that dynamat s dynamic view selection outperforms the optimal static view selection and thus any sub optimal static algorithm that has appeared in the literature", + "title_raw": "DynaMat: a dynamic view management system for data warehouses", + "abstract_raw": "Pre-computation and materialization of views with aggregate functions is a common technique in Data Warehouses. Due to the complex structure of the warehouse and the different profiles of the users who submit queries, there is need for tools that will automate the selection and management of the materialized data. In this paper we present DynaMat, a system that dynamically materializes information at multiple levels of granularity in order to match the demand (workload) but also takes into account the maintenance restrictions for the warehouse, such as down time to update the views and space availability. DynaMat unifies the view selection and the view maintenance problems under a single framework using a novel \u201cgoodness\u201d measure for the materialized views. DynaMat constantly monitors incoming queries and materializes the best set of views subject to the space constraints. During updates, DynaMat reconciles the current materialized view selection and refreshes the most beneficial subset of it within a given maintenance window. 
We compare DynaMat against a system that is given all queries in advance and the pre-computed optimal static view selection. The comparison is made based on a new metric, the Detailed Cost Savings Ratio introduced for quantifying the benefits of view materialization against incoming queries. These experiments show that DynaMat's dynamic view selection outperforms the optimal static view selection and thus, any sub-optimal static algorithm that has appeared in the literature.", + "link": "https://www.semanticscholar.org/paper/d30053f74173c7bb1daa92ff3daa9e214f6c0819", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2101915391", + "venue": "1171178643", + "year": "1999", + "title": "manageability availability and performance in porcupine a highly scalable cluster based mail service", + "label": [ + "63540848", + "31258907", + "111919701", + "48044578", + "139330139", + "162319229" + ], + "author": [ + "2134735096", + "1988425031", + "737098973" + ], + "reference": [ + "46382711", + "200874579", + "1515932031", + "1527961683", + "1675263286", + "1722094644", + "2012643955", + "2020765652", + "2022049964", + "2025413686", + "2035300188", + "2044534358", + "2088221489", + "2094068153", + "2102748509", + "2109440766", + "2115457697", + "2117536183", + "2124288146", + "2125011947", + "2127851689", + "2137509374", + "2144034981", + "2147853062", + "2153747271", + "2154007983", + "2171688871", + "2616698872", + "3137220996" + ], + "abstract": "this paper describes the motivation design and performance of porcupine a scalable mail server the goal of porcupine is to provide a highly available and scalable electronic mail service using a large cluster of commodity pcs we designed porcupine to be easy to manage by emphasizing dynamic load balancing automatic configuration and graceful degradation in the presence of failures key to the system s manageability availability and performance is that sessions data and underlying services are distributed 
homogeneously and dynamically across nodes in a cluster", + "title_raw": "Manageability, availability and performance in Porcupine: a highly scalable, cluster-based mail service", + "abstract_raw": "This paper describes the motivation, design, and performance of Porcupine, a scalable mail server. The goal of Porcupine is to provide a highly available and scalable electronic mail service using a large cluster of commodity PCs. We designed Porcupine to be easy to manage by emphasizing dynamic load balancing, automatic configuration, and graceful degradation in the presence of failures. Key to the system's manageability, availability, and performance is that sessions, data, and underlying services are distributed homogeneously and dynamically across nodes in a cluster.", + "link": "https://www.semanticscholar.org/paper/c30cbe128280b4a8b01eb5cb6a963ae72ea32b45", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2150709314", + "venue": "1171178643", + "year": "1999", + "title": "cellular disco resource management using virtual clusters on shared memory multiprocessors", + "label": [ + "112904061", + "133875982", + "51332947", + "25344961", + "4822641", + "149635348", + "48044578", + "2779960034" + ], + "author": [ + "2108698817", + "280431639", + "2168405129", + "2102969555" + ], + "reference": [ + "1525016610", + "1549737115", + "1836469812", + "1978980371", + "2006816934", + "2011992756", + "2012643955", + "2054496292", + "2071515131", + "2088837715", + "2097174266", + "2098375905", + "2100720297", + "2120616073", + "2125895608", + "2128336546", + "2138529599", + "2140455011", + "2145021036", + "2152056423", + "2162655049", + "2294047952", + "2611515161", + "3025363883" + ], + "abstract": "despite the fact that large scale shared memory multiprocessors have been commercially available for several years system software that fully utilizes all their features is still not available mostly due to the complexity and cost of making the required changes 
to the operating system a recently proposed approach called disco substantially reduces this development cost by using a virtual machine monitor that leverages the existing operating system technology in this paper we present a system called cellular disco that extends the disco work to provide all the advantages of the hardware partitioning and scalable operating system approaches we argue that cellular disco can achieve these benefits at only a small fraction of the development cost of modifying the operating system cellular disco effectively turns a large scale shared memory multiprocessor into a virtual cluster that supports fault containment and heterogeneity while avoiding operating system scalability bottle necks yet at the same time cellular disco preserves the benefits of a shared memory multiprocessor by implementing dynamic fine grained resource sharing and by allowing users to overcommit resources such as processors and memory this hybrid approach requires a scalable resource manager that makes local decisions with limited information while still providing good global performance and fault containment in this paper we describe our experience with a cellular disco prototype on a 32 processor sgi origin 2000 system we show that the execution time penalty for this approach is low typically within 10 of the best available commercial operating system for most workloads and that it can manage the cpu and memory resources of the machine significantly better than the hardware partitioning approach", + "title_raw": "Cellular Disco: resource management using virtual clusters on shared-memory multiprocessors", + "abstract_raw": "Despite the fact that large-scale shared-memory multiprocessors have been commercially available for several years, system software that fully utilizes all their features is still not available, mostly due to the complexity and cost of making the required changes to the operating system. 
A recently proposed approach, called Disco, substantially reduces this development cost by using a virtual machine monitor that leverages the existing operating system technology.In this paper we present a system called Cellular Disco that extends the Disco work to provide all the advantages of the hardware partitioning and scalable operating system approaches. We argue that Cellular Disco can achieve these benefits at only a small fraction of the development cost of modifying the operating system. Cellular Disco effectively turns a large-scale shared-memory multiprocessor into a virtual cluster that supports fault containment and heterogeneity, while avoiding operating system scalability bottle-necks. Yet at the same time, Cellular Disco preserves the benefits of a shared-memory multiprocessor by implementing dynamic, fine-grained resource sharing, and by allowing users to overcommit resources such as processors and memory. This hybrid approach requires a scalable resource manager that makes local decisions with limited information while still providing good global performance and fault containment.In this paper we describe our experience with a Cellular Disco prototype on a 32-processor SGI Origin 2000 system. 
We show that the execution time penalty for this approach is low, typically within 10% of the best available commercial operating system for most workloads, and that it can manage the CPU and memory resources of the machine significantly better than the hardware partitioning approach.", + "link": "https://www.semanticscholar.org/paper/ed21a8d26c99e9119aeecc7d2a41f41b0b7ba1b7", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2156874421", + "venue": "1171178643", + "year": "1999", + "title": "the click modular router", + "label": [ + "152174988", + "149635348", + "31258907", + "158379750", + "141947644", + "2775896111", + "86726114", + "87756765", + "202274260", + "2779581428" + ], + "author": [ + "2270424441", + "2114981089", + "2203489810", + "2779384724" + ], + "reference": [ + "1537353332", + "1620528571", + "1655359704", + "1920802909", + "1936190831", + "1965900296", + "1970837734", + "1977867261", + "2010365467", + "2014485836", + "2031969505", + "2052584779", + "2063657191", + "2069749081", + "2096241833", + "2111695485", + "2112106563", + "2124767351", + "2132581831", + "2134323418", + "2138381338", + "2143160644", + "2146430517", + "2147900802", + "2155750235", + "2156639545", + "2158733823", + "2164161511", + "2295498007", + "2913277331" + ], + "abstract": "click is a new software architecture for building flexible and configurable routers a click router is assembled from packet processing modules called elements individual elements implement simple router functions like packet classification queueing scheduling and interfacing with network devices complete configurations are built by connecting elements into a graph packets flow along the graph s edges several features make individual elements more powerful and complex configurations easier to write including pull processing which models packet flow driven by transmitting interfaces and flow based router context which helps an element locate other interesting elements we demonstrate 
several working configurations including an ip router and an ethernet bridge these configurations are modular the ip router has 16 elements on the forwarding path and easy to extend by adding additional elements which we demonstrate with augmented configurations on commodity pc hardware running linux the click ip router can forward 64 byte packets at 73 000 packets per second just 10 slower than linux alone", + "title_raw": "The Click modular router", + "abstract_raw": "Click is a new software architecture for building flexible and configurable routers. A Click router is assembled from packet processing modules called elements. Individual elements implement simple router functions like packet classification, queueing, scheduling, and interfacing with network devices. Complete configurations are built by connecting elements into a graph; packets flow along the graph's edges. Several features make individual elements more powerful and complex configurations easier to write, including pull processing, which models packet flow driven by transmitting interfaces, and flow-based router context, which helps an element locate other interesting elements.We demonstrate several working configurations, including an IP router and an Ethernet bridge. These configurations are modular---the IP router has 16 elements on the forwarding path---and easy to extend by adding additional elements, which we demonstrate with augmented configurations. 
On commodity PC hardware running Linux, the Click IP router can forward 64-byte packets at 73,000 packets per second, just 10% slower than Linux alone.", + "link": "https://www.semanticscholar.org/paper/448326de7b82c8ed10ad9ac89d1305bf2260cd5f", + "scraped_abstract": null, + "citation_best": 2437 + }, + { + "paper": "2116833128", + "venue": "1171178643", + "year": "1999", + "title": "soft timers efficient microsecond software timer support for network processing", + "label": [ + "204854418", + "149635348", + "158379750", + "113200698", + "2776633867", + "11392498", + "2777904410", + "19012869", + "53833338" + ], + "author": [ + "2115251612", + "62967857" + ], + "reference": [ + "1484507062", + "1554445692", + "1779735989", + "1988171572", + "2014485836", + "2021512083", + "2022013117", + "2023209622", + "2042171995", + "2049904414", + "2070136743", + "2087871804", + "2096812769", + "2098289156", + "2098879470", + "2101182788", + "2105347639", + "2120507338", + "2122889548", + "2128321126", + "2141512618", + "2148620466", + "2151263383", + "2154327760", + "2553945548", + "3161729677" + ], + "abstract": "this paper proposes and evaluates soft timers a new operating system facility that allows the efficient scheduling of software events at a granularity down to tens of microseconds soft timers can be used to avoid interrupts and reduce context switches associated with network processing without sacrificing low communication delays more specifically soft timers enable transport protocols like tcp to efficiently perform rate based clocking of packet transmissions experiments show that rate based clocking can improve http response time over connections with high bandwidth delay products by up to 89 and that soft timers allow a server to employ rate based clocking with little cpu overhead 2 6 at high aggregate bandwidths soft timers can also be used to perform network polling which eliminates network interrupts and increases the memory access locality of the network 
subsystem without sacrificing delay experiments show that this technique can improve the throughput of a web server by up to 25", + "title_raw": "Soft timers: efficient microsecond software timer support for network processing", + "abstract_raw": "This paper proposes and evaluates soft timers, a new operating system facility that allows the efficient scheduling of software events at a granularity down to tens of microseconds. Soft timers can be used to avoid interrupts and reduce context switches associated with network processing without sacrificing low communication delays.More specifically, soft timers enable transport protocols like TCP to efficiently perform rate-based clocking of packet transmissions. Experiments show that rate-based clocking can improve HTTP response time over connections with high bandwidth-delay products by up to 89% and that soft timers allow a server to employ rate-based clocking with little CPU overhead (2-6%) at high aggregate bandwidths.Soft timers can also be used to perform network polling, which eliminates network interrupts and increases the memory access locality of the network subsystem without sacrificing delay. 
Experiments show that this technique can improve the throughput of a Web server by up to 25%.", + "link": "https://www.semanticscholar.org/paper/35f2a93661182b39f4cae41cb50020acc59884e6", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "1489992655", + "venue": "1135342153", + "year": "1999", + "title": "focused crawling a new approach to topic specific web resource discovery", + "label": [ + "65603577", + "75165309", + "18030348", + "73340581", + "162215914", + "110875604", + "97854310", + "180503905", + "13743948", + "136764020", + "149672775", + "23123220" + ], + "author": [ + "2103349674", + "2104233022", + "2013056967" + ], + "reference": [ + "128720162", + "157562232", + "1538805611", + "1549392931", + "1605217017", + "1846765585", + "1968642292", + "1971788485", + "1987777228", + "1998102571", + "2001832505", + "2003000928", + "2006119904", + "2012516036", + "2026417691", + "2029341294", + "2040041305", + "2044743392", + "2049961212", + "2054119298", + "2066636486", + "2076008912", + "2079333567", + "2079672501", + "2082751088", + "2089192108", + "2097089247", + "2126064812", + "2126631147", + "2127169217", + "2138621811", + "2147164982", + "2155033358", + "2161956524", + "2322524800", + "2795444554" + ], + "abstract": "the rapid growth of the world wide web poses unprecedented scaling challenges for general purpose crawlers and search engines in this paper we describe a new hypertext resource discovery system called a focused crawler the goal of a focused crawler is to selectively seek out pages that are relevant to a pre defined set of topics the topics are specified not using keywords but using exemplary documents rather than collecting and indexing all accessible web documents to be able to answer all possible ad hoc queries a focused crawler analyzes its crawl boundary to find the links that are likely to be most relevant for the crawl and avoids irrelevant regions of the web this leads to significant savings in hardware and network 
resources and helps keep the crawl more up to date to achieve such goal directed crawling we designed two hypertext mining programs that guide our crawler a classifier that evaluates the relevance of a hypertext document with respect to the focus topics and a distiller that identifies hypertext nodes that are great access points to many relevant pages within a few links we report on extensive focused crawling experiments using several topics at different levels of specificity focused crawling acquires relevant pages steadily while standard crawling quickly loses its way even though they are started from the same root set focused crawling is robust against large perturbations in the starting set of urls it discovers largely overlapping sets of resources in spite of these perturbations it is also capable of exploring out and discovering valuable resources that are dozens of links away from the start set while carefully pruning the millions of pages that may lie within this same radius our anecdotes suggest that focused crawling is very effective for building high quality collections of web documents on specific topics using modest desktop hardware 1999 published by elsevier science b v all rights reserved", + "title_raw": "Focused crawling: a new approach to topic-specific Web resource discovery", + "abstract_raw": "The rapid growth of the World-Wide Web poses unprecedented scaling challenges for general-purpose crawlers and search engines. In this paper we describe a new hypertext resource discovery system called a Focused Crawler. The goal of a focused crawler is to selectively seek out pages that are relevant to a pre-defined set of topics. The topics are specified not using keywords, but using exemplary documents. 
Rather than collecting and indexing all accessible Web documents to be able to answer all possible ad-hoc queries, a focused crawler analyzes its crawl boundary to find the links that are likely to be most relevant for the crawl, and avoids irrelevant regions of the Web. This leads to significant savings in hardware and network resources, and helps keep the crawl more up-to-date. To achieve such goal-directed crawling, we designed two hypertext mining programs that guide our crawler: a classifier that evaluates the relevance of a hypertext document with respect to the focus topics, and a distiller that identifies hypertext nodes that are great access points to many relevant pages within a few links. We report on extensive focused-crawling experiments using several topics at different levels of specificity. Focused crawling acquires relevant pages steadily while standard crawling quickly loses its way, even though they are started from the same root set. Focused crawling is robust against large perturbations in the starting set of URLs. It discovers largely overlapping sets of resources in spite of these perturbations. It is also capable of exploring out and discovering valuable resources that are dozens of links away from the start set, while carefully pruning the millions of pages that may lie within this same radius. Our anecdotes suggest that focused crawling is very effective for building high-quality collections of Web documents on specific topics, using modest desktop hardware. \u00a9 1999 Published by Elsevier Science B.V. 
All rights reserved.", + "link": "https://www.semanticscholar.org/paper/daac06a4830488f0d47278e3c0c17394646d8354", + "scraped_abstract": null, + "citation_best": 1538 + }, + { + "paper": "2096037858", + "venue": "1184914352", + "year": "1998", + "title": "learning evaluation functions for global optimization and boolean satisfiability", + "label": [ + "119857082", + "135450995", + "122357587", + "109718341", + "131806220", + "164752517", + "19889080", + "92995354", + "135320971", + "90189156", + "190509626", + "29282572", + "150185637", + "164088818", + "123370116", + "137836250" + ], + "author": [ + "440525932", + "2170629902" + ], + "reference": [ + "167254664", + "1531229281", + "1543921836", + "1574618220", + "1629758943", + "1698663318", + "1813877927", + "1878040096", + "1967853707", + "2017127020", + "2027394155", + "2032431794", + "2072339136", + "2106440199", + "2113347106", + "3021909058" + ], + "abstract": "this paper describes stage a learning approach to automatically improving search performance on optimization problems stage learns an evaluation function which predicts the outcome of a local search algorithm such as hillclimbing or walksat as a function of state features along its search trajectories the learned evaluation function is used to bias future search trajectories toward better optima we present positive results on six large scale optimization domains", + "title_raw": "Learning evaluation functions for global optimization and Boolean satisfiability", + "abstract_raw": "This paper describes STAGE, a learning approach to automatically improving search performance on optimization problems. STAGE learns an evaluation function which predicts the outcome of a local search algorithm, such as hillclimbing or WALKSAT, as a function of state features along its search trajectories. The learned evaluation function is used to bias future search trajectories toward better optima. 
We present positive results on six large-scale optimization domains.", + "link": "https://www.semanticscholar.org/paper/bddddac9b0e4c3a3a451f797aa2e8641be79bc60", + "scraped_abstract": null, + "citation_best": 71 + }, + { + "paper": "2123564449", + "venue": "1184914352", + "year": "1998", + "title": "the interactive museum tour guide robot", + "label": [ + "107457646", + "49774154", + "89505385", + "162947575", + "105339364", + "168402607", + "101188967", + "35869016", + "90509273", + "26990112" + ], + "author": [ + "343811326", + "2098737035", + "2231782831", + "2071961238", + "2993977779", + "2061120763", + "2503801593", + "2075956027" + ], + "reference": [ + "73143588", + "107715831", + "116412713", + "1517795220", + "1519865121", + "1525064124", + "1537435730", + "1573376273", + "1579593387", + "1611476298", + "1644054796", + "1656165940", + "1912541283", + "1976792226", + "2028085177", + "2028145673", + "2030021468", + "2045474169", + "2096794105", + "2115356364", + "2117211893", + "2122695052", + "2145060371", + "2153852998", + "2166996723", + "2341171179" + ], + "abstract": "this paper describes the software architecture of an autonomous tour guideltutor robot this robot was recently deployed in the deutsches museum bonn were it guided hundreds of visitors through the museum during a six day deployment period the robot s control software integrates low level probabilistic reasoning with high level problem solving embedded in first order logic a collection of software innovations described in this paper enabled the robot to navigate at high speeds through dense crowds while reliably avoiding collisions with obstacles some of which could not even be perceived also described in this paper is a user interface tailored towards non expert users which was essential for the robot s success in the museum based on these experiences this paper argues that time is ripe for the development of ai based commercial service robots that assist people in everyday life", + 
"title_raw": "The interactive museum tour-guide robot", + "abstract_raw": "This paper describes the software architecture of an autonomous tour-guideltutor robot. This robot was recently deployed in the \"Deutsches Museum Bonn,\" were it guided hundreds of visitors through the museum during a six-day deployment period. The robot's control software integrates low-level probabilistic reasoning with high-level problem solving embedded in first order logic. A collection of software innovations, described in this paper, enabled the robot to navigate at high speeds through dense crowds, while reliably avoiding collisions with obstacles--some of which could not even be perceived. Also described in this paper is a user interface tailored towards non-expert users, which was essential for the robot's success in the museum. Based on these experiences, this paper argues that time is ripe for the development of AI-based commercial service robots that assist people in everyday life.", + "link": "https://www.semanticscholar.org/paper/198a5389d395f7a2aea1523e8d54d8c0f72ac366", + "scraped_abstract": null, + "citation_best": 500 + }, + { + "paper": "83766967", + "venue": "1130985203", + "year": "1998", + "title": "occam s two razors the sharp and the blunt", + "label": [ + "78469957", + "154945302", + "136197465" + ], + "author": [ + "2169012919" + ], + "reference": [ + "3422034", + "4310203", + "5667951", + "47621535", + "121619852", + "170692059", + "631714075", + "1480922794", + "1506385404", + "1540214124", + "1543415913", + "1554663460", + "1558203233", + "1559996711", + "1567331820", + "1573228426", + "1573707804", + "1575887049", + "1588691067", + "1604329830", + "1731670240", + "1840338487", + "1966280301", + "1982499516", + "1983661866", + "1994022788", + "1995023359", + "2015140204", + "2046600124", + "2054658115", + "2063961549", + "2070902649", + "2099111195", + "2100483895", + "2107844279", + "2110381504", + "2111766645", + "2132166479", + "2134682899", + "2140785063", 
+ "2142771821", + "2142920735", + "2156909104", + "2161766620", + "2163321856", + "2165520550", + "2168175751", + "2168796272", + "2175490861", + "2911546748" + ], + "abstract": "occam s razor has been the subject of much controversy this paper argues that this is partly because it has been interpreted in two quite different ways the first of which simplicity is a goal in itself is essentially correct while the second simplicity leads to greater accuracy is not the paper reviews the large variety of theoretical arguments and empirical evidence for and against the second razor and concludes that the balance is strongly against it in particular it builds on the case of schaffer 1993 and webb 1996 by considering additional theoretical arguments and recent empirical evidence that the second razor fails in most domains a version of the first razor more appropriate to kdd is proposed and we argue that continuing to apply the second razor risks causing significant opportunities to be missed", + "title_raw": "Occam's two razors: the sharp and the blunt", + "abstract_raw": "Occam's razor has been the subject of much controversy. This paper argues that this is partly because it has been interpreted in two quite different ways, the first of which (simplicity is a goal in itself) is essentially correct, while the second (simplicity leads to greater accuracy) is not. The paper reviews the large variety of theoretical arguments and empirical evidence for and against the \"second razor,\" and concludes that the balance is strongly against it. In particular, it builds on the case of (Schaffer, 1993) and (Webb, 1996) by considering additional theoretical arguments and recent empirical evidence that the second razor fails in most domains. 
A version of the first razor more appropriate to KDD is proposed, and we argue that continuing to apply the second razor risks causing significant opportunities to be missed.", + "link": "https://www.semanticscholar.org/paper/d7845d20f5245218217404d3fe8fc4c5fd62c7ea", + "scraped_abstract": null, + "citation_best": 73 + }, + { + "paper": "2061628264", + "venue": "1184151122", + "year": "1998", + "title": "expressiveness of structured document query languages based on attribute grammars", + "label": [ + "121017423", + "97212296", + "164041254", + "67621940", + "97169998", + "199360897", + "125950753", + "134083981" + ], + "author": [ + "1870307126", + "688217074" + ], + "reference": [ + "151728962", + "166785350", + "1482193060", + "1483164218", + "1486993902", + "1504436033", + "1529427704", + "1591825827", + "1690636059", + "1816620374", + "1938581950", + "1964071625", + "2003804054", + "2033188125", + "2035020702", + "2055874330", + "2073663206", + "2085111352", + "2105505307", + "2250859116", + "2752853835", + "3184585588" + ], + "abstract": "", + "title_raw": "Expressiveness of structured document query languages based on attribute grammars", + "abstract_raw": "", + "link": "https://www.semanticscholar.org/paper/2f537b09cc0d76320c2b6739de5fe0c3a97dd9d5", + "scraped_abstract": null, + "citation_best": 55 + }, + { + "paper": "2067970404", + "venue": "1140684652", + "year": "1998", + "title": "a theory of term weighting based on exploratory data analysis", + "label": [ + "124101348", + "120894424", + "23123220" + ], + "author": [ + "2945577422" + ], + "reference": [ + "15201044", + "80619934", + "1479966022", + "1605873790", + "1956559956", + "1966880694", + "1968386718", + "1998041663", + "2007563768", + "2010652031", + "2011232895", + "2012318340", + "2017722302", + "2022516005", + "2025558965", + "2037745957", + "2040337753", + "2043909051", + "2066873261", + "2102046030", + "2144211451", + "2319794630", + "2421218773", + "3015462599" + ], + "abstract": "", + 
"title_raw": "A theory of term weighting based on exploratory data analysis", + "abstract_raw": "", + "link": "https://www.semanticscholar.org/paper/ee57ce8ffd5988dbbd18579afeb73eb6a3d16af6", + "scraped_abstract": null, + "citation_best": 90 + }, + { + "paper": "1997506570", + "venue": "1175089206", + "year": "1998", + "title": "efficient transparent application recovery in client server information systems", + "label": [ + "63540848", + "152880691", + "31258907", + "78161392", + "205295232", + "105908466", + "171494039", + "120314980", + "107535962", + "94925161" + ], + "author": [ + "1983664831", + "3198302491" + ], + "reference": [ + "1491227338", + "1502870524", + "1506585574", + "1515932031", + "1533675634", + "1569650328", + "1571432445", + "1583699287", + "1630105971", + "1855065956", + "1866901121", + "1977773183", + "2015234709", + "2024016608", + "2044667076", + "2052915895", + "2077122164", + "2100970777", + "2104954161", + "2109739361", + "2131053137", + "2134533588", + "2150581873", + "2150680879", + "2156871818", + "2168262126", + "2579007764" + ], + "abstract": "database systems recover persistent data providing high database availability however database applications typically residing on client or middle tier application server machines may lose work because of a server failure this prevents the masking of server failures from the human user and substantially degrades application availability this paper aims to enable high application availability with an integrated method for database server recovery and transparent application recovery in a client server system the approach based on application message logging is similar to earlier work on distributed system fault tolerance however we exploit advanced database logging and recovery techniques and request reply messaging properties to significantly improve efficiency forced log i os frequently required by other methods are usually avoided restart time for both failed server and failed client is 
reduced by checkpointing and log truncation our method ensures that a server can recover independently of clients a client may reduce logging overhead in return for dependency on server availability during client restart", + "title_raw": "Efficient transparent application recovery in client-server information systems", + "abstract_raw": "Database systems recover persistent data, providing high database availability. However, database applications, typically residing on client or \u201cmiddle-tier\u201d application-server machines, may lose work because of a server failure. This prevents the masking of server failures from the human user and substantially degrades application availability. This paper aims to enable high application availability with an integrated method for database server recovery and transparent application recovery in a client-server system. The approach, based on application message logging, is similar to earlier work on distributed system fault tolerance. However, we exploit advanced database logging and recovery techniques and request/reply messaging properties to significantly improve efficiency. Forced log I/Os, frequently required by other methods, are usually avoided. Restart time, for both failed server and failed client, is reduced by checkpointing and log truncation. Our method ensures that a server can recover independently of clients. 
A client may reduce logging overhead in return for dependency on server availability during client restart.", + "link": "https://www.semanticscholar.org/paper/3bb2f341f17863c6185a3884394fa02ce1410c2c", + "scraped_abstract": null, + "citation_best": 43 + }, + { + "paper": "2064803206", + "venue": "1175089206", + "year": "1998", + "title": "integrating association rule mining with relational database systems alternatives and implications", + "label": [ + "154420247", + "24394798", + "63000827", + "77088390", + "135572916", + "164833996", + "2776990265", + "124101348", + "193524817", + "510870499" + ], + "author": [ + "156875573", + "2168219635", + "2439481350" + ], + "reference": [ + "114279768", + "115903057", + "201801013", + "1483679765", + "1496285866", + "1520890006", + "1539916429", + "1553696291", + "1554917839", + "1576962511", + "1597161471", + "1597561788", + "1641039719", + "1948199107", + "2037965136", + "2052629503", + "2086906616", + "2093456341", + "2100901653", + "2108457969", + "2114326888", + "2126256482", + "2143979434", + "2166559705", + "2399145551", + "3086865932" + ], + "abstract": "data mining on large data warehouses is becoming increasingly important in support of this trend we consider a spectrum of architectural alternatives for coupling mining with database systems these alternatives include loose coupling through a sql cursor interface encapsulation of a mining algorithm in a stored procedure caching the data to a file system on the fly and mining tight coupling using primarily user defined functions and sql implementations for processing in the dbms we comprehensively study the option of expressing the mining algorithm in the form of sql queries using association rule mining as a case in point we consider four options in sql 92 and six options in sql enhanced with object relational extensions sql or our evaluation of the different architectural alternatives shows that from a performance perspective the cache mine option is superior 
although the performance of the sql or option is within a factor of two both the cache mine and the sql or approaches incur a higher storage penalty than the loose coupling approach which performance wise is a factor of 3 to 4 worse than cache mine the sql 92 implementations were too slow to qualify as a competitive option we also compare these alternatives on the basis of qualitative factors like automatic parallelization development ease portability and inter operability", + "title_raw": "Integrating association rule mining with relational database systems: alternatives and implications", + "abstract_raw": "Data mining on large data warehouses is becoming increasingly important. In support of this trend, we consider a spectrum of architectural alternatives for coupling mining with database systems. These alternatives include: loose-coupling through a SQL cursor interface; encapsulation of a mining algorithm in a stored procedure; caching the data to a file system on-the-fly and mining; tight-coupling using primarily user-defined functions; and SQL implementations for processing in the DBMS. We comprehensively study the option of expressing the mining algorithm in the form of SQL queries using Association rule mining as a case in point. We consider four options in SQL-92 and six options in SQL enhanced with object-relational extensions (SQL-OR). Our evaluation of the different architectural alternatives shows that from a performance perspective, the Cache-Mine option is superior, although the performance of the SQL-OR option is within a factor of two. Both the Cache-Mine and the SQL-OR approaches incur a higher storage penalty than the loose-coupling approach which performance-wise is a factor of 3 to 4 worse than Cache-Mine. The SQL-92 implementations were too slow to qualify as a competitive option. 
We also compare these alternatives on the basis of qualitative factors like automatic parallelization, development ease, portability and inter-operability.", + "link": "https://www.semanticscholar.org/paper/8716b5ab1185e6dd4ba8adf25753613d874d9cc9", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2029114021", + "venue": "1135342153", + "year": "1998", + "title": "the interactive multimedia jukebox imj a new paradigm for the on demand delivery of audio video", + "label": [ + "190144534", + "110875604", + "113200698", + "49774154", + "32295351", + "136764020", + "203004452" + ], + "author": [ + "321516573", + "2130907696" + ], + "reference": [ + "78783877", + "1729394517", + "1974032588", + "1974778994", + "2003671567", + "2011771459", + "2041668415", + "2062444655", + "2063354751", + "2065619953", + "2079994203", + "2097018352", + "2099040451", + "2101838603", + "2105125871", + "2112912183", + "2122332501", + "2126331934", + "2131987682", + "2150291558", + "2153203381", + "2157848702" + ], + "abstract": "abstract straightforward one way delivery of video programming through television sets has existed for many decades in the 1980s new services like pay per view and video on demand were touted as the killer application for next generation internet and tv services however the hype has quickly died away leaving only hard technical problems and costly systems as an alternative and what we propose is a new paradigm offering flexibility in how programs are requested and scheduled for playout ranging from complete viewer control true vod to complete service provider control traditional broadcast or cable tv in this paper we describe our proposed jukebox paradigm and relate it to other on demand paradigms our new paradigm presents some challenges of its own including how to best schedule viewer requests how to provide vcr style interactive functions and how to track viewer usage patterns in addition to addressing these issues we also present our 
implementation of a jukebox based service called the interactive multimedia jukebox imj the imj provides scheduling via the world wide web www and content delivery via the multicast backbone mbone we discuss the challenges of building a functioning system and our ongoing efforts to improve the jukebox paradigm", + "title_raw": "The interactive multimedia jukebox (IMJ): a new paradigm for the on-demand delivery of audio/video", + "abstract_raw": "Abstract Straightforward, one-way delivery of video programming through television sets has existed for many decades. In the 1980s, new services like Pay-Per-View and Video-on-Demand were touted as the \u201ckiller application\u201d for next-generation Internet and TV services. However, the hype has quickly died away leaving only hard technical problems and costly systems. As an alternative, and what we propose, is a new paradigm offering flexibility in how programs are requested and scheduled for playout, ranging from complete viewer control (true VoD), to complete service provider control (traditional broadcast or cable TV). In this paper, we describe our proposed jukebox paradigm and relate it to other on-demand paradigms. Our new paradigm presents some challenges of its own, including how to best schedule viewer requests, how to provide VCR-style interactive functions, and how to track viewer usage patterns. In addition to addressing these issues we also present our implementation of a jukebox-based service called the Interactive Multimedia Jukebox (IMJ). The IMJ provides scheduling via the World Wide Web (WWW) and content delivery via the Multicast Backbone (MBone). 
We discuss the challenges of building a functioning system and our ongoing efforts to improve the jukebox paradigm.", + "link": "https://www.semanticscholar.org/paper/80f61eb8d89a4c5d66b98afedc60a481ee2fd3c8", + "scraped_abstract": null, + "citation_best": 40 + }, + { + "paper": "1567570606", + "venue": "1184914352", + "year": "1997", + "title": "statistical parsing with a context free grammar and word statistics", + "label": [ + "28490314", + "2777875368", + "97212296", + "137293760", + "2777530160", + "42560504", + "60690694", + "147547768", + "118364021", + "166651950", + "186644900", + "204321447", + "2777753217" + ], + "author": [ + "103415842" + ], + "reference": [ + "170869742", + "1632114991", + "2110882317", + "2127314673", + "2153439141", + "2161204834", + "2787704407" + ], + "abstract": "we describe a parsing system based upon a language model for english that is in turn based upon assigning probabilities to possible parses for a sentence this model is used in a parsing system by finding the parse for the sentence with the highest probability this system outperforms previous schemes as this is the third in a series of parsers by different authors that are similar enough to invite detailed comparisons but different enough to give rise to different levels of performance we also report on some experiments designed to identify what aspects of these systems best explain their relative performance", + "title_raw": "Statistical parsing with a context-free grammar and word statistics", + "abstract_raw": "We describe a parsing system based upon a language model for English that is, in turn, based upon assigning probabilities to possible parses for a sentence. This model is used in a parsing system by finding the parse for the sentence with the highest probability. This system outperforms previous schemes. 
As this is the third in a series of parsers by different authors that are similar enough to invite detailed comparisons but different enough to give rise to different levels of performance, we also report on some experiments designed to identify what aspects of these systems best explain their relative performance.", + "link": "https://www.semanticscholar.org/paper/2a5e619f2c5f4220438b1357e596db5b1578398d", + "scraped_abstract": null, + "citation_best": 562 + }, + { + "paper": "2114975944", + "venue": "1184914352", + "year": "1997", + "title": "building concept representations from reusable components", + "label": [ + "4554734", + "154945302", + "107457646", + "115925183" + ], + "author": [ + "3167873500", + "2162042060" + ], + "reference": [ + "1493693580", + "1514356467", + "1533008268", + "1579803652", + "1616151740", + "1677497782", + "1733433686", + "2010315125", + "2019837585", + "2023546887", + "2038386889", + "2052006943", + "2057440130", + "2066814802", + "2131172842" + ], + "abstract": "our goal is to build knowledge based systems capable of answering a wide variety of questions including questions that are unanticipated when the knowledge base is built for systems to achieve this level of competence and generality they require the ability to dynamically construct new concept representations and to do so in response to the questions arld tasks posed to them our approach to meeting this requirement is to build knowledge bases of generalized representational components and to develop methods for automatically composing components on demand this work extends the normal inheritance approach used in frame based systems and imports ideas from several different areas of ai in particular compositional modeling terminological reasoning and ontological engineering the contribution of this work is a novel integration of these methods that improves the efficiency of building knowledge bases and the robustness of using them", + "title_raw": "Building concept 
representations from reusable components", + "abstract_raw": "Our goal is to build knowledge-based systems capable of answering a wide variety of questions, including questions that are unanticipated when the knowledge base is built. For systems to achieve this level of competence and generality, they require the ability to dynamically construct new concept representations, and to do so in response to the questions arLd tasks posed to them. Our approach to meeting this requirement is to build knowledge bases of generalized, representational components, and to develop methods for automatically composing components on demand. This work extends the normal inheritance approach used in frame-based systems, and imports ideas from several different areas of AI, in particular compositional modeling, terminological reasoning, and ontological engineering. The contribution of this work is a novel integration of these methods that improves the efficiency of building knowledge bases and the robustness of using them.", + "link": "https://www.semanticscholar.org/paper/00ee2e3f19be3be3a0303ac08ed5fc60e0d38c69", + "scraped_abstract": null, + "citation_best": 63 + }, + { + "paper": "1483210996", + "venue": "1184914352", + "year": "1997", + "title": "fast context switching in real time propositional reasoning", + "label": [ + "2777027219", + "100850083", + "119857082", + "2777502361", + "2781039887", + "80444323", + "53833338" + ], + "author": [ + "2135971826", + "2103347707" + ], + "reference": [ + "78724527", + "1485493958", + "1605093979", + "1696263757", + "1965834286", + "1976303542", + "2128344648", + "2142597947", + "2147096558" + ], + "abstract": "the trend to increasingly capable and affordable control processors has generated an explosion of embedded real time gadgets that serve almost every function imaginable the daunting task of programming these gadgets is greatly alleviated with real time deductive engines that perform all execution and monitoring functions from a 
single core model fast response times are achieved using an incremental propositional deductive database an ltms ideally the cost of an ltms s incremental update should be linear in the number of labels that change between successive contexts unfortunately an ltms can expend a significant percentage of its time working on labels that remain constant between contexts this is caused by the ltms s conservative approach a context switch first removes all consequences of deleted clauses whether or not those consequences hold in the new context this paper presents a more aggressive incremental tms called the itms that avoids processing a significant number of these consequences that are unchanged our empirical evaluation for spacecraft control shows that the overhead of processing unchanged consequences can be reduced by a factor of seven", + "title_raw": "Fast context switching in real-time propositional reasoning", + "abstract_raw": "The trend to increasingly capable and affordable control processors has generated an explosion of embedded real-time gadgets that serve almost every function imaginable. The daunting task of programming these gadgets is greatly alleviated with real-time deductive engines that perform all execution and monitoring functions from a single core model. Fast response times are achieved using an incremental propositional deductive database (an LTMS). Ideally the cost of an LTMS's incremental update should be linear in the number of labels that change between successive contexts. Unfortunately an LTMS can expend a significant percentage of its time working on labels that remain constant between contexts. This is caused by the LTMS's conservative approach: a context switch first removes all consequences of deleted clauses, whether or not those consequences hold in the new context. This paper presents a more aggressive incremental TMS, called the ITMS, that avoids processing a significant number of these consequences that are unchanged. 
Our empirical evaluation for spacecraft control shows that the overhead of processing unchanged consequences can be reduced by a factor of seven.", + "link": "https://www.semanticscholar.org/paper/0bfa40fe50255a42cac34c3223871fec2239f4e4", + "scraped_abstract": null, + "citation_best": 39 + }, + { + "paper": "2098854122", + "venue": "1184914352", + "year": "1997", + "title": "a practical algorithm for finding optimal triangulations", + "label": [ + "68010082", + "167117609", + "178609930" + ], + "author": [ + "2776302560", + "1982493283" + ], + "reference": [ + "111380827", + "1533693718", + "1538881034", + "1593793857", + "2024291212", + "2034501206", + "2037106315", + "2038674294", + "2064796716", + "2085550081", + "2141876475", + "2143075689", + "2143474538", + "2154837415", + "2159080219" + ], + "abstract": "an algorithm called quicktree is developed for finding a triangulation t of a given undirected graph g such that the size of t s maximal clique is minimum and such that no other triangulation of g is a subgraph of t we have tested quicktree on graphs of up to 100 nodes for which the maximum clique in an optimal triangulation is of size 11 this is the first algorithm that can optimally triangulate graphs of such size in a reasonable time frame this algorithm is useful for constraint satisfaction problems and for bayesian inference through the clique tree inference algorithm", + "title_raw": "A practical algorithm for finding optimal triangulations", + "abstract_raw": "An algorithm called QUICKTREE is developed for finding a triangulation T of a given undirected graph G such that the size of T's maximal clique is minimum and such that no other triangulation of G is a subgraph of T. We have tested QUICKTREE on graphs of up to 100 nodes for which the maximum clique in an optimal triangulation is of size 11. This is the first algorithm that can optimally triangulate graphs of such size in a reasonable time frame. 
This algorithm is useful for constraint satisfaction problems and for Bayesian inference through the clique tree inference algorithm.", + "link": "https://www.semanticscholar.org/paper/7c1f5a78fdb24266467ee95dbae4f2447a0add17", + "scraped_abstract": null, + "citation_best": 74 + }, + { + "paper": "1589050831", + "venue": "1203999783", + "year": "1997", + "title": "translingual information retrieval a comparative evaluation", + "label": [ + "2779532271", + "2779235283", + "90288658", + "87546605", + "2779364598", + "23123220" + ], + "author": [ + "2100444261", + "2159253281", + "1988690815", + "2107643066", + "2156035455", + "2678562861" + ], + "reference": [], + "abstract": "translingual information retrieval tir con sists of providing a query in one language and searching document collections in one or more di erent languages this paper introduces new tir methods and reports on comparative tir experiments with these new methods and with previously reported ones in a realistic setting methods fall into two categories query trans lation based and statistical ir approaches es tablishing translingual associations the re sults show that using bilingual corpora for au tomated extraction of term equivalences in con text outperforms other methods translin gual versions of the generalized vector space model gvsm and latent semantic indexing lsi perform relatively well as does translin gual pseudo relevance feedback prf all showed relatively small performance loss be tween monolingual and translingual versions query translation based on a general machine readable bilingual dictionary heretofore the most popular method did not match the per formance of other more sophisticated methods also the previous very high lsi results in the literature were discon rmed by more realistic relevance based evaluations", + "title_raw": "Translingual Information Retrieval: A Comparative Evaluation", + "abstract_raw": "Translingual information retrieval TIR con sists of providing a query in 
one language and searching document collections in one or more di erent languages This paper introduces new TIR methods and reports on comparative TIR experiments with these new methods and with previously reported ones in a realistic setting Methods fall into two categories query trans lation based and statistical IR approaches es tablishing translingual associations The re sults show that using bilingual corpora for au tomated extraction of term equivalences in con text outperforms other methods Translin gual versions of the Generalized Vector Space Model GVSM and Latent Semantic Indexing LSI perform relatively well as does translin gual pseudo relevance feedback PRF All showed relatively small performance loss be tween monolingual and translingual versions Query translation based on a general machine readable bilingual dictionary heretofore the most popular method did not match the per formance of other more sophisticated methods Also the previous very high LSI results in the literature were discon rmed by more realistic relevance based evaluations", + "link": "https://www.semanticscholar.org/paper/29480bf9360ad251e97e428015d8c4861d97eabc", + "scraped_abstract": null, + "citation_best": 167 + }, + { + "paper": "2170913656", + "venue": "1130985203", + "year": "1997", + "title": "analysis and visualization of classifier performance comparison under imprecise class and cost distributions", + "label": [ + "119857082", + "36464697", + "178980831", + "95623464" + ], + "author": [ + "2158932634", + "2159035819" + ], + "reference": [ + "1520046401", + "1536719366", + "1554701668", + "1594031697", + "1595468493", + "1696243063", + "1882120692", + "1990748933", + "2016025989", + "2039024435", + "2104597806", + "2128128566", + "2146935111", + "2153504150", + "2912610563", + "3017143921", + "3085162807" + ], + "abstract": "applications of inductive learning algorithms to real world data mining problems have shown repeatedly that using accuracy to compare classifiers is not 
adequate because the underlying assumptions rarely hold we present a method for the comparison of classifier performance that is robust to imprecise class distributions and misclassification costs the roc convex hull method combines techniques from roc analysis decision analysis and computational geometry and adapts them to the particulars of analyzing learned classifiers the method is efficient and incremental minimizes the management of classifier performance data and allows for clear visual comparisons and sensitivity analyses", + "title_raw": "Analysis and visualization of classifier performance: comparison under imprecise class and cost distributions", + "abstract_raw": "Applications of inductive learning algorithms to real-world data mining problems have shown repeatedly that using accuracy to compare classifiers is not adequate because the underlying assumptions rarely hold. We present a method for the comparison of classifier performance that is robust to imprecise class distributions and misclassification costs. The ROC convex hull method combines techniques from ROC analysis, decision analysis and computational geometry, and adapts them to the particulars of analyzing learned classifiers. 
The method is efficient and incremental, minimizes the management of classifier performance data, and allows for clear visual comparisons and sensitivity analyses.", + "link": "https://www.semanticscholar.org/paper/4463e0b669b615252494547f2dd012815ec7595b", + "scraped_abstract": null, + "citation_best": 704 + }, + { + "paper": "282367566", + "venue": "1184151122", + "year": "1997", + "title": "on the complexity of database queries", + "label": [ + "192939062", + "172722865", + "77088390", + "65647387", + "118689300", + "192028432", + "24028149", + "99016210", + "157692150", + "23123220" + ], + "author": [ + "2220829341", + "294360276" + ], + "reference": [ + "52840101", + "193724012", + "1558832481", + "1586904125", + "1965737603", + "1973852671", + "1987965750", + "2022841964", + "2296148711" + ], + "abstract": "", + "title_raw": "On the Complexity of Database Queries.", + "abstract_raw": "", + "link": "https://www.semanticscholar.org/paper/4f077ff4b673cd6695a2ad5e60fa5fc46f6f843f", + "scraped_abstract": null, + "citation_best": 182 + }, + { + "paper": "1983078185", + "venue": "1140684652", + "year": "1997", + "title": "feature selection perceptron learning and a usability case study for text categorization", + "label": [ + "170130773", + "119857082", + "2777309117", + "60908668", + "148483581" + ], + "author": [ + "2146810117", + "2100531454", + "2125018349" + ], + "reference": [ + "1539741229", + "1594962278", + "1993934121", + "2040870580", + "2049384587", + "2058982198", + "2060216474", + "2094934653", + "2127994451", + "2440833291", + "3037715718" + ], + "abstract": "", + "title_raw": "Feature selection, perceptron learning, and a usability case study for text categorization", + "abstract_raw": "", + "link": "https://www.semanticscholar.org/paper/0c97e8fcd80d9a3779826f2930724c9d789faa05", + "scraped_abstract": null, + "citation_best": 127 + }, + { + "paper": "2129938590", + "venue": "1175089206", + "year": "1997", + "title": "fast parallel similarity search 
in multimedia databases", + "label": [ + "60509570", + "77088390", + "7374053", + "116738811", + "2781142347", + "83665646", + "178980831" + ], + "author": [ + "2013057434", + "2771206607", + "1136592811", + "2147343253", + "1919135125" + ], + "reference": [ + "1497131759", + "1578013635", + "1723433588", + "1975830550", + "1982822209", + "2000830496", + "2010595692", + "2024668293", + "2037365151", + "2041989747", + "2046144220", + "2052695126", + "2055043387", + "2063908367", + "2104128006", + "2136006972", + "2136196605", + "2150661231", + "2151135734", + "2151367350", + "2238624099", + "2997141990", + "3041834803", + "3143316023" + ], + "abstract": "most similarity search techniques map the data objects into some high dimensional feature space the similarity search then corresponds to a nearest neighbor search in the feature space which is computationally very intensive in this paper we present a new parallel method for fast nearest neighbor search in high dimensional feature spaces the core problem of designing a parallel nearest neighbor algorithm is to find an adequate distribution of the data onto the disks unfortunately the known declustering methods to not perform well for high dimensional nearest neighbor search in contrast our method has been optimized based on the special properties of high dimensional spaces and therefore provides a near optimal distribution of the data items among the disks the basic idea of our data declustering technique is to assign the buckets corresponding to different quadrants of the data space to different disks we show that our technique in contrast to other declustering methods guarantees that all buckets corresponding to neighboring quadrants are assigned to different disks we evaluate our method using large amounts of real data up to 40 mbytes and compare it with the best known data declustering method the hilbert curve our experiments show that our method provides an almost linear speed up and a constant scale up 
additionally it outperforms the hilbert approach by a factor of up to 5", + "title_raw": "Fast parallel similarity search in multimedia databases", + "abstract_raw": "Most similarity search techniques map the data objects into some high-dimensional feature space. The similarity search then corresponds to a nearest-neighbor search in the feature space which is computationally very intensive. In this paper, we present a new parallel method for fast nearest-neighbor search in high-dimensional feature spaces. The core problem of designing a parallel nearest-neighbor algorithm is to find an adequate distribution of the data onto the disks. Unfortunately, the known declustering methods to not perform well for high-dimensional nearest-neighbor search. In contrast, our method has been optimized based on the special properties of high-dimensional spaces and therefore provides a near-optimal distribution of the data items among the disks. The basic idea of our data declustering technique is to assign the buckets corresponding to different quadrants of the data space to different disks. We show that our technique - in contrast to other declustering methods - guarantees that all buckets corresponding to neighboring quadrants are assigned to different disks. We evaluate our method using large amounts of real data (up to 40 MBytes) and compare it with the best known data declustering method, the Hilbert curve. Our experiments show that our method provides an almost linear speed-up and a constant scale-up. 
Additionally, it outperforms the Hilbert approach by a factor of up to 5.", + "link": "https://www.semanticscholar.org/paper/5b6f2a31b14b3f6bfc89384ae16a8f4652186df6", + "scraped_abstract": null, + "citation_best": 173 + }, + { + "paper": "2153131460", + "venue": "1171178643", + "year": "1997", + "title": "continuous profiling where have all the cycles gone", + "label": [ + "160145156", + "4822641", + "149635348", + "98183937", + "111919701", + "187191949", + "163985040", + "115537543", + "176001210", + "189783530" + ], + "author": [ + "2190404745", + "194999255", + "2429370538", + "2575315241", + "2695385476", + "2129318710", + "2677335079", + "2277801069", + "2113042477", + "206448238" + ], + "reference": [ + "90136652", + "150431575", + "1974087624", + "2007845800", + "2031222117", + "2031487553", + "2032732648", + "2084531306", + "2088607339", + "2111692137", + "2141342364", + "2144433126", + "2153228154", + "2162612712", + "2163488221" + ], + "abstract": "this article describes the digital continuous profiling infrastructure a sampling based profiling system designed to run continuously on production systems the system supports multiprocessors works on unmodified executables and collects profiles for entire systems including user programs shared libraries and the operating system kernel samples are collected at a high rate over 5200 samples sec per 333mhz processor yet with low overhead 1 3 slowdown for most workloads analysis tools supplied with the profiling system use the sample data to produce a precise and accurate accounting down to the level of pipeline stalls incurred by individual instructions of where time is bring spent when instructions incur stalls the tools identify possible reasons such as cache misses branch mispredictions and functional unit contention the fine grained instruction level analysis guides users and automated optimizers to the causes of performance problems and provides important insights for fixing them", + "title_raw": 
"Continuous profiling: where have all the cycles gone?", + "abstract_raw": "This article describes the Digital Continuous Profiling Infrastructure, a sampling-based profiling system designed to run continuously on production systems. The system supports multiprocessors, works on unmodified executables, and collects profiles for entire systems, including user programs, shared libraries, and the operating system kernel. Samples are collected at a high rate (over 5200 samples/sec. per 333MHz processor), yet with low overhead (1\u20133% slowdown for most workloads). Analysis tools supplied with the profiling system use the sample data to produce a precise and accurate accounting, down to the level of pipeline stalls incurred by individual instructions, of where time is bring spent. When instructions incur stalls, the tools identify possible reasons, such as cache misses, branch mispredictions, and functional unit contention. The fine-grained instruction-level analysis guides users and automated optimizers to the causes of performance problems and provides important insights for fixing them.", + "link": "https://www.semanticscholar.org/paper/00c581e956843c6e93009ea9146d69201a928888", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2154766204", + "venue": "1171178643", + "year": "1997", + "title": "disco running commodity operating systems on scalable multiprocessors", + "label": [ + "133875982", + "25344961", + "4822641", + "149635348", + "35939892", + "111919701", + "48044578", + "2777071140", + "2780940931", + "2779960034" + ], + "author": [ + "2670407451", + "2137499016", + "2102969555" + ], + "reference": [ + "1558024929", + "1650006494", + "1975025054", + "2011992756", + "2029764709", + "2064343267", + "2085407655", + "2088837715", + "2100720297", + "2122960384", + "2125895608", + "2128336546", + "2129269323", + "2134484933", + "2140455011", + "2141253292", + "2145021036", + "2150348590", + "2152056423", + "2157074753", + "2171989104", + 
"2537320856", + "2752885492", + "3160366303" + ], + "abstract": "in this paper we examine the problem of extending modern operating systems to run efficiently on large scale shared memory multiprocessors without a large implementation effort our approach brings back an idea popular in the 1970s virtual machine monitors we use virtual machines to run multiple commodity operating systems on a scalable multiprocessor this solution addresses many of the challenges facing the system software for these machines we demonstrate our approach with a prototype called disco that can run multiple copies of silicon graphics irix operating system on a multiprocessor our experience shows that the overheads of the monitor are small and that the approach provides scalability as well as the ability to deal with the non uniform memory access time of these systems to reduce the memory overheads associated with running multiple operating systems we have developed techniques where the virtual machines transparently share major data structures such as the program code and the file system buffer cache we use the distributed system support of modern operating systems to export a partial single system image to the users the overall solution achieves most of the benefits of operating systems customized for scalable multiprocessors yet it can be achieved with a significantly smaller implementation effort", + "title_raw": "Disco: running commodity operating systems on scalable multiprocessors", + "abstract_raw": "In this paper we examine the problem of extending modern operating systems to run efficiently on large-scale shared memory multiprocessors without a large implementation effort. Our approach brings back an idea popular in the 1970s, virtual machine monitors. We use virtual machines to run multiple commodity operating systems on a scalable multiprocessor. This solution addresses many of the challenges facing the system software for these machines. 
We demonstrate our approach with a prototype called Disco that can run multiple copies of Silicon Graphics' IRIX operating system on a multiprocessor. Our experience shows that the overheads of the monitor are small and that the approach provides scalability as well as the ability to deal with the non-uniform memory access time of these systems. To reduce the memory overheads associated with running multiple operating systems, we have developed techniques where the virtual machines transparently share major data structures such as the program code and the file system buffer cache. We use the distributed system support of modern operating systems to export a partial single system image to the users. The overall solution achieves most of the benefits of operating systems customized for scalable multiprocessors yet it can be achieved with a significantly smaller implementation effort.", + "link": "https://www.semanticscholar.org/paper/5d6134c43022494d65299500b22380105abec301", + "scraped_abstract": null, + "citation_best": 0 + }, + { + "paper": "2113913089", + "venue": "1133523790", + "year": "1998", + "title": "integrating reliable memory in databases", + "label": [ + "89089495", + "53838383", + "51290061", + "98986596", + "115537543", + "201148951", + "113166858", + "144240696", + "162100846", + "93446704", + "74426580", + "176649486", + "196697905", + "171675096", + "63511323", + "202623185", + "133875982", + "149635348", + "77088390", + "3720319", + "41036726", + "57863822", + "133371097", + "136085584" + ], + "author": [ + "2973276855", + "2101505567" + ], + "reference": [ + "178848230", + "1500454533", + "1515932031", + "1529012417", + "1552081212", + "1573399142", + "1911660734", + "1977773183", + "1984252352", + "1985467476", + "2010042648", + "2020082738", + "2043934800", + "2047226031", + "2070761976", + "2079029390", + "2079267582", + "2083469471", + "2088221489", + "2098473740", + "2104954161", + "2106887953", + "2111499272", + "2113375596", + 
"2117034946", + "2119601151", + "2124310356", + "2124516381", + "2129269323", + "2136390693", + "2138458852", + "2138553204", + "2144329343", + "2147853062", + "2150864656", + "2151666023", + "2151745115", + "2153531096", + "2154817671", + "2155323584", + "2156205360", + "2156601455", + "2157381427", + "2159477904", + "2172126130", + "2178497290", + "2914261617" + ], + "abstract": "recent results in the rio project at the university of michigan show that it is possible to create an area of main memory that is as safe as disk from operating system crashes this paper explores how to integrate the reliable memory provided by the rio file cache into a database system prior studies have analyzed the performance benefits of reliable memory we focus instead on how different designs affect reliability we propose three designs for integrating reliable memory into databases non persistent database buffer cache persistent database buffer cache and persistent database buffer cache with protection non persistent buffer caches use an i o interface to reliable memory and require the fewest modifications to existing databases however they waste memory capacity and bandwidth due to double buffering persistent buffer caches use a memory interface to reliable memory by mapping it into the database address space this places reliable memory under complete database control and eliminates double buffering but it may expose the buffer cache to database errors our third design reduces this exposure by write protecting the buffer pages extensive fault tests show that mapping reliable memory into the database address space does not significantly hurt reliability this is because wild stores rarely touch dirty committed pages written by previous transactions as a result we believe that databases should use a memory interface to reliable memory", + "title_raw": "Integrating reliable memory in databases", + "abstract_raw": "Recent results in the Rio project at the University of Michigan show 
that it is possible to create an area of main memory that is as safe as disk from operating system crashes. This paper explores how to integrate the reliable memory provided by the Rio file cache into a database system. Prior studies have analyzed the performance benefits of reliable memory; we focus instead on how different designs affect reliability. We propose three designs for integrating reliable memory into databases: non-persistent database buffer cache, persistent database buffer cache, and persistent database buffer cache with protection. Non-persistent buffer caches use an I/O interface to reliable memory and require the fewest modifications to existing databases. However, they waste memory capacity and bandwidth due to double buffering. Persistent buffer caches use a memory interface to reliable memory by mapping it into the database address space. This places reliable memory under complete database control and eliminates double buffering, but it may expose the buffer cache to database errors. Our third design reduces this exposure by write protecting the buffer pages. Extensive fault tests show that mapping reliable memory into the database address space does not significantly hurt reliability. This is because wild stores rarely touch dirty, committed pages written by previous transactions. 
As a result, we believe that databases should use a memory interface to reliable memory.", + "link": "https://www.semanticscholar.org/paper/fe9ef1589f2cae5df499efb0b975ddda2acfdf25", + "scraped_abstract": null, + "citation_best": 32 + }, + { + "paper": "2138637314", + "venue": "1184914352", + "year": "1996", + "title": "a novel application of theory refinement to student modeling", + "label": [ + "119857082", + "4554734", + "154945302", + "2778371403", + "2776803701", + "177264268", + "2779336797" + ], + "author": [ + "2475685823", + "2167433806" + ], + "reference": [ + "3748936", + "121826095", + "124949870", + "1530984916", + "1573248988", + "1593523470", + "1966935808", + "1984938651", + "1998696323", + "1999138184", + "2044764672", + "2098031089", + "2130340789", + "2132602293", + "2146777307", + "2162152259", + "2165520550", + "2189004000" + ], + "abstract": "theory refinement systems developed in machine learning automatically modify a knowledge base to render it consistent with a set of classified training examples we illustrate a novel application of these techniques to the problem of constructing a student model for an intelligent tutoring system its our approach is implemented in an its authoring system called assert which uses theory refinement to introduce errors into an initially correct knowledge base so that it models incorrect student behavior the efficacy of the approach has been demonstrated by evaluating a tutor developed with assert with 75 students tested on a classification task covering concepts from an introductory course on the c programmm g ia nguage the system produced reasonably accurate models and students who received feedback based on these models performed significantly better on a post test than students who received simple reteaching", + "title_raw": "A novel application of theory refinement to student modeling", + "abstract_raw": "Theory refinement systems developed in machine learning automatically modify a knowledge base to 
render it consistent with a set of classified training examples. We illustrate a novel application of these techniques to the problem of constructing a student model for an intelligent tutoring system (ITS). Our approach is implemented in an ITS authoring system called ASSERT which uses theory refinement to introduce errors into an initially correct knowledge base so that it models incorrect student behavior. The efficacy of the approach has been demonstrated by evaluating a tutor developed with ASSERT with 75 students tested on a classification task covering concepts from an introductory course on the C++ programmm. g Ia nguage. The system produced reasonably accurate models and students who received feedback based on these models performed significantly better on a post test than students who received simple reteaching.", + "link": "https://www.semanticscholar.org/paper/466b65ec9f13864e0804362aa64d352ba33a117f", + "scraped_abstract": null, + "citation_best": 29 + }, + { + "paper": "1600919542", + "venue": "1184914352", + "year": "1996", + "title": "pushing the envelope planning propositional logic and stochastic search", + "label": [ + "203208320", + "2777116644", + "69562738", + "154945302", + "2777252908", + "2778247108", + "80444323", + "190509626", + "125583679" + ], + "author": [ + "1966271946", + "1966117383" + ], + "reference": [ + "4789806", + "84772334", + "109854102", + "141377150", + "145815032", + "1491690797", + "1502646816", + "1507678707", + "1515486073", + "1561608403", + "1572073643", + "1599103728", + "1599735882", + "1605189129", + "1619748997", + "1667362966", + "1667614912", + "1699282338", + "1731763842", + "1843145778", + "1964878205", + "1983792103", + "2017127020", + "2024060531", + "2054497239", + "2057361103", + "2121766240", + "2138162238", + "2145280355", + "2161256997", + "2165372647", + "3021539726" + ], + "abstract": "planning is a notoriously hard combinatorial search problem in many interesting domains current planning algorithms 
fail to scale up gracefully by combining a general stochastic search algorithm and appropriate problem encodings based on propositional logic we are able to solve hard planning problems many times faster than the best current planning systems although stochastic methods have been shown to be very effective on a wide range of scheduling problems this is the first demonstration of its power on truly challenging classical planning instances this work also provides a new perspective on representational issues in planning", + "title_raw": "Pushing the envelope: planning, propositional logic, and stochastic search", + "abstract_raw": "Planning is a notoriously hard combinatorial search problem. In many interesting domains, current planning algorithms fail to scale up gracefully. By combining a general, stochastic search algorithm and appropriate problem encodings based on propositional logic, we are able to solve hard planning problems many times faster than the best current planning systems. Although stochastic methods have been shown to be very effective on a wide range of scheduling problems, this is the first demonstration of its power on truly challenging classical planning instances. 
This work also provides a new perspective on representational issues in planning.", + "link": "https://www.semanticscholar.org/paper/141c77b1d82bcae04a293c972cea02502e181dba", + "scraped_abstract": null, + "citation_best": 872 + }, + { + "paper": "1510087232", + "venue": "1184914352", + "year": "1996", + "title": "verification of knowledge bases based on containment checking", + "label": [ + "120567893", + "4554734", + "207685749", + "153269930", + "115925183", + "124101348", + "2781289151", + "16963264", + "102993220", + "80444323" + ], + "author": [ + "2308649827", + "2048382749" + ], + "reference": [ + "13925634", + "27258760", + "87432413", + "90144283", + "174875978", + "208439881", + "229019754", + "1498153914", + "1514356467", + "1536088796", + "1992681763", + "1992810975", + "2017372888", + "2025868887", + "2028222220", + "2028864278", + "2039832885", + "2056660085", + "2110463644", + "2159525276", + "2171094216", + "2172111502", + "2270745039", + "2487964923" + ], + "abstract": "building complex knowledge based applications requires encoding large amounts of domain knowledge after acquiring knowledge from domain experts much of the effort in building a knowledge base goes into verifying that the knowledge is encoded correctly we consider the problem of verifying hybrid knowledge bases that contain both horn rules and a terminology in a description logic our approach to the verification problem is based on showing a close relationship to the problem of query containment our first contribution based on this relationship is presenting a thorough analysis of the decidability and complexity of the verification problem for knowledge bases containing recursive rules and the interpreted predicates and second we show that important new classes of constraints on correct inputs and outputs can be expressed in a hybrid setting in which a description logic class hierarchy is also considered and we present the first complete algorithm for verifying such hybrid knowledge 
bases", + "title_raw": "Verification of knowledge bases based on containment checking", + "abstract_raw": "Building complex knowledge based applications requires encoding large amounts of domain knowledge. After acquiring knowledge from domain experts, much of the effort in building a knowledge base goes into verifying that the knowledge is encoded correctly. We consider the problem of verifying hybrid knowledge bases that contain both Horn rules and a terminology in a description logic. Our approach to the verification problem is based on showing a close relationship to the problem of query containment. Our first contribution, based on this relationship, is presenting a thorough analysis of the decidability and complexity of the verification problem, for knowledge bases containing recursive rules and the interpreted predicates =, \u2264, < and \u2260. Second, we show that important new classes of constraints on correct inputs and outputs can be expressed in a hybrid setting, in which a description logic class hierarchy is also considered, and we present the first complete algorithm for verifying such hybrid knowledge bases.", + "link": "https://www.semanticscholar.org/paper/5c8f5e53e0ba6c7034c16c678143930fd76f89f5", + "scraped_abstract": null, + "citation_best": 14 + }, + { + "paper": "2167345029", + "venue": "1185109434", + "year": "1996", + "title": "automatic compiler inserted i o prefetching for out of core applications", + "label": [ + "111919701", + "76399640", + "169590947", + "154690210", + "88196245", + "133588205", + "2778514511", + "79470037" + ], + "author": [ + "40863599", + "2032689864", + "2172560949" + ], + "reference": [ + "45001393", + "88132825", + "95542644", + "100424709", + "110734074", + "112400010", + "182231065", + "1498009952", + "1971636727", + "1973326289", + "2007194775", + "2023034777", + "2043473567", + "2054408207", + "2055437997", + "2055951513", + "2064343267", + "2064712511", + "2074919500", + "2077790567", + "2080141004", + 
"2099958604", + "2108155100", + "2112121929", + "2126292142", + "2135652458", + "2143468440", + "2147139455", + "2147853062", + "2150864656", + "2152018352", + "2421851683", + "2504458123" + ], + "abstract": "current operating systems offer poor performance when a numeric application s working set does not fit in main memory as a result programmers who wish to solve out of core problems efficiently are typically faced with the onerous task of rewriting an application to use explicit i o operations e g read write in this paper we propose and evaluate a fully automatic technique which liberates the programmer from this task provides high performance and requires only minimal changes to current operating systems in our scheme the compiler provides the crucial information on future access patterns without burdening the programmer the operating system supports non binding prefetch and release hints for managing i o and the operating system cooperates with a run time layer to accelerate performance by adapting to dynamic behavior and minimizing prefetch overhead this approach maintains the abstraction of unlimited virtual memory for the programmer gives the compiler the flexibility to aggressively move prefetches back ahead of references and gives the operating system the flexibility to arbitrate between the competing resource demands of multiple applications we have implemented our scheme using the suif compiler and the hurricane operating system our experimental results demonstrate that our fully automatic scheme effectively hides the i o latency in out ofcore versions of the entire nas parallel benchmark suite thus resulting in speedups of roughly twofold for five of the eight applications with one application speeding up by over threefold", + "title_raw": "Automatic compiler-inserted I/O prefetching for out-of-core applications", + "abstract_raw": "Current operating systems offer poor performance when a numeric application\u2019s working set does not fit in main 
memory. As a result, programmers who wish to solve \u201cout-of-core\u201d problems efficiently are typically faced with the onerous task of rewriting an application to use explicit I/O operations (e.g., read/write). In this paper, we propose and evaluate a fully-automatic technique which liberates the programmer from this task, provides high performance, and requires only minimal changes to current operating systems. In our scheme, the compiler provides the crucial information on future access patterns without burdening the programmer, the operating system supports non-binding prefetch and release hints for managing I/O, and the operating system cooperates with a run-time layer to accelerate performance by adapting to dynamic behavior and minimizing prefetch overhead. This approach maintains the abstraction of unlimited virtual memory for the programmer, gives the compiler the flexibility to aggressively move prefetches back ahead of references, and gives the operating system the flexibility to arbitrate between the competing resource demands of multiple applications. We have implemented our scheme using the SUIF compiler and the Hurricane operating system. 
Our experimental results demonstrate that our fully-automatic scheme effectively hides the I/O latency in out-ofcore versions of the entire NAS Parallel benchmark suite, thus resulting in speedups of roughly twofold for five of the eight applications, with one application speeding up by over threefold.", + "link": "https://www.semanticscholar.org/paper/364b13a64c5cdd2af8a9bbd29ced268cc1546108", + "scraped_abstract": null, + "citation_best": 203 + }, + { + "paper": "2150769115", + "venue": "1185109434", + "year": "1996", + "title": "safe kernel extensions without run time checking", + "label": [ + "50831359", + "158379750", + "98234853", + "199360897", + "2775836774", + "115168132", + "2776219632" + ], + "author": [ + "281330718", + "2139030624" + ], + "reference": [ + "1965348053", + "2011669306", + "2034711041", + "2046079722", + "2079029390", + "2137296754", + "2171691057" + ], + "abstract": "abstract this paper describes a mechanism by which an operating system kernel can determine with certainty that it is safe to execute a binary supplied by an untrusted source the kernel first defines a safety policy and makes it public then using this policy an application can provide binaries in a special form called proof carrying code or simply pcc each pcc binary contains in addition to the native code a formal proof that the code obeys the safety policy the kernel can easily validate the proof without using cryptography and without consulting any external trusted entities if the validation succeeds the code is guaranteed to respect the safety policy without relying on run time checks the main practical difficulty of pcc is in generating the safety proofs in order to gain some preliminary experience with this we have written several network packet filters in hand tuned dec alpha assembly language and then generated pcc binaries for them using a special prototype assembler the pcc binaries can be executed with no run time over head beyond a one time cost of 1 to 3 
milliseconds for validating the enclosed proofs the net result is that our packet filters are formally guaranteed to be safe and are faster than packet filters created using berkeley packet filters software fault isolation or safe languages such as modula 3", + "title_raw": "Safe kernel extensions without run-time checking", + "abstract_raw": "Abstract : This paper describes a mechanism by which an operating system kernel can determine with certainty that it is safe to execute a binary supplied by an untrusted source. The kernel first defines a safety policy and makes it public. Then, using this policy, an application can provide binaries in a special form called proof-carrying code, or simply PCC. Each PCC binary contains, in addition to the native code, a formal proof that the code obeys the safety policy. The kernel can easily validate the proof without using cryptography and without consulting any external trusted entities. If the validation succeeds, the code is guaranteed to respect the safety policy without relying on run-time checks. The main practical difficulty of PCC is in generating the safety proofs. In order to gain some preliminary experience with this, we have written several network packet filters in hand-tuned DEC Alpha assembly language, and then generated PCC binaries for them using a special prototype assembler. The PCC binaries can be executed with no run-time over-head, beyond a one-time cost of 1 to 3 milliseconds for validating the enclosed proofs. 
The net result is that our packet filters are formally guaranteed to be safe and are faster than packet filters created using Berkeley Packet Filters, Software Fault Isolation, or safe languages such as Modula-3.", + "link": "https://www.semanticscholar.org/paper/bd881b522b9ec92f10cfdc3ffc68a2c33e386067", + "scraped_abstract": null, + "citation_best": 56 + }, + { + "paper": "2146815834", + "venue": "1140684652", + "year": "1996", + "title": "retrieving spoken documents by combining multiple index sources", + "label": [ + "2522767166", + "23123220" + ], + "author": [ + "3111909583", + "2778484070", + "2169254595", + "2125228453" + ], + "reference": [ + "102158453", + "149529473", + "860905747", + "1482214997", + "1484670493", + "2014415866", + "2084457609", + "2087801610", + "2096421062", + "2098162425", + "2099733939", + "2099886908", + "2125838338", + "2130722890", + "2134275669", + "2139090444", + "2142749559", + "2153331812", + "2157887661", + "2158069733", + "2158623315", + "2610440952" + ], + "abstract": "", + "title_raw": "Retrieving spoken documents by combining multiple index sources", + "abstract_raw": "", + "link": "https://www.semanticscholar.org/paper/85fd61753b8f3574d79a4454349594c33e1624af", + "scraped_abstract": null, + "citation_best": 115 + }, + { + "paper": "2009343025", + "venue": "1131589359", + "year": "1996", + "title": "supporting stored video reducing rate variability and end to end resource requirements through optimal smoothing", + "label": [ + "3770464", + "31258907", + "761482", + "97824396", + "2779618445", + "177264268", + "79403827", + "57667952", + "74296488" + ], + "author": [ + "1971000731", + "2164420873", + "1600468001", + "2177075905" + ], + "reference": [ + "1527961422", + "1600675845", + "1602452794", + "1602612625", + "1985112963", + "1991573814", + "2006309149", + "2027515838", + "2032159439", + "2040448692", + "2048283109", + "2093601931", + "2095451707", + "2107479178", + "2111764355", + "2116653150", + "2124430877", + 
"2126030708", + "2126194836", + "2126699048", + "2139080599", + "2141103050", + "2142810995", + "2148063787", + "2151666832", + "2155303087", + "2158144737", + "2158712333", + "2158863575", + "2163105713", + "2163654538", + "2165308660", + "3163467684" + ], + "abstract": "vbr compressed video is known to exhibit significant multiple time scale bit rate variability in this paper we consider the transmission of stored video from a server to a client across a high speed network and explore how the client buffer space can be used most effectively toward reducing the variability of the transmitted bit rate we present two basic results first we present an optimal smoothing algorithm for achieving the greatest possible reduction in rate variability when transmitting stored video to a client with given buffer size we provide a formal proof of optimality and demonstrate the performance of the algorithm on a set of long mpeg 1 encoded video traces second we evaluate the impact of optimal smoothing on the network resources needed for video transport under two network service models deterministic guaranteed service 1 9 and renegotiated cbr rcbr service 8 7 under both models we find the impact of optimal smoothing to be dramatic", + "title_raw": "Supporting stored video: reducing rate variability and end-to-end resource requirements through optimal smoothing", + "abstract_raw": "VBR compressed video is known to exhibit significant, multiple-time-scale bit rate variability. In this paper, we consider the transmission of stored video from a server to a client across a high speed network, and explore how the client buffer space can be used most effectively toward reducing the variability of the transmitted bit rate.We present two basic results. First, we present an optimal smoothing algorithm for achieving the greatest possible reduction in rate variability when transmitting stored video to a client with given buffer size. 
We provide a formal proof of optimality, and demonstrate the performance of the algorithm on a set of long MPEG-1 encoded video traces. Second, we evaluate the impact of optimal smoothing on the network resources needed for video transport, under two network service models: Deterministic Guaranteed service [1, 9] and Renegotiated CBR (RCBR) service [8, 7]. Under both models, we find the impact of optimal smoothing to be dramatic.", + "link": "https://www.semanticscholar.org/paper/cd253ccdf2ab366a1ffcc6d70bdeb0feac0f4b13", + "scraped_abstract": null, + "citation_best": 398 + }, + { + "paper": "2248022866", + "venue": "1131589359", + "year": "1996", + "title": "exploiting process lifetime distributions for dynamic load balancing", + "label": [ + "22684755", + "139330139", + "112968700", + "120314980" + ], + "author": [ + "2101141533", + "2492372021" + ], + "reference": [ + "63775848", + "76669767", + "195726286", + "1235030804", + "1513913200", + "1541695179", + "1555915743", + "1828132036", + "1979612918", + "1980249022", + "1984775898", + "1984869672", + "1994826215", + "2005387459", + "2022049964", + "2032237458", + "2041402579", + "2041927050", + "2050765519", + "2063026287", + "2068154195", + "2080634500", + "2081904683", + "2087388231", + "2089181240", + "2091040042", + "2096928599", + "2111087562", + "2112049462", + "2116118217", + "2130400345", + "2131906734", + "2132572305", + "2135184188", + "2137594707", + "2139571722", + "2140332639", + "2144771500", + "2152683364", + "2164106601", + "2164901581", + "2175085668", + "3152703111" + ], + "abstract": "we measure the distribution of lifetimes for unix processes and propose a functional form that fits this distribution well we use this functional form to derive a policy for preemptive migration and then use a trace driven simulator to compare our proposed policy with other preemptive migration policies and with a non preemptive load balancing strategy we find that contrary to previous reports the performance 
benefits of preemptive migration are significantly greater than those of non preemptive migration even when the memory transfer cost is high using a model of migration costs representative of current systems we find that preemptive migration reduces the mean delay queueing and migration by 35 50 compared to non preemptive migration", + "title_raw": "Exploiting process lifetime distributions for dynamic load balancing", + "abstract_raw": "We measure the distribution of lifetimes for UNIX processes and propose a functional form that fits this distribution well. We use this functional form to derive a policy for preemptive migration, and then use a trace-driven simulator to compare our proposed policy with other preemptive migration policies, and with a non-preemptive load balancing strategy. We find that, contrary to previous reports, the performance benefits of preemptive migration are significantly greater than those of non-preemptive migration, even when the memory-transfer cost is high. 
Using a model of migration costs representative of current systems, we find that preemptive migration reduces the mean delay (queueing and migration) by 35 - 50%, compared to non-preemptive migration.", + "link": "https://scholar.google.com/scholar?as_q=&num=10&btnG=Search+Scholar&as_epq=Exploiting+Process+Lifetime+Distributions+for+Dynamic+Load+Balancing&as_oq=&as_eq=&as_occt=any&as_sauthors=Harchol-Balter", + "scraped_abstract": null, + "citation_best": 13 + }, + { + "paper": "2143401113", + "venue": "1175089206", + "year": "1996", + "title": "implementing data cubes efficiently", + "label": [ + "45607770", + "2777032813", + "132964779", + "98199447", + "124101348", + "78168278", + "51823790", + "50820777", + "157692150", + "80444323", + "2780075982" + ], + "author": [ + "2324343394", + "2001295541", + "2289364316" + ], + "reference": [ + "173120155", + "1488870204", + "1538786304", + "1601435884", + "2000869424", + "2103201239", + "2113888164", + "2140646908", + "2143996311", + "2158237121" + ], + "abstract": "decision support applications involve complex queries on very large databases since response times should be small query optimization is critical users typically view the data as multidimensional data cubes each cell of the data cube is a view consisting of an aggregation of interest like total sales the values of many of these cells are dependent on the values of other cells in the data cube a common and powerful query optimization technique is to materialize some or all of these cells rather than compute them from raw data each time commercial systems differ mainly in their approach to materializing the data cube in this paper we investigate the issue of which cells views to materialize when it is too expensive to materialize all views a lattice framework is used to express dependencies among views we present greedy algorithms that work off this lattice and determine a good set of views to materialize the greedy algorithm performs within a small constant 
factor of optimal under a variety of models we then consider the most common case of the hypercube lattice and examine the choice of materialized views for hypercubes in detail giving some good tradeoffs between the space used and the average time to answer a query", + "title_raw": "Implementing data cubes efficiently", + "abstract_raw": "Decision support applications involve complex queries on very large databases. Since response times should be small, query optimization is critical. Users typically view the data as multidimensional data cubes. Each cell of the data cube is a view consisting of an aggregation of interest, like total sales. The values of many of these cells are dependent on the values of other cells in the data cube. A common and powerful query optimization technique is to materialize some or all of these cells rather than compute them from raw data each time. Commercial systems differ mainly in their approach to materializing the data cube. In this paper, we investigate the issue of which cells (views) to materialize when it is too expensive to materialize all views. A lattice framework is used to express dependencies among views. We present greedy algorithms that work off this lattice and determine a good set of views to materialize. The greedy algorithm performs within a small constant factor of optimal under a variety of models. 
We then consider the most common case of the hypercube lattice and examine the choice of materialized views for hypercubes in detail, giving some good tradeoffs between the space used and the average time to answer a query.", + "link": "https://www.semanticscholar.org/paper/f449bfc57afa2a0bd65de0173815b25ec4bf3046", + "scraped_abstract": null, + "citation_best": 1182 + }, + { + "paper": "2128061541", + "venue": "1175089206", + "year": "1994", + "title": "fast subsequence matching in time series databases", + "label": [ + "102392041", + "75165309", + "19453392", + "77088390", + "83665646", + "162319229", + "2778618084" + ], + "author": [ + "2198983026", + "2638814695", + "276012958" + ], + "reference": [ + "102536876", + "1487801850", + "1499049447", + "1589922412", + "1595505223", + "1611060744", + "1975830550", + "2040903332", + "2049202698", + "2055043387", + "2074429597", + "2078206416", + "2084367148", + "2093191240", + "2096137215", + "2100406636", + "2107110814", + "2117074936", + "2118269922", + "2126455177", + "2131620262", + "2147956424", + "2149173084", + "2150661231", + "2151135734", + "2166559705", + "2784619191", + "2997027240" + ], + "abstract": "we present an efficient indexing method to locate 1 dimensional subsequences within a collection of sequences such that the subsequences match a given query pattern within a specified tolerance the idea is to map each data sequences into a small set of multidimensional rectangles in feature space then these rectangles can be readily indexed using traditional spatial access methods like the r tree 9 in more detail we use a sliding window over the data sequence and extract its features the result is a trail in feature space we propose an efficient and effective algorithm to divide such trails into sub trails which are subsequently represented by their minimum bounding rectangles mbrs we also examine queries of varying lengths and we show how to handle each case efficiently we implemented our method and carried 
out experiments on synthetic and real data stock price movements we compared the method to sequential scanning which is the only obvious competitor the results were excellent our method accelerated the search time from 3 times up to 100 times", + "title_raw": "Fast subsequence matching in time-series databases", + "abstract_raw": "We present an efficient indexing method to locate 1-dimensional subsequences within a collection of sequences, such that the subsequences match a given (query) pattern within a specified tolerance. The idea is to map each data sequences into a small set of multidimensional rectangles in feature space. Then, these rectangles can be readily indexed using traditional spatial access methods, like the R*-tree [9]. In more detail, we use a sliding window over the data sequence and extract its features; the result is a trail in feature space. We propose an efficient and effective algorithm to divide such trails into sub-trails, which are subsequently represented by their Minimum Bounding Rectangles (MBRs). We also examine queries of varying lengths, and we show how to handle each case efficiently. We implemented our method and carried out experiments on synthetic and real data (stock price movements). We compared the method to sequential scanning, which is the only obvious competitor. The results were excellent: our method accelerated the search time from 3 times up to 100 times.", + "link": "https://scholar.google.com/scholar?as_q=&num=10&btnG=Search+Scholar&as_epq=Fast+Subsequence+Matching+in+Time-Series+Databases&as_oq=&as_eq=&as_occt=any&as_sauthors=Faloutsos", + "scraped_abstract": null, + "citation_best": 1709 + } +] \ No newline at end of file