[ { "chunk_id": "6cc99256-c035-4b29-85dd-8274e357de2c", "text": "How To Grade a Test Without Knowing the Answers — A Bayesian\nGraphical Model for Adaptive Crowdsourcing and Aptitude Testing", "paper_id": "1206.6386", "title": "How To Grade a Test Without Knowing the Answers --- A Bayesian Graphical Model for Adaptive Crowdsourcing and Aptitude Testing", "authors": [ "Yoram Bachrach", "Thore Graepel", "Tom Minka", "John Guiver" ], "published_date": "2012-06-27", "primary_category": "cs.LG", "arxiv_url": "http://arxiv.org/abs/1206.6386v1", "chunk_index": 0, "total_chunks": 22, "char_count": 124, "word_count": 20, "chunking_strategy": "semantic" }, { "chunk_id": "489e40b1-fc89-4aa4-96f2-f8fb442605ef", "text": "Yoram Bachrach yobach@microsoft.com\nMicrosoft Research, Cambridge, UK Tom Minka minka@microsoft.com\nMicrosoft Research, Cambridge, UK John Guiver joguiver@microsoft.com\nMicrosoft Research, Cambridge, UK Thore Graepel thoreg@microsoft.com\nMicrosoft Research, Cambridge, UK", "paper_id": "1206.6386", "title": "How To Grade a Test Without Knowing the Answers --- A Bayesian Graphical Model for Adaptive Crowdsourcing and Aptitude Testing", "authors": [ "Yoram Bachrach", "Thore Graepel", "Tom Minka", "John Guiver" ], "published_date": "2012-06-27", "primary_category": "cs.LG", "arxiv_url": "http://arxiv.org/abs/1206.6386v1", "chunk_index": 1, "total_chunks": 22, "char_count": 271, "word_count": 28, "chunking_strategy": "semantic" }, { "chunk_id": "f2e125d8-1123-4181-badb-01a60015bfb6", "text": "Abstract two outcomes of the vote is \"correct\", and that each individual independently chooses the \"correct\" response\nWe propose a new probabilistic graphical 1\nwith probability p. 
The theorem states that if p > 2, model that jointly models the difficulties of\nthen adding more agents increases the probability of\nquestions, the abilities of participants and the\nmaking the correct decision, and the probability that\ncorrect answers to questions in aptitude testthe collective decision will be \"correct\" approaches 1\ning and crowdsourcing settings. We devise\nin the limit of infinitely many participating agents.\nan active learning/adaptive testing scheme\nbased on a greedy minimization of expected Recent technological advances make it easier to share\nmodel entropy, which allows a more efficient opinions and knowledge, enabling us to harness the\nresource allocation by dynamically choosing collective intelligence of crowds for solving tasks.\nthe next question to be asked based on the Companies can use crowdsourcing to carry out busiprevious responses. We present experimental ness tasks, using platforms such as Amazon Mechaniresults that confirm the ability of our model cal Turk. Such services allow us to collect the opinions\nto infer the required parameters and demon- of many individuals, but leave open the question of\nstrate that the adaptive testing scheme re- how to aggregate the collected data to reach decisions.\nquires fewer questions to obtain the same acA key technique for solving tasks using collective\ncuracy as a static test scenario.\nintelligence is to obtain information from multiple\nsources and aggregate it into a single complete solution. Consider a crowd of experts who are assigned\n1. Introduction\nwith many similar classification tasks, such as classiCollective decision making is a well-studied topic in fying many news articles according to their topic (\"polsocial choice, voting and artificial intelligence. 
It has itics\", \"business\", \"entertainment\" etc.).", "paper_id": "1206.6386", "title": "How To Grade a Test Without Knowing the Answers --- A Bayesian Graphical Model for Adaptive Crowdsourcing and Aptitude Testing", "authors": [ "Yoram Bachrach", "Thore Graepel", "Tom Minka", "John Guiver" ], "published_date": "2012-06-27", "primary_category": "cs.LG", "arxiv_url": "http://arxiv.org/abs/1206.6386v1", "chunk_index": 2, "total_chunks": 22, "char_count": 1994, "word_count": 300, "chunking_strategy": "semantic" }, { "chunk_id": "0e89aa9a-b8b4-4295-b29a-f050a936cdac", "text": "We refer to\nlong been known that decisions based on aggregating each expert as a participant and to each classification\nthe opinions of several agents can be of higher quality task as a question. Suppose each participant expresses\nthan those based on the opinions of single individuals. her opinion regarding the correct answer for each quesThe Condorcet Jury Theorem (de Caritat et al., 1785), tion, in the form of a response, chosen by her from the\ndating back to the 18th century, is concerned with a list of possible answers for that question. Similarly\ngroup of individuals attempting to reach a binary de- to Condorcet's Jury Theorem, we make the simplifycision by majority vote; it is assumed that one of the ing assumption that for each of the questions only one\nanswer is correct. We call such a domain a multiple\nAppearing in Proceedings of the 29 th International Confer- problem domain. Given the responses provided by the\nence on Machine Learning, Edinburgh, Scotland, UK, 2012. participants regarding the various questions in a mulCopyright 2012 by the author(s)/owner(s). How To Grade a Test Without Knowing the Answers tiple problem domain, how should we best determine 2. 
Related Work\nthe correct answer for each of the items?", "paper_id": "1206.6386", "title": "How To Grade a Test Without Knowing the Answers --- A Bayesian Graphical Model for Adaptive Crowdsourcing and Aptitude Testing", "authors": [ "Yoram Bachrach", "Thore Graepel", "Tom Minka", "John Guiver" ], "published_date": "2012-06-27", "primary_category": "cs.LG", "arxiv_url": "http://arxiv.org/abs/1206.6386v1", "chunk_index": 3, "total_chunks": 22, "char_count": 1243, "word_count": 206, "chunking_strategy": "semantic" }, { "chunk_id": "39be1429-d0c7-4254-a0b6-a5477dc42823", "text": "Which quesMeasuring intelligence is a key topic in psychology.tions are easy and which are hard? How can we find\nPsychologists showed that peoples' performance onthe most competent participants in the crowd? Which\nmany cognitive tasks is strongly correlated, so aquestions best test the ability of a participant?\nsingle statistical factor called \"general intelligence\"\nGiven the correct answers to each of the items, it is emerges (Anastasi et al., 1997). A measure for the pereasy to find the most competent participants, or dif- formance of groups of people in joint tasks, called \"colferentiate the easy and hard questions — a participant lective intelligence\", was investigated (Woolley et al.,\nwho has answered almost all the questions correctly is 2010). This approach focuses on explicit collaboralikely to be more skilled than a participant who had tion and interaction between members of the crowd.\nvery few correct responses. Typically, however, the Although in our setting participants do not interact\ncorrect answers are not known in advance — the whole directly, one can view our model as a method for inferpoint of crowdsourcing a classification task is to deter- ring correct answers to questions given the responses of\nmine the correct classifications for the items. a crowd of individuals. 
The number of correct answers\ninferred can serve as a measure of the intelligence of theA possible solution to the above problem is to first\ncrowd. In this sense, our work is somewhat similar toevaluate the skill of each expert by asking her to proother approaches which also use aggregated responsesvide responses to a set of items for which the correct\nto IQ tests for measuring collective intelligence (Lyle,answer is known (sometimes called a \"gold-set\"). A\n2008; Bachrach et al., 2012; Kosinski et al., 2012).prominent example of this, where all correct answers\nare known, is intelligence testing (Anastasi et al., Psychometricians developed a body of theory called\n1997). Psychologists have studied human intelligence, \"test theory\" which analyzes outcomes of psychologiand designed IQ tests for evaluating the aptitude of cal testing, such as the ability levels of participants or\nindividuals. These tests have been shown to be predic- the difficulty of questions in a test, trying to improve\ntive of a person's performance in many domains, such reliability in such tests (Anastasi et al., 1997). One\nas academic attainment and job success (Anastasi paradigm for designing tests of mental abilities, is the\net al., 1997). Such tests typically ask participants to \"item-response theory\" (Hambleton et al., 1991) (IRT\nrespond to questionnaires composed of many multiple- for short). IRT has been used to develop high-stakes\nchoice questions, and allow ranking participants ac- adaptive tests such as the Graduate Management Adcording to their individual skill levels after examining mission Test (GMAT). 
IRT is based on the idea that\nthe responses.", "paper_id": "1206.6386", "title": "How To Grade a Test Without Knowing the Answers --- A Bayesian Graphical Model for Adaptive Crowdsourcing and Aptitude Testing", "authors": [ "Yoram Bachrach", "Thore Graepel", "Tom Minka", "John Guiver" ], "published_date": "2012-06-27", "primary_category": "cs.LG", "arxiv_url": "http://arxiv.org/abs/1206.6386v1", "chunk_index": 4, "total_chunks": 22, "char_count": 2960, "word_count": 461, "chunking_strategy": "semantic" }, { "chunk_id": "57fd2b29-ea22-42aa-9241-2a95838f3012", "text": "The properties of responses to IQ tests the probability of a participant providing the correct\nhave been widely studied by psychometricians, so such response to a question is a function of both a paramdatasets can serve as a testing ground for exploring in- eter of the question and a parameter of the item (for\nference models for multiple problem domains. example, the question's difficulty and the person's aptitude). When applying aptitude tests, the parameterOur Contribution: We propose a new family of\nof the person is latent (cannot be directly observed),graphical models for analyzing responses in multiple\nand only its manifestation, in the form of the partici-problem domains and evaluate the models on a data\npant's responses, can be directly observed. Our frame-set of completed questionnaires of a standard IQ test.\nwork relies on a probabilistic graphical model (KollerThe proposed framework enables us to jointly infer\n& Friedman, 2009), using themes similar to IRT.the correct answer for each question (when these are\nnot known in advance), the difficulty levels of the ques- Many papers deal with merging opinions, ranging from\ntions, and the ability of each participant. 
We show how information aggregation in the semantic web (Kasneci\nthe model can: determine a probability distribution et al., 2010) to prediction markets (Pennock & Sami,\nover answers for a given question by aggregating the 2007). Frameworks such as Probabilistic Relational\nresponses of participants based on their abilities and Models (Getoor et al., 2007) combine a logical reprethe questions' difficulties; test the ability levels of par- sentation with probabilistic semantics, and allow inticipants efficiently by finding the best next question ference to aggregate information and opinions. One\nto ask in an adaptive way, depending on the previous basic method for collective decision making is voting.\nresponses; automatically calibrate aptitude tests from Voting was studied in social choice theory (Sen, 1986),\na set of questions and the responses provided by the which focuses on how participants can manipulate by\nparticipants, determining the relative difficulty levels lying about their preferences. We assume that the\nof the questions and their ability to discriminate be- experts' responses are their true opinion and focus on\ntween participants of similar but uneven skill levels. the inference problem. One application of our model is How To Grade a Test Without Knowing the Answers", "paper_id": "1206.6386", "title": "How To Grade a Test Without Knowing the Answers --- A Bayesian Graphical Model for Adaptive Crowdsourcing and Aptitude Testing", "authors": [ "Yoram Bachrach", "Thore Graepel", "Tom Minka", "John Guiver" ], "published_date": "2012-06-27", "primary_category": "cs.LG", "arxiv_url": "http://arxiv.org/abs/1206.6386v1", "chunk_index": 5, "total_chunks": 22, "char_count": 2488, "word_count": 382, "chunking_strategy": "semantic" }, { "chunk_id": "c4e54185-632e-49a6-9c70-fa64ff306244", "text": "aggregating crowdsourced opinions. 
A machine learn- ability of a participant, or the difficulty of a question.\ning approach for doing so which does not model task\ndifficulty was proposed in (Raykar et al., 2010) and a 3.1. The DARE Model\ntechnique that models task difficulty but uses an EM\nWe model a situation in which a set P of participantsapproach was proposed in (Whitehill et al., 2009). Anis available to answer a set Q of multiple choice ques-other method based on graphical models is (Welinder\ntions. We assume that for each question q ∈Q thereet al., 2010). In that model questions are endowed\nare Rq possible answers, only one of which, yq ∈Rq, iswith features, which could represent concepts or topcorrect. We model the process by which participantsics and participants have different areas of expertise\np ∈P produce responses rpq ∈Rq to questions q ∈Q.matching these topics. Our model focuses on a genWe assume that: a) Every participant has an under-eral domain in the spirit of test theory and IRT, and\nlying ability ap ∈R which determines her ability todoes not rely on specific features. An active learning\ndetermine the correct answer to questions q ∈Q. b)approach for labeling data was proposed in (Yan et al.,\nEach question q has an inherent difficulty dq ∈R which2011) and is similar to our adaptive IQ testing techdetermines how likely it is that participants p ∈P willnique. Another approach akin to ours is the TrueSkill\nknow the correct answer to question q.system (Herbrich et al., 2007), which uses a graphical model to estimate the relative skill levels of people We propose a joint probabilistic model whose factor\nbased on past contests. graph is given in Figure 1: The model has two parts,\none modeling the probability of participant p knowing\n3. 
Joint Probabilistic Model of the correct answer to question q (left of cpq in Figure 1), and one relating the true answer yq to question Difficulty, Ability, and Response\nq to the response rpq given by participant p depending\nWe present a probabilistic model for analyzing on them knowing the correct answer as represented by\nmultiple problem domains, which we refer to as cpq of the answer (right of cpq in Figure 1). Knowledge\nthe Difficulty-Ability-REsponse estimation model, or of the correct answer, cpq ∈{T, F}, is modeled as an\nDARE for short. The inputs to the model are re- interaction of the ability ap ∈R of participant p, and\nsponses that participants give to multiple choice ques- the difficulty dq ∈R of question q.", "paper_id": "1206.6386", "title": "How To Grade a Test Without Knowing the Answers --- A Bayesian Graphical Model for Adaptive Crowdsourcing and Aptitude Testing", "authors": [ "Yoram Bachrach", "Thore Graepel", "Tom Minka", "John Guiver" ], "published_date": "2012-06-27", "primary_category": "cs.LG", "arxiv_url": "http://arxiv.org/abs/1206.6386v1", "chunk_index": 6, "total_chunks": 22, "char_count": 2508, "word_count": 423, "chunking_strategy": "semantic" }, { "chunk_id": "16dee987-9871-4d6a-ad5e-4354848e75aa", "text": "Specifically, it is\ntions. Additional inputs may be ground truth informa- assumed to depend on the difference tpq := ap−dq via:\ntion for some or all of the questions. The model falls ∞\nZinto the framework of probabilistic graphical models. P(cpq = T|tpq, τq) := φ(√τq(x −tpq))θ(x) dx\nSuch models allow structurally describing the genera- −∞\ntive process assumed to underlie the observed data in = Φ √τqtpq . (1)\nterms of latent and observed random variables. 
In the Here φ denotes the standard Gaussian density φ(x) :=\ndomain of interest, information such as the correct re- √ −1\n2π exp(−x2/2) and Φ denotes the (sigmoidal) cusponse to a question, the ability of a participant, and t\nmulative Gaussian distribution Φ(t) := R −∞φ(x) dx;the difficulty of a question are modeled as unobserved\nθ(·) denotes the step function, and the precision τqvariables whereas the given response to a question by\ndetermines how discriminative question q is. The in-a user is viewed as an observed variable. The structegral representation emphasizes that the probabilityture of the model is determined by the conditional incan be viewed as emerging from a binary process re-dependence assumptions made about the variables in\nsulting from evaluating the step function θ on variablethe model. Pearl (Pearl, 1988) introduced Bayesian\nt with added Gaussian noise of variance τ −1.Networks (directed graphical models), which encode\nassumptions of conditional independence as a graph The response rpq is modeled as a mixture of two distriwhose vertices represent variables and whose edges butions. If participant p knows the correct answer to\nrepresent dependencies between variables. We use the question q, cpq = T, we constrain the response rpq to\nmore general notion of a factor graph, see e.g. (Koller match the correct answer, rpq = yq, otherwise we as-\n& Friedman, 2009), to describe the factorial structure sume that rpq is sampled uniformly at random from the\nof the assumed joint probability distribution among available answers, rpq ∼DiscreteUniform(Rq). 
After defining the structure of the model how this mixture is expressed as a gate (dashed pair of\nas a factor graph and setting the observed variables to boxes in Figure 1), which switches the factor connecttheir observed values, approximate message passing al- ing to rpq depending on the state of the variable cpq.\ngorithms (Koller & Friedman, 2009) can infer marginal Gates were introduced in (Minka & Winn, 2008) as\nprobability distributions of unknown variables of in- a powerful and flexible notation that simplifies factorterest such as the correct response to a question, the graph representations of mixture models. How To Grade a Test Without Knowing the Answers", "paper_id": "1206.6386", "title": "How To Grade a Test Without Knowing the Answers --- A Bayesian Graphical Model for Adaptive Crowdsourcing and Aptitude Testing", "authors": [ "Yoram Bachrach", "Thore Graepel", "Tom Minka", "John Guiver" ], "published_date": "2012-06-27", "primary_category": "cs.LG", "arxiv_url": "http://arxiv.org/abs/1206.6386v1", "chunk_index": 7, "total_chunks": 22, "char_count": 2713, "word_count": 434, "chunking_strategy": "semantic" }, { "chunk_id": "a54be0b5-72db-4b89-a34e-bec833da59a5", "text": "used to represent context-dependent conditional independence relations, and are suited for implementations\nof approximate message passing inference. In order to do inference on the model, we need to\ndefine prior distributions for the variables of interest. We assume factorizing Gaussian priors for the\nabilities ap ∼Normal(µp, σ2p) and difficulties dq ∼\nNormal(µq, σ2q). We choose a Gaussian prior as it lets\nus specify a range of plausible values based on two\nparameters (mean and variance) per variable, and admits a relatively simple approximate inference. The\nfactorization assumption reflects the belief that a pri- Figure 1. 
Factor graph for the joint difficulty-abilityori knowing the difficulty of one question would not response estimation (DARE) model.\nbe informative about the difficulty of another question, and similarly for the abilities of participants. We\nalso assume factorizing discrete uniform priors for the question-response triples depending on budget and\ntrue answers yq ∼DiscreteUniform(Rq) and for the time constraints. As shown in Section 4, providresponses rpq ∼DiscreteUniform(Rq) for participant- ing some ground-truth question-answer pairs (a \"goldquestion pairs. Finally, we define factorizing Gamma set\"), can improve the accuracy of the inferred answers\npriors for the precision parameters τq ∼Gamma(k, θ). yq because it can be used to assess the abilities ap of\nThe Gamma prior is conveniently parameterized by a the participants p more accurately, leading to more\nshape parameter k and a scale parameter θ, and is the accurate inference on the answers yq. Generally, for\nconjugate prior for the precision parameter τ := σ−2 every observed response r∗pq, we set the discrete prior\nof the normal distribution if the mean µ is known. distribution p(rpq) to a single point distribution conThis choice simplifies inference by approximate mes- centrated on the observed response r∗pq, and similarly\nsage passing because the posterior also takes the func- for every known ground-truth question-answer pair y∗q.\ntional form of the Gamma distribution. Given the data R and y, we wish to infer sevBased on the above specification we defined a joint eral approximate marginal (posterior) distributions:\nprobability distribution p(ap, dq, tpq, τq, cpq, rpq, yq) for the discrete distribution p(yq|R, y) over correct anspecific pairs of question q and participant p. 
Assum- swers yq, which assign a probability πqr ∈[0, 1] to\ning exchangeability of questions q and participants p each of the possible responses r ∈Rq; the Gaussian\nwe get a model with two plates as depicted in Fig- density p(ap|R, y) over abilities ap of participants p\nure 1, where one plate runs over participants p ∈P with means ˜µp and variances ˜σ2p; the Gaussian denand the other over questions q ∈Q (plates denote a sity p(dq|R, y) over difficulties dq of questions q with\nreplication of the fraction of the graphical model they means ˜µq and variances ˜σ2q; the Bernoulli distribution\ncontain). From a generative point of view, this models p(cpq|R, y) over correctness cpq of participant p's rea table with |P| rows (one for each participant p) and sponse to question q given by probabilities πpq; the\n|Q| columns (one for each question q), where entries discrete distribution p(rpq|R, y) over responses rpq of\nare the responses rpq of participants to questions. participant p to question q, which assign a probability πpqr ∈[0, 1] to each of the possible responses\n3.2. Probabilistic Inference r ∈Rq; the Gamma distribution p(τq|R, y) over the\nprecision/discrimination parameter τq, with scale paWe show how the model can infer quantities of interest. rameters θq and shape parameters kq. Generally, the data is given in the form of two incomInference in the model is done using approximateplete sets: A set of m participant-question-response\nmessage passing (see (Koller & Friedman, 2009)).triples R := {rp1q1, . . . , rpmqm} and a set of n groundWe used Infer.NET (Minka et al., 2010), a pack-truth question-answer pairs y := {yq1, . . . 
, yqn}.", "paper_id": "1206.6386", "title": "How To Grade a Test Without Knowing the Answers --- A Bayesian Graphical Model for Adaptive Crowdsourcing and Aptitude Testing", "authors": [ "Yoram Bachrach", "Thore Graepel", "Tom Minka", "John Guiver" ], "published_date": "2012-06-27", "primary_category": "cs.LG", "arxiv_url": "http://arxiv.org/abs/1206.6386v1", "chunk_index": 8, "total_chunks": 22, "char_count": 4046, "word_count": 639, "chunking_strategy": "semantic" }, { "chunk_id": "b3337a2e-d81a-404b-bfd9-56546883c9f1", "text": "One\nage for probabilistic inference. Specifically, we usedspecial case is when all the ground-truth questionthe expectation-propagation (EP) algorithm presentedanswer pairs are known. This is the traditional test\nin (Minka, 2001). EP allows us to calculate marginalscenario as used in aptitude tests including school\ndistributions of interest on a given factor graph by it-tests, GMAT, IQ tests, etc.", "paper_id": "1206.6386", "title": "How To Grade a Test Without Knowing the Answers --- A Bayesian Graphical Model for Adaptive Crowdsourcing and Aptitude Testing", "authors": [ "Yoram Bachrach", "Thore Graepel", "Tom Minka", "John Guiver" ], "published_date": "2012-06-27", "primary_category": "cs.LG", "arxiv_url": "http://arxiv.org/abs/1206.6386v1", "chunk_index": 9, "total_chunks": 22, "char_count": 400, "word_count": 57, "chunking_strategy": "semantic" }, { "chunk_id": "85c853b3-f8d2-4df3-b0dd-dfe802005b63", "text": "Another special case is\neratively calculating messages along edges that propa-crowdsourcing domains where we may not have any\ngate information across the factor graph. 
In our case,ground-truth available, but can obtain participantEP provides only an approximation to the exact soluHow To Grade a Test Without Knowing the Answers tion because a) the underlying factor graph is loopy , adaptive testing, where all the ground-truth answers\nand b) the messages at the junction between cpq, tpq, yq are available, and where the goal is to determine\nand τq are approximations, and so are the messages go- the ability of a participant p as accurately as possiing in and out of the gate connected to cpq. Thus EP ble, using as few questions as possible.", "paper_id": "1206.6386", "title": "How To Grade a Test Without Knowing the Answers --- A Bayesian Graphical Model for Adaptive Crowdsourcing and Aptitude Testing", "authors": [ "Yoram Bachrach", "Thore Graepel", "Tom Minka", "John Guiver" ], "published_date": "2012-06-27", "primary_category": "cs.LG", "arxiv_url": "http://arxiv.org/abs/1206.6386v1", "chunk_index": 10, "total_chunks": 22, "char_count": 745, "word_count": 123, "chunking_strategy": "semantic" }, { "chunk_id": "c6f88f4c-449e-4777-a706-c343a236c113", "text": "In this speis run iteratively until convergence, so its running time cial case, the parameter vector w only includes the\nis linear in the input size (variables and observations). ability ap of participant p. The posterior distribution\npm(ap) before inclusion of the new observation is Nor-\n3.3. Active Learning and Adaptive Testing mal, pm(ap) := Normal(ap; µp.m, σ2p,m), and so is the\nposterior distribution after inclusion, pm+1(ap|rpq) :=\nHaving a joint probabilistic model of the data has a Normal(ap; µp,m+1(rpq), σ2p,m+1(rpq)). The entropy of\nnumber of advantages, including the ability to query a univariate Gaussian with parameters µ and σ2 is\ndifferent distributions of interest and to handle missing 1 2 ln(2πeσ2), so the entropy reduction ∆S(rpq) is:\ndata in a principled way. 
In addition, maintaining in-\n1formation about the uncertainty present in the model ∆S(rpq) = ln(σ2p,m/σ2p,m+1(rpq))\nallows us to reason about the impact of future obser- 2\nvations on the model uncertainty. This idea forms the Thus the response minimizing posterior variance is\nbasis of active learning, a variant of which is known in preferred. Given participant p, for each possible questhe psychometrics literature as adaptive testing. tion q the expectation Epm(rpq|R,y)[∆S(rpq)] is calculated by examining the following quantities for all posOften there is a considerable cost associated with obsible responses rpq ∈Rq: a) their probabilities πpq, and\ntaining additional data points, so one can use the\nb) the resulting posterior variances σ2p,m+1(rpq) in themodel of the data to determine which measurements to\nupdated model. From these we compute the expected\ntake next so as to improve the inferred knowledge acentropy reduction for each question q:\ncording to a pre-determined criterion. In the absence\nof problem specific information, a reasonable goal is re- X πpq ln(σ2p,m/σ2p,m+1(rpq))\nducing uncertainty in estimates of model parameters 2 rpq∈R\nas measured by the entropy of the posterior distribuWe then pick the question q∗that reduces the expected\ntions, an idea put forward in (MacKay, 1992).\nentropy the most. Suppose we have determined a set of model parameters\nof interest, denoted here as a vector w. Empirical Analysis\nfind a criterion by which to decide which response rpq\nto elicit in order to maximally reduce the posterior We empirically tested the DARE model discussed in\nentropy S(w) of those parameters defined as: Section 3.1 using a dataset of responses to a standard\nintelligence test, called Raven's Standard Progressive\nZ Z m(w)\nS(w) = · · · p(w) log dw , Matrices (SPM) (Raven), which falls within the catp(w) egory of multiple choice domains. 
It consists of sixty\nwhere m(w) is an arbitrary base measure which questions, each of which consists of a matrix of shapes\ndoes not influence the outcome (see (MacKay, 1992)). with one element missing and eight possible answers. We consider two posterior distributions, pm(w) := Each answer is a possible shape that completes the map(w|R, y) before inclusion of the new data point, trix, but only one answer is correct. A sample item,\nand pm+1(w) := p(w|R, y, rpq) after inclusion of similar1 to those in SPM is shown in Figure 2. We then aim at maximizing the en- is one of the most popular intelligence tests, and was\ntropy reduction ∆S(rpq) := S(pm(w)) −S(pm+1(w)) used both for research and clinical purposes.\nover the choice of response rpq to elicit.", "paper_id": "1206.6386", "title": "How To Grade a Test Without Knowing the Answers --- A Bayesian Graphical Model for Adaptive Crowdsourcing and Aptitude Testing", "authors": [ "Yoram Bachrach", "Thore Graepel", "Tom Minka", "John Guiver" ], "published_date": "2012-06-27", "primary_category": "cs.LG", "arxiv_url": "http://arxiv.org/abs/1206.6386v1", "chunk_index": 11, "total_chunks": 22, "char_count": 3423, "word_count": 549, "chunking_strategy": "semantic" }, { "chunk_id": "8c1f70ed-afed-45aa-aafa-68aff99c4b03", "text": "Since The sample consisted of 120 individuals who filled\nthe actual response rpq is unknown, this choice can SPM for its standardization in the British market in\nonly be guided by the expected entropy reduction 2006 (Raven). The mean number of correct responses,\nEpm(rpq|R,y)[∆S(rpq)] , where the expectation is taken called \"raw score\", was 99.57 (STD=14.16).\nover the predictions of the model before inclusion of\nthe new data point, i.e., based on the predictive dis-\n4.1. Unobserved Correct Answers\ntribution pm(rpq|R, y) obtained by message passing. First, we investigate the DARE model's ability to hanIn its full generality, this active learning scheme can\ndle missing correct answers yq. 
In this case the modelguide the full observation/measurement process including all possible responses rpq and ground truth 1The SPM test is copyright protected, so we only proanswers yq. However, here we focus on the case of vide an example question similar to those in the real test. How To Grade a Test Without Knowing the Answers Item similar to those in the SPM test\nFigure 3. Estimates of skill levels for missing information\nregarding the correct answers to the questions. allows us to compute the probability p(yq|R, y) that a\ngiven answer yq is correct. To minimize the probabil- with the above scenarios as special cases.\nity of error we select the mode of that distribution as\nthe model's answer for that question. When provided We refer to the model with different question difficulwith the responses of all 120 participants the DARE ties as the question model, and the model with different\nmodel correctly infers the correct responses for 46 of participant abilities as the participant model. Note, that the number of errors is not amine how such simplifications affect the model's abiltoo surprising because some items in the test were very ity to infer correct answers as a function of the amount\ndifficult and few participants answered them correctly. of available data. Figure 4 shows how well the quesThe highest raw score was fifty so even the top scoring tion, participant and DARE models perform in this\nparticipant answered ten items incorrectly. regard. 
For any given crowd size, shown on the x-axis,\nwe randomly selected 10, 000 subsets of participants\nWe can calculate a participant's raw IQ score with re- of that size.", "paper_id": "1206.6386", "title": "How To Grade a Test Without Knowing the Answers --- A Bayesian Graphical Model for Adaptive Crowdsourcing and Aptitude Testing", "authors": [ "Yoram Bachrach", "Thore Graepel", "Tom Minka", "John Guiver" ], "published_date": "2012-06-27", "primary_category": "cs.LG", "arxiv_url": "http://arxiv.org/abs/1206.6386v1", "chunk_index": 12, "total_chunks": 22, "char_count": 2339, "word_count": 382, "chunking_strategy": "semantic" }, { "chunk_id": "5a0c56ce-1bba-4418-b149-79d2eb6f1de8", "text": "For each such crowd we inferred the corspect to the true correct answers y∗q or with respect to rect answer ˆyq to each question using the model, and\nthe predicted \"correct\" answers ˆyq, and we refer to the used the number of questions for which the inferred\nlatter score as the model raw score. In crowdsourcing answer ˆyq was equal to the true correct answer y∗q as a\nsituations when the correct answer for each question is measure of the model's performance. The y-axis is the\nunknown, one can use the model raw scores as an esti- quality of each model, averaged over the 10, 000 sammate of participants' abilities. Figure 3 shows a scatter pled crowds. Figure 4 shows the ability of all models\nplot, in which each point represents a participant; the to infer correct answers increases with the amount of\nposition on the x-axis represents the participant's raw data. It also shows that DARE outperforms the simIQ score, and the position along the y-axis their model pler models. Interestingly, only modeling participants\nraw score. 
ability is better than only modeling question difficulty (which is equivalent to majority vote). As Figure 3 indicates, there is a very strong correlation between the true raw IQ scores and model\nraw scores (R2 = 0.7243), and the difference between\nthe two scores is rather small across all participants.
Each query-document pair is a \"question\", as workers must determine if the document is relevant for the query. This\ndataset is sparse, as most workers only examined few\n\"questions\", and all \"questions\" have at most 10 answers\nin total, and includes ground-truth judgements.
participants solved correctly and one that roughly one third of the participants solved incorrectly. This does not reveal the correct answer to the remaining |Q| − i questions, but it
inferred ability levels for any given budget.3 As the figure shows,\nhaving a larger \"gold-set\" increases the model's ability\nto infer the correct response for the remaining items.
129, 2007.", "paper_id": "1206.6386", "title": "How To Grade a Test Without Knowing the Answers --- A Bayesian Graphical Model for Adaptive Crowdsourcing and Aptitude Testing", "authors": [ "Yoram Bachrach", "Thore Graepel", "Tom Minka", "John Guiver" ], "published_date": "2012-06-27", "primary_category": "cs.LG", "arxiv_url": "http://arxiv.org/abs/1206.6386v1", "chunk_index": 16, "total_chunks": 22, "char_count": 264, "word_count": 36, "chunking_strategy": "semantic" }, { "chunk_id": "b92c0fd3-4cbc-4da1-a4d7-53fe58e62558", "text": "Hambleton, R.K., Swaminathan, H., and Rogers, H.J. Fundamentals of item response theory, volume 2. 1991. Herbrich, R., Minka, T., and Graepel, T. Trueskill: A\nbayesian skill rating system. Kasneci, G., Van Gael, J., Herbrich, R., and Graepel, T. Bayesian knowledge corroboration with logical rules and\nuser feedback. Static and adaptive skill testing.", "paper_id": "1206.6386", "title": "How To Grade a Test Without Knowing the Answers --- A Bayesian Graphical Model for Adaptive Crowdsourcing and Aptitude Testing", "authors": [ "Yoram Bachrach", "Thore Graepel", "Tom Minka", "John Guiver" ], "published_date": "2012-06-27", "primary_category": "cs.LG", "arxiv_url": "http://arxiv.org/abs/1206.6386v1", "chunk_index": 17, "total_chunks": 22, "char_count": 351, "word_count": 52, "chunking_strategy": "semantic" }, { "chunk_id": "3f79423b-d0b3-40f0-84fc-e78de3f22c46", "text": "Koller, D. and Friedman, N. Probabilistic Graphical Models: Principles and Techniques. 2009.\nity levels of participants in multiple problem domains. Kosinski, M., Bachrach, Y., Kasneci, G., Van-Gael, J., and\nOur evaluation of the model shows that joint infer- Graepel, T. Crowd IQ: Measuring the intelligence of\nence of these quantities is possible to a high level of crowdsourcing platforms. In ACM Web Sciences, 2012.\naccuracy and that it is indeed possible to grade a test Lease, M. and Kazai, G. Overview of the trec 2011 crowdwithout knowing the answers. 
We showed that in our sourcing track (conference notebook).", "paper_id": "1206.6386", "title": "How To Grade a Test Without Knowing the Answers --- A Bayesian Graphical Model for Adaptive Crowdsourcing and Aptitude Testing", "authors": [ "Yoram Bachrach", "Thore Graepel", "Tom Minka", "John Guiver" ], "published_date": "2012-06-27", "primary_category": "cs.LG", "arxiv_url": "http://arxiv.org/abs/1206.6386v1", "chunk_index": 18, "total_chunks": 22, "char_count": 619, "word_count": 98, "chunking_strategy": "semantic" }, { "chunk_id": "2d112652-a9df-4bc4-b62f-618499a490b6", "text": "In TREC, 2011.\nsetting modeling participants' ability levels is more imLyle, J.A. Collective problem solving: Are the many\nportant than questions' difficulty levels, that including smarter than the few? 2008.\na \"gold-set\" helps, and that active learning leads to\nMacKay, David J.C. Information-based objective functionsmore efficient testing.\nfor active data selection. Neural Computation, 1992. Our approach is subject to several limitations. Our\nMinka, T., Winn, J.M., Guiver, J.P., and Knowles, D.A.\nevaluation used an IQ dataset, whereas crowdsourc- Infer.NET 2.4, 2010.\ning tasks may exhibit different properties, such as a\nMinka, Tom and Winn, John. NIPS, 21, 2008.greater homogeneity in task difficulty levels.", "paper_id": "1206.6386", "title": "How To Grade a Test Without Knowing the Answers --- A Bayesian Graphical Model for Adaptive Crowdsourcing and Aptitude Testing", "authors": [ "Yoram Bachrach", "Thore Graepel", "Tom Minka", "John Guiver" ], "published_date": "2012-06-27", "primary_category": "cs.LG", "arxiv_url": "http://arxiv.org/abs/1206.6386v1", "chunk_index": 19, "total_chunks": 22, "char_count": 717, "word_count": 103, "chunking_strategy": "semantic" }, { "chunk_id": "b835b3f7-6016-4c7d-b97f-05f1b27e1883", "text": "Also, we\nassume that participants answer to the best of their Minka, T.P. 
A family of algorithms for approximate\nability, but participants may be selfish agents with Bayesian inference. PhD thesis, 2001.\nvarying motives. For a game theoretic treatment of Pearl, J. Probabilistic reasoning in intelligent systems :\nsuch issues see (DiPalantino & Vojnovic, 2009; Gao networks of plausible inference. 1988.\net al., 2012). Pennock, D.M. and Sami, R.", "paper_id": "1206.6386", "title": "How To Grade a Test Without Knowing the Answers --- A Bayesian Graphical Model for Adaptive Crowdsourcing and Aptitude Testing", "authors": [ "Yoram Bachrach", "Thore Graepel", "Tom Minka", "John Guiver" ], "published_date": "2012-06-27", "primary_category": "cs.LG", "arxiv_url": "http://arxiv.org/abs/1206.6386v1", "chunk_index": 20, "total_chunks": 22, "char_count": 445, "word_count": 69, "chunking_strategy": "semantic" }, { "chunk_id": "bd40e927-c2ee-4d93-bddc-26ef4df2656c", "text": "Computational aspects of\nMany questions are open for future research. Are prediction markets, 2007.\nthere better models for aggregating responses, or modRaven, J.C. Standard progressive matrices plus.els better tailored to other domains? How can one\ntractably compute the optimal non-adaptive test for a Raykar, V.C., Yu, S., Zhao, L.H., Valadez, G.H., Florin, C.,\ngiven population? Can we use similar models to infer Bogoni, L., and Moy, L. Learning from crowds. JMLR,\nthe ability levels of individuals when only their perfor- 2010.\nmance within the context of a group is known? Social choice theory. 
Handbook of mathematical\neconomics, 3:1073–1181, 1986.", "paper_id": "1206.6386", "title": "How To Grade a Test Without Knowing the Answers --- A Bayesian Graphical Model for Adaptive Crowdsourcing and Aptitude Testing", "authors": [ "Yoram Bachrach", "Thore Graepel", "Tom Minka", "John Guiver" ], "published_date": "2012-06-27", "primary_category": "cs.LG", "arxiv_url": "http://arxiv.org/abs/1206.6386v1", "chunk_index": 21, "total_chunks": 22, "char_count": 656, "word_count": 99, "chunking_strategy": "semantic" } ]